VirtualBox

source: vbox/trunk/src/recompiler/target-i386/op_helper.c@45485

Last change on this file since 45485 was 45485, checked in by vboxsync, 12 years ago
  • *: Where possible, drop the #ifdef VBOX_WITH_RAW_RING1 when EMIsRawRing1Enabled is used.
  • SELM: Don't shadow TSS.esp1/ss1 unless ring-1 compression is enabled (also fixed a log statement there).
  • SELM: selmGuestToShadowDesc should not push ring-1 selectors into ring-2 unless EMIsRawRing1Enabled() holds true.
  • REM: Don't set CPU_INTERRUPT_EXTERNAL_EXIT in helper_ltr() for now.
  • Property svn:eol-style set to native
File size: 197.8 KB
 
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20/*
21 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
22 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
23 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
24 * a choice of LGPL license versions is made available with the language indicating
25 * that LGPLv2 or any later version may be used, or where a choice of which version
26 * of the LGPL is applied is otherwise unspecified.
27 */
28
29#include "exec.h"
30#include "exec-all.h"
31#include "host-utils.h"
32#include "ioport.h"
33
34#ifdef VBOX
35# include "qemu-common.h"
36# include <math.h>
37# include "tcg.h"
38#endif /* VBOX */
39
40//#define DEBUG_PCALL
41
42
43#ifdef DEBUG_PCALL
44# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
45# define LOG_PCALL_STATE(env) \
46 log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
47#else
48# define LOG_PCALL(...) do { } while (0)
49# define LOG_PCALL_STATE(env) do { } while (0)
50#endif
51
52
53#if 0
54#define raise_exception_err(a, b)\
55do {\
56 qemu_log("raise_exception line=%d\n", __LINE__);\
57 (raise_exception_err)(a, b);\
58} while (0)
59#endif
60
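/* Parity-flag lookup: entry i is CC_P when byte value i contains an even
   number of set bits, matching the x86 PF definition on the low 8 result bits. */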
61static const uint8_t parity_table[256] = {
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
67 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
73 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
74 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
79 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
81 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
82 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
83 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
84 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
85 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
86 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
87 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
88 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
89 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
90 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
91 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
92 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
93 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
94};
95
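/* RCL/RCR rotate through CF, so a 16-bit operand effectively has 17 bits and an
   8-bit operand 9; the tables below reduce the masked rotate count accordingly. */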
96/* modulo 17 table */
97static const uint8_t rclw_table[32] = {
98 0, 1, 2, 3, 4, 5, 6, 7,
99 8, 9,10,11,12,13,14,15,
100 16, 0, 1, 2, 3, 4, 5, 6,
101 7, 8, 9,10,11,12,13,14,
102};
103
104/* modulo 9 table */
105static const uint8_t rclb_table[32] = {
106 0, 1, 2, 3, 4, 5, 6, 7,
107 8, 0, 1, 2, 3, 4, 5, 6,
108 7, 8, 0, 1, 2, 3, 4, 5,
109 6, 7, 8, 0, 1, 2, 3, 4,
110};
111
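/* x87 constants as loaded by FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E and FLDL2T. */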
112static const CPU86_LDouble f15rk[7] =
113{
114 0.00000000000000000000L,
115 1.00000000000000000000L,
116 3.14159265358979323851L, /*pi*/
117 0.30102999566398119523L, /*lg2*/
118 0.69314718055994530943L, /*ln2*/
119 1.44269504088896340739L, /*l2e*/
120 3.32192809488736234781L, /*l2t*/
121};
122
123/* broken thread support */
124
125static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
126
127void helper_lock(void)
128{
129 spin_lock(&global_cpu_lock);
130}
131
132void helper_unlock(void)
133{
134 spin_unlock(&global_cpu_lock);
135}
136
137void helper_write_eflags(target_ulong t0, uint32_t update_mask)
138{
139 load_eflags(t0, update_mask);
140}
141
142target_ulong helper_read_eflags(void)
143{
144 uint32_t eflags;
145 eflags = helper_cc_compute_all(CC_OP);
146 eflags |= (DF & DF_MASK);
147 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
148 return eflags;
149}
150
151#ifdef VBOX
152
153void helper_write_eflags_vme(target_ulong t0)
154{
155 unsigned int new_eflags = t0;
156
157 assert(env->eflags & (1<<VM_SHIFT));
158
159 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
160 /* if TF will be set -> #GP */
161 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
162 || (new_eflags & TF_MASK)) {
163 raise_exception(EXCP0D_GPF);
164 } else {
165 load_eflags(new_eflags,
166 (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
167
168 if (new_eflags & IF_MASK) {
169 env->eflags |= VIF_MASK;
170 } else {
171 env->eflags &= ~VIF_MASK;
172 }
173 }
174}
175
176target_ulong helper_read_eflags_vme(void)
177{
178 uint32_t eflags;
179 eflags = helper_cc_compute_all(CC_OP);
180 eflags |= (DF & DF_MASK);
181 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
182 if (env->eflags & VIF_MASK)
183 eflags |= IF_MASK;
184 else
185 eflags &= ~IF_MASK;
186
187 /* According to AMD manual, should be read with IOPL == 3 */
188 eflags |= (3 << IOPL_SHIFT);
189
190 /* We only use helper_read_eflags_vme() in 16-bit mode */
191 return eflags & 0xffff;
192}
193
194void helper_dump_state()
195{
196 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
197 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
198 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
199 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
200 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
201 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
202 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
203}
204
205/**
206 * Updates e2 with the DESC_A_MASK, writes it to the descriptor table, and
207 * returns the updated e2.
208 *
209 * @returns e2 with A set.
210 * @param e2 The 2nd selector DWORD.
211 */
212static uint32_t set_segment_accessed(int selector, uint32_t e2)
213{
214 SegmentCache *dt = selector & X86_SEL_LDT ? &env->ldt : &env->gdt;
215 target_ulong ptr = dt->base + (selector & X86_SEL_MASK);
216
217 e2 |= DESC_A_MASK;
218 stl_kernel(ptr + 4, e2);
219 return e2;
220}
221
222#endif /* VBOX */
223
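/* Fetch the two descriptor dwords for 'selector' from the GDT or LDT (selector
   bit 2 picks the table); fails if the entry lies outside the table limit. */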
224/* return non zero if error */
225static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
226 int selector)
227{
228 SegmentCache *dt;
229 int index;
230 target_ulong ptr;
231
232#ifdef VBOX
233 /* Trying to load a selector with RPL=1? */
234 /** @todo this is a hack to correct the incorrect checking order for pending interrupts in the patm iret replacement code (corrected in the ring-1 version) */
235 /** @todo in theory the iret could fault and we'd still need this. */
236 if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0) && !EMIsRawRing1Enabled(env->pVM))
237 {
238 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
239 selector = selector & 0xfffc;
240 }
241#endif /* VBOX */
242
243 if (selector & 0x4)
244 dt = &env->ldt;
245 else
246 dt = &env->gdt;
247 index = selector & ~7;
248 if ((index + 7) > dt->limit)
249 return -1;
250 ptr = dt->base + index;
251 *e1_ptr = ldl_kernel(ptr);
252 *e2_ptr = ldl_kernel(ptr + 4);
253 return 0;
254}
255
256static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
257{
258 unsigned int limit;
259 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
260 if (e2 & DESC_G_MASK)
261 limit = (limit << 12) | 0xfff;
262 return limit;
263}
264
265static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
266{
267 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
268}
269
270static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
271{
272 sc->base = get_seg_base(e1, e2);
273 sc->limit = get_seg_limit(e1, e2);
274 sc->flags = e2;
275#ifdef VBOX
276 sc->newselector = 0;
277 sc->fVBoxFlags = CPUMSELREG_FLAGS_VALID;
278#endif
279}
280
281/* init the segment cache in vm86 mode. */
282static inline void load_seg_vm(int seg, int selector)
283{
284 selector &= 0xffff;
285#ifdef VBOX
286 /* flags must be 0xf3; expand-up read/write accessed data segment with DPL=3. (VT-x) */
287 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK;
288 flags |= (3 << DESC_DPL_SHIFT);
289
290 cpu_x86_load_seg_cache(env, seg, selector,
291 (selector << 4), 0xffff, flags);
292#else /* VBOX */
293 cpu_x86_load_seg_cache(env, seg, selector,
294 (selector << 4), 0xffff, 0);
295#endif /* VBOX */
296}
297
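/* Read the inner-level stack pointer (SS:ESP) for privilege level 'dpl' from the
   current TSS, handling both the 16-bit and the 32-bit TSS layouts. */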
298static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
299 uint32_t *esp_ptr, int dpl)
300{
301#ifndef VBOX
302 int type, index, shift;
303#else
304 unsigned int type, index, shift;
305#endif
306
307#if 0
308 {
309 int i;
310 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
311 for(i=0;i<env->tr.limit;i++) {
312 printf("%02x ", env->tr.base[i]);
313 if ((i & 7) == 7) printf("\n");
314 }
315 printf("\n");
316 }
317#endif
318
319 if (!(env->tr.flags & DESC_P_MASK))
320 cpu_abort(env, "invalid tss");
321 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
322 if ((type & 7) != 1)
323 cpu_abort(env, "invalid tss type");
324 shift = type >> 3;
325 index = (dpl * 4 + 2) << shift;
326 if (index + (4 << shift) - 1 > env->tr.limit)
327 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
328 if (shift == 0) {
329 *esp_ptr = lduw_kernel(env->tr.base + index);
330 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
331 } else {
332 *esp_ptr = ldl_kernel(env->tr.base + index);
333 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
334 }
335}
336
337/* XXX: merge with load_seg() */
338static void tss_load_seg(int seg_reg, int selector)
339{
340 uint32_t e1, e2;
341 int rpl, dpl, cpl;
342
343#ifdef VBOX
344 e1 = e2 = 0; /* gcc warning? */
345 cpl = env->hflags & HF_CPL_MASK;
346 /* Trying to load a selector with RPL=1? */
347 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
348 {
349 Log(("RPL 1 -> sel %04X -> %04X (tss_load_seg)\n", selector, selector & 0xfffc));
350 selector = selector & 0xfffc;
351 }
352#endif /* VBOX */
353
354 if ((selector & 0xfffc) != 0) {
355 if (load_segment(&e1, &e2, selector) != 0)
356 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
357 if (!(e2 & DESC_S_MASK))
358 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
359 rpl = selector & 3;
360 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
361 cpl = env->hflags & HF_CPL_MASK;
362 if (seg_reg == R_CS) {
363 if (!(e2 & DESC_CS_MASK))
364 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
365 /* XXX: is it correct ? */
366 if (dpl != rpl)
367 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
368 if ((e2 & DESC_C_MASK) && dpl > rpl)
369 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
370 } else if (seg_reg == R_SS) {
371 /* SS must be writable data */
372 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
373 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
374 if (dpl != cpl || dpl != rpl)
375 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
376 } else {
377 /* not readable code */
378 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
379 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
380 /* if data or non-conforming code, check the access rights */
381 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
382 if (dpl < cpl || dpl < rpl)
383 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
384 }
385 }
386 if (!(e2 & DESC_P_MASK))
387 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
388 cpu_x86_load_seg_cache(env, seg_reg, selector,
389 get_seg_base(e1, e2),
390 get_seg_limit(e1, e2),
391 e2);
392 } else {
393 if (seg_reg == R_SS || seg_reg == R_CS)
394 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
395#ifdef VBOX
396# if 0 /** @todo now we ignore loading 0 selectors, need to check what is correct once */
397 cpu_x86_load_seg_cache(env, seg_reg, selector,
398 0, 0, 0);
399# endif
400#endif /* VBOX */
401 }
402}
403
404#define SWITCH_TSS_JMP 0
405#define SWITCH_TSS_IRET 1
406#define SWITCH_TSS_CALL 2
407
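/* Emulate a hardware task switch (JMP/CALL to a TSS, a task gate, or IRET with
   NT set): save the outgoing register state into the current TSS, then reload
   EIP, EFLAGS, the general registers, CR3, LDTR and the segment registers from
   the new TSS. */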
408/* XXX: restore CPU state in registers (PowerPC case) */
409static void switch_tss(int tss_selector,
410 uint32_t e1, uint32_t e2, int source,
411 uint32_t next_eip)
412{
413 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
414 target_ulong tss_base;
415 uint32_t new_regs[8], new_segs[6];
416 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
417 uint32_t old_eflags, eflags_mask;
418 SegmentCache *dt;
419#ifndef VBOX
420 int index;
421#else
422 unsigned int index;
423#endif
424 target_ulong ptr;
425
426 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
427 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
428
429 /* if task gate, we read the TSS segment and we load it */
430 if (type == 5) {
431 if (!(e2 & DESC_P_MASK))
432 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
433 tss_selector = e1 >> 16;
434 if (tss_selector & 4)
435 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
436 if (load_segment(&e1, &e2, tss_selector) != 0)
437 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
438 if (e2 & DESC_S_MASK)
439 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
440 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
441 if ((type & 7) != 1)
442 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
443 }
444
445 if (!(e2 & DESC_P_MASK))
446 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
447
448 if (type & 8)
449 tss_limit_max = 103;
450 else
451 tss_limit_max = 43;
452 tss_limit = get_seg_limit(e1, e2);
453 tss_base = get_seg_base(e1, e2);
454 if ((tss_selector & 4) != 0 ||
455 tss_limit < tss_limit_max)
456 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
457 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
458 if (old_type & 8)
459 old_tss_limit_max = 103;
460 else
461 old_tss_limit_max = 43;
462
463 /* read all the registers from the new TSS */
464 if (type & 8) {
465 /* 32 bit */
466 new_cr3 = ldl_kernel(tss_base + 0x1c);
467 new_eip = ldl_kernel(tss_base + 0x20);
468 new_eflags = ldl_kernel(tss_base + 0x24);
469 for(i = 0; i < 8; i++)
470 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
471 for(i = 0; i < 6; i++)
472 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
473 new_ldt = lduw_kernel(tss_base + 0x60);
474 new_trap = ldl_kernel(tss_base + 0x64);
475 } else {
476 /* 16 bit */
477 new_cr3 = 0;
478 new_eip = lduw_kernel(tss_base + 0x0e);
479 new_eflags = lduw_kernel(tss_base + 0x10);
480 for(i = 0; i < 8; i++)
481 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
482 for(i = 0; i < 4; i++)
483 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
484 new_ldt = lduw_kernel(tss_base + 0x2a);
485 new_segs[R_FS] = 0;
486 new_segs[R_GS] = 0;
487 new_trap = 0;
488 }
489
490 /* NOTE: we must avoid memory exceptions during the task switch,
491 so we make dummy accesses before */
492 /* XXX: it can still fail in some cases, so a bigger hack is
493 necessary to validate the TLB after having done the accesses */
494
495 v1 = ldub_kernel(env->tr.base);
496 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
497 stb_kernel(env->tr.base, v1);
498 stb_kernel(env->tr.base + old_tss_limit_max, v2);
499
500 /* clear busy bit (it is restartable) */
501 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
502 target_ulong ptr;
503 uint32_t e2;
504 ptr = env->gdt.base + (env->tr.selector & ~7);
505 e2 = ldl_kernel(ptr + 4);
506 e2 &= ~DESC_TSS_BUSY_MASK;
507 stl_kernel(ptr + 4, e2);
508 }
509 old_eflags = compute_eflags();
510 if (source == SWITCH_TSS_IRET)
511 old_eflags &= ~NT_MASK;
512
513 /* save the current state in the old TSS */
514 if (type & 8) {
515 /* 32 bit */
516 stl_kernel(env->tr.base + 0x20, next_eip);
517 stl_kernel(env->tr.base + 0x24, old_eflags);
518 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
519 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
520 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
521 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
522 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
523 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
524 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
525 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
526 for(i = 0; i < 6; i++)
527 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
528#ifdef VBOX
529 /* Must store the ldt as it gets reloaded and might have been changed. */
530 stw_kernel(env->tr.base + 0x60, env->ldt.selector);
531#endif
532#if defined(VBOX) && defined(DEBUG)
533 printf("TSS 32 bits switch\n");
534 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
535#endif
536 } else {
537 /* 16 bit */
538 stw_kernel(env->tr.base + 0x0e, next_eip);
539 stw_kernel(env->tr.base + 0x10, old_eflags);
540 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
541 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
542 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
543 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
544 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
545 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
546 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
547 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
548 for(i = 0; i < 4; i++)
549 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
550#ifdef VBOX
551 /* Must store the ldt as it gets reloaded and might have been changed. */
552 stw_kernel(env->tr.base + 0x2a, env->ldt.selector);
553#endif
554 }
555
556 /* now if an exception occurs, it will occur in the next task
557 context */
558
559 if (source == SWITCH_TSS_CALL) {
560 stw_kernel(tss_base, env->tr.selector);
561 new_eflags |= NT_MASK;
562 }
563
564 /* set busy bit */
565 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
566 target_ulong ptr;
567 uint32_t e2;
568 ptr = env->gdt.base + (tss_selector & ~7);
569 e2 = ldl_kernel(ptr + 4);
570 e2 |= DESC_TSS_BUSY_MASK;
571 stl_kernel(ptr + 4, e2);
572 }
573
574 /* set the new CPU state */
575 /* from this point, any exception which occurs can give problems */
576 env->cr[0] |= CR0_TS_MASK;
577 env->hflags |= HF_TS_MASK;
578 env->tr.selector = tss_selector;
579 env->tr.base = tss_base;
580 env->tr.limit = tss_limit;
581 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
582#ifdef VBOX
583 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
584 env->tr.newselector = 0;
585#endif
586
587 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
588 cpu_x86_update_cr3(env, new_cr3);
589 }
590
591 /* load all registers without an exception, then reload them with
592 possible exception */
593 env->eip = new_eip;
594 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
595 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
596 if (!(type & 8))
597 eflags_mask &= 0xffff;
598 load_eflags(new_eflags, eflags_mask);
599 /* XXX: what to do in 16 bit case ? */
600 EAX = new_regs[0];
601 ECX = new_regs[1];
602 EDX = new_regs[2];
603 EBX = new_regs[3];
604 ESP = new_regs[4];
605 EBP = new_regs[5];
606 ESI = new_regs[6];
607 EDI = new_regs[7];
608 if (new_eflags & VM_MASK) {
609 for(i = 0; i < 6; i++)
610 load_seg_vm(i, new_segs[i]);
611 /* in vm86, CPL is always 3 */
612 cpu_x86_set_cpl(env, 3);
613 } else {
614 /* CPL is set to the RPL of CS */
615 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
616 /* first just selectors as the rest may trigger exceptions */
617 for(i = 0; i < 6; i++)
618 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
619 }
620
621 env->ldt.selector = new_ldt & ~4;
622 env->ldt.base = 0;
623 env->ldt.limit = 0;
624 env->ldt.flags = 0;
625#ifdef VBOX
626 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
627 env->ldt.newselector = 0;
628#endif
629
630 /* load the LDT */
631 if (new_ldt & 4)
632 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
633
634 if ((new_ldt & 0xfffc) != 0) {
635 dt = &env->gdt;
636 index = new_ldt & ~7;
637 if ((index + 7) > dt->limit)
638 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
639 ptr = dt->base + index;
640 e1 = ldl_kernel(ptr);
641 e2 = ldl_kernel(ptr + 4);
642 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
643 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
644 if (!(e2 & DESC_P_MASK))
645 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
646 load_seg_cache_raw_dt(&env->ldt, e1, e2);
647 }
648
649 /* load the segments */
650 if (!(new_eflags & VM_MASK)) {
651 tss_load_seg(R_CS, new_segs[R_CS]);
652 tss_load_seg(R_SS, new_segs[R_SS]);
653 tss_load_seg(R_ES, new_segs[R_ES]);
654 tss_load_seg(R_DS, new_segs[R_DS]);
655 tss_load_seg(R_FS, new_segs[R_FS]);
656 tss_load_seg(R_GS, new_segs[R_GS]);
657 }
658
659 /* check that EIP is in the CS segment limits */
660 if (new_eip > env->segs[R_CS].limit) {
661 /* XXX: different exception if CALL ? */
662 raise_exception_err(EXCP0D_GPF, 0);
663 }
664
665#ifndef CONFIG_USER_ONLY
666 /* reset local breakpoints */
667 if (env->dr[7] & 0x55) {
668 for (i = 0; i < 4; i++) {
669 if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
670 hw_breakpoint_remove(env, i);
671 }
672 env->dr[7] &= ~0x55;
673 }
674#endif
675}
676
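/* The TSS I/O permission bitmap holds one bit per port; an access of 'size'
   bytes at 'addr' is allowed only if every covered bit is clear, otherwise
   #GP(0) is raised. */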
677/* check if Port I/O is allowed in TSS */
678static inline void check_io(int addr, int size)
679{
680#ifndef VBOX
681 int io_offset, val, mask;
682#else
683 int val, mask;
684 unsigned int io_offset;
685#endif /* VBOX */
686
687 /* TSS must be a valid 32 bit one */
688 if (!(env->tr.flags & DESC_P_MASK) ||
689 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
690 env->tr.limit < 103)
691 goto fail;
692 io_offset = lduw_kernel(env->tr.base + 0x66);
693 io_offset += (addr >> 3);
694 /* Note: the check needs two bytes */
695 if ((io_offset + 1) > env->tr.limit)
696 goto fail;
697 val = lduw_kernel(env->tr.base + io_offset);
698 val >>= (addr & 7);
699 mask = (1 << size) - 1;
700 /* all bits must be zero to allow the I/O */
701 if ((val & mask) != 0) {
702 fail:
703 raise_exception_err(EXCP0D_GPF, 0);
704 }
705}
706
707#ifdef VBOX
708
709/* Keep in sync with gen_check_external_event() */
710void helper_check_external_event()
711{
712 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_FLUSH_TLB
713 | CPU_INTERRUPT_EXTERNAL_EXIT
714 | CPU_INTERRUPT_EXTERNAL_TIMER
715 | CPU_INTERRUPT_EXTERNAL_DMA))
716 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
717 && (env->eflags & IF_MASK)
718 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
719 {
720 helper_external_event();
721 }
722
723}
724
725void helper_sync_seg(uint32_t reg)
726{
727 if (env->segs[reg].newselector)
728 sync_seg(env, reg, env->segs[reg].newselector);
729}
730
731#endif /* VBOX */
732
733void helper_check_iob(uint32_t t0)
734{
735 check_io(t0, 1);
736}
737
738void helper_check_iow(uint32_t t0)
739{
740 check_io(t0, 2);
741}
742
743void helper_check_iol(uint32_t t0)
744{
745 check_io(t0, 4);
746}
747
748void helper_outb(uint32_t port, uint32_t data)
749{
750#ifndef VBOX
751 cpu_outb(port, data & 0xff);
752#else
753 cpu_outb(env, port, data & 0xff);
754#endif
755}
756
757target_ulong helper_inb(uint32_t port)
758{
759#ifndef VBOX
760 return cpu_inb(port);
761#else
762 return cpu_inb(env, port);
763#endif
764}
765
766void helper_outw(uint32_t port, uint32_t data)
767{
768#ifndef VBOX
769 cpu_outw(port, data & 0xffff);
770#else
771 cpu_outw(env, port, data & 0xffff);
772#endif
773}
774
775target_ulong helper_inw(uint32_t port)
776{
777#ifndef VBOX
778 return cpu_inw(port);
779#else
780 return cpu_inw(env, port);
781#endif
782}
783
784void helper_outl(uint32_t port, uint32_t data)
785{
786#ifndef VBOX
787 cpu_outl(port, data);
788#else
789 cpu_outl(env, port, data);
790#endif
791}
792
793target_ulong helper_inl(uint32_t port)
794{
795#ifndef VBOX
796 return cpu_inl(port);
797#else
798 return cpu_inl(env, port);
799#endif
800}
801
802static inline unsigned int get_sp_mask(unsigned int e2)
803{
804 if (e2 & DESC_B_MASK)
805 return 0xffffffff;
806 else
807 return 0xffff;
808}
809
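/* Vectors 8 (#DF), 10-14 (#TS, #NP, #SS, #GP, #PF) and 17 (#AC) push an error
   code on the stack; all other exceptions and interrupts do not. */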
810static int exeption_has_error_code(int intno)
811{
812 switch(intno) {
813 case 8:
814 case 10:
815 case 11:
816 case 12:
817 case 13:
818 case 14:
819 case 17:
820 return 1;
821 }
822 return 0;
823}
824
825#ifdef TARGET_X86_64
826#define SET_ESP(val, sp_mask)\
827do {\
828 if ((sp_mask) == 0xffff)\
829 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
830 else if ((sp_mask) == 0xffffffffLL)\
831 ESP = (uint32_t)(val);\
832 else\
833 ESP = (val);\
834} while (0)
835#else
836#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
837#endif
838
839/* in 64-bit machines, this can overflow. So this segment addition macro
840 * can be used to trim the value to 32-bit whenever needed */
841#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
842
843/* XXX: add a is_user flag to have proper security support */
844#define PUSHW(ssp, sp, sp_mask, val)\
845{\
846 sp -= 2;\
847 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
848}
849
850#define PUSHL(ssp, sp, sp_mask, val)\
851{\
852 sp -= 4;\
853 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
854}
855
856#define POPW(ssp, sp, sp_mask, val)\
857{\
858 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
859 sp += 2;\
860}
861
862#define POPL(ssp, sp, sp_mask, val)\
863{\
864 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
865 sp += 4;\
866}
867
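/* Deliver an interrupt or exception through the protected-mode IDT: look up the
   gate, check its type, DPL and present bit, switch to the inner-level stack
   from the TSS when the privilege level changes, push the return frame (plus
   error code if any) and load the new CS:EIP. Task gates go via switch_tss(). */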
868/* protected mode interrupt */
869static void do_interrupt_protected(int intno, int is_int, int error_code,
870 unsigned int next_eip, int is_hw)
871{
872 SegmentCache *dt;
873 target_ulong ptr, ssp;
874 int type, dpl, selector, ss_dpl, cpl;
875 int has_error_code, new_stack, shift;
876 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
877 uint32_t old_eip, sp_mask;
878
879#ifdef VBOX
880 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
881 cpu_loop_exit();
882#endif
883
884 has_error_code = 0;
885 if (!is_int && !is_hw)
886 has_error_code = exeption_has_error_code(intno);
887 if (is_int)
888 old_eip = next_eip;
889 else
890 old_eip = env->eip;
891
892 dt = &env->idt;
893#ifndef VBOX
894 if (intno * 8 + 7 > dt->limit)
895#else
896 if ((unsigned)intno * 8 + 7 > dt->limit)
897#endif
898 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
899 ptr = dt->base + intno * 8;
900 e1 = ldl_kernel(ptr);
901 e2 = ldl_kernel(ptr + 4);
902 /* check gate type */
903 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
904 switch(type) {
905 case 5: /* task gate */
906#ifdef VBOX
907 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
908 cpl = env->hflags & HF_CPL_MASK;
909 /* check privilege if software int */
910 if (is_int && dpl < cpl)
911 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
912#endif
913 /* must do that check here to return the correct error code */
914 if (!(e2 & DESC_P_MASK))
915 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
916 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
917 if (has_error_code) {
918 int type;
919 uint32_t mask;
920 /* push the error code */
921 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
922 shift = type >> 3;
923 if (env->segs[R_SS].flags & DESC_B_MASK)
924 mask = 0xffffffff;
925 else
926 mask = 0xffff;
927 esp = (ESP - (2 << shift)) & mask;
928 ssp = env->segs[R_SS].base + esp;
929 if (shift)
930 stl_kernel(ssp, error_code);
931 else
932 stw_kernel(ssp, error_code);
933 SET_ESP(esp, mask);
934 }
935 return;
936 case 6: /* 286 interrupt gate */
937 case 7: /* 286 trap gate */
938 case 14: /* 386 interrupt gate */
939 case 15: /* 386 trap gate */
940 break;
941 default:
942 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
943 break;
944 }
945 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
946 cpl = env->hflags & HF_CPL_MASK;
947 /* check privilege if software int */
948 if (is_int && dpl < cpl)
949 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
950 /* check valid bit */
951 if (!(e2 & DESC_P_MASK))
952 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
953 selector = e1 >> 16;
954 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
955 if ((selector & 0xfffc) == 0)
956 raise_exception_err(EXCP0D_GPF, 0);
957
958 if (load_segment(&e1, &e2, selector) != 0)
959 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
960 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
961 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
962 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
963 if (dpl > cpl)
964 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
965 if (!(e2 & DESC_P_MASK))
966 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
967 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
968 /* to inner privilege */
969 get_ss_esp_from_tss(&ss, &esp, dpl);
970 if ((ss & 0xfffc) == 0)
971 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
972 if ((ss & 3) != dpl)
973 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
974 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
975 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
976 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
977 if (ss_dpl != dpl)
978 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
979 if (!(ss_e2 & DESC_S_MASK) ||
980 (ss_e2 & DESC_CS_MASK) ||
981 !(ss_e2 & DESC_W_MASK))
982 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
983 if (!(ss_e2 & DESC_P_MASK))
984#ifdef VBOX /* See page 3-477 of 253666.pdf */
985 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
986#else
987 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
988#endif
989 new_stack = 1;
990 sp_mask = get_sp_mask(ss_e2);
991 ssp = get_seg_base(ss_e1, ss_e2);
992#if defined(VBOX) && defined(DEBUG)
993 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
994#endif
995 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
996 /* to same privilege */
997 if (env->eflags & VM_MASK)
998 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
999 new_stack = 0;
1000 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1001 ssp = env->segs[R_SS].base;
1002 esp = ESP;
1003 dpl = cpl;
1004 } else {
1005 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1006 new_stack = 0; /* avoid warning */
1007 sp_mask = 0; /* avoid warning */
1008 ssp = 0; /* avoid warning */
1009 esp = 0; /* avoid warning */
1010 }
1011
1012 shift = type >> 3;
1013
1014#if 0
1015 /* XXX: check that enough room is available */
1016 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
1017 if (env->eflags & VM_MASK)
1018 push_size += 8;
1019 push_size <<= shift;
1020#endif
1021 if (shift == 1) {
1022 if (new_stack) {
1023 if (env->eflags & VM_MASK) {
1024 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
1025 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
1026 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
1027 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
1028 }
1029 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
1030 PUSHL(ssp, esp, sp_mask, ESP);
1031 }
1032 PUSHL(ssp, esp, sp_mask, compute_eflags());
1033 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
1034 PUSHL(ssp, esp, sp_mask, old_eip);
1035 if (has_error_code) {
1036 PUSHL(ssp, esp, sp_mask, error_code);
1037 }
1038 } else {
1039 if (new_stack) {
1040 if (env->eflags & VM_MASK) {
1041 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
1042 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
1043 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
1044 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
1045 }
1046 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
1047 PUSHW(ssp, esp, sp_mask, ESP);
1048 }
1049 PUSHW(ssp, esp, sp_mask, compute_eflags());
1050 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
1051 PUSHW(ssp, esp, sp_mask, old_eip);
1052 if (has_error_code) {
1053 PUSHW(ssp, esp, sp_mask, error_code);
1054 }
1055 }
1056
1057 if (new_stack) {
1058 if (env->eflags & VM_MASK) {
1059 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
1060 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
1061 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
1062 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
1063 }
1064 ss = (ss & ~3) | dpl;
1065 cpu_x86_load_seg_cache(env, R_SS, ss,
1066 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
1067 }
1068 SET_ESP(esp, sp_mask);
1069
1070 selector = (selector & ~3) | dpl;
1071 cpu_x86_load_seg_cache(env, R_CS, selector,
1072 get_seg_base(e1, e2),
1073 get_seg_limit(e1, e2),
1074 e2);
1075 cpu_x86_set_cpl(env, dpl);
1076 env->eip = offset;
1077
1078 /* an interrupt gate clears the IF flag */
1079 if ((type & 1) == 0) {
1080 env->eflags &= ~IF_MASK;
1081 }
1082#ifndef VBOX
1083 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1084#else
1085 /*
1086 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1087 * gets confused by seemingly changed EFLAGS. See #3491 and
1088 * public bug #2341.
1089 */
1090 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1091#endif
1092}
1093
1094#ifdef VBOX
1095
1096/* check if VME interrupt redirection is enabled in TSS */
1097DECLINLINE(bool) is_vme_irq_redirected(int intno)
1098{
1099 unsigned int io_offset, intredir_offset;
1100 unsigned char val, mask;
1101
1102 /* TSS must be a valid 32 bit one */
1103 if (!(env->tr.flags & DESC_P_MASK) ||
1104 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
1105 env->tr.limit < 103)
1106 goto fail;
1107 io_offset = lduw_kernel(env->tr.base + 0x66);
1108 /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */
1109 if (io_offset < 0x68 + 0x20)
1110 io_offset = 0x68 + 0x20;
1111 /* the virtual interrupt redirection bitmap is located below the io bitmap */
1112 intredir_offset = io_offset - 0x20;
1113
1114 intredir_offset += (intno >> 3);
1115 if ((intredir_offset) > env->tr.limit)
1116 goto fail;
1117
1118 val = ldub_kernel(env->tr.base + intredir_offset);
1119 mask = 1 << (unsigned char)(intno & 7);
1120
1121 /* bit set means no redirection. */
1122 if ((val & mask) != 0) {
1123 return false;
1124 }
1125 return true;
1126
1127fail:
1128 raise_exception_err(EXCP0D_GPF, 0);
1129 return true;
1130}
1131
1132/* V86 mode software interrupt with CR4.VME=1 */
1133static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
1134{
1135 target_ulong ptr, ssp;
1136 int selector;
1137 uint32_t offset, esp;
1138 uint32_t old_cs, old_eflags;
1139 uint32_t iopl;
1140
1141 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
1142
1143 if (!is_vme_irq_redirected(intno))
1144 {
1145 if (iopl == 3)
1146 {
1147 do_interrupt_protected(intno, 1, error_code, next_eip, 0);
1148 return;
1149 }
1150 else
1151 raise_exception_err(EXCP0D_GPF, 0);
1152 }
1153
1154 /* virtual mode idt is at linear address 0 */
1155 ptr = 0 + intno * 4;
1156 offset = lduw_kernel(ptr);
1157 selector = lduw_kernel(ptr + 2);
1158 esp = ESP;
1159 ssp = env->segs[R_SS].base;
1160 old_cs = env->segs[R_CS].selector;
1161
1162 old_eflags = compute_eflags();
1163 if (iopl < 3)
1164 {
1165 /* copy VIF into IF and set IOPL to 3 */
1166 if (env->eflags & VIF_MASK)
1167 old_eflags |= IF_MASK;
1168 else
1169 old_eflags &= ~IF_MASK;
1170
1171 old_eflags |= (3 << IOPL_SHIFT);
1172 }
1173
1174 /* XXX: use SS segment size ? */
1175 PUSHW(ssp, esp, 0xffff, old_eflags);
1176 PUSHW(ssp, esp, 0xffff, old_cs);
1177 PUSHW(ssp, esp, 0xffff, next_eip);
1178
1179 /* update processor state */
1180 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1181 env->eip = offset;
1182 env->segs[R_CS].selector = selector;
1183 env->segs[R_CS].base = (selector << 4);
1184 env->eflags &= ~(TF_MASK | RF_MASK);
1185
1186 if (iopl < 3)
1187 env->eflags &= ~VIF_MASK;
1188 else
1189 env->eflags &= ~IF_MASK;
1190}
1191
1192#endif /* VBOX */
1193
1194#ifdef TARGET_X86_64
1195
1196#define PUSHQ(sp, val)\
1197{\
1198 sp -= 8;\
1199 stq_kernel(sp, (val));\
1200}
1201
1202#define POPQ(sp, val)\
1203{\
1204 val = ldq_kernel(sp);\
1205 sp += 8;\
1206}
1207
1208static inline target_ulong get_rsp_from_tss(int level)
1209{
1210 int index;
1211
1212#if 0
1213 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1214 env->tr.base, env->tr.limit);
1215#endif
1216
1217 if (!(env->tr.flags & DESC_P_MASK))
1218 cpu_abort(env, "invalid tss");
1219 index = 8 * level + 4;
1220 if ((index + 7) > env->tr.limit)
1221 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1222 return ldq_kernel(env->tr.base + index);
1223}
1224
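/* In long mode IDT entries are 16 bytes wide; the target stack comes either from
   the 64-bit TSS RSPn for the gate's DPL or from an IST slot when the gate's IST
   field is non-zero, and is aligned down to 16 bytes before pushing the frame. */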
1225/* 64 bit interrupt */
1226static void do_interrupt64(int intno, int is_int, int error_code,
1227 target_ulong next_eip, int is_hw)
1228{
1229 SegmentCache *dt;
1230 target_ulong ptr;
1231 int type, dpl, selector, cpl, ist;
1232 int has_error_code, new_stack;
1233 uint32_t e1, e2, e3, ss;
1234 target_ulong old_eip, esp, offset;
1235
1236#ifdef VBOX
1237 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
1238 cpu_loop_exit();
1239#endif
1240
1241 has_error_code = 0;
1242 if (!is_int && !is_hw)
1243 has_error_code = exeption_has_error_code(intno);
1244 if (is_int)
1245 old_eip = next_eip;
1246 else
1247 old_eip = env->eip;
1248
1249 dt = &env->idt;
1250 if (intno * 16 + 15 > dt->limit)
1251 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1252 ptr = dt->base + intno * 16;
1253 e1 = ldl_kernel(ptr);
1254 e2 = ldl_kernel(ptr + 4);
1255 e3 = ldl_kernel(ptr + 8);
1256 /* check gate type */
1257 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1258 switch(type) {
1259 case 14: /* 386 interrupt gate */
1260 case 15: /* 386 trap gate */
1261 break;
1262 default:
1263 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1264 break;
1265 }
1266 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1267 cpl = env->hflags & HF_CPL_MASK;
1268 /* check privilege if software int */
1269 if (is_int && dpl < cpl)
1270 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1271 /* check valid bit */
1272 if (!(e2 & DESC_P_MASK))
1273 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1274 selector = e1 >> 16;
1275 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1276 ist = e2 & 7;
1277 if ((selector & 0xfffc) == 0)
1278 raise_exception_err(EXCP0D_GPF, 0);
1279
1280 if (load_segment(&e1, &e2, selector) != 0)
1281 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1282 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1283 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1284 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1285 if (dpl > cpl)
1286 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1287 if (!(e2 & DESC_P_MASK))
1288 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1289 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1290 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1291 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1292 /* to inner privilege */
1293 if (ist != 0)
1294 esp = get_rsp_from_tss(ist + 3);
1295 else
1296 esp = get_rsp_from_tss(dpl);
1297 esp &= ~0xfLL; /* align stack */
1298 ss = 0;
1299 new_stack = 1;
1300 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1301 /* to same privilege */
1302 if (env->eflags & VM_MASK)
1303 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1304 new_stack = 0;
1305 if (ist != 0)
1306 esp = get_rsp_from_tss(ist + 3);
1307 else
1308 esp = ESP;
1309 esp &= ~0xfLL; /* align stack */
1310 dpl = cpl;
1311 } else {
1312 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1313 new_stack = 0; /* avoid warning */
1314 esp = 0; /* avoid warning */
1315 }
1316
1317 PUSHQ(esp, env->segs[R_SS].selector);
1318 PUSHQ(esp, ESP);
1319 PUSHQ(esp, compute_eflags());
1320 PUSHQ(esp, env->segs[R_CS].selector);
1321 PUSHQ(esp, old_eip);
1322 if (has_error_code) {
1323 PUSHQ(esp, error_code);
1324 }
1325
1326 if (new_stack) {
1327 ss = 0 | dpl;
1328 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1329 }
1330 ESP = esp;
1331
1332 selector = (selector & ~3) | dpl;
1333 cpu_x86_load_seg_cache(env, R_CS, selector,
1334 get_seg_base(e1, e2),
1335 get_seg_limit(e1, e2),
1336 e2);
1337 cpu_x86_set_cpl(env, dpl);
1338 env->eip = offset;
1339
1340 /* an interrupt gate clears the IF flag */
1341 if ((type & 1) == 0) {
1342 env->eflags &= ~IF_MASK;
1343 }
1344#ifndef VBOX
1345 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1346#else /* VBOX */
1347 /*
1348 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1349 * gets confused by seemingly changed EFLAGS. See #3491 and
1350 * public bug #2341.
1351 */
1352 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1353#endif /* VBOX */
1354}
1355#endif
1356
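/* SYSCALL/SYSRET fast system calls: the kernel CS selector is taken from
   STAR[47:32] (SS is that selector + 8) and the SYSRET CS selector from
   STAR[63:48]; in long mode the target RIP comes from LSTAR/CSTAR and RFLAGS is
   masked with SFMASK. */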
1357#ifdef TARGET_X86_64
1358#if defined(CONFIG_USER_ONLY)
1359void helper_syscall(int next_eip_addend)
1360{
1361 env->exception_index = EXCP_SYSCALL;
1362 env->exception_next_eip = env->eip + next_eip_addend;
1363 cpu_loop_exit();
1364}
1365#else
1366void helper_syscall(int next_eip_addend)
1367{
1368 int selector;
1369
1370 if (!(env->efer & MSR_EFER_SCE)) {
1371 raise_exception_err(EXCP06_ILLOP, 0);
1372 }
1373 selector = (env->star >> 32) & 0xffff;
1374 if (env->hflags & HF_LMA_MASK) {
1375 int code64;
1376
1377 ECX = env->eip + next_eip_addend;
1378 env->regs[11] = compute_eflags();
1379
1380 code64 = env->hflags & HF_CS64_MASK;
1381
1382 cpu_x86_set_cpl(env, 0);
1383 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1384 0, 0xffffffff,
1385 DESC_G_MASK | DESC_P_MASK |
1386 DESC_S_MASK |
1387 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1388 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1389 0, 0xffffffff,
1390 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1391 DESC_S_MASK |
1392 DESC_W_MASK | DESC_A_MASK);
1393 env->eflags &= ~env->fmask;
1394 load_eflags(env->eflags, 0);
1395 if (code64)
1396 env->eip = env->lstar;
1397 else
1398 env->eip = env->cstar;
1399 } else {
1400 ECX = (uint32_t)(env->eip + next_eip_addend);
1401
1402 cpu_x86_set_cpl(env, 0);
1403 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1404 0, 0xffffffff,
1405 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1406 DESC_S_MASK |
1407 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1408 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1409 0, 0xffffffff,
1410 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1411 DESC_S_MASK |
1412 DESC_W_MASK | DESC_A_MASK);
1413 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1414 env->eip = (uint32_t)env->star;
1415 }
1416}
1417#endif
1418#endif
1419
1420#ifdef TARGET_X86_64
1421void helper_sysret(int dflag)
1422{
1423 int cpl, selector;
1424
1425 if (!(env->efer & MSR_EFER_SCE)) {
1426 raise_exception_err(EXCP06_ILLOP, 0);
1427 }
1428 cpl = env->hflags & HF_CPL_MASK;
1429 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1430 raise_exception_err(EXCP0D_GPF, 0);
1431 }
1432 selector = (env->star >> 48) & 0xffff;
1433 if (env->hflags & HF_LMA_MASK) {
1434 if (dflag == 2) {
1435 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1436 0, 0xffffffff,
1437 DESC_G_MASK | DESC_P_MASK |
1438 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1439 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1440 DESC_L_MASK);
1441 env->eip = ECX;
1442 } else {
1443 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1444 0, 0xffffffff,
1445 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1446 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1447 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1448 env->eip = (uint32_t)ECX;
1449 }
1450 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1451 0, 0xffffffff,
1452 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1453 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1454 DESC_W_MASK | DESC_A_MASK);
1455 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1456 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1457 cpu_x86_set_cpl(env, 3);
1458 } else {
1459 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1460 0, 0xffffffff,
1461 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1462 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1463 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1464 env->eip = (uint32_t)ECX;
1465 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1466 0, 0xffffffff,
1467 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1468 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1469 DESC_W_MASK | DESC_A_MASK);
1470 env->eflags |= IF_MASK;
1471 cpu_x86_set_cpl(env, 3);
1472 }
1473}
1474#endif
1475
1476#ifdef VBOX
1477
1478/**
1479 * Checks and processes external VMM events.
1480 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1481 */
1482void helper_external_event(void)
1483{
1484# if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1485 uintptr_t uSP;
1486# ifdef RT_ARCH_AMD64
1487 __asm__ __volatile__("movq %%rsp, %0" : "=r" (uSP));
1488# else
1489 __asm__ __volatile__("movl %%esp, %0" : "=r" (uSP));
1490# endif
1491 AssertMsg(!(uSP & 15), ("xSP=%#p\n", uSP));
1492# endif
1493 /* Keep in sync with flags checked by gen_check_external_event() */
1494 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1495 {
1496 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1497 ~CPU_INTERRUPT_EXTERNAL_HARD);
1498 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1499 }
1500 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1501 {
1502 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1503 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1504 cpu_exit(env);
1505 }
1506 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1507 {
1508 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1509 ~CPU_INTERRUPT_EXTERNAL_DMA);
1510 remR3DmaRun(env);
1511 }
1512 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1513 {
1514 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1515 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1516 remR3TimersRun(env);
1517 }
1518 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_FLUSH_TLB)
1519 {
1520 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1521 ~CPU_INTERRUPT_EXTERNAL_HARD);
1522 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1523 }
1524}
1525
1526/* helper for recording call instruction addresses for later scanning */
1527void helper_record_call()
1528{
1529 if ( !(env->state & CPU_RAW_RING0)
1530 && (env->cr[0] & CR0_PG_MASK)
1531 && !(env->eflags & X86_EFL_IF))
1532 remR3RecordCall(env);
1533}
1534
1535#endif /* VBOX */
1536
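/* Real-mode interrupts fetch a 4-byte CS:IP vector from the IVT at the IDT base,
   push FLAGS, CS and IP on the 16-bit stack, and clear IF, TF, AC and RF. */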
1537/* real mode interrupt */
1538static void do_interrupt_real(int intno, int is_int, int error_code,
1539 unsigned int next_eip)
1540{
1541 SegmentCache *dt;
1542 target_ulong ptr, ssp;
1543 int selector;
1544 uint32_t offset, esp;
1545 uint32_t old_cs, old_eip;
1546
1547 /* real mode (simpler !) */
1548 dt = &env->idt;
1549#ifndef VBOX
1550 if (intno * 4 + 3 > dt->limit)
1551#else
1552 if ((unsigned)intno * 4 + 3 > dt->limit)
1553#endif
1554 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1555 ptr = dt->base + intno * 4;
1556 offset = lduw_kernel(ptr);
1557 selector = lduw_kernel(ptr + 2);
1558 esp = ESP;
1559 ssp = env->segs[R_SS].base;
1560 if (is_int)
1561 old_eip = next_eip;
1562 else
1563 old_eip = env->eip;
1564 old_cs = env->segs[R_CS].selector;
1565 /* XXX: use SS segment size ? */
1566 PUSHW(ssp, esp, 0xffff, compute_eflags());
1567 PUSHW(ssp, esp, 0xffff, old_cs);
1568 PUSHW(ssp, esp, 0xffff, old_eip);
1569
1570 /* update processor state */
1571 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1572 env->eip = offset;
1573 env->segs[R_CS].selector = selector;
1574 env->segs[R_CS].base = (selector << 4);
1575 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1576}
1577
1578/* fake user mode interrupt */
1579void do_interrupt_user(int intno, int is_int, int error_code,
1580 target_ulong next_eip)
1581{
1582 SegmentCache *dt;
1583 target_ulong ptr;
1584 int dpl, cpl, shift;
1585 uint32_t e2;
1586
1587 dt = &env->idt;
1588 if (env->hflags & HF_LMA_MASK) {
1589 shift = 4;
1590 } else {
1591 shift = 3;
1592 }
1593 ptr = dt->base + (intno << shift);
1594 e2 = ldl_kernel(ptr + 4);
1595
1596 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1597 cpl = env->hflags & HF_CPL_MASK;
1598 /* check privilege if software int */
1599 if (is_int && dpl < cpl)
1600 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1601
1602 /* Since we emulate only user space, we cannot do more than
1603 exit the emulation with the suitable exception and error
1604 code */
1605 if (is_int)
1606 EIP = next_eip;
1607}
1608
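/* While an SVM guest is active, record the event being delivered in the VMCB
   event_inj field; do_interrupt() clears the valid bit again once delivery is
   complete. */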
1609#if !defined(CONFIG_USER_ONLY)
1610static void handle_even_inj(int intno, int is_int, int error_code,
1611 int is_hw, int rm)
1612{
1613 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1614 if (!(event_inj & SVM_EVTINJ_VALID)) {
1615 int type;
1616 if (is_int)
1617 type = SVM_EVTINJ_TYPE_SOFT;
1618 else
1619 type = SVM_EVTINJ_TYPE_EXEPT;
1620 event_inj = intno | type | SVM_EVTINJ_VALID;
1621 if (!rm && exeption_has_error_code(intno)) {
1622 event_inj |= SVM_EVTINJ_VALID_ERR;
1623 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
1624 }
1625 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
1626 }
1627}
1628#endif
1629
1630/*
1631 * Begin execution of an interrupt. is_int is TRUE if coming from
1632 * the int instruction. next_eip is the EIP value AFTER the interrupt
1633 * instruction. It is only relevant if is_int is TRUE.
1634 */
1635void do_interrupt(int intno, int is_int, int error_code,
1636 target_ulong next_eip, int is_hw)
1637{
1638 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1639 if ((env->cr[0] & CR0_PE_MASK)) {
1640 static int count;
1641 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1642 count, intno, error_code, is_int,
1643 env->hflags & HF_CPL_MASK,
1644 env->segs[R_CS].selector, EIP,
1645 (int)env->segs[R_CS].base + EIP,
1646 env->segs[R_SS].selector, ESP);
1647 if (intno == 0x0e) {
1648 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1649 } else {
1650 qemu_log(" EAX=" TARGET_FMT_lx, EAX);
1651 }
1652 qemu_log("\n");
1653 log_cpu_state(env, X86_DUMP_CCOP);
1654#if 0
1655 {
1656 int i;
1657 uint8_t *ptr;
1658 qemu_log(" code=");
1659 ptr = env->segs[R_CS].base + env->eip;
1660 for(i = 0; i < 16; i++) {
1661 qemu_log(" %02x", ldub(ptr + i));
1662 }
1663 qemu_log("\n");
1664 }
1665#endif
1666 count++;
1667 }
1668 }
1669#ifdef VBOX
1670 if (RT_UNLIKELY(env->state & CPU_EMULATE_SINGLE_STEP)) {
1671 if (is_int) {
1672 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv%s\n",
1673 intno, error_code, (RTGCPTR)env->eip, is_hw ? " hw" : "");
1674 } else {
1675 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv next=%#RGv%s\n",
1676 intno, error_code, (RTGCPTR)env->eip, (RTGCPTR)next_eip, is_hw ? " hw" : "");
1677 }
1678 }
1679#endif
1680 if (env->cr[0] & CR0_PE_MASK) {
1681#if !defined(CONFIG_USER_ONLY)
1682 if (env->hflags & HF_SVMI_MASK)
1683 handle_even_inj(intno, is_int, error_code, is_hw, 0);
1684#endif
1685#ifdef TARGET_X86_64
1686 if (env->hflags & HF_LMA_MASK) {
1687 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1688 } else
1689#endif
1690 {
1691#ifdef VBOX
1692 /* int xx *, v86 code and VME enabled? */
1693 if ( (env->eflags & VM_MASK)
1694 && (env->cr[4] & CR4_VME_MASK)
1695 && is_int
1696 && !is_hw
1697 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1698 )
1699 do_soft_interrupt_vme(intno, error_code, next_eip);
1700 else
1701#endif /* VBOX */
1702 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1703 }
1704 } else {
1705#if !defined(CONFIG_USER_ONLY)
1706 if (env->hflags & HF_SVMI_MASK)
1707 handle_even_inj(intno, is_int, error_code, is_hw, 1);
1708#endif
1709 do_interrupt_real(intno, is_int, error_code, next_eip);
1710 }
1711
1712#if !defined(CONFIG_USER_ONLY)
1713 if (env->hflags & HF_SVMI_MASK) {
1714 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1715 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
1716 }
1717#endif
1718}
1719
1720/* This should come from sysemu.h - if we could include it here... */
1721void qemu_system_reset_request(void);
1722
1723/*
1724 * Check nested exceptions and change to double or triple fault if
1725 * needed. It should only be called if this is not an interrupt.
1726 * Returns the new exception number.
1727 */
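/* Exceptions 0 and 10-13 are "contributory": two contributory faults in a row,
   or a contributory fault or #PF while delivering #PF, escalate to #DF; a
   further fault while delivering #DF is a triple fault (VM shutdown/reset). */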
1728static int check_exception(int intno, int *error_code)
1729{
1730 int first_contributory = env->old_exception == 0 ||
1731 (env->old_exception >= 10 &&
1732 env->old_exception <= 13);
1733 int second_contributory = intno == 0 ||
1734 (intno >= 10 && intno <= 13);
1735
1736 qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
1737 env->old_exception, intno);
1738
1739#if !defined(CONFIG_USER_ONLY)
1740 if (env->old_exception == EXCP08_DBLE) {
1741 if (env->hflags & HF_SVMI_MASK)
1742 helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */
1743
1744 qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
1745
1746# ifndef VBOX
1747 qemu_system_reset_request();
1748# else
1749 remR3RaiseRC(env->pVM, VINF_EM_RESET); /** @todo test + improve triple fault handling. */
1750# endif
1751 return EXCP_HLT;
1752 }
1753#endif
1754
1755 if ((first_contributory && second_contributory)
1756 || (env->old_exception == EXCP0E_PAGE &&
1757 (second_contributory || (intno == EXCP0E_PAGE)))) {
1758 intno = EXCP08_DBLE;
1759 *error_code = 0;
1760 }
1761
1762 if (second_contributory || (intno == EXCP0E_PAGE) ||
1763 (intno == EXCP08_DBLE))
1764 env->old_exception = intno;
1765
1766 return intno;
1767}
1768
1769/*
1770 * Signal an interruption. It is executed in the main CPU loop.
1771 * is_int is TRUE if coming from the int instruction. next_eip is the
1772 * EIP value AFTER the interrupt instruction. It is only relevant if
1773 * is_int is TRUE.
1774 */
1775static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
1776 int next_eip_addend)
1777{
1778#if defined(VBOX) && defined(DEBUG)
1779 Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, (RTGCPTR)env->eip + next_eip_addend));
1780#endif
1781 if (!is_int) {
1782 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1783 intno = check_exception(intno, &error_code);
1784 } else {
1785 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1786 }
1787
1788 env->exception_index = intno;
1789 env->error_code = error_code;
1790 env->exception_is_int = is_int;
1791 env->exception_next_eip = env->eip + next_eip_addend;
1792 cpu_loop_exit();
1793}
1794
1795/* shortcuts to generate exceptions */
1796
1797void raise_exception_err(int exception_index, int error_code)
1798{
1799 raise_interrupt(exception_index, 0, error_code, 0);
1800}
1801
1802void raise_exception(int exception_index)
1803{
1804 raise_interrupt(exception_index, 0, 0, 0);
1805}
1806
1807void raise_exception_env(int exception_index, CPUState *nenv)
1808{
1809 env = nenv;
1810 raise_exception(exception_index);
1811}
1812/* SMM support */
1813
1814#if defined(CONFIG_USER_ONLY)
1815
1816void do_smm_enter(void)
1817{
1818}
1819
1820void helper_rsm(void)
1821{
1822}
1823
1824#else
1825
1826#ifdef TARGET_X86_64
1827#define SMM_REVISION_ID 0x00020064
1828#else
1829#define SMM_REVISION_ID 0x00020000
1830#endif
1831
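/* Entering SMM saves the CPU state to the SMRAM state-save area at SMBASE+0x8000
   (the 32-bit and AMD64 layouts differ), then switches to a flat real-mode-like
   state with CS base = SMBASE and EIP = 0x8000. */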
1832void do_smm_enter(void)
1833{
1834 target_ulong sm_state;
1835 SegmentCache *dt;
1836 int i, offset;
1837
1838 qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
1839 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1840
1841 env->hflags |= HF_SMM_MASK;
1842 cpu_smm_update(env);
1843
1844 sm_state = env->smbase + 0x8000;
1845
1846#ifdef TARGET_X86_64
1847 for(i = 0; i < 6; i++) {
1848 dt = &env->segs[i];
1849 offset = 0x7e00 + i * 16;
1850 stw_phys(sm_state + offset, dt->selector);
1851 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1852 stl_phys(sm_state + offset + 4, dt->limit);
1853 stq_phys(sm_state + offset + 8, dt->base);
1854 }
1855
1856 stq_phys(sm_state + 0x7e68, env->gdt.base);
1857 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1858
1859 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1860 stq_phys(sm_state + 0x7e78, env->ldt.base);
1861 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1862 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1863
1864 stq_phys(sm_state + 0x7e88, env->idt.base);
1865 stl_phys(sm_state + 0x7e84, env->idt.limit);
1866
1867 stw_phys(sm_state + 0x7e90, env->tr.selector);
1868 stq_phys(sm_state + 0x7e98, env->tr.base);
1869 stl_phys(sm_state + 0x7e94, env->tr.limit);
1870 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1871
1872 stq_phys(sm_state + 0x7ed0, env->efer);
1873
1874 stq_phys(sm_state + 0x7ff8, EAX);
1875 stq_phys(sm_state + 0x7ff0, ECX);
1876 stq_phys(sm_state + 0x7fe8, EDX);
1877 stq_phys(sm_state + 0x7fe0, EBX);
1878 stq_phys(sm_state + 0x7fd8, ESP);
1879 stq_phys(sm_state + 0x7fd0, EBP);
1880 stq_phys(sm_state + 0x7fc8, ESI);
1881 stq_phys(sm_state + 0x7fc0, EDI);
1882 for(i = 8; i < 16; i++)
1883 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1884 stq_phys(sm_state + 0x7f78, env->eip);
1885 stl_phys(sm_state + 0x7f70, compute_eflags());
1886 stl_phys(sm_state + 0x7f68, env->dr[6]);
1887 stl_phys(sm_state + 0x7f60, env->dr[7]);
1888
1889 stl_phys(sm_state + 0x7f48, env->cr[4]);
1890 stl_phys(sm_state + 0x7f50, env->cr[3]);
1891 stl_phys(sm_state + 0x7f58, env->cr[0]);
1892
1893 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1894 stl_phys(sm_state + 0x7f00, env->smbase);
1895#else
1896 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1897 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1898 stl_phys(sm_state + 0x7ff4, compute_eflags());
1899 stl_phys(sm_state + 0x7ff0, env->eip);
1900 stl_phys(sm_state + 0x7fec, EDI);
1901 stl_phys(sm_state + 0x7fe8, ESI);
1902 stl_phys(sm_state + 0x7fe4, EBP);
1903 stl_phys(sm_state + 0x7fe0, ESP);
1904 stl_phys(sm_state + 0x7fdc, EBX);
1905 stl_phys(sm_state + 0x7fd8, EDX);
1906 stl_phys(sm_state + 0x7fd4, ECX);
1907 stl_phys(sm_state + 0x7fd0, EAX);
1908 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1909 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1910
1911 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1912 stl_phys(sm_state + 0x7f64, env->tr.base);
1913 stl_phys(sm_state + 0x7f60, env->tr.limit);
1914 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1915
1916 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1917 stl_phys(sm_state + 0x7f80, env->ldt.base);
1918 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1919 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1920
1921 stl_phys(sm_state + 0x7f74, env->gdt.base);
1922 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1923
1924 stl_phys(sm_state + 0x7f58, env->idt.base);
1925 stl_phys(sm_state + 0x7f54, env->idt.limit);
1926
1927 for(i = 0; i < 6; i++) {
1928 dt = &env->segs[i];
1929 if (i < 3)
1930 offset = 0x7f84 + i * 12;
1931 else
1932 offset = 0x7f2c + (i - 3) * 12;
1933 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1934 stl_phys(sm_state + offset + 8, dt->base);
1935 stl_phys(sm_state + offset + 4, dt->limit);
1936 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1937 }
1938 stl_phys(sm_state + 0x7f14, env->cr[4]);
1939
1940 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1941 stl_phys(sm_state + 0x7ef8, env->smbase);
1942#endif
1943 /* init SMM cpu state */
1944
1945#ifdef TARGET_X86_64
1946 cpu_load_efer(env, 0);
1947#endif
1948 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1949 env->eip = 0x00008000;
1950 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1951 0xffffffff, 0);
1952 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1953 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1954 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1955 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1956 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1957
1958 cpu_x86_update_cr0(env,
1959 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1960 cpu_x86_update_cr4(env, 0);
1961 env->dr[7] = 0x00000400;
1962 CC_OP = CC_OP_EFLAGS;
1963}
1964
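/* RSM: leave System Management Mode by reloading the full CPU state from
   the SMRAM state save area. In the VBOX build this simply aborts, as the
   recompiler never enters SMM. */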
1965void helper_rsm(void)
1966{
1967#ifdef VBOX
1968 cpu_abort(env, "helper_rsm");
1969#else /* !VBOX */
1970 target_ulong sm_state;
1971 int i, offset;
1972 uint32_t val;
1973
1974 sm_state = env->smbase + 0x8000;
1975#ifdef TARGET_X86_64
1976 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1977
1978 for(i = 0; i < 6; i++) {
1979 offset = 0x7e00 + i * 16;
1980 cpu_x86_load_seg_cache(env, i,
1981 lduw_phys(sm_state + offset),
1982 ldq_phys(sm_state + offset + 8),
1983 ldl_phys(sm_state + offset + 4),
1984 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1985 }
1986
1987 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1988 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1989
1990 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1991 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1992 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1993 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1994#ifdef VBOX
1995 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
1996 env->ldt.newselector = 0;
1997#endif
1998
1999 env->idt.base = ldq_phys(sm_state + 0x7e88);
2000 env->idt.limit = ldl_phys(sm_state + 0x7e84);
2001
2002 env->tr.selector = lduw_phys(sm_state + 0x7e90);
2003 env->tr.base = ldq_phys(sm_state + 0x7e98);
2004 env->tr.limit = ldl_phys(sm_state + 0x7e94);
2005 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
2006#ifdef VBOX
2007 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2008 env->tr.newselector = 0;
2009#endif
2010
2011 EAX = ldq_phys(sm_state + 0x7ff8);
2012 ECX = ldq_phys(sm_state + 0x7ff0);
2013 EDX = ldq_phys(sm_state + 0x7fe8);
2014 EBX = ldq_phys(sm_state + 0x7fe0);
2015 ESP = ldq_phys(sm_state + 0x7fd8);
2016 EBP = ldq_phys(sm_state + 0x7fd0);
2017 ESI = ldq_phys(sm_state + 0x7fc8);
2018 EDI = ldq_phys(sm_state + 0x7fc0);
2019 for(i = 8; i < 16; i++)
2020 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
2021 env->eip = ldq_phys(sm_state + 0x7f78);
2022 load_eflags(ldl_phys(sm_state + 0x7f70),
2023 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
2024 env->dr[6] = ldl_phys(sm_state + 0x7f68);
2025 env->dr[7] = ldl_phys(sm_state + 0x7f60);
2026
2027 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
2028 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
2029 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
2030
2031 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
2032 if (val & 0x20000) {
2033 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
2034 }
2035#else
2036 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
2037 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
2038 load_eflags(ldl_phys(sm_state + 0x7ff4),
2039 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
2040 env->eip = ldl_phys(sm_state + 0x7ff0);
2041 EDI = ldl_phys(sm_state + 0x7fec);
2042 ESI = ldl_phys(sm_state + 0x7fe8);
2043 EBP = ldl_phys(sm_state + 0x7fe4);
2044 ESP = ldl_phys(sm_state + 0x7fe0);
2045 EBX = ldl_phys(sm_state + 0x7fdc);
2046 EDX = ldl_phys(sm_state + 0x7fd8);
2047 ECX = ldl_phys(sm_state + 0x7fd4);
2048 EAX = ldl_phys(sm_state + 0x7fd0);
2049 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
2050 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
2051
2052 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
2053 env->tr.base = ldl_phys(sm_state + 0x7f64);
2054 env->tr.limit = ldl_phys(sm_state + 0x7f60);
2055 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
2056#ifdef VBOX
2057 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2058 env->tr.newselector = 0;
2059#endif
2060
2061 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
2062 env->ldt.base = ldl_phys(sm_state + 0x7f80);
2063 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
2064 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
2065#ifdef VBOX
2066 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2067 env->ldt.newselector = 0;
2068#endif
2069
2070 env->gdt.base = ldl_phys(sm_state + 0x7f74);
2071 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
2072
2073 env->idt.base = ldl_phys(sm_state + 0x7f58);
2074 env->idt.limit = ldl_phys(sm_state + 0x7f54);
2075
2076 for(i = 0; i < 6; i++) {
2077 if (i < 3)
2078 offset = 0x7f84 + i * 12;
2079 else
2080 offset = 0x7f2c + (i - 3) * 12;
2081 cpu_x86_load_seg_cache(env, i,
2082 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
2083 ldl_phys(sm_state + offset + 8),
2084 ldl_phys(sm_state + offset + 4),
2085 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
2086 }
2087 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
2088
2089 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
2090 if (val & 0x20000) {
2091 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
2092 }
2093#endif
2094 CC_OP = CC_OP_EFLAGS;
2095 env->hflags &= ~HF_SMM_MASK;
2096 cpu_smm_update(env);
2097
2098 qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
2099 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
2100#endif /* !VBOX */
2101}
2102
2103#endif /* !CONFIG_USER_ONLY */
2104
2105
2106/* division, flags are undefined */
2107
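/* 8-bit unsigned DIV: divides AX by the 8-bit operand, leaving the quotient
   in AL and the remainder in AH. #DE is raised for a zero divisor or a
   quotient that does not fit in 8 bits.
   e.g. AX=0x0123 (291) divided by 0x10 (16) gives AL=0x12, AH=0x03. */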
2108void helper_divb_AL(target_ulong t0)
2109{
2110 unsigned int num, den, q, r;
2111
2112 num = (EAX & 0xffff);
2113 den = (t0 & 0xff);
2114 if (den == 0) {
2115 raise_exception(EXCP00_DIVZ);
2116 }
2117 q = (num / den);
2118 if (q > 0xff)
2119 raise_exception(EXCP00_DIVZ);
2120 q &= 0xff;
2121 r = (num % den) & 0xff;
2122 EAX = (EAX & ~0xffff) | (r << 8) | q;
2123}
2124
2125void helper_idivb_AL(target_ulong t0)
2126{
2127 int num, den, q, r;
2128
2129 num = (int16_t)EAX;
2130 den = (int8_t)t0;
2131 if (den == 0) {
2132 raise_exception(EXCP00_DIVZ);
2133 }
2134 q = (num / den);
2135 if (q != (int8_t)q)
2136 raise_exception(EXCP00_DIVZ);
2137 q &= 0xff;
2138 r = (num % den) & 0xff;
2139 EAX = (EAX & ~0xffff) | (r << 8) | q;
2140}
2141
2142void helper_divw_AX(target_ulong t0)
2143{
2144 unsigned int num, den, q, r;
2145
2146 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2147 den = (t0 & 0xffff);
2148 if (den == 0) {
2149 raise_exception(EXCP00_DIVZ);
2150 }
2151 q = (num / den);
2152 if (q > 0xffff)
2153 raise_exception(EXCP00_DIVZ);
2154 q &= 0xffff;
2155 r = (num % den) & 0xffff;
2156 EAX = (EAX & ~0xffff) | q;
2157 EDX = (EDX & ~0xffff) | r;
2158}
2159
2160void helper_idivw_AX(target_ulong t0)
2161{
2162 int num, den, q, r;
2163
2164 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2165 den = (int16_t)t0;
2166 if (den == 0) {
2167 raise_exception(EXCP00_DIVZ);
2168 }
2169 q = (num / den);
2170 if (q != (int16_t)q)
2171 raise_exception(EXCP00_DIVZ);
2172 q &= 0xffff;
2173 r = (num % den) & 0xffff;
2174 EAX = (EAX & ~0xffff) | q;
2175 EDX = (EDX & ~0xffff) | r;
2176}
2177
2178void helper_divl_EAX(target_ulong t0)
2179{
2180 unsigned int den, r;
2181 uint64_t num, q;
2182
2183 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2184 den = t0;
2185 if (den == 0) {
2186 raise_exception(EXCP00_DIVZ);
2187 }
2188 q = (num / den);
2189 r = (num % den);
2190 if (q > 0xffffffff)
2191 raise_exception(EXCP00_DIVZ);
2192 EAX = (uint32_t)q;
2193 EDX = (uint32_t)r;
2194}
2195
2196void helper_idivl_EAX(target_ulong t0)
2197{
2198 int den, r;
2199 int64_t num, q;
2200
2201 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2202 den = t0;
2203 if (den == 0) {
2204 raise_exception(EXCP00_DIVZ);
2205 }
2206 q = (num / den);
2207 r = (num % den);
2208 if (q != (int32_t)q)
2209 raise_exception(EXCP00_DIVZ);
2210 EAX = (uint32_t)q;
2211 EDX = (uint32_t)r;
2212}
2213
2214/* bcd */
2215
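/* ASCII/decimal adjust helpers: AAM splits AL into AH=AL/base and AL=AL%base
   (e.g. base 10, AL=47 -> AH=4, AL=7); AAD folds AH*base+AL back into AL and
   clears AH. AAA/AAS/DAA/DAS adjust the result of a previous BCD
   addition/subtraction using the AF and CF flags. */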
2216/* XXX: exception */
2217void helper_aam(int base)
2218{
2219 int al, ah;
2220 al = EAX & 0xff;
2221 ah = al / base;
2222 al = al % base;
2223 EAX = (EAX & ~0xffff) | al | (ah << 8);
2224 CC_DST = al;
2225}
2226
2227void helper_aad(int base)
2228{
2229 int al, ah;
2230 al = EAX & 0xff;
2231 ah = (EAX >> 8) & 0xff;
2232 al = ((ah * base) + al) & 0xff;
2233 EAX = (EAX & ~0xffff) | al;
2234 CC_DST = al;
2235}
2236
2237void helper_aaa(void)
2238{
2239 int icarry;
2240 int al, ah, af;
2241 int eflags;
2242
2243 eflags = helper_cc_compute_all(CC_OP);
2244 af = eflags & CC_A;
2245 al = EAX & 0xff;
2246 ah = (EAX >> 8) & 0xff;
2247
2248 icarry = (al > 0xf9);
2249 if (((al & 0x0f) > 9 ) || af) {
2250 al = (al + 6) & 0x0f;
2251 ah = (ah + 1 + icarry) & 0xff;
2252 eflags |= CC_C | CC_A;
2253 } else {
2254 eflags &= ~(CC_C | CC_A);
2255 al &= 0x0f;
2256 }
2257 EAX = (EAX & ~0xffff) | al | (ah << 8);
2258 CC_SRC = eflags;
2259}
2260
2261void helper_aas(void)
2262{
2263 int icarry;
2264 int al, ah, af;
2265 int eflags;
2266
2267 eflags = helper_cc_compute_all(CC_OP);
2268 af = eflags & CC_A;
2269 al = EAX & 0xff;
2270 ah = (EAX >> 8) & 0xff;
2271
2272 icarry = (al < 6);
2273 if (((al & 0x0f) > 9 ) || af) {
2274 al = (al - 6) & 0x0f;
2275 ah = (ah - 1 - icarry) & 0xff;
2276 eflags |= CC_C | CC_A;
2277 } else {
2278 eflags &= ~(CC_C | CC_A);
2279 al &= 0x0f;
2280 }
2281 EAX = (EAX & ~0xffff) | al | (ah << 8);
2282 CC_SRC = eflags;
2283}
2284
2285void helper_daa(void)
2286{
2287 int al, af, cf;
2288 int eflags;
2289
2290 eflags = helper_cc_compute_all(CC_OP);
2291 cf = eflags & CC_C;
2292 af = eflags & CC_A;
2293 al = EAX & 0xff;
2294
2295 eflags = 0;
2296 if (((al & 0x0f) > 9 ) || af) {
2297 al = (al + 6) & 0xff;
2298 eflags |= CC_A;
2299 }
2300 if ((al > 0x9f) || cf) {
2301 al = (al + 0x60) & 0xff;
2302 eflags |= CC_C;
2303 }
2304 EAX = (EAX & ~0xff) | al;
2305 /* well, speed is not an issue here, so we compute the flags by hand */
2306 eflags |= (al == 0) << 6; /* zf */
2307 eflags |= parity_table[al]; /* pf */
2308 eflags |= (al & 0x80); /* sf */
2309 CC_SRC = eflags;
2310}
2311
2312void helper_das(void)
2313{
2314 int al, al1, af, cf;
2315 int eflags;
2316
2317 eflags = helper_cc_compute_all(CC_OP);
2318 cf = eflags & CC_C;
2319 af = eflags & CC_A;
2320 al = EAX & 0xff;
2321
2322 eflags = 0;
2323 al1 = al;
2324 if (((al & 0x0f) > 9 ) || af) {
2325 eflags |= CC_A;
2326 if (al < 6 || cf)
2327 eflags |= CC_C;
2328 al = (al - 6) & 0xff;
2329 }
2330 if ((al1 > 0x99) || cf) {
2331 al = (al - 0x60) & 0xff;
2332 eflags |= CC_C;
2333 }
2334 EAX = (EAX & ~0xff) | al;
2335 /* well, speed is not an issue here, so we compute the flags by hand */
2336 eflags |= (al == 0) << 6; /* zf */
2337 eflags |= parity_table[al]; /* pf */
2338 eflags |= (al & 0x80); /* sf */
2339 CC_SRC = eflags;
2340}
2341
2342void helper_into(int next_eip_addend)
2343{
2344 int eflags;
2345 eflags = helper_cc_compute_all(CC_OP);
2346 if (eflags & CC_O) {
2347 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2348 }
2349}
2350
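/* CMPXCHG8B: compares the 64-bit value at a0 with EDX:EAX; if equal,
   ECX:EBX is stored and ZF is set, otherwise the memory value is loaded
   into EDX:EAX and ZF is cleared. The store is performed in both cases
   (see the comment below), matching the unconditional write of the real
   instruction. */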
2351void helper_cmpxchg8b(target_ulong a0)
2352{
2353 uint64_t d;
2354 int eflags;
2355
2356 eflags = helper_cc_compute_all(CC_OP);
2357 d = ldq(a0);
2358 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2359 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2360 eflags |= CC_Z;
2361 } else {
2362 /* always do the store */
2363 stq(a0, d);
2364 EDX = (uint32_t)(d >> 32);
2365 EAX = (uint32_t)d;
2366 eflags &= ~CC_Z;
2367 }
2368 CC_SRC = eflags;
2369}
2370
2371#ifdef TARGET_X86_64
2372void helper_cmpxchg16b(target_ulong a0)
2373{
2374 uint64_t d0, d1;
2375 int eflags;
2376
2377 if ((a0 & 0xf) != 0)
2378 raise_exception(EXCP0D_GPF);
2379 eflags = helper_cc_compute_all(CC_OP);
2380 d0 = ldq(a0);
2381 d1 = ldq(a0 + 8);
2382 if (d0 == EAX && d1 == EDX) {
2383 stq(a0, EBX);
2384 stq(a0 + 8, ECX);
2385 eflags |= CC_Z;
2386 } else {
2387 /* always do the store */
2388 stq(a0, d0);
2389 stq(a0 + 8, d1);
2390 EDX = d1;
2391 EAX = d0;
2392 eflags &= ~CC_Z;
2393 }
2394 CC_SRC = eflags;
2395}
2396#endif
2397
2398void helper_single_step(void)
2399{
2400#ifndef CONFIG_USER_ONLY
2401 check_hw_breakpoints(env, 1);
2402 env->dr[6] |= DR6_BS;
2403#endif
2404 raise_exception(EXCP01_DB);
2405}
2406
2407void helper_cpuid(void)
2408{
2409 uint32_t eax, ebx, ecx, edx;
2410
2411 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2412
2413 cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
2414 EAX = eax;
2415 EBX = ebx;
2416 ECX = ecx;
2417 EDX = edx;
2418}
2419
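/* ENTER with a non-zero nesting level: copies level-1 frame pointers from
   the previous frame (addressed via EBP) onto the new stack and finally
   pushes t1, the frame temp supplied by the translator, honouring the
   16/32-bit operand size and the SS stack-pointer mask. */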
2420void helper_enter_level(int level, int data32, target_ulong t1)
2421{
2422 target_ulong ssp;
2423 uint32_t esp_mask, esp, ebp;
2424
2425 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2426 ssp = env->segs[R_SS].base;
2427 ebp = EBP;
2428 esp = ESP;
2429 if (data32) {
2430 /* 32 bit */
2431 esp -= 4;
2432 while (--level) {
2433 esp -= 4;
2434 ebp -= 4;
2435 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2436 }
2437 esp -= 4;
2438 stl(ssp + (esp & esp_mask), t1);
2439 } else {
2440 /* 16 bit */
2441 esp -= 2;
2442 while (--level) {
2443 esp -= 2;
2444 ebp -= 2;
2445 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2446 }
2447 esp -= 2;
2448 stw(ssp + (esp & esp_mask), t1);
2449 }
2450}
2451
2452#ifdef TARGET_X86_64
2453void helper_enter64_level(int level, int data64, target_ulong t1)
2454{
2455 target_ulong esp, ebp;
2456 ebp = EBP;
2457 esp = ESP;
2458
2459 if (data64) {
2460 /* 64 bit */
2461 esp -= 8;
2462 while (--level) {
2463 esp -= 8;
2464 ebp -= 8;
2465 stq(esp, ldq(ebp));
2466 }
2467 esp -= 8;
2468 stq(esp, t1);
2469 } else {
2470 /* 16 bit */
2471 esp -= 2;
2472 while (--level) {
2473 esp -= 2;
2474 ebp -= 2;
2475 stw(esp, lduw(ebp));
2476 }
2477 esp -= 2;
2478 stw(esp, t1);
2479 }
2480}
2481#endif
2482
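/* LLDT: loads the LDT register. A null selector marks the LDT invalid
   (base/limit 0); otherwise the selector must reference a present LDT
   descriptor (system type 2) in the GDT, #GP/#NP otherwise. In long mode
   the 16-byte descriptor form is used and bits 63:32 of the base come from
   the third doubleword. */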
2483void helper_lldt(int selector)
2484{
2485 SegmentCache *dt;
2486 uint32_t e1, e2;
2487#ifndef VBOX
2488 int index, entry_limit;
2489#else
2490 unsigned int index, entry_limit;
2491#endif
2492 target_ulong ptr;
2493
2494#ifdef VBOX
2495 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
2496 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
2497#endif
2498
2499 selector &= 0xffff;
2500 if ((selector & 0xfffc) == 0) {
2501 /* XXX: NULL selector case: invalid LDT */
2502 env->ldt.base = 0;
2503 env->ldt.limit = 0;
2504#ifdef VBOX
2505 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2506 env->ldt.newselector = 0;
2507#endif
2508 } else {
2509 if (selector & 0x4)
2510 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2511 dt = &env->gdt;
2512 index = selector & ~7;
2513#ifdef TARGET_X86_64
2514 if (env->hflags & HF_LMA_MASK)
2515 entry_limit = 15;
2516 else
2517#endif
2518 entry_limit = 7;
2519 if ((index + entry_limit) > dt->limit)
2520 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2521 ptr = dt->base + index;
2522 e1 = ldl_kernel(ptr);
2523 e2 = ldl_kernel(ptr + 4);
2524 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2525 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2526 if (!(e2 & DESC_P_MASK))
2527 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2528#ifdef TARGET_X86_64
2529 if (env->hflags & HF_LMA_MASK) {
2530 uint32_t e3;
2531 e3 = ldl_kernel(ptr + 8);
2532 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2533 env->ldt.base |= (target_ulong)e3 << 32;
2534 } else
2535#endif
2536 {
2537 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2538 }
2539 }
2540 env->ldt.selector = selector;
2541#ifdef VBOX
2542 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2543 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2544#endif
2545}
2546
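/* LTR: loads the task register. A null selector leaves TR invalid;
   otherwise the selector must reference a present, available TSS descriptor
   (type 1 or 9) in the GDT. In long mode the 16-byte descriptor form is
   checked and used. The descriptor is then marked busy in the GDT. */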
2547void helper_ltr(int selector)
2548{
2549 SegmentCache *dt;
2550 uint32_t e1, e2;
2551#ifndef VBOX
2552 int index, type, entry_limit;
2553#else
2554 unsigned int index;
2555 int type, entry_limit;
2556#endif
2557 target_ulong ptr;
2558
2559#ifdef VBOX
2560 Log(("helper_ltr: pc=%RGv old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2561 (RTGCPTR)env->eip, (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2562 env->tr.flags, (RTSEL)(selector & 0xffff)));
2563# if 0 /** @todo r=bird: This looks very fishy, need good reason to re-enable it. */
2564 ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request,
2565 CPU_INTERRUPT_EXTERNAL_EXIT);
2566# endif
2567#endif
2568 selector &= 0xffff;
2569 if ((selector & 0xfffc) == 0) {
2570 /* NULL selector case: invalid TR */
2571 env->tr.base = 0;
2572 env->tr.limit = 0;
2573 env->tr.flags = 0;
2574#ifdef VBOX
2575 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2576 env->tr.newselector = 0;
2577#endif
2578 } else {
2579 if (selector & 0x4)
2580 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2581 dt = &env->gdt;
2582 index = selector & ~7;
2583#ifdef TARGET_X86_64
2584 if (env->hflags & HF_LMA_MASK)
2585 entry_limit = 15;
2586 else
2587#endif
2588 entry_limit = 7;
2589 if ((index + entry_limit) > dt->limit)
2590 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2591 ptr = dt->base + index;
2592 e1 = ldl_kernel(ptr);
2593 e2 = ldl_kernel(ptr + 4);
2594 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2595 if ((e2 & DESC_S_MASK) ||
2596 (type != 1 && type != 9))
2597 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2598 if (!(e2 & DESC_P_MASK))
2599 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2600#ifdef TARGET_X86_64
2601 if (env->hflags & HF_LMA_MASK) {
2602 uint32_t e3, e4;
2603 e3 = ldl_kernel(ptr + 8);
2604 e4 = ldl_kernel(ptr + 12);
2605 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2606 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2607 load_seg_cache_raw_dt(&env->tr, e1, e2);
2608 env->tr.base |= (target_ulong)e3 << 32;
2609 } else
2610#endif
2611 {
2612 load_seg_cache_raw_dt(&env->tr, e1, e2);
2613 }
2614 e2 |= DESC_TSS_BUSY_MASK;
2615 stl_kernel(ptr + 4, e2);
2616 }
2617 env->tr.selector = selector;
2618#ifdef VBOX
2619 Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2620 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2621 env->tr.flags, (RTSEL)(selector & 0xffff)));
2622#endif
2623}
2624
2625/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2626void helper_load_seg(int seg_reg, int selector)
2627{
2628 uint32_t e1, e2;
2629 int cpl, dpl, rpl;
2630 SegmentCache *dt;
2631#ifndef VBOX
2632 int index;
2633#else
2634 unsigned int index;
2635#endif
2636 target_ulong ptr;
2637
2638 selector &= 0xffff;
2639 cpl = env->hflags & HF_CPL_MASK;
2640#ifdef VBOX
2641
2642 /* Trying to load a selector with RPL=1? */
2643 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2644 {
2645 Log(("RPL 1 -> sel %04X -> %04X (helper_load_seg)\n", selector, selector & 0xfffc));
2646 selector = selector & 0xfffc;
2647 }
2648#endif /* VBOX */
2649 if ((selector & 0xfffc) == 0) {
2650 /* null selector case */
2651 if (seg_reg == R_SS
2652#ifdef TARGET_X86_64
2653 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2654#endif
2655 )
2656 raise_exception_err(EXCP0D_GPF, 0);
2657 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2658 } else {
2659
2660 if (selector & 0x4)
2661 dt = &env->ldt;
2662 else
2663 dt = &env->gdt;
2664 index = selector & ~7;
2665 if ((index + 7) > dt->limit)
2666 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2667 ptr = dt->base + index;
2668 e1 = ldl_kernel(ptr);
2669 e2 = ldl_kernel(ptr + 4);
2670
2671 if (!(e2 & DESC_S_MASK))
2672 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2673 rpl = selector & 3;
2674 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2675 if (seg_reg == R_SS) {
2676 /* must be writable segment */
2677 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2678 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2679 if (rpl != cpl || dpl != cpl)
2680 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2681 } else {
2682 /* must be readable segment */
2683 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2684 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2685
2686 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2687 /* if not conforming code, test rights */
2688 if (dpl < cpl || dpl < rpl)
2689 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2690 }
2691 }
2692
2693 if (!(e2 & DESC_P_MASK)) {
2694 if (seg_reg == R_SS)
2695 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2696 else
2697 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2698 }
2699
2700 /* set the access bit if not already set */
2701 if (!(e2 & DESC_A_MASK)) {
2702 e2 |= DESC_A_MASK;
2703 stl_kernel(ptr + 4, e2);
2704 }
2705
2706 cpu_x86_load_seg_cache(env, seg_reg, selector,
2707 get_seg_base(e1, e2),
2708 get_seg_limit(e1, e2),
2709 e2);
2710#if 0
2711 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2712 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2713#endif
2714 }
2715}
2716
2717/* protected mode jump */
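/* Far JMP through a code segment, call gate, task gate or TSS descriptor.
   Direct code-segment targets are checked against the conforming/
   non-conforming privilege rules and the segment limit; gate targets are
   resolved to their destination CS:EIP (or dispatched via switch_tss for
   task gates and TSSs) without any stack switch. */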
2718void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2719 int next_eip_addend)
2720{
2721 int gate_cs, type;
2722 uint32_t e1, e2, cpl, dpl, rpl, limit;
2723 target_ulong next_eip;
2724
2725#ifdef VBOX /** @todo Why do we do this? */
2726 e1 = e2 = 0;
2727#endif
2728 if ((new_cs & 0xfffc) == 0)
2729 raise_exception_err(EXCP0D_GPF, 0);
2730 if (load_segment(&e1, &e2, new_cs) != 0)
2731 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2732 cpl = env->hflags & HF_CPL_MASK;
2733 if (e2 & DESC_S_MASK) {
2734 if (!(e2 & DESC_CS_MASK))
2735 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2736 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2737 if (e2 & DESC_C_MASK) {
2738 /* conforming code segment */
2739 if (dpl > cpl)
2740 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2741 } else {
2742 /* non conforming code segment */
2743 rpl = new_cs & 3;
2744 if (rpl > cpl)
2745 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2746 if (dpl != cpl)
2747 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2748 }
2749 if (!(e2 & DESC_P_MASK))
2750 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2751 limit = get_seg_limit(e1, e2);
2752 if (new_eip > limit &&
2753 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2754 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2755 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2756 get_seg_base(e1, e2), limit, e2);
2757 EIP = new_eip;
2758 } else {
2759 /* jump to call or task gate */
2760 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2761 rpl = new_cs & 3;
2762 cpl = env->hflags & HF_CPL_MASK;
2763 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2764 switch(type) {
2765 case 1: /* 286 TSS */
2766 case 9: /* 386 TSS */
2767 case 5: /* task gate */
2768 if (dpl < cpl || dpl < rpl)
2769 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2770 next_eip = env->eip + next_eip_addend;
2771 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2772 CC_OP = CC_OP_EFLAGS;
2773 break;
2774 case 4: /* 286 call gate */
2775 case 12: /* 386 call gate */
2776 if ((dpl < cpl) || (dpl < rpl))
2777 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2778 if (!(e2 & DESC_P_MASK))
2779 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2780 gate_cs = e1 >> 16;
2781 new_eip = (e1 & 0xffff);
2782 if (type == 12)
2783 new_eip |= (e2 & 0xffff0000);
2784 if (load_segment(&e1, &e2, gate_cs) != 0)
2785 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2786 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2787 /* must be code segment */
2788 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2789 (DESC_S_MASK | DESC_CS_MASK)))
2790 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2791 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2792 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2793 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2794 if (!(e2 & DESC_P_MASK))
2795#ifdef VBOX /* See page 3-514 of 253666.pdf */
2796 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2797#else
2798 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2799#endif
2800 limit = get_seg_limit(e1, e2);
2801 if (new_eip > limit)
2802 raise_exception_err(EXCP0D_GPF, 0);
2803 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2804 get_seg_base(e1, e2), limit, e2);
2805 EIP = new_eip;
2806 break;
2807 default:
2808 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2809 break;
2810 }
2811 }
2812}
2813
2814/* real mode call */
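/* Pushes CS and the return offset (16 or 32 bits depending on 'shift') and
   loads CS:IP real-mode style, i.e. the new CS base is selector << 4. */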
2815void helper_lcall_real(int new_cs, target_ulong new_eip1,
2816 int shift, int next_eip)
2817{
2818 int new_eip;
2819 uint32_t esp, esp_mask;
2820 target_ulong ssp;
2821
2822 new_eip = new_eip1;
2823 esp = ESP;
2824 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2825 ssp = env->segs[R_SS].base;
2826 if (shift) {
2827 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2828 PUSHL(ssp, esp, esp_mask, next_eip);
2829 } else {
2830 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2831 PUSHW(ssp, esp, esp_mask, next_eip);
2832 }
2833
2834 SET_ESP(esp, esp_mask);
2835 env->eip = new_eip;
2836 env->segs[R_CS].selector = new_cs;
2837 env->segs[R_CS].base = (new_cs << 4);
2838}
2839
2840/* protected mode call */
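/* Far CALL in protected mode: either a direct call to a code segment
   (return address pushed on the current stack), a call gate - possibly
   switching to an inner-privilege stack fetched from the TSS and copying
   param_count parameters from the old stack - or a task switch via a
   TSS/task gate. */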
2841void helper_lcall_protected(int new_cs, target_ulong new_eip,
2842 int shift, int next_eip_addend)
2843{
2844 int new_stack, i;
2845 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2846 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2847 uint32_t val, limit, old_sp_mask;
2848 target_ulong ssp, old_ssp, next_eip;
2849
2850#ifdef VBOX /** @todo Why do we do this? */
2851 e1 = e2 = 0;
2852#endif
2853 next_eip = env->eip + next_eip_addend;
2854 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2855 LOG_PCALL_STATE(env);
2856 if ((new_cs & 0xfffc) == 0)
2857 raise_exception_err(EXCP0D_GPF, 0);
2858 if (load_segment(&e1, &e2, new_cs) != 0)
2859 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2860 cpl = env->hflags & HF_CPL_MASK;
2861 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2862 if (e2 & DESC_S_MASK) {
2863 if (!(e2 & DESC_CS_MASK))
2864 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2865 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2866 if (e2 & DESC_C_MASK) {
2867 /* conforming code segment */
2868 if (dpl > cpl)
2869 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2870 } else {
2871 /* non conforming code segment */
2872 rpl = new_cs & 3;
2873 if (rpl > cpl)
2874 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2875 if (dpl != cpl)
2876 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2877 }
2878 if (!(e2 & DESC_P_MASK))
2879 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2880
2881#ifdef TARGET_X86_64
2882 /* XXX: check 16/32 bit cases in long mode */
2883 if (shift == 2) {
2884 target_ulong rsp;
2885 /* 64 bit case */
2886 rsp = ESP;
2887 PUSHQ(rsp, env->segs[R_CS].selector);
2888 PUSHQ(rsp, next_eip);
2889 /* from this point, not restartable */
2890 ESP = rsp;
2891 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2892 get_seg_base(e1, e2),
2893 get_seg_limit(e1, e2), e2);
2894 EIP = new_eip;
2895 } else
2896#endif
2897 {
2898 sp = ESP;
2899 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2900 ssp = env->segs[R_SS].base;
2901 if (shift) {
2902 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2903 PUSHL(ssp, sp, sp_mask, next_eip);
2904 } else {
2905 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2906 PUSHW(ssp, sp, sp_mask, next_eip);
2907 }
2908
2909 limit = get_seg_limit(e1, e2);
2910 if (new_eip > limit)
2911 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2912 /* from this point, not restartable */
2913 SET_ESP(sp, sp_mask);
2914 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2915 get_seg_base(e1, e2), limit, e2);
2916 EIP = new_eip;
2917 }
2918 } else {
2919 /* check gate type */
2920 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2921 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2922 rpl = new_cs & 3;
2923 switch(type) {
2924 case 1: /* available 286 TSS */
2925 case 9: /* available 386 TSS */
2926 case 5: /* task gate */
2927 if (dpl < cpl || dpl < rpl)
2928 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2929 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2930 CC_OP = CC_OP_EFLAGS;
2931 return;
2932 case 4: /* 286 call gate */
2933 case 12: /* 386 call gate */
2934 break;
2935 default:
2936 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2937 break;
2938 }
2939 shift = type >> 3;
2940
2941 if (dpl < cpl || dpl < rpl)
2942 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2943 /* check valid bit */
2944 if (!(e2 & DESC_P_MASK))
2945 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2946 selector = e1 >> 16;
2947 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2948 param_count = e2 & 0x1f;
2949 if ((selector & 0xfffc) == 0)
2950 raise_exception_err(EXCP0D_GPF, 0);
2951
2952 if (load_segment(&e1, &e2, selector) != 0)
2953 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2954 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2955 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2956 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2957 if (dpl > cpl)
2958 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2959 if (!(e2 & DESC_P_MASK))
2960 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2961
2962 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2963 /* to inner privilege */
2964 get_ss_esp_from_tss(&ss, &sp, dpl);
2965 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2966 ss, sp, param_count, ESP);
2967 if ((ss & 0xfffc) == 0)
2968 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2969 if ((ss & 3) != dpl)
2970 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2971 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2972 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2973 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2974 if (ss_dpl != dpl)
2975 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2976 if (!(ss_e2 & DESC_S_MASK) ||
2977 (ss_e2 & DESC_CS_MASK) ||
2978 !(ss_e2 & DESC_W_MASK))
2979 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2980 if (!(ss_e2 & DESC_P_MASK))
2981#ifdef VBOX /* See page 3-99 of 253666.pdf */
2982 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
2983#else
2984 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2985#endif
2986
2987 // push_size = ((param_count * 2) + 8) << shift;
2988
2989 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2990 old_ssp = env->segs[R_SS].base;
2991
2992 sp_mask = get_sp_mask(ss_e2);
2993 ssp = get_seg_base(ss_e1, ss_e2);
2994 if (shift) {
2995 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2996 PUSHL(ssp, sp, sp_mask, ESP);
2997 for(i = param_count - 1; i >= 0; i--) {
2998 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2999 PUSHL(ssp, sp, sp_mask, val);
3000 }
3001 } else {
3002 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
3003 PUSHW(ssp, sp, sp_mask, ESP);
3004 for(i = param_count - 1; i >= 0; i--) {
3005 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
3006 PUSHW(ssp, sp, sp_mask, val);
3007 }
3008 }
3009 new_stack = 1;
3010 } else {
3011 /* to same privilege */
3012 sp = ESP;
3013 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3014 ssp = env->segs[R_SS].base;
3015 // push_size = (4 << shift);
3016 new_stack = 0;
3017 }
3018
3019 if (shift) {
3020 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
3021 PUSHL(ssp, sp, sp_mask, next_eip);
3022 } else {
3023 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
3024 PUSHW(ssp, sp, sp_mask, next_eip);
3025 }
3026
3027 /* from this point, not restartable */
3028
3029 if (new_stack) {
3030 ss = (ss & ~3) | dpl;
3031 cpu_x86_load_seg_cache(env, R_SS, ss,
3032 ssp,
3033 get_seg_limit(ss_e1, ss_e2),
3034 ss_e2);
3035 }
3036
3037 selector = (selector & ~3) | dpl;
3038 cpu_x86_load_seg_cache(env, R_CS, selector,
3039 get_seg_base(e1, e2),
3040 get_seg_limit(e1, e2),
3041 e2);
3042 cpu_x86_set_cpl(env, dpl);
3043 SET_ESP(sp, sp_mask);
3044 EIP = offset;
3045 }
3046}
3047
3048/* real and vm86 mode iret */
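/* Pops IP, CS and FLAGS from the stack (16 or 32 bits depending on 'shift')
   and reloads them real-mode style. The VBOX-specific code adds
   virtual-8086 mode extension (VME) handling: #GP when enabling interrupts
   with VIP pending or when setting TF, with IF reflected into VIF. */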
3049void helper_iret_real(int shift)
3050{
3051 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
3052 target_ulong ssp;
3053 int eflags_mask;
3054#ifdef VBOX
3055 bool fVME = false;
3056
3057 remR3TrapClear(env->pVM);
3058#endif /* VBOX */
3059
3060 sp_mask = 0xffff; /* XXX: use SS segment size? */
3061 sp = ESP;
3062 ssp = env->segs[R_SS].base;
3063 if (shift == 1) {
3064 /* 32 bits */
3065 POPL(ssp, sp, sp_mask, new_eip);
3066 POPL(ssp, sp, sp_mask, new_cs);
3067 new_cs &= 0xffff;
3068 POPL(ssp, sp, sp_mask, new_eflags);
3069 } else {
3070 /* 16 bits */
3071 POPW(ssp, sp, sp_mask, new_eip);
3072 POPW(ssp, sp, sp_mask, new_cs);
3073 POPW(ssp, sp, sp_mask, new_eflags);
3074 }
3075#ifdef VBOX
3076 if ( (env->eflags & VM_MASK)
3077 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
3078 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
3079 {
3080 fVME = true;
3081 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
3082 /* if TF will be set -> #GP */
3083 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
3084 || (new_eflags & TF_MASK))
3085 raise_exception(EXCP0D_GPF);
3086 }
3087#endif /* VBOX */
3088 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
3089 env->segs[R_CS].selector = new_cs;
3090 env->segs[R_CS].base = (new_cs << 4);
3091 env->eip = new_eip;
3092#ifdef VBOX
3093 if (fVME)
3094 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3095 else
3096#endif
3097 if (env->eflags & VM_MASK)
3098 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
3099 else
3100 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
3101 if (shift == 0)
3102 eflags_mask &= 0xffff;
3103 load_eflags(new_eflags, eflags_mask);
3104 env->hflags2 &= ~HF2_NMI_MASK;
3105#ifdef VBOX
3106 if (fVME)
3107 {
3108 if (new_eflags & IF_MASK)
3109 env->eflags |= VIF_MASK;
3110 else
3111 env->eflags &= ~VIF_MASK;
3112 }
3113#endif /* VBOX */
3114}
3115
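/* On a return to an outer privilege level, data and non-conforming code
   segment registers that are more privileged than the new CPL (DPL < CPL)
   are loaded with a null descriptor; FS and GS holding a null selector are
   left untouched (see the note below). */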
3116static inline void validate_seg(int seg_reg, int cpl)
3117{
3118 int dpl;
3119 uint32_t e2;
3120
3121 /* XXX: on x86_64, we do not want to nullify FS and GS because
3122 they may still contain a valid base. I would be interested to
3123 know how a real x86_64 CPU behaves */
3124 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3125 (env->segs[seg_reg].selector & 0xfffc) == 0)
3126 return;
3127
3128 e2 = env->segs[seg_reg].flags;
3129 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3130 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3131 /* data or non conforming code segment */
3132 if (dpl < cpl) {
3133 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3134 }
3135 }
3136}
3137
3138/* protected mode iret */
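/* Common implementation of far RET (is_iret=0) and IRET (is_iret=1) in
   protected mode: pops CS:EIP (plus EFLAGS for IRET), validates the new
   code segment, and handles same-privilege returns, returns to an outer
   privilege level (popping SS:ESP and revalidating the data segments) and
   the return to virtual-8086 mode. The VBOX code additionally undoes or
   applies the raw-mode ring compression of the CS RPL. */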
3139static inline void helper_ret_protected(int shift, int is_iret, int addend)
3140{
3141 uint32_t new_cs, new_eflags, new_ss;
3142 uint32_t new_es, new_ds, new_fs, new_gs;
3143 uint32_t e1, e2, ss_e1, ss_e2;
3144 int cpl, dpl, rpl, eflags_mask, iopl;
3145 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3146
3147#ifdef VBOX /** @todo Why do we do this? */
3148 ss_e1 = ss_e2 = e1 = e2 = 0;
3149#endif
3150
3151#ifdef TARGET_X86_64
3152 if (shift == 2)
3153 sp_mask = -1;
3154 else
3155#endif
3156 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3157 sp = ESP;
3158 ssp = env->segs[R_SS].base;
3159 new_eflags = 0; /* avoid warning */
3160#ifdef TARGET_X86_64
3161 if (shift == 2) {
3162 POPQ(sp, new_eip);
3163 POPQ(sp, new_cs);
3164 new_cs &= 0xffff;
3165 if (is_iret) {
3166 POPQ(sp, new_eflags);
3167 }
3168 } else
3169#endif
3170 if (shift == 1) {
3171 /* 32 bits */
3172 POPL(ssp, sp, sp_mask, new_eip);
3173 POPL(ssp, sp, sp_mask, new_cs);
3174 new_cs &= 0xffff;
3175 if (is_iret) {
3176 POPL(ssp, sp, sp_mask, new_eflags);
3177#define LOG_GROUP LOG_GROUP_REM
3178#if defined(VBOX) && defined(DEBUG)
3179 Log(("iret: new CS %04X (old=%x)\n", new_cs, env->segs[R_CS].selector));
3180 Log(("iret: new EIP %08X\n", (uint32_t)new_eip));
3181 Log(("iret: new EFLAGS %08X\n", new_eflags));
3182 Log(("iret: EAX=%08x\n", (uint32_t)EAX));
3183#endif
3184 if (new_eflags & VM_MASK)
3185 goto return_to_vm86;
3186 }
3187#ifdef VBOX
3188 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
3189 {
3190 if ( !EMIsRawRing1Enabled(env->pVM)
3191 || env->segs[R_CS].selector == (new_cs & 0xfffc))
3192 {
3193 Log(("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc));
3194 new_cs = new_cs & 0xfffc;
3195 }
3196 else
3197 {
3198 /* Ugly assumption: assume a genuine switch to ring-1. */
3199 Log(("Genuine switch to ring-1 (iret)\n"));
3200 }
3201 }
3202 else if ((new_cs & 0x3) == 2 && (env->state & CPU_RAW_RING0) && EMIsRawRing1Enabled(env->pVM))
3203 {
3204 Log(("RPL 2 -> new_cs %04X -> %04X\n", new_cs, (new_cs & 0xfffc) | 1));
3205 new_cs = (new_cs & 0xfffc) | 1;
3206 }
3207#endif
3208 } else {
3209 /* 16 bits */
3210 POPW(ssp, sp, sp_mask, new_eip);
3211 POPW(ssp, sp, sp_mask, new_cs);
3212 if (is_iret)
3213 POPW(ssp, sp, sp_mask, new_eflags);
3214 }
3215 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
3216 new_cs, new_eip, shift, addend);
3217 LOG_PCALL_STATE(env);
3218 if ((new_cs & 0xfffc) == 0)
3219 {
3220#if defined(VBOX) && defined(DEBUG)
3221 Log(("(new_cs & 0xfffc) == 0\n"));
3222#endif
3223 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3224 }
3225 if (load_segment(&e1, &e2, new_cs) != 0)
3226 {
3227#if defined(VBOX) && defined(DEBUG)
3228 Log(("load_segment failed\n"));
3229#endif
3230 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3231 }
3232 if (!(e2 & DESC_S_MASK) ||
3233 !(e2 & DESC_CS_MASK))
3234 {
3235#if defined(VBOX) && defined(DEBUG)
3236 Log(("e2 mask %08x\n", e2));
3237#endif
3238 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3239 }
3240 cpl = env->hflags & HF_CPL_MASK;
3241 rpl = new_cs & 3;
3242 if (rpl < cpl)
3243 {
3244#if defined(VBOX) && defined(DEBUG)
3245 Log(("rpl < cpl (%d vs %d)\n", rpl, cpl));
3246#endif
3247 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3248 }
3249 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3250
3251 if (e2 & DESC_C_MASK) {
3252 if (dpl > rpl)
3253 {
3254#if defined(VBOX) && defined(DEBUG)
3255 Log(("dpl > rpl (%d vs %d)\n", dpl, rpl));
3256#endif
3257 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3258 }
3259 } else {
3260 if (dpl != rpl)
3261 {
3262#if defined(VBOX) && defined(DEBUG)
3263 Log(("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2));
3264#endif
3265 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3266 }
3267 }
3268 if (!(e2 & DESC_P_MASK))
3269 {
3270#if defined(VBOX) && defined(DEBUG)
3271 Log(("DESC_P_MASK e2=%08x\n", e2));
3272#endif
3273 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3274 }
3275
3276 sp += addend;
3277 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
3278 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
3279 /* return to same privilege level */
3280#ifdef VBOX
3281 if (!(e2 & DESC_A_MASK))
3282 e2 = set_segment_accessed(new_cs, e2);
3283#endif
3284 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3285 get_seg_base(e1, e2),
3286 get_seg_limit(e1, e2),
3287 e2);
3288 } else {
3289 /* return to different privilege level */
3290#ifdef TARGET_X86_64
3291 if (shift == 2) {
3292 POPQ(sp, new_esp);
3293 POPQ(sp, new_ss);
3294 new_ss &= 0xffff;
3295 } else
3296#endif
3297 if (shift == 1) {
3298 /* 32 bits */
3299 POPL(ssp, sp, sp_mask, new_esp);
3300 POPL(ssp, sp, sp_mask, new_ss);
3301 new_ss &= 0xffff;
3302 } else {
3303 /* 16 bits */
3304 POPW(ssp, sp, sp_mask, new_esp);
3305 POPW(ssp, sp, sp_mask, new_ss);
3306 }
3307 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
3308 new_ss, new_esp);
3309 if ((new_ss & 0xfffc) == 0) {
3310#ifdef TARGET_X86_64
3311 /* NULL ss is allowed in long mode if cpl != 3 */
3312 /* XXX: test CS64 ? */
3313 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
3314# ifdef VBOX
3315 if (!(e2 & DESC_A_MASK))
3316 e2 = set_segment_accessed(new_cs, e2);
3317# endif
3318 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3319 0, 0xffffffff,
3320 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3321 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
3322 DESC_W_MASK | DESC_A_MASK);
3323 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
3324 } else
3325#endif
3326 {
3327 raise_exception_err(EXCP0D_GPF, 0);
3328 }
3329 } else {
3330 if ((new_ss & 3) != rpl)
3331 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3332 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
3333 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3334 if (!(ss_e2 & DESC_S_MASK) ||
3335 (ss_e2 & DESC_CS_MASK) ||
3336 !(ss_e2 & DESC_W_MASK))
3337 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3338 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3339 if (dpl != rpl)
3340 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3341 if (!(ss_e2 & DESC_P_MASK))
3342 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3343#ifdef VBOX
3344 if (!(e2 & DESC_A_MASK))
3345 e2 = set_segment_accessed(new_cs, e2);
3346 if (!(ss_e2 & DESC_A_MASK))
3347 ss_e2 = set_segment_accessed(new_ss, ss_e2);
3348#endif
3349 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3350 get_seg_base(ss_e1, ss_e2),
3351 get_seg_limit(ss_e1, ss_e2),
3352 ss_e2);
3353 }
3354
3355 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3356 get_seg_base(e1, e2),
3357 get_seg_limit(e1, e2),
3358 e2);
3359 cpu_x86_set_cpl(env, rpl);
3360 sp = new_esp;
3361#ifdef TARGET_X86_64
3362 if (env->hflags & HF_CS64_MASK)
3363 sp_mask = -1;
3364 else
3365#endif
3366 sp_mask = get_sp_mask(ss_e2);
3367
3368 /* validate data segments */
3369 validate_seg(R_ES, rpl);
3370 validate_seg(R_DS, rpl);
3371 validate_seg(R_FS, rpl);
3372 validate_seg(R_GS, rpl);
3373
3374 sp += addend;
3375 }
3376 SET_ESP(sp, sp_mask);
3377 env->eip = new_eip;
3378 if (is_iret) {
3379 /* NOTE: 'cpl' is the _old_ CPL */
3380 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3381 if (cpl == 0)
3382#ifdef VBOX
3383 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
3384#else
3385 eflags_mask |= IOPL_MASK;
3386#endif
3387 iopl = (env->eflags >> IOPL_SHIFT) & 3;
3388 if (cpl <= iopl)
3389 eflags_mask |= IF_MASK;
3390 if (shift == 0)
3391 eflags_mask &= 0xffff;
3392 load_eflags(new_eflags, eflags_mask);
3393 }
3394 return;
3395
3396 return_to_vm86:
3397 POPL(ssp, sp, sp_mask, new_esp);
3398 POPL(ssp, sp, sp_mask, new_ss);
3399 POPL(ssp, sp, sp_mask, new_es);
3400 POPL(ssp, sp, sp_mask, new_ds);
3401 POPL(ssp, sp, sp_mask, new_fs);
3402 POPL(ssp, sp, sp_mask, new_gs);
3403
3404 /* modify processor state */
3405 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
3406 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
3407 load_seg_vm(R_CS, new_cs & 0xffff);
3408 cpu_x86_set_cpl(env, 3);
3409 load_seg_vm(R_SS, new_ss & 0xffff);
3410 load_seg_vm(R_ES, new_es & 0xffff);
3411 load_seg_vm(R_DS, new_ds & 0xffff);
3412 load_seg_vm(R_FS, new_fs & 0xffff);
3413 load_seg_vm(R_GS, new_gs & 0xffff);
3414
3415 env->eip = new_eip & 0xffff;
3416 ESP = new_esp;
3417}
3418
3419void helper_iret_protected(int shift, int next_eip)
3420{
3421 int tss_selector, type;
3422 uint32_t e1, e2;
3423
3424#ifdef VBOX
3425 e1 = e2 = 0; /** @todo Why do we do this? */
3426 remR3TrapClear(env->pVM);
3427#endif
3428
3429 /* specific case for TSS */
3430 if (env->eflags & NT_MASK) {
3431#ifdef TARGET_X86_64
3432 if (env->hflags & HF_LMA_MASK)
3433 raise_exception_err(EXCP0D_GPF, 0);
3434#endif
3435 tss_selector = lduw_kernel(env->tr.base + 0);
3436 if (tss_selector & 4)
3437 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3438 if (load_segment(&e1, &e2, tss_selector) != 0)
3439 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3440 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3441 /* NOTE: we check both segment and busy TSS */
3442 if (type != 3)
3443 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3444 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3445 } else {
3446 helper_ret_protected(shift, 1, 0);
3447 }
3448 env->hflags2 &= ~HF2_NMI_MASK;
3449}
3450
3451void helper_lret_protected(int shift, int addend)
3452{
3453 helper_ret_protected(shift, 0, addend);
3454}
3455
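/* SYSENTER: #GP(0) if IA32_SYSENTER_CS is zero. Otherwise switches to CPL 0
   with flat CS/SS selectors derived from SYSENTER_CS (SS = CS + 8), ESP/EIP
   taken from the SYSENTER_ESP/EIP MSRs, and VM/IF/RF cleared. In long mode
   a 64-bit code segment is loaded. */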
3456void helper_sysenter(void)
3457{
3458 if (env->sysenter_cs == 0) {
3459 raise_exception_err(EXCP0D_GPF, 0);
3460 }
3461 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3462 cpu_x86_set_cpl(env, 0);
3463
3464#ifdef TARGET_X86_64
3465 if (env->hflags & HF_LMA_MASK) {
3466 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3467 0, 0xffffffff,
3468 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3469 DESC_S_MASK |
3470 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3471 } else
3472#endif
3473 {
3474 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3475 0, 0xffffffff,
3476 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3477 DESC_S_MASK |
3478 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3479 }
3480 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3481 0, 0xffffffff,
3482 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3483 DESC_S_MASK |
3484 DESC_W_MASK | DESC_A_MASK);
3485 ESP = env->sysenter_esp;
3486 EIP = env->sysenter_eip;
3487}
3488
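/* SYSEXIT: #GP(0) if IA32_SYSENTER_CS is zero or CPL != 0. Returns to CPL 3
   with CS/SS derived from SYSENTER_CS (+16/+24, or +32/+40 for a 64-bit
   return), EIP taken from EDX and ESP from ECX. */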
3489void helper_sysexit(int dflag)
3490{
3491 int cpl;
3492
3493 cpl = env->hflags & HF_CPL_MASK;
3494 if (env->sysenter_cs == 0 || cpl != 0) {
3495 raise_exception_err(EXCP0D_GPF, 0);
3496 }
3497 cpu_x86_set_cpl(env, 3);
3498#ifdef TARGET_X86_64
3499 if (dflag == 2) {
3500 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3501 0, 0xffffffff,
3502 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3503 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3504 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3505 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3506 0, 0xffffffff,
3507 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3508 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3509 DESC_W_MASK | DESC_A_MASK);
3510 } else
3511#endif
3512 {
3513 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3514 0, 0xffffffff,
3515 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3516 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3517 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3518 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3519 0, 0xffffffff,
3520 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3521 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3522 DESC_W_MASK | DESC_A_MASK);
3523 }
3524 ESP = ECX;
3525 EIP = EDX;
3526}
3527
3528#if defined(CONFIG_USER_ONLY)
3529target_ulong helper_read_crN(int reg)
3530{
3531 return 0;
3532}
3533
3534void helper_write_crN(int reg, target_ulong t0)
3535{
3536}
3537
3538void helper_movl_drN_T0(int reg, target_ulong t0)
3539{
3540}
3541#else
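/* Control register access helpers. CR8 is mapped to the local APIC TPR
   unless SVM virtual interrupt masking is active, in which case the shadow
   v_tpr is used; writes to CR0/CR3/CR4 go through the cpu_x86_update_crN
   helpers so that dependent state and the TLB are refreshed. */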
3542target_ulong helper_read_crN(int reg)
3543{
3544 target_ulong val;
3545
3546 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3547 switch(reg) {
3548 default:
3549 val = env->cr[reg];
3550 break;
3551 case 8:
3552 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3553#ifndef VBOX
3554 val = cpu_get_apic_tpr(env->apic_state);
3555#else /* VBOX */
3556 val = cpu_get_apic_tpr(env);
3557#endif /* VBOX */
3558 } else {
3559 val = env->v_tpr;
3560 }
3561 break;
3562 }
3563 return val;
3564}
3565
3566void helper_write_crN(int reg, target_ulong t0)
3567{
3568 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3569 switch(reg) {
3570 case 0:
3571 cpu_x86_update_cr0(env, t0);
3572 break;
3573 case 3:
3574 cpu_x86_update_cr3(env, t0);
3575 break;
3576 case 4:
3577 cpu_x86_update_cr4(env, t0);
3578 break;
3579 case 8:
3580 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3581#ifndef VBOX
3582 cpu_set_apic_tpr(env->apic_state, t0);
3583#else /* VBOX */
3584 cpu_set_apic_tpr(env, t0);
3585#endif /* VBOX */
3586 }
3587 env->v_tpr = t0 & 0x0f;
3588 break;
3589 default:
3590 env->cr[reg] = t0;
3591 break;
3592 }
3593}
3594
3595void helper_movl_drN_T0(int reg, target_ulong t0)
3596{
3597 int i;
3598
3599 if (reg < 4) {
3600 hw_breakpoint_remove(env, reg);
3601 env->dr[reg] = t0;
3602 hw_breakpoint_insert(env, reg);
3603 } else if (reg == 7) {
3604 for (i = 0; i < 4; i++)
3605 hw_breakpoint_remove(env, i);
3606 env->dr[7] = t0;
3607 for (i = 0; i < 4; i++)
3608 hw_breakpoint_insert(env, i);
3609 } else
3610 env->dr[reg] = t0;
3611}
3612#endif
3613
3614void helper_lmsw(target_ulong t0)
3615{
3616 /* only the 4 lower bits of CR0 are modified; PE cannot be cleared
3617 once it has been set to one. */
3618 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3619 helper_write_crN(0, t0);
3620}
3621
3622void helper_clts(void)
3623{
3624 env->cr[0] &= ~CR0_TS_MASK;
3625 env->hflags &= ~HF_TS_MASK;
3626}
3627
3628void helper_invlpg(target_ulong addr)
3629{
3630 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3631 tlb_flush_page(env, addr);
3632}
3633
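/* RDTSC: #GP(0) when CR4.TSD is set and CPL != 0; otherwise returns the
   (SVM-offset adjusted) time stamp counter in EDX:EAX. */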
3634void helper_rdtsc(void)
3635{
3636 uint64_t val;
3637
3638 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3639 raise_exception(EXCP0D_GPF);
3640 }
3641 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3642
3643 val = cpu_get_tsc(env) + env->tsc_offset;
3644 EAX = (uint32_t)(val);
3645 EDX = (uint32_t)(val >> 32);
3646}
3647
3648void helper_rdtscp(void)
3649{
3650 helper_rdtsc();
3651#ifndef VBOX
3652 ECX = (uint32_t)(env->tsc_aux);
3653#else /* VBOX */
3654 uint64_t val;
3655 if (cpu_rdmsr(env, MSR_K8_TSC_AUX, &val) == 0)
3656 ECX = (uint32_t)(val);
3657 else
3658 ECX = 0;
3659#endif /* VBOX */
3660}
3661
3662void helper_rdpmc(void)
3663{
3664#ifdef VBOX
3665 /* If X86_CR4_PCE is *not* set, then CPL must be zero. */
3666 if (!(env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3667 raise_exception(EXCP0D_GPF);
3668 }
3669 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
3670 EAX = 0;
3671 EDX = 0;
3672#else /* !VBOX */
3673 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3674 raise_exception(EXCP0D_GPF);
3675 }
3676 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3677
3678 /* currently unimplemented */
3679 raise_exception_err(EXCP06_ILLOP, 0);
3680#endif /* !VBOX */
3681}
3682
3683#if defined(CONFIG_USER_ONLY)
3684void helper_wrmsr(void)
3685{
3686}
3687
3688void helper_rdmsr(void)
3689{
3690}
3691#else
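/* WRMSR/RDMSR: the MSR index is taken from ECX and the value from/into
   EDX:EAX. The VBOX build forwards most MSR accesses to CPUM via
   cpu_wrmsr/cpu_rdmsr instead of (or in addition to) the local emulation
   below. */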
3692void helper_wrmsr(void)
3693{
3694 uint64_t val;
3695
3696 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3697
3698 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3699
3700 switch((uint32_t)ECX) {
3701 case MSR_IA32_SYSENTER_CS:
3702 env->sysenter_cs = val & 0xffff;
3703 break;
3704 case MSR_IA32_SYSENTER_ESP:
3705 env->sysenter_esp = val;
3706 break;
3707 case MSR_IA32_SYSENTER_EIP:
3708 env->sysenter_eip = val;
3709 break;
3710 case MSR_IA32_APICBASE:
3711# ifndef VBOX /* The CPUMSetGuestMsr call below does this now. */
3712 cpu_set_apic_base(env->apic_state, val);
3713# endif
3714 break;
3715 case MSR_EFER:
3716 {
3717 uint64_t update_mask;
3718 update_mask = 0;
3719 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3720 update_mask |= MSR_EFER_SCE;
3721 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3722 update_mask |= MSR_EFER_LME;
3723 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3724 update_mask |= MSR_EFER_FFXSR;
3725 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3726 update_mask |= MSR_EFER_NXE;
3727 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3728 update_mask |= MSR_EFER_SVME;
3731 cpu_load_efer(env, (env->efer & ~update_mask) |
3732 (val & update_mask));
3733 }
3734 break;
3735 case MSR_STAR:
3736 env->star = val;
3737 break;
3738 case MSR_PAT:
3739 env->pat = val;
3740 break;
3741 case MSR_VM_HSAVE_PA:
3742 env->vm_hsave = val;
3743 break;
3744#ifdef TARGET_X86_64
3745 case MSR_LSTAR:
3746 env->lstar = val;
3747 break;
3748 case MSR_CSTAR:
3749 env->cstar = val;
3750 break;
3751 case MSR_FMASK:
3752 env->fmask = val;
3753 break;
3754 case MSR_FSBASE:
3755 env->segs[R_FS].base = val;
3756 break;
3757 case MSR_GSBASE:
3758 env->segs[R_GS].base = val;
3759 break;
3760 case MSR_KERNELGSBASE:
3761 env->kernelgsbase = val;
3762 break;
3763#endif
3764# ifndef VBOX
3765 case MSR_MTRRphysBase(0):
3766 case MSR_MTRRphysBase(1):
3767 case MSR_MTRRphysBase(2):
3768 case MSR_MTRRphysBase(3):
3769 case MSR_MTRRphysBase(4):
3770 case MSR_MTRRphysBase(5):
3771 case MSR_MTRRphysBase(6):
3772 case MSR_MTRRphysBase(7):
3773 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3774 break;
3775 case MSR_MTRRphysMask(0):
3776 case MSR_MTRRphysMask(1):
3777 case MSR_MTRRphysMask(2):
3778 case MSR_MTRRphysMask(3):
3779 case MSR_MTRRphysMask(4):
3780 case MSR_MTRRphysMask(5):
3781 case MSR_MTRRphysMask(6):
3782 case MSR_MTRRphysMask(7):
3783 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3784 break;
3785 case MSR_MTRRfix64K_00000:
3786 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3787 break;
3788 case MSR_MTRRfix16K_80000:
3789 case MSR_MTRRfix16K_A0000:
3790 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3791 break;
3792 case MSR_MTRRfix4K_C0000:
3793 case MSR_MTRRfix4K_C8000:
3794 case MSR_MTRRfix4K_D0000:
3795 case MSR_MTRRfix4K_D8000:
3796 case MSR_MTRRfix4K_E0000:
3797 case MSR_MTRRfix4K_E8000:
3798 case MSR_MTRRfix4K_F0000:
3799 case MSR_MTRRfix4K_F8000:
3800 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3801 break;
3802 case MSR_MTRRdefType:
3803 env->mtrr_deftype = val;
3804 break;
3805 case MSR_MCG_STATUS:
3806 env->mcg_status = val;
3807 break;
3808 case MSR_MCG_CTL:
3809 if ((env->mcg_cap & MCG_CTL_P)
3810 && (val == 0 || val == ~(uint64_t)0))
3811 env->mcg_ctl = val;
3812 break;
3813 case MSR_TSC_AUX:
3814 env->tsc_aux = val;
3815 break;
3816# endif /* !VBOX */
3817 default:
3818# ifndef VBOX
3819 if ((uint32_t)ECX >= MSR_MC0_CTL
3820 && (uint32_t)ECX < MSR_MC0_CTL + 4 * (env->mcg_cap & 0xff)) {
3821 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3822 if ((offset & 0x3) != 0
3823 || (val == 0 || val == ~(uint64_t)0))
3824 env->mce_banks[offset] = val;
3825 break;
3826 }
3827 /* XXX: exception ? */
3828# endif
3829 break;
3830 }
3831
3832# ifdef VBOX
3833 /* call CPUM. */
3834 if (cpu_wrmsr(env, (uint32_t)ECX, val) != 0)
3835 {
3836 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3837 }
3838# endif
3839}
3840
3841void helper_rdmsr(void)
3842{
3843 uint64_t val;
3844
3845 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3846
3847 switch((uint32_t)ECX) {
3848 case MSR_IA32_SYSENTER_CS:
3849 val = env->sysenter_cs;
3850 break;
3851 case MSR_IA32_SYSENTER_ESP:
3852 val = env->sysenter_esp;
3853 break;
3854 case MSR_IA32_SYSENTER_EIP:
3855 val = env->sysenter_eip;
3856 break;
3857 case MSR_IA32_APICBASE:
3858#ifndef VBOX
3859 val = cpu_get_apic_base(env->apic_state);
3860#else /* VBOX */
3861 val = cpu_get_apic_base(env);
3862#endif /* VBOX */
3863 break;
3864 case MSR_EFER:
3865 val = env->efer;
3866 break;
3867 case MSR_STAR:
3868 val = env->star;
3869 break;
3870 case MSR_PAT:
3871 val = env->pat;
3872 break;
3873 case MSR_VM_HSAVE_PA:
3874 val = env->vm_hsave;
3875 break;
3876# ifndef VBOX /* forward to CPUMQueryGuestMsr. */
3877 case MSR_IA32_PERF_STATUS:
3878 /* tsc_increment_by_tick */
3879 val = 1000ULL;
3880 /* CPU multiplier */
3881 val |= (((uint64_t)4ULL) << 40);
3882 break;
3883# endif /* !VBOX */
3884#ifdef TARGET_X86_64
3885 case MSR_LSTAR:
3886 val = env->lstar;
3887 break;
3888 case MSR_CSTAR:
3889 val = env->cstar;
3890 break;
3891 case MSR_FMASK:
3892 val = env->fmask;
3893 break;
3894 case MSR_FSBASE:
3895 val = env->segs[R_FS].base;
3896 break;
3897 case MSR_GSBASE:
3898 val = env->segs[R_GS].base;
3899 break;
3900 case MSR_KERNELGSBASE:
3901 val = env->kernelgsbase;
3902 break;
3903# ifndef VBOX
3904 case MSR_TSC_AUX:
3905 val = env->tsc_aux;
3906 break;
3907# endif /*!VBOX*/
3908#endif
3909# ifndef VBOX
3910 case MSR_MTRRphysBase(0):
3911 case MSR_MTRRphysBase(1):
3912 case MSR_MTRRphysBase(2):
3913 case MSR_MTRRphysBase(3):
3914 case MSR_MTRRphysBase(4):
3915 case MSR_MTRRphysBase(5):
3916 case MSR_MTRRphysBase(6):
3917 case MSR_MTRRphysBase(7):
3918 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
3919 break;
3920 case MSR_MTRRphysMask(0):
3921 case MSR_MTRRphysMask(1):
3922 case MSR_MTRRphysMask(2):
3923 case MSR_MTRRphysMask(3):
3924 case MSR_MTRRphysMask(4):
3925 case MSR_MTRRphysMask(5):
3926 case MSR_MTRRphysMask(6):
3927 case MSR_MTRRphysMask(7):
3928 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
3929 break;
3930 case MSR_MTRRfix64K_00000:
3931 val = env->mtrr_fixed[0];
3932 break;
3933 case MSR_MTRRfix16K_80000:
3934 case MSR_MTRRfix16K_A0000:
3935 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
3936 break;
3937 case MSR_MTRRfix4K_C0000:
3938 case MSR_MTRRfix4K_C8000:
3939 case MSR_MTRRfix4K_D0000:
3940 case MSR_MTRRfix4K_D8000:
3941 case MSR_MTRRfix4K_E0000:
3942 case MSR_MTRRfix4K_E8000:
3943 case MSR_MTRRfix4K_F0000:
3944 case MSR_MTRRfix4K_F8000:
3945 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
3946 break;
3947 case MSR_MTRRdefType:
3948 val = env->mtrr_deftype;
3949 break;
3950 case MSR_MTRRcap:
3951 if (env->cpuid_features & CPUID_MTRR)
3952 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
3953 else
3954 /* XXX: exception ? */
3955 val = 0;
3956 break;
3957 case MSR_MCG_CAP:
3958 val = env->mcg_cap;
3959 break;
3960 case MSR_MCG_CTL:
3961 if (env->mcg_cap & MCG_CTL_P)
3962 val = env->mcg_ctl;
3963 else
3964 val = 0;
3965 break;
3966 case MSR_MCG_STATUS:
3967 val = env->mcg_status;
3968 break;
3969# endif /* !VBOX */
3970 default:
3971# ifndef VBOX
3972 if ((uint32_t)ECX >= MSR_MC0_CTL
3973            && (uint32_t)ECX < MSR_MC0_CTL + 4 * (env->mcg_cap & 0xff)) {
3974 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3975 val = env->mce_banks[offset];
3976 break;
3977 }
3978 /* XXX: exception ? */
3979 val = 0;
3980# else /* VBOX */
3981 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
3982 {
3983 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3984 val = 0;
3985 }
3986# endif /* VBOX */
3987 break;
3988 }
3989 EAX = (uint32_t)(val);
3990 EDX = (uint32_t)(val >> 32);
3991
3992# ifdef VBOX_STRICT
3993 if ((uint32_t)ECX != MSR_IA32_TSC) {
3994 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
3995 val = 0;
3996 AssertMsg(val == RT_MAKE_U64(EAX, EDX), ("idMsr=%#x val=%#llx eax:edx=%#llx\n", (uint32_t)ECX, val, RT_MAKE_U64(EAX, EDX)));
3997 }
3998# endif
3999}
4000#endif
4001
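/*
 * Selector inspection helpers for LSL, LAR, VERR and VERW.  Each one
 * recomputes eflags, validates the selector against the descriptor's type
 * and privilege (DPL vs. CPL/RPL), and reports the outcome through ZF in
 * CC_SRC: set on success, cleared on failure.  The switch statements below
 * enumerate the system descriptor types the respective instruction accepts
 * (TSS and LDT types for LSL, additionally gate types for LAR).
 */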
4002target_ulong helper_lsl(target_ulong selector1)
4003{
4004 unsigned int limit;
4005 uint32_t e1, e2, eflags, selector;
4006 int rpl, dpl, cpl, type;
4007
4008 selector = selector1 & 0xffff;
4009 eflags = helper_cc_compute_all(CC_OP);
4010 if ((selector & 0xfffc) == 0)
4011 goto fail;
4012 if (load_segment(&e1, &e2, selector) != 0)
4013 goto fail;
4014 rpl = selector & 3;
4015 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4016 cpl = env->hflags & HF_CPL_MASK;
4017 if (e2 & DESC_S_MASK) {
4018 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
4019 /* conforming */
4020 } else {
4021 if (dpl < cpl || dpl < rpl)
4022 goto fail;
4023 }
4024 } else {
4025 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
4026 switch(type) {
4027 case 1:
4028 case 2:
4029 case 3:
4030 case 9:
4031 case 11:
4032 break;
4033 default:
4034 goto fail;
4035 }
4036 if (dpl < cpl || dpl < rpl) {
4037 fail:
4038 CC_SRC = eflags & ~CC_Z;
4039 return 0;
4040 }
4041 }
4042 limit = get_seg_limit(e1, e2);
4043 CC_SRC = eflags | CC_Z;
4044 return limit;
4045}
4046
4047target_ulong helper_lar(target_ulong selector1)
4048{
4049 uint32_t e1, e2, eflags, selector;
4050 int rpl, dpl, cpl, type;
4051
4052 selector = selector1 & 0xffff;
4053 eflags = helper_cc_compute_all(CC_OP);
4054 if ((selector & 0xfffc) == 0)
4055 goto fail;
4056 if (load_segment(&e1, &e2, selector) != 0)
4057 goto fail;
4058 rpl = selector & 3;
4059 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4060 cpl = env->hflags & HF_CPL_MASK;
4061 if (e2 & DESC_S_MASK) {
4062 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
4063 /* conforming */
4064 } else {
4065 if (dpl < cpl || dpl < rpl)
4066 goto fail;
4067 }
4068 } else {
4069 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
4070 switch(type) {
4071 case 1:
4072 case 2:
4073 case 3:
4074 case 4:
4075 case 5:
4076 case 9:
4077 case 11:
4078 case 12:
4079 break;
4080 default:
4081 goto fail;
4082 }
4083 if (dpl < cpl || dpl < rpl) {
4084 fail:
4085 CC_SRC = eflags & ~CC_Z;
4086 return 0;
4087 }
4088 }
4089 CC_SRC = eflags | CC_Z;
4090 return e2 & 0x00f0ff00;
4091}
4092
4093void helper_verr(target_ulong selector1)
4094{
4095 uint32_t e1, e2, eflags, selector;
4096 int rpl, dpl, cpl;
4097
4098 selector = selector1 & 0xffff;
4099 eflags = helper_cc_compute_all(CC_OP);
4100 if ((selector & 0xfffc) == 0)
4101 goto fail;
4102 if (load_segment(&e1, &e2, selector) != 0)
4103 goto fail;
4104 if (!(e2 & DESC_S_MASK))
4105 goto fail;
4106 rpl = selector & 3;
4107 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4108 cpl = env->hflags & HF_CPL_MASK;
4109 if (e2 & DESC_CS_MASK) {
4110 if (!(e2 & DESC_R_MASK))
4111 goto fail;
4112 if (!(e2 & DESC_C_MASK)) {
4113 if (dpl < cpl || dpl < rpl)
4114 goto fail;
4115 }
4116 } else {
4117 if (dpl < cpl || dpl < rpl) {
4118 fail:
4119 CC_SRC = eflags & ~CC_Z;
4120 return;
4121 }
4122 }
4123 CC_SRC = eflags | CC_Z;
4124}
4125
4126void helper_verw(target_ulong selector1)
4127{
4128 uint32_t e1, e2, eflags, selector;
4129 int rpl, dpl, cpl;
4130
4131 selector = selector1 & 0xffff;
4132 eflags = helper_cc_compute_all(CC_OP);
4133 if ((selector & 0xfffc) == 0)
4134 goto fail;
4135 if (load_segment(&e1, &e2, selector) != 0)
4136 goto fail;
4137 if (!(e2 & DESC_S_MASK))
4138 goto fail;
4139 rpl = selector & 3;
4140 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4141 cpl = env->hflags & HF_CPL_MASK;
4142 if (e2 & DESC_CS_MASK) {
4143 goto fail;
4144 } else {
4145 if (dpl < cpl || dpl < rpl)
4146 goto fail;
4147 if (!(e2 & DESC_W_MASK)) {
4148 fail:
4149 CC_SRC = eflags & ~CC_Z;
4150 return;
4151 }
4152 }
4153 CC_SRC = eflags | CC_Z;
4154}
4155
4156/* x87 FPU helpers */
4157
4158static void fpu_set_exception(int mask)
4159{
4160 env->fpus |= mask;
4161 if (env->fpus & (~env->fpuc & FPUC_EM))
4162 env->fpus |= FPUS_SE | FPUS_B;
4163}
4164
4165static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4166{
4167 if (b == 0.0)
4168 fpu_set_exception(FPUS_ZE);
4169 return a / b;
4170}
4171
4172static void fpu_raise_exception(void)
4173{
4174 if (env->cr[0] & CR0_NE_MASK) {
4175 raise_exception(EXCP10_COPR);
4176 }
4177#if !defined(CONFIG_USER_ONLY)
4178 else {
4179 cpu_set_ferr(env);
4180 }
4181#endif
4182}
4183
4184void helper_flds_FT0(uint32_t val)
4185{
4186 union {
4187 float32 f;
4188 uint32_t i;
4189 } u;
4190 u.i = val;
4191 FT0 = float32_to_floatx(u.f, &env->fp_status);
4192}
4193
4194void helper_fldl_FT0(uint64_t val)
4195{
4196 union {
4197 float64 f;
4198 uint64_t i;
4199 } u;
4200 u.i = val;
4201 FT0 = float64_to_floatx(u.f, &env->fp_status);
4202}
4203
4204void helper_fildl_FT0(int32_t val)
4205{
4206 FT0 = int32_to_floatx(val, &env->fp_status);
4207}
4208
4209void helper_flds_ST0(uint32_t val)
4210{
4211 int new_fpstt;
4212 union {
4213 float32 f;
4214 uint32_t i;
4215 } u;
4216 new_fpstt = (env->fpstt - 1) & 7;
4217 u.i = val;
4218 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4219 env->fpstt = new_fpstt;
4220 env->fptags[new_fpstt] = 0; /* validate stack entry */
4221}
4222
4223void helper_fldl_ST0(uint64_t val)
4224{
4225 int new_fpstt;
4226 union {
4227 float64 f;
4228 uint64_t i;
4229 } u;
4230 new_fpstt = (env->fpstt - 1) & 7;
4231 u.i = val;
4232 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4233 env->fpstt = new_fpstt;
4234 env->fptags[new_fpstt] = 0; /* validate stack entry */
4235}
4236
4237void helper_fildl_ST0(int32_t val)
4238{
4239 int new_fpstt;
4240 new_fpstt = (env->fpstt - 1) & 7;
4241 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4242 env->fpstt = new_fpstt;
4243 env->fptags[new_fpstt] = 0; /* validate stack entry */
4244}
4245
4246void helper_fildll_ST0(int64_t val)
4247{
4248 int new_fpstt;
4249 new_fpstt = (env->fpstt - 1) & 7;
4250 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4251 env->fpstt = new_fpstt;
4252 env->fptags[new_fpstt] = 0; /* validate stack entry */
4253}
4254
4255#ifndef VBOX
4256uint32_t helper_fsts_ST0(void)
4257#else
4258RTCCUINTREG helper_fsts_ST0(void)
4259#endif
4260{
4261 union {
4262 float32 f;
4263 uint32_t i;
4264 } u;
4265 u.f = floatx_to_float32(ST0, &env->fp_status);
4266 return u.i;
4267}
4268
4269uint64_t helper_fstl_ST0(void)
4270{
4271 union {
4272 float64 f;
4273 uint64_t i;
4274 } u;
4275 u.f = floatx_to_float64(ST0, &env->fp_status);
4276 return u.i;
4277}
4278
4279#ifndef VBOX
4280int32_t helper_fist_ST0(void)
4281#else
4282RTCCINTREG helper_fist_ST0(void)
4283#endif
4284{
4285 int32_t val;
4286 val = floatx_to_int32(ST0, &env->fp_status);
4287 if (val != (int16_t)val)
4288 val = -32768;
4289 return val;
4290}
4291
4292#ifndef VBOX
4293int32_t helper_fistl_ST0(void)
4294#else
4295RTCCINTREG helper_fistl_ST0(void)
4296#endif
4297{
4298 int32_t val;
4299 val = floatx_to_int32(ST0, &env->fp_status);
4300 return val;
4301}
4302
4303int64_t helper_fistll_ST0(void)
4304{
4305 int64_t val;
4306 val = floatx_to_int64(ST0, &env->fp_status);
4307 return val;
4308}
4309
4310#ifndef VBOX
4311int32_t helper_fistt_ST0(void)
4312#else
4313RTCCINTREG helper_fistt_ST0(void)
4314#endif
4315{
4316 int32_t val;
4317 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4318 if (val != (int16_t)val)
4319 val = -32768;
4320 return val;
4321}
4322
4323#ifndef VBOX
4324int32_t helper_fisttl_ST0(void)
4325#else
4326RTCCINTREG helper_fisttl_ST0(void)
4327#endif
4328{
4329 int32_t val;
4330 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4331 return val;
4332}
4333
4334int64_t helper_fisttll_ST0(void)
4335{
4336 int64_t val;
4337 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4338 return val;
4339}
4340
4341void helper_fldt_ST0(target_ulong ptr)
4342{
4343 int new_fpstt;
4344 new_fpstt = (env->fpstt - 1) & 7;
4345 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4346 env->fpstt = new_fpstt;
4347 env->fptags[new_fpstt] = 0; /* validate stack entry */
4348}
4349
4350void helper_fstt_ST0(target_ulong ptr)
4351{
4352 helper_fstt(ST0, ptr);
4353}
4354
4355void helper_fpush(void)
4356{
4357 fpush();
4358}
4359
4360void helper_fpop(void)
4361{
4362 fpop();
4363}
4364
4365void helper_fdecstp(void)
4366{
4367 env->fpstt = (env->fpstt - 1) & 7;
4368 env->fpus &= (~0x4700);
4369}
4370
4371void helper_fincstp(void)
4372{
4373 env->fpstt = (env->fpstt + 1) & 7;
4374 env->fpus &= (~0x4700);
4375}
4376
4377/* FPU move */
4378
4379void helper_ffree_STN(int st_index)
4380{
4381 env->fptags[(env->fpstt + st_index) & 7] = 1;
4382}
4383
4384void helper_fmov_ST0_FT0(void)
4385{
4386 ST0 = FT0;
4387}
4388
4389void helper_fmov_FT0_STN(int st_index)
4390{
4391 FT0 = ST(st_index);
4392}
4393
4394void helper_fmov_ST0_STN(int st_index)
4395{
4396 ST0 = ST(st_index);
4397}
4398
4399void helper_fmov_STN_ST0(int st_index)
4400{
4401 ST(st_index) = ST0;
4402}
4403
4404void helper_fxchg_ST0_STN(int st_index)
4405{
4406 CPU86_LDouble tmp;
4407 tmp = ST(st_index);
4408 ST(st_index) = ST0;
4409 ST0 = tmp;
4410}
4411
4412/* FPU operations */
4413
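/* fcom_ccval is indexed by the softfloat comparison result + 1
   (less, equal, greater, unordered) and yields the x87 condition bits to
   merge into FPUS: C0 = 0x0100, C3 = 0x4000, none, C3|C2|C0 = 0x4500.
   fcomi_ccval further below maps the same results onto eflags instead. */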
4414static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
4415
4416void helper_fcom_ST0_FT0(void)
4417{
4418 int ret;
4419
4420 ret = floatx_compare(ST0, FT0, &env->fp_status);
4421 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4422}
4423
4424void helper_fucom_ST0_FT0(void)
4425{
4426 int ret;
4427
4428 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4429 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret+ 1];
4430}
4431
4432static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
4433
4434void helper_fcomi_ST0_FT0(void)
4435{
4436 int eflags;
4437 int ret;
4438
4439 ret = floatx_compare(ST0, FT0, &env->fp_status);
4440 eflags = helper_cc_compute_all(CC_OP);
4441 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4442 CC_SRC = eflags;
4443}
4444
4445void helper_fucomi_ST0_FT0(void)
4446{
4447 int eflags;
4448 int ret;
4449
4450 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4451 eflags = helper_cc_compute_all(CC_OP);
4452 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4453 CC_SRC = eflags;
4454}
4455
4456void helper_fadd_ST0_FT0(void)
4457{
4458 ST0 += FT0;
4459}
4460
4461void helper_fmul_ST0_FT0(void)
4462{
4463 ST0 *= FT0;
4464}
4465
4466void helper_fsub_ST0_FT0(void)
4467{
4468 ST0 -= FT0;
4469}
4470
4471void helper_fsubr_ST0_FT0(void)
4472{
4473 ST0 = FT0 - ST0;
4474}
4475
4476void helper_fdiv_ST0_FT0(void)
4477{
4478 ST0 = helper_fdiv(ST0, FT0);
4479}
4480
4481void helper_fdivr_ST0_FT0(void)
4482{
4483 ST0 = helper_fdiv(FT0, ST0);
4484}
4485
4486/* fp operations between STN and ST0 */
4487
4488void helper_fadd_STN_ST0(int st_index)
4489{
4490 ST(st_index) += ST0;
4491}
4492
4493void helper_fmul_STN_ST0(int st_index)
4494{
4495 ST(st_index) *= ST0;
4496}
4497
4498void helper_fsub_STN_ST0(int st_index)
4499{
4500 ST(st_index) -= ST0;
4501}
4502
4503void helper_fsubr_STN_ST0(int st_index)
4504{
4505 CPU86_LDouble *p;
4506 p = &ST(st_index);
4507 *p = ST0 - *p;
4508}
4509
4510void helper_fdiv_STN_ST0(int st_index)
4511{
4512 CPU86_LDouble *p;
4513 p = &ST(st_index);
4514 *p = helper_fdiv(*p, ST0);
4515}
4516
4517void helper_fdivr_STN_ST0(int st_index)
4518{
4519 CPU86_LDouble *p;
4520 p = &ST(st_index);
4521 *p = helper_fdiv(ST0, *p);
4522}
4523
4524/* misc FPU operations */
4525void helper_fchs_ST0(void)
4526{
4527 ST0 = floatx_chs(ST0);
4528}
4529
4530void helper_fabs_ST0(void)
4531{
4532 ST0 = floatx_abs(ST0);
4533}
4534
4535void helper_fld1_ST0(void)
4536{
4537 ST0 = f15rk[1];
4538}
4539
4540void helper_fldl2t_ST0(void)
4541{
4542 ST0 = f15rk[6];
4543}
4544
4545void helper_fldl2e_ST0(void)
4546{
4547 ST0 = f15rk[5];
4548}
4549
4550void helper_fldpi_ST0(void)
4551{
4552 ST0 = f15rk[2];
4553}
4554
4555void helper_fldlg2_ST0(void)
4556{
4557 ST0 = f15rk[3];
4558}
4559
4560void helper_fldln2_ST0(void)
4561{
4562 ST0 = f15rk[4];
4563}
4564
4565void helper_fldz_ST0(void)
4566{
4567 ST0 = f15rk[0];
4568}
4569
4570void helper_fldz_FT0(void)
4571{
4572 FT0 = f15rk[0];
4573}
4574
4575#ifndef VBOX
4576uint32_t helper_fnstsw(void)
4577#else
4578RTCCUINTREG helper_fnstsw(void)
4579#endif
4580{
4581 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4582}
4583
4584#ifndef VBOX
4585uint32_t helper_fnstcw(void)
4586#else
4587RTCCUINTREG helper_fnstcw(void)
4588#endif
4589{
4590 return env->fpuc;
4591}
4592
4593static void update_fp_status(void)
4594{
4595 int rnd_type;
4596
4597 /* set rounding mode */
4598 switch(env->fpuc & RC_MASK) {
4599 default:
4600 case RC_NEAR:
4601 rnd_type = float_round_nearest_even;
4602 break;
4603 case RC_DOWN:
4604 rnd_type = float_round_down;
4605 break;
4606 case RC_UP:
4607 rnd_type = float_round_up;
4608 break;
4609 case RC_CHOP:
4610 rnd_type = float_round_to_zero;
4611 break;
4612 }
4613 set_float_rounding_mode(rnd_type, &env->fp_status);
4614#ifdef FLOATX80
4615 switch((env->fpuc >> 8) & 3) {
4616 case 0:
4617 rnd_type = 32;
4618 break;
4619 case 2:
4620 rnd_type = 64;
4621 break;
4622 case 3:
4623 default:
4624 rnd_type = 80;
4625 break;
4626 }
4627 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4628#endif
4629}
4630
4631void helper_fldcw(uint32_t val)
4632{
4633 env->fpuc = val;
4634 update_fp_status();
4635}
4636
4637void helper_fclex(void)
4638{
4639 env->fpus &= 0x7f00;
4640}
4641
4642void helper_fwait(void)
4643{
4644 if (env->fpus & FPUS_SE)
4645 fpu_raise_exception();
4646}
4647
4648void helper_fninit(void)
4649{
4650 env->fpus = 0;
4651 env->fpstt = 0;
4652 env->fpuc = 0x37f;
4653 env->fptags[0] = 1;
4654 env->fptags[1] = 1;
4655 env->fptags[2] = 1;
4656 env->fptags[3] = 1;
4657 env->fptags[4] = 1;
4658 env->fptags[5] = 1;
4659 env->fptags[6] = 1;
4660 env->fptags[7] = 1;
4661}
4662
4663/* BCD ops */
4664
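/* FBLD and FBST use the ten-byte packed BCD format: bytes 0..8 hold 18
   decimal digits, two per byte with the low digit in the low nibble, and
   bit 7 of byte 9 is the sign.  For example, the value 1234 is stored as
   34 12 00 ... 00 with a zero sign byte.  The loops below convert between
   that format and a 64-bit binary integer. */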
4665void helper_fbld_ST0(target_ulong ptr)
4666{
4667 CPU86_LDouble tmp;
4668 uint64_t val;
4669 unsigned int v;
4670 int i;
4671
4672 val = 0;
4673 for(i = 8; i >= 0; i--) {
4674 v = ldub(ptr + i);
4675 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4676 }
4677 tmp = val;
4678 if (ldub(ptr + 9) & 0x80)
4679 tmp = -tmp;
4680 fpush();
4681 ST0 = tmp;
4682}
4683
4684void helper_fbst_ST0(target_ulong ptr)
4685{
4686 int v;
4687 target_ulong mem_ref, mem_end;
4688 int64_t val;
4689
4690 val = floatx_to_int64(ST0, &env->fp_status);
4691 mem_ref = ptr;
4692 mem_end = mem_ref + 9;
4693 if (val < 0) {
4694 stb(mem_end, 0x80);
4695 val = -val;
4696 } else {
4697 stb(mem_end, 0x00);
4698 }
4699 while (mem_ref < mem_end) {
4700 if (val == 0)
4701 break;
4702 v = val % 100;
4703 val = val / 100;
4704 v = ((v / 10) << 4) | (v % 10);
4705 stb(mem_ref++, v);
4706 }
4707 while (mem_ref < mem_end) {
4708 stb(mem_ref++, 0);
4709 }
4710}
4711
4712void helper_f2xm1(void)
4713{
4714 ST0 = pow(2.0,ST0) - 1.0;
4715}
4716
4717void helper_fyl2x(void)
4718{
4719 CPU86_LDouble fptemp;
4720
4721 fptemp = ST0;
4722 if (fptemp>0.0){
4723 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4724 ST1 *= fptemp;
4725 fpop();
4726 } else {
4727 env->fpus &= (~0x4700);
4728 env->fpus |= 0x400;
4729 }
4730}
4731
4732void helper_fptan(void)
4733{
4734 CPU86_LDouble fptemp;
4735
4736 fptemp = ST0;
4737 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4738 env->fpus |= 0x400;
4739 } else {
4740 ST0 = tan(fptemp);
4741 fpush();
4742 ST0 = 1.0;
4743 env->fpus &= (~0x400); /* C2 <-- 0 */
4744 /* the above code is for |arg| < 2**52 only */
4745 }
4746}
4747
4748void helper_fpatan(void)
4749{
4750 CPU86_LDouble fptemp, fpsrcop;
4751
4752 fpsrcop = ST1;
4753 fptemp = ST0;
4754 ST1 = atan2(fpsrcop,fptemp);
4755 fpop();
4756}
4757
4758void helper_fxtract(void)
4759{
4760 CPU86_LDoubleU temp;
4761 unsigned int expdif;
4762
4763 temp.d = ST0;
4764 expdif = EXPD(temp) - EXPBIAS;
4765 /*DP exponent bias*/
4766 ST0 = expdif;
4767 fpush();
4768 BIASEXPONENT(temp);
4769 ST0 = temp.d;
4770}
4771
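/* FPREM1 and FPREM compute a partial remainder of ST0 by ST1.  When the
   exponent difference fits in one pass (expdif < 53 with these double
   based helpers), the low three quotient bits are reported as q2->C0,
   q1->C3, q0->C1 (FPUS bits 8, 14 and 9) and C2 is cleared; otherwise C2
   is set to signal an incomplete reduction and ST0 is only reduced by a
   power-of-two sized chunk, so the instruction must be repeated. */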
4772void helper_fprem1(void)
4773{
4774 CPU86_LDouble dblq, fpsrcop, fptemp;
4775 CPU86_LDoubleU fpsrcop1, fptemp1;
4776 int expdif;
4777 signed long long int q;
4778
4779#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
4780 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4781#else
4782 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4783#endif
4784 ST0 = 0.0 / 0.0; /* NaN */
4785 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4786 return;
4787 }
4788
4789 fpsrcop = ST0;
4790 fptemp = ST1;
4791 fpsrcop1.d = fpsrcop;
4792 fptemp1.d = fptemp;
4793 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4794
4795 if (expdif < 0) {
4796 /* optimisation? taken from the AMD docs */
4797 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4798 /* ST0 is unchanged */
4799 return;
4800 }
4801
4802 if (expdif < 53) {
4803 dblq = fpsrcop / fptemp;
4804 /* round dblq towards nearest integer */
4805 dblq = rint(dblq);
4806 ST0 = fpsrcop - fptemp * dblq;
4807
4808 /* convert dblq to q by truncating towards zero */
4809 if (dblq < 0.0)
4810 q = (signed long long int)(-dblq);
4811 else
4812 q = (signed long long int)dblq;
4813
4814 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4815 /* (C0,C3,C1) <-- (q2,q1,q0) */
4816 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4817 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4818 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4819 } else {
4820 env->fpus |= 0x400; /* C2 <-- 1 */
4821 fptemp = pow(2.0, expdif - 50);
4822 fpsrcop = (ST0 / ST1) / fptemp;
4823 /* fpsrcop = integer obtained by chopping */
4824 fpsrcop = (fpsrcop < 0.0) ?
4825 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4826 ST0 -= (ST1 * fpsrcop * fptemp);
4827 }
4828}
4829
4830void helper_fprem(void)
4831{
4832 CPU86_LDouble dblq, fpsrcop, fptemp;
4833 CPU86_LDoubleU fpsrcop1, fptemp1;
4834 int expdif;
4835 signed long long int q;
4836
4837#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
4838 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4839#else
4840 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4841#endif
4842 ST0 = 0.0 / 0.0; /* NaN */
4843 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4844 return;
4845 }
4846
4847 fpsrcop = (CPU86_LDouble)ST0;
4848 fptemp = (CPU86_LDouble)ST1;
4849 fpsrcop1.d = fpsrcop;
4850 fptemp1.d = fptemp;
4851 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4852
4853 if (expdif < 0) {
4854 /* optimisation? taken from the AMD docs */
4855 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4856 /* ST0 is unchanged */
4857 return;
4858 }
4859
4860 if ( expdif < 53 ) {
4861 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4862 /* round dblq towards zero */
4863 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4864 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4865
4866 /* convert dblq to q by truncating towards zero */
4867 if (dblq < 0.0)
4868 q = (signed long long int)(-dblq);
4869 else
4870 q = (signed long long int)dblq;
4871
4872 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4873 /* (C0,C3,C1) <-- (q2,q1,q0) */
4874 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4875 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4876 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4877 } else {
4878 int N = 32 + (expdif % 32); /* as per AMD docs */
4879 env->fpus |= 0x400; /* C2 <-- 1 */
4880 fptemp = pow(2.0, (double)(expdif - N));
4881 fpsrcop = (ST0 / ST1) / fptemp;
4882 /* fpsrcop = integer obtained by chopping */
4883 fpsrcop = (fpsrcop < 0.0) ?
4884 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4885 ST0 -= (ST1 * fpsrcop * fptemp);
4886 }
4887}
4888
4889void helper_fyl2xp1(void)
4890{
4891 CPU86_LDouble fptemp;
4892
4893 fptemp = ST0;
4894 if ((fptemp+1.0)>0.0) {
4895 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4896 ST1 *= fptemp;
4897 fpop();
4898 } else {
4899 env->fpus &= (~0x4700);
4900 env->fpus |= 0x400;
4901 }
4902}
4903
4904void helper_fsqrt(void)
4905{
4906 CPU86_LDouble fptemp;
4907
4908 fptemp = ST0;
4909 if (fptemp<0.0) {
4910 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4911 env->fpus |= 0x400;
4912 }
4913 ST0 = sqrt(fptemp);
4914}
4915
4916void helper_fsincos(void)
4917{
4918 CPU86_LDouble fptemp;
4919
4920 fptemp = ST0;
4921 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4922 env->fpus |= 0x400;
4923 } else {
4924 ST0 = sin(fptemp);
4925 fpush();
4926 ST0 = cos(fptemp);
4927 env->fpus &= (~0x400); /* C2 <-- 0 */
4928 /* the above code is for |arg| < 2**63 only */
4929 }
4930}
4931
4932void helper_frndint(void)
4933{
4934 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4935}
4936
4937void helper_fscale(void)
4938{
4939 ST0 = ldexp (ST0, (int)(ST1));
4940}
4941
4942void helper_fsin(void)
4943{
4944 CPU86_LDouble fptemp;
4945
4946 fptemp = ST0;
4947 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4948 env->fpus |= 0x400;
4949 } else {
4950 ST0 = sin(fptemp);
4951 env->fpus &= (~0x400); /* C2 <-- 0 */
4952 /* the above code is for |arg| < 2**53 only */
4953 }
4954}
4955
4956void helper_fcos(void)
4957{
4958 CPU86_LDouble fptemp;
4959
4960 fptemp = ST0;
4961 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4962 env->fpus |= 0x400;
4963 } else {
4964 ST0 = cos(fptemp);
4965 env->fpus &= (~0x400); /* C2 <-- 0 */
4966        /* the above code is for |arg| < 2**63 only */
4967 }
4968}
4969
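/* FXAM classifies ST0 via C3/C2/C0: 0x0100 = NaN, 0x0500 = infinity,
   0x4000 = zero, 0x4400 = denormal, 0x0400 = normal finite; C1 (0x0200)
   carries the sign bit. */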
4970void helper_fxam_ST0(void)
4971{
4972 CPU86_LDoubleU temp;
4973 int expdif;
4974
4975 temp.d = ST0;
4976
4977 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4978 if (SIGND(temp))
4979 env->fpus |= 0x200; /* C1 <-- 1 */
4980
4981 /* XXX: test fptags too */
4982 expdif = EXPD(temp);
4983 if (expdif == MAXEXPD) {
4984#ifdef USE_X86LDOUBLE
4985 if (MANTD(temp) == 0x8000000000000000ULL)
4986#else
4987 if (MANTD(temp) == 0)
4988#endif
4989 env->fpus |= 0x500 /*Infinity*/;
4990 else
4991 env->fpus |= 0x100 /*NaN*/;
4992 } else if (expdif == 0) {
4993 if (MANTD(temp) == 0)
4994 env->fpus |= 0x4000 /*Zero*/;
4995 else
4996 env->fpus |= 0x4400 /*Denormal*/;
4997 } else {
4998 env->fpus |= 0x400;
4999 }
5000}
5001
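/* FSTENV/FSAVE use the full two-bit x87 tag encoding: 00 = valid,
   01 = zero, 10 = special (NaN, infinity, denormal), 11 = empty.  Since
   env->fptags[] only records empty/non-empty, helper_fstenv rebuilds the
   tag word by inspecting the register contents. */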
5002void helper_fstenv(target_ulong ptr, int data32)
5003{
5004 int fpus, fptag, exp, i;
5005 uint64_t mant;
5006 CPU86_LDoubleU tmp;
5007
5008 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5009 fptag = 0;
5010 for (i=7; i>=0; i--) {
5011 fptag <<= 2;
5012 if (env->fptags[i]) {
5013 fptag |= 3;
5014 } else {
5015 tmp.d = env->fpregs[i].d;
5016 exp = EXPD(tmp);
5017 mant = MANTD(tmp);
5018 if (exp == 0 && mant == 0) {
5019 /* zero */
5020 fptag |= 1;
5021 } else if (exp == 0 || exp == MAXEXPD
5022#ifdef USE_X86LDOUBLE
5023 || (mant & (1LL << 63)) == 0
5024#endif
5025 ) {
5026 /* NaNs, infinity, denormal */
5027 fptag |= 2;
5028 }
5029 }
5030 }
5031 if (data32) {
5032 /* 32 bit */
5033 stl(ptr, env->fpuc);
5034 stl(ptr + 4, fpus);
5035 stl(ptr + 8, fptag);
5036 stl(ptr + 12, 0); /* fpip */
5037 stl(ptr + 16, 0); /* fpcs */
5038 stl(ptr + 20, 0); /* fpoo */
5039 stl(ptr + 24, 0); /* fpos */
5040 } else {
5041 /* 16 bit */
5042 stw(ptr, env->fpuc);
5043 stw(ptr + 2, fpus);
5044 stw(ptr + 4, fptag);
5045 stw(ptr + 6, 0);
5046 stw(ptr + 8, 0);
5047 stw(ptr + 10, 0);
5048 stw(ptr + 12, 0);
5049 }
5050}
5051
5052void helper_fldenv(target_ulong ptr, int data32)
5053{
5054 int i, fpus, fptag;
5055
5056 if (data32) {
5057 env->fpuc = lduw(ptr);
5058 fpus = lduw(ptr + 4);
5059 fptag = lduw(ptr + 8);
5060 }
5061 else {
5062 env->fpuc = lduw(ptr);
5063 fpus = lduw(ptr + 2);
5064 fptag = lduw(ptr + 4);
5065 }
5066 env->fpstt = (fpus >> 11) & 7;
5067 env->fpus = fpus & ~0x3800;
5068 for(i = 0;i < 8; i++) {
5069 env->fptags[i] = ((fptag & 3) == 3);
5070 fptag >>= 2;
5071 }
5072}
5073
5074void helper_fsave(target_ulong ptr, int data32)
5075{
5076 CPU86_LDouble tmp;
5077 int i;
5078
5079 helper_fstenv(ptr, data32);
5080
5081 ptr += (14 << data32);
5082 for(i = 0;i < 8; i++) {
5083 tmp = ST(i);
5084 helper_fstt(tmp, ptr);
5085 ptr += 10;
5086 }
5087
5088 /* fninit */
5089 env->fpus = 0;
5090 env->fpstt = 0;
5091 env->fpuc = 0x37f;
5092 env->fptags[0] = 1;
5093 env->fptags[1] = 1;
5094 env->fptags[2] = 1;
5095 env->fptags[3] = 1;
5096 env->fptags[4] = 1;
5097 env->fptags[5] = 1;
5098 env->fptags[6] = 1;
5099 env->fptags[7] = 1;
5100}
5101
5102void helper_frstor(target_ulong ptr, int data32)
5103{
5104 CPU86_LDouble tmp;
5105 int i;
5106
5107 helper_fldenv(ptr, data32);
5108 ptr += (14 << data32);
5109
5110 for(i = 0;i < 8; i++) {
5111 tmp = helper_fldt(ptr);
5112 ST(i) = tmp;
5113 ptr += 10;
5114 }
5115}
5116
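/* FXSAVE/FXRSTOR operate on a 512-byte, 16-byte aligned area.  The
   offsets used below are: +0x00 FCW, +0x02 FSW, +0x04 the abridged
   (one bit per register, inverted) tag word, +0x08/+0x10 instruction and
   data pointers (left zero here), +0x18 MXCSR, +0x1c MXCSR mask,
   +0x20 the eight ST/MM registers at 16 bytes each, +0xa0 the XMM
   registers. */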
5117void helper_fxsave(target_ulong ptr, int data64)
5118{
5119 int fpus, fptag, i, nb_xmm_regs;
5120 CPU86_LDouble tmp;
5121 target_ulong addr;
5122
5123 /* The operand must be 16 byte aligned */
5124 if (ptr & 0xf) {
5125 raise_exception(EXCP0D_GPF);
5126 }
5127
5128 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5129 fptag = 0;
5130 for(i = 0; i < 8; i++) {
5131 fptag |= (env->fptags[i] << i);
5132 }
5133 stw(ptr, env->fpuc);
5134 stw(ptr + 2, fpus);
5135 stw(ptr + 4, fptag ^ 0xff);
5136#ifdef TARGET_X86_64
5137 if (data64) {
5138 stq(ptr + 0x08, 0); /* rip */
5139 stq(ptr + 0x10, 0); /* rdp */
5140 } else
5141#endif
5142 {
5143 stl(ptr + 0x08, 0); /* eip */
5144 stl(ptr + 0x0c, 0); /* sel */
5145 stl(ptr + 0x10, 0); /* dp */
5146 stl(ptr + 0x14, 0); /* sel */
5147 }
5148
5149 addr = ptr + 0x20;
5150 for(i = 0;i < 8; i++) {
5151 tmp = ST(i);
5152 helper_fstt(tmp, addr);
5153 addr += 16;
5154 }
5155
5156 if (env->cr[4] & CR4_OSFXSR_MASK) {
5157 /* XXX: finish it */
5158 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5159 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5160 if (env->hflags & HF_CS64_MASK)
5161 nb_xmm_regs = 16;
5162 else
5163 nb_xmm_regs = 8;
5164 addr = ptr + 0xa0;
5165 /* Fast FXSAVE leaves out the XMM registers */
5166 if (!(env->efer & MSR_EFER_FFXSR)
5167 || (env->hflags & HF_CPL_MASK)
5168 || !(env->hflags & HF_LMA_MASK)) {
5169 for(i = 0; i < nb_xmm_regs; i++) {
5170 stq(addr, env->xmm_regs[i].XMM_Q(0));
5171 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5172 addr += 16;
5173 }
5174 }
5175 }
5176}
5177
5178void helper_fxrstor(target_ulong ptr, int data64)
5179{
5180 int i, fpus, fptag, nb_xmm_regs;
5181 CPU86_LDouble tmp;
5182 target_ulong addr;
5183
5184 /* The operand must be 16 byte aligned */
5185 if (ptr & 0xf) {
5186 raise_exception(EXCP0D_GPF);
5187 }
5188
5189 env->fpuc = lduw(ptr);
5190 fpus = lduw(ptr + 2);
5191 fptag = lduw(ptr + 4);
5192 env->fpstt = (fpus >> 11) & 7;
5193 env->fpus = fpus & ~0x3800;
5194 fptag ^= 0xff;
5195 for(i = 0;i < 8; i++) {
5196 env->fptags[i] = ((fptag >> i) & 1);
5197 }
5198
5199 addr = ptr + 0x20;
5200 for(i = 0;i < 8; i++) {
5201 tmp = helper_fldt(addr);
5202 ST(i) = tmp;
5203 addr += 16;
5204 }
5205
5206 if (env->cr[4] & CR4_OSFXSR_MASK) {
5207 /* XXX: finish it */
5208 env->mxcsr = ldl(ptr + 0x18);
5209 //ldl(ptr + 0x1c);
5210 if (env->hflags & HF_CS64_MASK)
5211 nb_xmm_regs = 16;
5212 else
5213 nb_xmm_regs = 8;
5214 addr = ptr + 0xa0;
5215 /* Fast FXRESTORE leaves out the XMM registers */
5216 if (!(env->efer & MSR_EFER_FFXSR)
5217 || (env->hflags & HF_CPL_MASK)
5218 || !(env->hflags & HF_LMA_MASK)) {
5219 for(i = 0; i < nb_xmm_regs; i++) {
5220#if !defined(VBOX) || __GNUC__ < 4
5221 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5222 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5223#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5224# if 1
5225 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5226 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5227 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5228 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5229# else
5230 /* this works fine on Mac OS X, gcc 4.0.1 */
5231 uint64_t u64 = ldq(addr);
5232            env->xmm_regs[i].XMM_Q(0) = u64;
5233            u64 = ldq(addr + 8);
5234 env->xmm_regs[i].XMM_Q(1) = u64;
5235# endif
5236#endif
5237 addr += 16;
5238 }
5239 }
5240 }
5241}
5242
5243#ifndef USE_X86LDOUBLE
5244
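/* Without USE_X86LDOUBLE the FPU registers are plain IEEE doubles, so
   converting to/from the 80-bit memory format means shifting the 52-bit
   fraction up by 11 bits and setting the explicit integer bit of the
   64-bit significand, and rebiasing the exponent between EXPBIAS and the
   extended-precision bias 16383. */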
5245void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5246{
5247 CPU86_LDoubleU temp;
5248 int e;
5249
5250 temp.d = f;
5251 /* mantissa */
5252 *pmant = (MANTD(temp) << 11) | (1LL << 63);
5253 /* exponent + sign */
5254 e = EXPD(temp) - EXPBIAS + 16383;
5255 e |= SIGND(temp) >> 16;
5256 *pexp = e;
5257}
5258
5259CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5260{
5261 CPU86_LDoubleU temp;
5262 int e;
5263 uint64_t ll;
5264
5265 /* XXX: handle overflow ? */
5266 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
5267 e |= (upper >> 4) & 0x800; /* sign */
5268 ll = (mant >> 11) & ((1LL << 52) - 1);
5269#ifdef __arm__
5270 temp.l.upper = (e << 20) | (ll >> 32);
5271 temp.l.lower = ll;
5272#else
5273 temp.ll = ll | ((uint64_t)e << 52);
5274#endif
5275 return temp.d;
5276}
5277
5278#else
5279
5280void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5281{
5282 CPU86_LDoubleU temp;
5283
5284 temp.d = f;
5285 *pmant = temp.l.lower;
5286 *pexp = temp.l.upper;
5287}
5288
5289CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5290{
5291 CPU86_LDoubleU temp;
5292
5293 temp.l.upper = upper;
5294 temp.l.lower = mant;
5295 return temp.d;
5296}
5297#endif
5298
5299#ifdef TARGET_X86_64
5300
5301//#define DEBUG_MULDIV
5302
5303static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
5304{
5305 *plow += a;
5306 /* carry test */
5307 if (*plow < a)
5308 (*phigh)++;
5309 *phigh += b;
5310}
5311
5312static void neg128(uint64_t *plow, uint64_t *phigh)
5313{
5314 *plow = ~ *plow;
5315 *phigh = ~ *phigh;
5316 add128(plow, phigh, 1, 0);
5317}
5318
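/* 128-by-64 bit division used by DIV/IDIV in 64-bit mode.  When the high
   half of the dividend is non-zero, a simple bit-by-bit shift-and-subtract
   loop produces the quotient in the low half and the remainder in the high
   half; an overflowing quotient is reported so the caller can raise #DE. */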
5319/* return TRUE if overflow */
5320static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5321{
5322 uint64_t q, r, a1, a0;
5323 int i, qb, ab;
5324
5325 a0 = *plow;
5326 a1 = *phigh;
5327 if (a1 == 0) {
5328 q = a0 / b;
5329 r = a0 % b;
5330 *plow = q;
5331 *phigh = r;
5332 } else {
5333 if (a1 >= b)
5334 return 1;
5335 /* XXX: use a better algorithm */
5336 for(i = 0; i < 64; i++) {
5337 ab = a1 >> 63;
5338 a1 = (a1 << 1) | (a0 >> 63);
5339 if (ab || a1 >= b) {
5340 a1 -= b;
5341 qb = 1;
5342 } else {
5343 qb = 0;
5344 }
5345 a0 = (a0 << 1) | qb;
5346 }
5347#if defined(DEBUG_MULDIV)
5348 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5349 *phigh, *plow, b, a0, a1);
5350#endif
5351 *plow = a0;
5352 *phigh = a1;
5353 }
5354 return 0;
5355}
5356
5357/* return TRUE if overflow */
5358static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5359{
5360 int sa, sb;
5361 sa = ((int64_t)*phigh < 0);
5362 if (sa)
5363 neg128(plow, phigh);
5364 sb = (b < 0);
5365 if (sb)
5366 b = -b;
5367 if (div64(plow, phigh, b) != 0)
5368 return 1;
5369 if (sa ^ sb) {
5370 if (*plow > (1ULL << 63))
5371 return 1;
5372 *plow = - *plow;
5373 } else {
5374 if (*plow >= (1ULL << 63))
5375 return 1;
5376 }
5377 if (sa)
5378 *phigh = - *phigh;
5379 return 0;
5380}
5381
5382void helper_mulq_EAX_T0(target_ulong t0)
5383{
5384 uint64_t r0, r1;
5385
5386 mulu64(&r0, &r1, EAX, t0);
5387 EAX = r0;
5388 EDX = r1;
5389 CC_DST = r0;
5390 CC_SRC = r1;
5391}
5392
5393void helper_imulq_EAX_T0(target_ulong t0)
5394{
5395 uint64_t r0, r1;
5396
5397 muls64(&r0, &r1, EAX, t0);
5398 EAX = r0;
5399 EDX = r1;
5400 CC_DST = r0;
5401 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5402}
5403
5404target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
5405{
5406 uint64_t r0, r1;
5407
5408 muls64(&r0, &r1, t0, t1);
5409 CC_DST = r0;
5410 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5411 return r0;
5412}
5413
5414void helper_divq_EAX(target_ulong t0)
5415{
5416 uint64_t r0, r1;
5417 if (t0 == 0) {
5418 raise_exception(EXCP00_DIVZ);
5419 }
5420 r0 = EAX;
5421 r1 = EDX;
5422 if (div64(&r0, &r1, t0))
5423 raise_exception(EXCP00_DIVZ);
5424 EAX = r0;
5425 EDX = r1;
5426}
5427
5428void helper_idivq_EAX(target_ulong t0)
5429{
5430 uint64_t r0, r1;
5431 if (t0 == 0) {
5432 raise_exception(EXCP00_DIVZ);
5433 }
5434 r0 = EAX;
5435 r1 = EDX;
5436 if (idiv64(&r0, &r1, t0))
5437 raise_exception(EXCP00_DIVZ);
5438 EAX = r0;
5439 EDX = r1;
5440}
5441#endif
5442
5443static void do_hlt(void)
5444{
5445 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
5446 env->halted = 1;
5447 env->exception_index = EXCP_HLT;
5448 cpu_loop_exit();
5449}
5450
5451void helper_hlt(int next_eip_addend)
5452{
5453 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
5454 EIP += next_eip_addend;
5455
5456 do_hlt();
5457}
5458
5459void helper_monitor(target_ulong ptr)
5460{
5461#ifdef VBOX
5462 if ((uint32_t)ECX > 1)
5463 raise_exception(EXCP0D_GPF);
5464#else /* !VBOX */
5465 if ((uint32_t)ECX != 0)
5466 raise_exception(EXCP0D_GPF);
5467#endif /* !VBOX */
5468 /* XXX: store address ? */
5469 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
5470}
5471
5472void helper_mwait(int next_eip_addend)
5473{
5474 if ((uint32_t)ECX != 0)
5475 raise_exception(EXCP0D_GPF);
5476#ifdef VBOX
5477 helper_hlt(next_eip_addend);
5478#else /* !VBOX */
5479 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5480 EIP += next_eip_addend;
5481
5482 /* XXX: not complete but not completely erroneous */
5483 if (env->cpu_index != 0 || env->next_cpu != NULL) {
5484 /* more than one CPU: do not sleep because another CPU may
5485 wake this one */
5486 } else {
5487 do_hlt();
5488 }
5489#endif /* !VBOX */
5490}
5491
5492void helper_debug(void)
5493{
5494 env->exception_index = EXCP_DEBUG;
5495 cpu_loop_exit();
5496}
5497
5498void helper_reset_rf(void)
5499{
5500 env->eflags &= ~RF_MASK;
5501}
5502
5503void helper_raise_interrupt(int intno, int next_eip_addend)
5504{
5505 raise_interrupt(intno, 1, 0, next_eip_addend);
5506}
5507
5508void helper_raise_exception(int exception_index)
5509{
5510 raise_exception(exception_index);
5511}
5512
5513void helper_cli(void)
5514{
5515 env->eflags &= ~IF_MASK;
5516}
5517
5518void helper_sti(void)
5519{
5520 env->eflags |= IF_MASK;
5521}
5522
5523#ifdef VBOX
5524void helper_cli_vme(void)
5525{
5526 env->eflags &= ~VIF_MASK;
5527}
5528
5529void helper_sti_vme(void)
5530{
5531 /* First check, then change eflags according to the AMD manual */
5532 if (env->eflags & VIP_MASK) {
5533 raise_exception(EXCP0D_GPF);
5534 }
5535 env->eflags |= VIF_MASK;
5536}
5537#endif /* VBOX */
5538
5539#if 0
5540/* vm86plus instructions */
5541void helper_cli_vm(void)
5542{
5543 env->eflags &= ~VIF_MASK;
5544}
5545
5546void helper_sti_vm(void)
5547{
5548 env->eflags |= VIF_MASK;
5549 if (env->eflags & VIP_MASK) {
5550 raise_exception(EXCP0D_GPF);
5551 }
5552}
5553#endif
5554
5555void helper_set_inhibit_irq(void)
5556{
5557 env->hflags |= HF_INHIBIT_IRQ_MASK;
5558}
5559
5560void helper_reset_inhibit_irq(void)
5561{
5562 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5563}
5564
5565void helper_boundw(target_ulong a0, int v)
5566{
5567 int low, high;
5568 low = ldsw(a0);
5569 high = ldsw(a0 + 2);
5570 v = (int16_t)v;
5571 if (v < low || v > high) {
5572 raise_exception(EXCP05_BOUND);
5573 }
5574}
5575
5576void helper_boundl(target_ulong a0, int v)
5577{
5578 int low, high;
5579 low = ldl(a0);
5580 high = ldl(a0 + 4);
5581 if (v < low || v > high) {
5582 raise_exception(EXCP05_BOUND);
5583 }
5584}
5585
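/* Reciprocal and reciprocal square root used by the SSE rcp/rsqrt
   helpers; computed exactly here instead of with the reduced precision
   of real hardware. */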
5586static float approx_rsqrt(float a)
5587{
5588 return 1.0 / sqrt(a);
5589}
5590
5591static float approx_rcp(float a)
5592{
5593 return 1.0 / a;
5594}
5595
5596#if !defined(CONFIG_USER_ONLY)
5597
5598#define MMUSUFFIX _mmu
5599
5600#define SHIFT 0
5601#include "softmmu_template.h"
5602
5603#define SHIFT 1
5604#include "softmmu_template.h"
5605
5606#define SHIFT 2
5607#include "softmmu_template.h"
5608
5609#define SHIFT 3
5610#include "softmmu_template.h"
5611
5612#endif
5613
5614#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
5615/* This code assumes a real physical address always fits into a host CPU register,
5616   which is wrong in general but true for our current use cases. */
5617RTCCUINTREG REGPARM __ldb_vbox_phys(RTCCUINTREG addr)
5618{
5619 return remR3PhysReadS8(addr);
5620}
5621RTCCUINTREG REGPARM __ldub_vbox_phys(RTCCUINTREG addr)
5622{
5623 return remR3PhysReadU8(addr);
5624}
5625void REGPARM __stb_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5626{
5627 remR3PhysWriteU8(addr, val);
5628}
5629RTCCUINTREG REGPARM __ldw_vbox_phys(RTCCUINTREG addr)
5630{
5631 return remR3PhysReadS16(addr);
5632}
5633RTCCUINTREG REGPARM __lduw_vbox_phys(RTCCUINTREG addr)
5634{
5635 return remR3PhysReadU16(addr);
5636}
5637void REGPARM __stw_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5638{
5639 remR3PhysWriteU16(addr, val);
5640}
5641RTCCUINTREG REGPARM __ldl_vbox_phys(RTCCUINTREG addr)
5642{
5643 return remR3PhysReadS32(addr);
5644}
5645RTCCUINTREG REGPARM __ldul_vbox_phys(RTCCUINTREG addr)
5646{
5647 return remR3PhysReadU32(addr);
5648}
5649void REGPARM __stl_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5650{
5651 remR3PhysWriteU32(addr, val);
5652}
5653uint64_t REGPARM __ldq_vbox_phys(RTCCUINTREG addr)
5654{
5655 return remR3PhysReadU64(addr);
5656}
5657void REGPARM __stq_vbox_phys(RTCCUINTREG addr, uint64_t val)
5658{
5659 remR3PhysWriteU64(addr, val);
5660}
5661#endif /* VBOX */
5662
5663#if !defined(CONFIG_USER_ONLY)
5664/* try to fill the TLB and return an exception if error. If retaddr is
5665 NULL, it means that the function was called in C code (i.e. not
5666 from generated code or from helper.c) */
5667/* XXX: fix it to restore all registers */
5668void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5669{
5670 TranslationBlock *tb;
5671 int ret;
5672 uintptr_t pc;
5673 CPUX86State *saved_env;
5674
5675 /* XXX: hack to restore env in all cases, even if not called from
5676 generated code */
5677 saved_env = env;
5678 env = cpu_single_env;
5679
5680 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
5681 if (ret) {
5682 if (retaddr) {
5683 /* now we have a real cpu fault */
5684 pc = (uintptr_t)retaddr;
5685 tb = tb_find_pc(pc);
5686 if (tb) {
5687 /* the PC is inside the translated code. It means that we have
5688 a virtual CPU fault */
5689 cpu_restore_state(tb, env, pc, NULL);
5690 }
5691 }
5692 raise_exception_err(env->exception_index, env->error_code);
5693 }
5694 env = saved_env;
5695}
5696#endif
5697
5698#ifdef VBOX
5699
5700/**
5701 * Correctly computes the eflags.
5702 * @returns eflags.
5703 * @param env1 CPU environment.
5704 */
5705uint32_t raw_compute_eflags(CPUX86State *env1)
5706{
5707 CPUX86State *savedenv = env;
5708 uint32_t efl;
5709 env = env1;
5710 efl = compute_eflags();
5711 env = savedenv;
5712 return efl;
5713}
5714
5715/**
5716 * Reads byte from virtual address in guest memory area.
5717 * XXX: is it working for any addresses? swapped out pages?
5718 * @returns read data byte.
5719 * @param env1 CPU environment.
5720 * @param pvAddr GC Virtual address.
5721 */
5722uint8_t read_byte(CPUX86State *env1, target_ulong addr)
5723{
5724 CPUX86State *savedenv = env;
5725 uint8_t u8;
5726 env = env1;
5727 u8 = ldub_kernel(addr);
5728 env = savedenv;
5729 return u8;
5730}
5731
5732/**
5733 * Reads a 16-bit word from a virtual address in the guest memory area.
5734 * XXX: is it working for any addresses? swapped out pages?
5735 * @returns read data word.
5736 * @param env1 CPU environment.
5737 * @param pvAddr GC Virtual address.
5738 */
5739uint16_t read_word(CPUX86State *env1, target_ulong addr)
5740{
5741 CPUX86State *savedenv = env;
5742 uint16_t u16;
5743 env = env1;
5744 u16 = lduw_kernel(addr);
5745 env = savedenv;
5746 return u16;
5747}
5748
5749/**
5750 * Reads a 32-bit dword from a virtual address in the guest memory area.
5751 * XXX: is it working for any addresses? swapped out pages?
5752 * @returns read data dword.
5753 * @param env1 CPU environment.
5754 * @param pvAddr GC Virtual address.
5755 */
5756uint32_t read_dword(CPUX86State *env1, target_ulong addr)
5757{
5758 CPUX86State *savedenv = env;
5759 uint32_t u32;
5760 env = env1;
5761 u32 = ldl_kernel(addr);
5762 env = savedenv;
5763 return u32;
5764}
5765
5766/**
5767 * Writes byte to virtual address in guest memory area.
5768 * XXX: is it working for any addresses? swapped out pages?
5770 * @param env1 CPU environment.
5771 * @param pvAddr GC Virtual address.
5772 * @param val byte value
5773 */
5774void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
5775{
5776 CPUX86State *savedenv = env;
5777 env = env1;
5778 stb(addr, val);
5779 env = savedenv;
5780}
5781
5782void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5783{
5784 CPUX86State *savedenv = env;
5785 env = env1;
5786 stw(addr, val);
5787 env = savedenv;
5788}
5789
5790void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5791{
5792 CPUX86State *savedenv = env;
5793 env = env1;
5794 stl(addr, val);
5795 env = savedenv;
5796}
5797
5798/**
5799 * Correctly loads selector into segment register with updating internal
5800 * qemu data/caches.
5801 * @param env1 CPU environment.
5802 * @param seg_reg Segment register.
5803 * @param selector Selector to load.
5804 */
5805void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5806{
5807 CPUX86State *savedenv = env;
5808#ifdef FORCE_SEGMENT_SYNC
5809 jmp_buf old_buf;
5810#endif
5811
5812 env = env1;
5813
5814 if ( env->eflags & X86_EFL_VM
5815 || !(env->cr[0] & X86_CR0_PE))
5816 {
5817 load_seg_vm(seg_reg, selector);
5818
5819 env = savedenv;
5820
5821 /* Successful sync. */
5822 Assert(env1->segs[seg_reg].newselector == 0);
5823 }
5824 else
5825 {
5826        /* For some reason it works even without save/restore of the jump buffer, and since this code is
5827           time critical - let's not do that */
5828#ifdef FORCE_SEGMENT_SYNC
5829 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5830#endif
5831 if (setjmp(env1->jmp_env) == 0)
5832 {
5833 if (seg_reg == R_CS)
5834 {
5835 uint32_t e1, e2;
5836 e1 = e2 = 0;
5837 load_segment(&e1, &e2, selector);
5838 cpu_x86_load_seg_cache(env, R_CS, selector,
5839 get_seg_base(e1, e2),
5840 get_seg_limit(e1, e2),
5841 e2);
5842 }
5843 else
5844 helper_load_seg(seg_reg, selector);
5845            /* We used to use tss_load_seg(seg_reg, selector), which for some reason ignored
5846               loading 0 selectors; that in turn led to subtle problems like #3588 */
5847
5848 env = savedenv;
5849
5850 /* Successful sync. */
5851 Assert(env1->segs[seg_reg].newselector == 0);
5852 }
5853 else
5854 {
5855 env = savedenv;
5856
5857 /* Postpone sync until the guest uses the selector. */
5858 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5859 env1->segs[seg_reg].newselector = selector;
5860 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5861 env1->exception_index = -1;
5862 env1->error_code = 0;
5863 env1->old_exception = -1;
5864 }
5865#ifdef FORCE_SEGMENT_SYNC
5866 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5867#endif
5868 }
5869
5870}
5871
5872DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5873{
5874 tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
5875}
5876
5877
5878int emulate_single_instr(CPUX86State *env1)
5879{
5880 TranslationBlock *tb;
5881 TranslationBlock *current;
5882 int flags;
5883 uint8_t *tc_ptr;
5884 target_ulong old_eip;
5885
5886 /* ensures env is loaded! */
5887 CPUX86State *savedenv = env;
5888 env = env1;
5889
5890 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
5891
5892 current = env->current_tb;
5893 env->current_tb = NULL;
5894 flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
5895
5896 /*
5897 * Translate only one instruction.
5898 */
5899 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
5900 tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
5901 env->segs[R_CS].base, flags, 0);
5902
5903 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
5904
5905
5906 /* tb_link_phys: */
5907 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
5908 tb->jmp_next[0] = NULL;
5909 tb->jmp_next[1] = NULL;
5910 Assert(tb->jmp_next[0] == NULL);
5911 Assert(tb->jmp_next[1] == NULL);
5912 if (tb->tb_next_offset[0] != 0xffff)
5913 tb_reset_jump(tb, 0);
5914 if (tb->tb_next_offset[1] != 0xffff)
5915 tb_reset_jump(tb, 1);
5916
5917 /*
5918 * Execute it using emulation
5919 */
5920 old_eip = env->eip;
5921 env->current_tb = tb;
5922
5923 /*
5924     * eip remains the same for repeated instructions; it is unclear why qemu doesn't do a jump inside the generated code.
5925     * This is perhaps not a very safe hack.
5926 */
5927 while (old_eip == env->eip)
5928 {
5929 tc_ptr = tb->tc_ptr;
5930
5931#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
5932 int fake_ret;
5933 tcg_qemu_tb_exec(tc_ptr, fake_ret);
5934#else
5935 tcg_qemu_tb_exec(tc_ptr);
5936#endif
5937
5938 /*
5939 * Exit once we detect an external interrupt and interrupts are enabled
5940 */
5941 if ( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER))
5942 || ( (env->eflags & IF_MASK)
5943 && !(env->hflags & HF_INHIBIT_IRQ_MASK)
5944 && (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) )
5945 )
5946 {
5947 break;
5948 }
5949 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_FLUSH_TLB) {
5950 tlb_flush(env, true);
5951 }
5952 }
5953 env->current_tb = current;
5954
5955 tb_phys_invalidate(tb, -1);
5956 tb_free(tb);
5957/*
5958 Assert(tb->tb_next_offset[0] == 0xffff);
5959 Assert(tb->tb_next_offset[1] == 0xffff);
5960 Assert(tb->tb_next[0] == 0xffff);
5961 Assert(tb->tb_next[1] == 0xffff);
5962 Assert(tb->jmp_next[0] == NULL);
5963 Assert(tb->jmp_next[1] == NULL);
5964 Assert(tb->jmp_first == NULL); */
5965
5966 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
5967
5968 /*
5969 * Execute the next instruction when we encounter instruction fusing.
5970 */
5971 if (env->hflags & HF_INHIBIT_IRQ_MASK)
5972 {
5973 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
5974 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5975 emulate_single_instr(env);
5976 }
5977
5978 env = savedenv;
5979 return 0;
5980}
5981
5982/**
5983 * Correctly loads a new ldtr selector.
5984 *
5985 * @param env1 CPU environment.
5986 * @param selector Selector to load.
5987 */
5988void sync_ldtr(CPUX86State *env1, int selector)
5989{
5990 CPUX86State *saved_env = env;
5991 if (setjmp(env1->jmp_env) == 0)
5992 {
5993 env = env1;
5994 helper_lldt(selector);
5995 env = saved_env;
5996 }
5997 else
5998 {
5999 env = saved_env;
6000#ifdef VBOX_STRICT
6001 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
6002#endif
6003 }
6004}
6005
6006int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
6007 uint32_t *esp_ptr, int dpl)
6008{
6009 int type, index, shift;
6010
6011 CPUX86State *savedenv = env;
6012 env = env1;
6013
6014 if (!(env->tr.flags & DESC_P_MASK))
6015 cpu_abort(env, "invalid tss");
6016 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
6017 if ((type & 7) != 1)
6018 cpu_abort(env, "invalid tss type %d", type);
6019 shift = type >> 3;
6020 index = (dpl * 4 + 2) << shift;
6021 if (index + (4 << shift) - 1 > env->tr.limit)
6022 {
6023 env = savedenv;
6024 return 0;
6025 }
6026 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
6027
6028 if (shift == 0) {
6029 *esp_ptr = lduw_kernel(env->tr.base + index);
6030 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
6031 } else {
6032 *esp_ptr = ldl_kernel(env->tr.base + index);
6033 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
6034 }
6035
6036 env = savedenv;
6037 return 1;
6038}
6039
6040//*****************************************************************************
6041// Needs to be at the bottom of the file (overriding macros)
6042
6043static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
6044{
6045#ifdef USE_X86LDOUBLE
6046 CPU86_LDoubleU tmp;
6047 tmp.l.lower = *(uint64_t const *)ptr;
6048 tmp.l.upper = *(uint16_t const *)(ptr + 8);
6049 return tmp.d;
6050#else
6051# error "Busted FPU saving/restoring!"
6052 return *(CPU86_LDouble *)ptr;
6053#endif
6054}
6055
6056static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
6057{
6058#ifdef USE_X86LDOUBLE
6059 CPU86_LDoubleU tmp;
6060 tmp.d = f;
6061 *(uint64_t *)(ptr + 0) = tmp.l.lower;
6062 *(uint16_t *)(ptr + 8) = tmp.l.upper;
6063 *(uint16_t *)(ptr + 10) = 0;
6064 *(uint32_t *)(ptr + 12) = 0;
6065 AssertCompile(sizeof(long double) > 8);
6066#else
6067# error "Busted FPU saving/restoring!"
6068 *(CPU86_LDouble *)ptr = f;
6069#endif
6070}
6071
6072#undef stw
6073#undef stl
6074#undef stq
6075#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
6076#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
6077#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
6078
6079//*****************************************************************************
6080void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6081{
6082 int fpus, fptag, i, nb_xmm_regs;
6083 CPU86_LDouble tmp;
6084 uint8_t *addr;
6085 int data64 = !!(env->hflags & HF_LMA_MASK);
6086
6087 if (env->cpuid_features & CPUID_FXSR)
6088 {
6089 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6090 fptag = 0;
6091 for(i = 0; i < 8; i++) {
6092 fptag |= (env->fptags[i] << i);
6093 }
6094 stw(ptr, env->fpuc);
6095 stw(ptr + 2, fpus);
6096 stw(ptr + 4, fptag ^ 0xff);
6097
6098 addr = ptr + 0x20;
6099 for(i = 0;i < 8; i++) {
6100 tmp = ST(i);
6101 helper_fstt_raw(tmp, addr);
6102 addr += 16;
6103 }
6104
6105 if (env->cr[4] & CR4_OSFXSR_MASK) {
6106 /* XXX: finish it */
6107 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
6108 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
6109 nb_xmm_regs = 8 << data64;
6110 addr = ptr + 0xa0;
6111 for(i = 0; i < nb_xmm_regs; i++) {
6112#if __GNUC__ < 4
6113 stq(addr, env->xmm_regs[i].XMM_Q(0));
6114 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
6115#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
6116 stl(addr, env->xmm_regs[i].XMM_L(0));
6117 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
6118 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
6119 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
6120#endif
6121 addr += 16;
6122 }
6123 }
6124 }
6125 else
6126 {
6127 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6128 int fptag;
6129
6130 fp->FCW = env->fpuc;
6131 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6132 fptag = 0;
6133 for (i=7; i>=0; i--) {
6134 fptag <<= 2;
6135 if (env->fptags[i]) {
6136 fptag |= 3;
6137 } else {
6138 /* the FPU automatically computes it */
6139 }
6140 }
6141 fp->FTW = fptag;
6142
6143 for(i = 0;i < 8; i++) {
6144 tmp = ST(i);
6145 helper_fstt_raw(tmp, &fp->regs[i].au8[0]);
6146 }
6147 }
6148}
6149
6150//*****************************************************************************
6151#undef lduw
6152#undef ldl
6153#undef ldq
6154#define lduw(a) *(uint16_t *)(a)
6155#define ldl(a) *(uint32_t *)(a)
6156#define ldq(a) *(uint64_t *)(a)
6157//*****************************************************************************
6158void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6159{
6160 int i, fpus, fptag, nb_xmm_regs;
6161 CPU86_LDouble tmp;
6162 uint8_t *addr;
6163 int data64 = !!(env->hflags & HF_LMA_MASK); /* don't use HF_CS64_MASK here as cs hasn't been synced when this function is called. */
6164
6165 if (env->cpuid_features & CPUID_FXSR)
6166 {
6167 env->fpuc = lduw(ptr);
6168 fpus = lduw(ptr + 2);
6169 fptag = lduw(ptr + 4);
6170 env->fpstt = (fpus >> 11) & 7;
6171 env->fpus = fpus & ~0x3800;
6172 fptag ^= 0xff;
6173 for(i = 0;i < 8; i++) {
6174 env->fptags[i] = ((fptag >> i) & 1);
6175 }
6176
6177 addr = ptr + 0x20;
6178 for(i = 0;i < 8; i++) {
6179 tmp = helper_fldt_raw(addr);
6180 ST(i) = tmp;
6181 addr += 16;
6182 }
6183
6184 if (env->cr[4] & CR4_OSFXSR_MASK) {
6185 /* XXX: finish it, endianness */
6186 env->mxcsr = ldl(ptr + 0x18);
6187 //ldl(ptr + 0x1c);
6188 nb_xmm_regs = 8 << data64;
6189 addr = ptr + 0xa0;
6190 for(i = 0; i < nb_xmm_regs; i++) {
6191#if HC_ARCH_BITS == 32
6192 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
6193 env->xmm_regs[i].XMM_L(0) = ldl(addr);
6194 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
6195 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
6196 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
6197#else
6198 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
6199 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
6200#endif
6201 addr += 16;
6202 }
6203 }
6204 }
6205 else
6206 {
6207 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6208 int fptag, j;
6209
6210 env->fpuc = fp->FCW;
6211 env->fpstt = (fp->FSW >> 11) & 7;
6212 env->fpus = fp->FSW & ~0x3800;
6213 fptag = fp->FTW;
6214 for(i = 0;i < 8; i++) {
6215 env->fptags[i] = ((fptag & 3) == 3);
6216 fptag >>= 2;
6217 }
6218 j = env->fpstt;
6219 for(i = 0;i < 8; i++) {
6220 tmp = helper_fldt_raw(&fp->regs[i].au8[0]);
6221 ST(i) = tmp;
6222 }
6223 }
6224}
6225//*****************************************************************************
6226//*****************************************************************************
6227
6228#endif /* VBOX */
6229
6230/* Secure Virtual Machine helpers */
6231
6232#if defined(CONFIG_USER_ONLY)
6233
6234void helper_vmrun(int aflag, int next_eip_addend)
6235{
6236}
6237void helper_vmmcall(void)
6238{
6239}
6240void helper_vmload(int aflag)
6241{
6242}
6243void helper_vmsave(int aflag)
6244{
6245}
6246void helper_stgi(void)
6247{
6248}
6249void helper_clgi(void)
6250{
6251}
6252void helper_skinit(void)
6253{
6254}
6255void helper_invlpga(int aflag)
6256{
6257}
6258void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6259{
6260}
6261void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6262{
6263}
6264
6265void helper_svm_check_io(uint32_t port, uint32_t param,
6266 uint32_t next_eip_addend)
6267{
6268}
6269#else
6270
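/* The VMCB stores segment attributes in a packed 12-bit form: type, S,
   DPL and P in the low byte, AVL, L, D/B and G in bits 8..11.  qemu keeps
   them in the layout of the descriptor's second dword (bits 8..15 and
   20..23), so these helpers shift between the two encodings. */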
6271static inline void svm_save_seg(target_phys_addr_t addr,
6272 const SegmentCache *sc)
6273{
6274 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6275 sc->selector);
6276 stq_phys(addr + offsetof(struct vmcb_seg, base),
6277 sc->base);
6278 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6279 sc->limit);
6280 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6281 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6282}
6283
6284static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6285{
6286 unsigned int flags;
6287
6288 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6289 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6290 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6291 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6292 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6293}
6294
6295static inline void svm_load_seg_cache(target_phys_addr_t addr,
6296 CPUState *env, int seg_reg)
6297{
6298 SegmentCache sc1, *sc = &sc1;
6299 svm_load_seg(addr, sc);
6300 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6301 sc->base, sc->limit, sc->flags);
6302}
6303
6304void helper_vmrun(int aflag, int next_eip_addend)
6305{
6306 target_ulong addr;
6307 uint32_t event_inj;
6308 uint32_t int_ctl;
6309
6310 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
6311
6312 if (aflag == 2)
6313 addr = EAX;
6314 else
6315 addr = (uint32_t)EAX;
6316
6317 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
6318
6319 env->vm_vmcb = addr;
6320
6321 /* save the current CPU state in the hsave page */
6322 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6323 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6324
6325 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6326 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6327
6328 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
6329 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
6330 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
6331 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
6332 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
6333 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
6334
6335 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
6336 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
6337
6338 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
6339 &env->segs[R_ES]);
6340 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
6341 &env->segs[R_CS]);
6342 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
6343 &env->segs[R_SS]);
6344 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
6345 &env->segs[R_DS]);
6346
6347 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
6348 EIP + next_eip_addend);
6349 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
6350 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
6351
6352 /* load the interception bitmaps so we do not need to access the
6353 vmcb in svm mode */
6354 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
6355 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
6356 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
6357 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
6358 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
6359 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
6360
6361 /* enable intercepts */
6362 env->hflags |= HF_SVMI_MASK;
6363
6364 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
6365
6366 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
6367 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
6368
6369 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
6370 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
6371
6372 /* clear exit_info_2 so we behave like the real hardware */
6373 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6374
6375 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
6376 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
6377 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
6378 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
6379 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6380 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6381 if (int_ctl & V_INTR_MASKING_MASK) {
6382 env->v_tpr = int_ctl & V_TPR_MASK;
6383 env->hflags2 |= HF2_VINTR_MASK;
6384 if (env->eflags & IF_MASK)
6385 env->hflags2 |= HF2_HIF_MASK;
6386 }
6387
6388 cpu_load_efer(env,
6389 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6390 env->eflags = 0;
6391 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
6392 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6393 CC_OP = CC_OP_EFLAGS;
6394
6395 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
6396 env, R_ES);
6397 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6398 env, R_CS);
6399 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6400 env, R_SS);
6401 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6402 env, R_DS);
6403
6404 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
6405 env->eip = EIP;
6406 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
6407 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
6408 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
6409 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6410 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
6411
6412 /* FIXME: guest state consistency checks */
6413
6414 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6415 case TLB_CONTROL_DO_NOTHING:
6416 break;
6417 case TLB_CONTROL_FLUSH_ALL_ASID:
6418 /* FIXME: this is not 100% correct but should work for now */
6419 tlb_flush(env, 1);
6420 break;
6421 }
6422
6423 env->hflags2 |= HF2_GIF_MASK;
6424
6425 if (int_ctl & V_IRQ_MASK) {
6426 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
6427 }
6428
6429 /* maybe we need to inject an event */
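    /* VMCB EVENTINJ layout (AMD APM vol. 2): bits 7:0 vector, bits 10:8 type
       (0 = external interrupt, 2 = NMI, 3 = exception, 4 = software interrupt),
       bit 11 error-code-valid, bit 31 valid; the error code itself is held in
       the separate event_inj_err field read below. */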
6430 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
6431 if (event_inj & SVM_EVTINJ_VALID) {
6432 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
6433 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
6434 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
6435
6436 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
6437 /* FIXME: need to implement valid_err */
6438 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
6439 case SVM_EVTINJ_TYPE_INTR:
6440 env->exception_index = vector;
6441 env->error_code = event_inj_err;
6442 env->exception_is_int = 0;
6443 env->exception_next_eip = -1;
6444 qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
6445 /* XXX: is it always correct? */
6446 do_interrupt(vector, 0, 0, 0, 1);
6447 break;
6448 case SVM_EVTINJ_TYPE_NMI:
6449 env->exception_index = EXCP02_NMI;
6450 env->error_code = event_inj_err;
6451 env->exception_is_int = 0;
6452 env->exception_next_eip = EIP;
6453 qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
6454 cpu_loop_exit();
6455 break;
6456 case SVM_EVTINJ_TYPE_EXEPT:
6457 env->exception_index = vector;
6458 env->error_code = event_inj_err;
6459 env->exception_is_int = 0;
6460 env->exception_next_eip = -1;
6461 qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
6462 cpu_loop_exit();
6463 break;
6464 case SVM_EVTINJ_TYPE_SOFT:
6465 env->exception_index = vector;
6466 env->error_code = event_inj_err;
6467 env->exception_is_int = 1;
6468 env->exception_next_eip = EIP;
6469 qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
6470 cpu_loop_exit();
6471 break;
6472 }
6473 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
6474 }
6475}
6476
6477void helper_vmmcall(void)
6478{
6479 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
6480 raise_exception(EXCP06_ILLOP);
6481}
6482
6483void helper_vmload(int aflag)
6484{
6485 target_ulong addr;
6486 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
6487
6488 if (aflag == 2)
6489 addr = EAX;
6490 else
6491 addr = (uint32_t)EAX;
6492
6493 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6494 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6495 env->segs[R_FS].base);
6496
6497 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
6498 env, R_FS);
6499 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
6500 env, R_GS);
6501 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
6502 &env->tr);
6503 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
6504 &env->ldt);
6505
6506#ifdef TARGET_X86_64
6507 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
6508 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
6509 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
6510 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
6511#endif
6512 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
6513 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
6514 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
6515 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
6516}
6517
6518void helper_vmsave(int aflag)
6519{
6520 target_ulong addr;
6521 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
6522
6523 if (aflag == 2)
6524 addr = EAX;
6525 else
6526 addr = (uint32_t)EAX;
6527
6528 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6529 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6530 env->segs[R_FS].base);
6531
6532 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
6533 &env->segs[R_FS]);
6534 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
6535 &env->segs[R_GS]);
6536 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
6537 &env->tr);
6538 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
6539 &env->ldt);
6540
6541#ifdef TARGET_X86_64
6542 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
6543 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
6544 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
6545 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
6546#endif
6547 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
6548 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
6549 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
6550 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
6551}
6552
6553void helper_stgi(void)
6554{
6555 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
6556 env->hflags2 |= HF2_GIF_MASK;
6557}
6558
6559void helper_clgi(void)
6560{
6561 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
6562 env->hflags2 &= ~HF2_GIF_MASK;
6563}
6564
6565void helper_skinit(void)
6566{
6567 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
6568 /* XXX: not implemented */
6569 raise_exception(EXCP06_ILLOP);
6570}
6571
6572void helper_invlpga(int aflag)
6573{
6574 target_ulong addr;
6575 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6576
6577 if (aflag == 2)
6578 addr = EAX;
6579 else
6580 addr = (uint32_t)EAX;
6581
6582 /* XXX: could use the ASID to check whether the flush is
6583 actually needed */
6584 tlb_flush_page(env, addr);
6585}
6586
6587void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6588{
6589 if (likely(!(env->hflags & HF_SVMI_MASK)))
6590 return;
6591#ifndef VBOX
6592 switch(type) {
6593 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
6594 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
6595 helper_vmexit(type, param);
6596 }
6597 break;
6598 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
6599 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
6600 helper_vmexit(type, param);
6601 }
6602 break;
6603 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
6604 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
6605 helper_vmexit(type, param);
6606 }
6607 break;
6608 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
6609 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
6610 helper_vmexit(type, param);
6611 }
6612 break;
6613 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
6614 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
6615 helper_vmexit(type, param);
6616 }
6617 break;
6618 case SVM_EXIT_MSR:
6619 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
6620 /* FIXME: this should be read in at vmrun (faster this way?) */
6621 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
6622 uint32_t t0, t1;
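            /* The MSR permission map uses 2 bits per MSR (read intercept,
               then write intercept) and consists of three 2-KB vectors for
               the ranges 0x0-0x1fff, 0xc0000000-0xc0001fff and
               0xc0010000-0xc0011fff; t1 is the byte offset into the map,
               t0 the bit offset within that byte, and 'param' (0 = read,
               1 = write) selects which of the two bits is tested. */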
6623 switch((uint32_t)ECX) {
6624 case 0 ... 0x1fff:
6625 t0 = (ECX * 2) % 8;
6626 t1 = (ECX * 2) / 8;
6627 break;
6628 case 0xc0000000 ... 0xc0001fff:
6629 t0 = (8192 + ECX - 0xc0000000) * 2;
6630 t1 = (t0 / 8);
6631 t0 %= 8;
6632 break;
6633 case 0xc0010000 ... 0xc0011fff:
6634 t0 = (16384 + ECX - 0xc0010000) * 2;
6635 t1 = (t0 / 8);
6636 t0 %= 8;
6637 break;
6638 default:
6639 helper_vmexit(type, param);
6640 t0 = 0;
6641 t1 = 0;
6642 break;
6643 }
6644 if (ldub_phys(addr + t1) & ((1 << param) << t0))
6645 helper_vmexit(type, param);
6646 }
6647 break;
6648 default:
6649 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
6650 helper_vmexit(type, param);
6651 }
6652 break;
6653 }
6654#else /* VBOX */
6655 AssertMsgFailed(("We shouldn't be here, HM supported differently!"));
6656#endif /* VBOX */
6657}
6658
6659void helper_svm_check_io(uint32_t port, uint32_t param,
6660 uint32_t next_eip_addend)
6661{
6662 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
6663 /* FIXME: this should be read in at vmrun (faster this way?) */
6664 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
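        /* The I/O permission map has one intercept bit per port.  Bits 4-6 of
           'param' encode the access size in bytes, so 'mask' has one bit per
           accessed byte and the 16-bit load below also covers accesses that
           straddle a byte boundary of the map. */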
6665 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
6666 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
6667 /* next EIP */
6668 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
6669 env->eip + next_eip_addend);
6670 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
6671 }
6672 }
6673}
6674
6675/* Note: currently only 32 bits of exit_code are used */
6676void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6677{
6678 uint32_t int_ctl;
6679
6680 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6681 exit_code, exit_info_1,
6682 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6683 EIP);
6684
6685 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
6686 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6687 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6688 } else {
6689 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6690 }
6691
6692 /* Save the VM state in the vmcb */
6693 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6694 &env->segs[R_ES]);
6695 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6696 &env->segs[R_CS]);
6697 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6698 &env->segs[R_SS]);
6699 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6700 &env->segs[R_DS]);
6701
6702 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6703 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6704
6705 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6706 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6707
6708 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6709 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6710 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6711 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6712 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6713
6714 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6715 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6716 int_ctl |= env->v_tpr & V_TPR_MASK;
6717 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6718 int_ctl |= V_IRQ_MASK;
6719 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6720
6721 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6722 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6723 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6724 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6725 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6726 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6727 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6728
6729 /* Reload the host state from vm_hsave */
6730 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6731 env->hflags &= ~HF_SVMI_MASK;
6732 env->intercept = 0;
6733 env->intercept_exceptions = 0;
6734 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6735 env->tsc_offset = 0;
6736
6737 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6738 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6739
6740 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6741 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6742
6743 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6744 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6745 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6746 /* we need to set the efer after the crs so the hidden flags get
6747 set properly */
6748 cpu_load_efer(env,
6749 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6750 env->eflags = 0;
6751 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6752 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6753 CC_OP = CC_OP_EFLAGS;
6754
6755 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6756 env, R_ES);
6757 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6758 env, R_CS);
6759 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6760 env, R_SS);
6761 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6762 env, R_DS);
6763
6764 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6765 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6766 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6767
6768 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6769 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6770
6771 /* other setups */
6772 cpu_x86_set_cpl(env, 0);
6773 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6774 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6775
6776 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
6777 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
6778 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
6779 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
6780 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
6781
6782 env->hflags2 &= ~HF2_GIF_MASK;
6783 /* FIXME: Resets the current ASID register to zero (host ASID). */
6784
6785 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6786
6787 /* Clears the TSC_OFFSET inside the processor. */
6788
6789 /* If the host is in PAE mode, the processor reloads the host's PDPEs
6790 from the page table indicated by the host's CR3. If the PDPEs contain
6791 illegal state, the processor causes a shutdown. */
6792
6793 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6794 env->cr[0] |= CR0_PE_MASK;
6795 env->eflags &= ~VM_MASK;
6796
6797 /* Disables all breakpoints in the host DR7 register. */
6798
6799 /* Checks the reloaded host state for consistency. */
6800
6801 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
6802 host's code segment or non-canonical (in the case of long mode), a
6803 #GP fault is delivered inside the host. */
6804
6805 /* remove any pending exception */
6806 env->exception_index = -1;
6807 env->error_code = 0;
6808 env->old_exception = -1;
6809
6810 cpu_loop_exit();
6811}
6812
6813#endif
6814
6815/* MMX/SSE */
6816/* XXX: optimize by storing fptt and fptags in the static cpu state */
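/* env->fptags[] holds eight one-byte flags (non-zero = register empty); the
   two 32-bit stores below mark all eight x87/MMX registers valid (enter_mmx)
   or empty (emms) at once. */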
6817void helper_enter_mmx(void)
6818{
6819 env->fpstt = 0;
6820 *(uint32_t *)(env->fptags) = 0;
6821 *(uint32_t *)(env->fptags + 4) = 0;
6822}
6823
6824void helper_emms(void)
6825{
6826 /* set to empty state */
6827 *(uint32_t *)(env->fptags) = 0x01010101;
6828 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6829}
6830
6831/* XXX: suppress */
6832void helper_movq(void *d, void *s)
6833{
6834 *(uint64_t *)d = *(uint64_t *)s;
6835}
6836
6837#define SHIFT 0
6838#include "ops_sse.h"
6839
6840#define SHIFT 1
6841#include "ops_sse.h"
6842
6843#define SHIFT 0
6844#include "helper_template.h"
6845#undef SHIFT
6846
6847#define SHIFT 1
6848#include "helper_template.h"
6849#undef SHIFT
6850
6851#define SHIFT 2
6852#include "helper_template.h"
6853#undef SHIFT
6854
6855#ifdef TARGET_X86_64
6856
6857#define SHIFT 3
6858#include "helper_template.h"
6859#undef SHIFT
6860
6861#endif
6862
6863/* bit operations */
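/* helper_bsf() assumes a non-zero operand; the generated code is expected to
   test for zero and skip the call in that case, otherwise the loop below
   would not terminate. */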
6864target_ulong helper_bsf(target_ulong t0)
6865{
6866 int count;
6867 target_ulong res;
6868
6869 res = t0;
6870 count = 0;
6871 while ((res & 1) == 0) {
6872 count++;
6873 res >>= 1;
6874 }
6875 return count;
6876}
6877
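/* With wordsize > 0 this implements LZCNT for the given operand width
   (returning wordsize for a zero source); with wordsize == 0 it is used by
   helper_bsr() below and returns the bit index of the most significant set
   bit, assuming a non-zero source. */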
6878target_ulong helper_lzcnt(target_ulong t0, int wordsize)
6879{
6880 int count;
6881 target_ulong res, mask;
6882
6883 if (wordsize > 0 && t0 == 0) {
6884 return wordsize;
6885 }
6886 res = t0;
6887 count = TARGET_LONG_BITS - 1;
6888 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
6889 while ((res & mask) == 0) {
6890 count--;
6891 res <<= 1;
6892 }
6893 if (wordsize > 0) {
6894 return wordsize - 1 - count;
6895 }
6896 return count;
6897}
6898
6899target_ulong helper_bsr(target_ulong t0)
6900{
6901 return helper_lzcnt(t0, 0);
6902}
6903
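/* Lazy condition-code evaluation: the translator records the last flag-setting
   operation in CC_OP together with its operands in CC_SRC/CC_DST, and the
   compute_all_* / compute_c_* helpers (instantiated from helper_template.h
   above) reconstruct the full EFLAGS or just CF on demand. */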
6904static int compute_all_eflags(void)
6905{
6906 return CC_SRC;
6907}
6908
6909static int compute_c_eflags(void)
6910{
6911 return CC_SRC & CC_C;
6912}
6913
6914uint32_t helper_cc_compute_all(int op)
6915{
6916 switch (op) {
6917 default: /* should never happen */ return 0;
6918
6919 case CC_OP_EFLAGS: return compute_all_eflags();
6920
6921 case CC_OP_MULB: return compute_all_mulb();
6922 case CC_OP_MULW: return compute_all_mulw();
6923 case CC_OP_MULL: return compute_all_mull();
6924
6925 case CC_OP_ADDB: return compute_all_addb();
6926 case CC_OP_ADDW: return compute_all_addw();
6927 case CC_OP_ADDL: return compute_all_addl();
6928
6929 case CC_OP_ADCB: return compute_all_adcb();
6930 case CC_OP_ADCW: return compute_all_adcw();
6931 case CC_OP_ADCL: return compute_all_adcl();
6932
6933 case CC_OP_SUBB: return compute_all_subb();
6934 case CC_OP_SUBW: return compute_all_subw();
6935 case CC_OP_SUBL: return compute_all_subl();
6936
6937 case CC_OP_SBBB: return compute_all_sbbb();
6938 case CC_OP_SBBW: return compute_all_sbbw();
6939 case CC_OP_SBBL: return compute_all_sbbl();
6940
6941 case CC_OP_LOGICB: return compute_all_logicb();
6942 case CC_OP_LOGICW: return compute_all_logicw();
6943 case CC_OP_LOGICL: return compute_all_logicl();
6944
6945 case CC_OP_INCB: return compute_all_incb();
6946 case CC_OP_INCW: return compute_all_incw();
6947 case CC_OP_INCL: return compute_all_incl();
6948
6949 case CC_OP_DECB: return compute_all_decb();
6950 case CC_OP_DECW: return compute_all_decw();
6951 case CC_OP_DECL: return compute_all_decl();
6952
6953 case CC_OP_SHLB: return compute_all_shlb();
6954 case CC_OP_SHLW: return compute_all_shlw();
6955 case CC_OP_SHLL: return compute_all_shll();
6956
6957 case CC_OP_SARB: return compute_all_sarb();
6958 case CC_OP_SARW: return compute_all_sarw();
6959 case CC_OP_SARL: return compute_all_sarl();
6960
6961#ifdef TARGET_X86_64
6962 case CC_OP_MULQ: return compute_all_mulq();
6963
6964 case CC_OP_ADDQ: return compute_all_addq();
6965
6966 case CC_OP_ADCQ: return compute_all_adcq();
6967
6968 case CC_OP_SUBQ: return compute_all_subq();
6969
6970 case CC_OP_SBBQ: return compute_all_sbbq();
6971
6972 case CC_OP_LOGICQ: return compute_all_logicq();
6973
6974 case CC_OP_INCQ: return compute_all_incq();
6975
6976 case CC_OP_DECQ: return compute_all_decq();
6977
6978 case CC_OP_SHLQ: return compute_all_shlq();
6979
6980 case CC_OP_SARQ: return compute_all_sarq();
6981#endif
6982 }
6983}
6984
6985uint32_t helper_cc_compute_c(int op)
6986{
6987 switch (op) {
6988 default: /* should never happen */ return 0;
6989
6990 case CC_OP_EFLAGS: return compute_c_eflags();
6991
6992 case CC_OP_MULB: return compute_c_mull();
6993 case CC_OP_MULW: return compute_c_mull();
6994 case CC_OP_MULL: return compute_c_mull();
6995
6996 case CC_OP_ADDB: return compute_c_addb();
6997 case CC_OP_ADDW: return compute_c_addw();
6998 case CC_OP_ADDL: return compute_c_addl();
6999
7000 case CC_OP_ADCB: return compute_c_adcb();
7001 case CC_OP_ADCW: return compute_c_adcw();
7002 case CC_OP_ADCL: return compute_c_adcl();
7003
7004 case CC_OP_SUBB: return compute_c_subb();
7005 case CC_OP_SUBW: return compute_c_subw();
7006 case CC_OP_SUBL: return compute_c_subl();
7007
7008 case CC_OP_SBBB: return compute_c_sbbb();
7009 case CC_OP_SBBW: return compute_c_sbbw();
7010 case CC_OP_SBBL: return compute_c_sbbl();
7011
7012 case CC_OP_LOGICB: return compute_c_logicb();
7013 case CC_OP_LOGICW: return compute_c_logicw();
7014 case CC_OP_LOGICL: return compute_c_logicl();
7015
7016 case CC_OP_INCB: return compute_c_incl();
7017 case CC_OP_INCW: return compute_c_incl();
7018 case CC_OP_INCL: return compute_c_incl();
7019
7020 case CC_OP_DECB: return compute_c_incl();
7021 case CC_OP_DECW: return compute_c_incl();
7022 case CC_OP_DECL: return compute_c_incl();
7023
7024 case CC_OP_SHLB: return compute_c_shlb();
7025 case CC_OP_SHLW: return compute_c_shlw();
7026 case CC_OP_SHLL: return compute_c_shll();
7027
7028 case CC_OP_SARB: return compute_c_sarl();
7029 case CC_OP_SARW: return compute_c_sarl();
7030 case CC_OP_SARL: return compute_c_sarl();
7031
7032#ifdef TARGET_X86_64
7033 case CC_OP_MULQ: return compute_c_mull();
7034
7035 case CC_OP_ADDQ: return compute_c_addq();
7036
7037 case CC_OP_ADCQ: return compute_c_adcq();
7038
7039 case CC_OP_SUBQ: return compute_c_subq();
7040
7041 case CC_OP_SBBQ: return compute_c_sbbq();
7042
7043 case CC_OP_LOGICQ: return compute_c_logicq();
7044
7045 case CC_OP_INCQ: return compute_c_incl();
7046
7047 case CC_OP_DECQ: return compute_c_incl();
7048
7049 case CC_OP_SHLQ: return compute_c_shlq();
7050
7051 case CC_OP_SARQ: return compute_c_sarl();
7052#endif
7053 }
7054}