VirtualBox

source: vbox/trunk/src/recompiler/target-i386/op_helper.c@ 36170

Last change on this file since 36170 was 36170, checked in by vboxsync, 14 years ago

rem: synced up to svn://svn.savannah.nongnu.org/qemu/trunk@6686 (repo UUID c046a42c-6fe2-441c-8c8c-71466251a162).

  • Property svn:eol-style set to native
File size: 190.7 KB
 
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
19 */
20
21/*
22 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29
30#define CPU_NO_GLOBAL_REGS
31#include "exec.h"
32#include "exec-all.h"
33#include "host-utils.h"
34
35#ifdef VBOX
36# include "qemu-common.h"
37# include <math.h>
38# include "tcg.h"
39#endif /* VBOX */
40//#define DEBUG_PCALL
41
42
43#ifdef DEBUG_PCALL
44# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
45# define LOG_PCALL_STATE(env) \
46 log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
47#else
48# define LOG_PCALL(...) do { } while (0)
49# define LOG_PCALL_STATE(env) do { } while (0)
50#endif
51
52
53#if 0
54#define raise_exception_err(a, b)\
55do {\
56 qemu_log("raise_exception line=%d\n", __LINE__);\
57 (raise_exception_err)(a, b);\
58} while (0)
59#endif
60
61static const uint8_t parity_table[256] = {
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
67 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
73 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
74 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
79 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
81 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
82 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
83 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
84 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
85 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
86 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
87 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
88 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
89 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
90 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
91 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
92 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
93 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
94};
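/* The table above maps every possible low byte of a result to either CC_P or 0:
 * x86 PF is set when the low 8 bits of the result contain an even number of
 * 1 bits.  A minimal usage sketch follows (example_pf() is illustrative and
 * not part of this file): */
#if 0
static int example_pf(uint32_t result)
{
    /* e.g. 0x03 (two bits set) -> CC_P, 0x01 (one bit set) -> 0 */
    return parity_table[result & 0xff];
}
#endif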
95
96/* modulo 17 table */
97static const uint8_t rclw_table[32] = {
98 0, 1, 2, 3, 4, 5, 6, 7,
99 8, 9,10,11,12,13,14,15,
100 16, 0, 1, 2, 3, 4, 5, 6,
101 7, 8, 9,10,11,12,13,14,
102};
103
104/* modulo 9 table */
105static const uint8_t rclb_table[32] = {
106 0, 1, 2, 3, 4, 5, 6, 7,
107 8, 0, 1, 2, 3, 4, 5, 6,
108 7, 8, 0, 1, 2, 3, 4, 5,
109 6, 7, 8, 0, 1, 2, 3, 4,
110};
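/* The two tables above reduce an already masked (0..31) rotate count for the
 * rotate-through-carry helpers: a 16-bit RCL/RCR spans 17 bit positions
 * (operand plus CF) and an 8-bit one spans 9, so the effective count is the
 * raw count modulo 17 (rclw_table) or modulo 9 (rclb_table). */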
111
112static const CPU86_LDouble f15rk[7] =
113{
114 0.00000000000000000000L,
115 1.00000000000000000000L,
116 3.14159265358979323851L, /*pi*/
117 0.30102999566398119523L, /*lg2*/
118 0.69314718055994530943L, /*ln2*/
119 1.44269504088896340739L, /*l2e*/
120 3.32192809488736234781L, /*l2t*/
121};
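/* f15rk[] holds, in this order, the constants loaded by the x87
 * constant-load instructions FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E and
 * FLDL2T; the FPU helpers further down in this file index into it. */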
122
123/* broken thread support */
124
125static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
126
127void helper_lock(void)
128{
129 spin_lock(&global_cpu_lock);
130}
131
132void helper_unlock(void)
133{
134 spin_unlock(&global_cpu_lock);
135}
136
137void helper_write_eflags(target_ulong t0, uint32_t update_mask)
138{
139 load_eflags(t0, update_mask);
140}
141
142target_ulong helper_read_eflags(void)
143{
144 uint32_t eflags;
145 eflags = helper_cc_compute_all(CC_OP);
146 eflags |= (DF & DF_MASK);
147 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
148 return eflags;
149}
150
151#ifdef VBOX
152
153void helper_write_eflags_vme(target_ulong t0)
154{
155 unsigned int new_eflags = t0;
156
157 assert(env->eflags & (1<<VM_SHIFT));
158
159 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
160 /* if TF will be set -> #GP */
161 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
162 || (new_eflags & TF_MASK)) {
163 raise_exception(EXCP0D_GPF);
164 } else {
165 load_eflags(new_eflags,
166 (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
167
168 if (new_eflags & IF_MASK) {
169 env->eflags |= VIF_MASK;
170 } else {
171 env->eflags &= ~VIF_MASK;
172 }
173 }
174}
175
176target_ulong helper_read_eflags_vme(void)
177{
178 uint32_t eflags;
179 eflags = helper_cc_compute_all(CC_OP);
180 eflags |= (DF & DF_MASK);
181 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
182 if (env->eflags & VIF_MASK)
183 eflags |= IF_MASK;
184 else
185 eflags &= ~IF_MASK;
186
187 /* According to AMD manual, should be read with IOPL == 3 */
188 eflags |= (3 << IOPL_SHIFT);
189
190 /* We only use helper_read_eflags_vme() in 16-bit mode */
191 return eflags & 0xffff;
192}
193
194void helper_dump_state()
195{
196 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
197 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
198 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
199 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
200 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
201 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
202 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
203}
204
205#endif /* VBOX */
206
207/* return non-zero on error */
208static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
209 int selector)
210{
211 SegmentCache *dt;
212 int index;
213 target_ulong ptr;
214
215#ifdef VBOX
216 /* Trying to load a selector with CPL=1? */
217 if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
218 {
219 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
220 selector = selector & 0xfffc;
221 }
222#endif /* VBOX */
223
224 if (selector & 0x4)
225 dt = &env->ldt;
226 else
227 dt = &env->gdt;
228 index = selector & ~7;
229 if ((index + 7) > dt->limit)
230 return -1;
231 ptr = dt->base + index;
232 *e1_ptr = ldl_kernel(ptr);
233 *e2_ptr = ldl_kernel(ptr + 4);
234 return 0;
235}
236
237static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
238{
239 unsigned int limit;
240 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
241 if (e2 & DESC_G_MASK)
242 limit = (limit << 12) | 0xfff;
243 return limit;
244}
245
246static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
247{
248 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
249}
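/* e1/e2 are the low and high 32 bits of an 8-byte GDT/LDT descriptor.
 * get_seg_limit() reassembles limit bits 0-15 (from e1) and 16-19 (from e2)
 * and scales by 4 KiB when the granularity bit is set; get_seg_base()
 * reassembles base bits 0-15 (e1 >> 16), 16-23 and 24-31 (from e2).
 * Illustrative decode of a flat 4 GiB code descriptor (the values are an
 * example, not taken from a real guest): */
#if 0
static void example_decode_flat_cs(void)
{
    uint32_t e1 = 0x0000ffff;   /* limit 15:0 = 0xffff, base 15:0 = 0 */
    uint32_t e2 = 0x00cf9a00;   /* G=1, D=1, P=1, S=1, type = code/readable */
    uint32_t base  = get_seg_base(e1, e2);   /* == 0x00000000 */
    uint32_t limit = get_seg_limit(e1, e2);  /* == 0xffffffff */
    (void)base; (void)limit;
}
#endif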
250
251static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
252{
253 sc->base = get_seg_base(e1, e2);
254 sc->limit = get_seg_limit(e1, e2);
255 sc->flags = e2;
256}
257
258/* init the segment cache in vm86 mode. */
259static inline void load_seg_vm(int seg, int selector)
260{
261 selector &= 0xffff;
262#ifdef VBOX
263 /* flags must be 0xf3; expand-up read/write accessed data segment with DPL=3. (VT-x) */
264 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK;
265 flags |= (3 << DESC_DPL_SHIFT);
266
267 cpu_x86_load_seg_cache(env, seg, selector,
268 (selector << 4), 0xffff, flags);
269#else /* VBOX */
270 cpu_x86_load_seg_cache(env, seg, selector,
271 (selector << 4), 0xffff, 0);
272#endif /* VBOX */
273}
274
275static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
276 uint32_t *esp_ptr, int dpl)
277{
278#ifndef VBOX
279 int type, index, shift;
280#else
281 unsigned int type, index, shift;
282#endif
283
284#if 0
285 {
286 int i;
287 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
288 for(i=0;i<env->tr.limit;i++) {
289 printf("%02x ", env->tr.base[i]);
290 if ((i & 7) == 7) printf("\n");
291 }
292 printf("\n");
293 }
294#endif
295
296 if (!(env->tr.flags & DESC_P_MASK))
297 cpu_abort(env, "invalid tss");
298 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
299 if ((type & 7) != 1)
300 cpu_abort(env, "invalid tss type");
301 shift = type >> 3;
302 index = (dpl * 4 + 2) << shift;
303 if (index + (4 << shift) - 1 > env->tr.limit)
304 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
305 if (shift == 0) {
306 *esp_ptr = lduw_kernel(env->tr.base + index);
307 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
308 } else {
309 *esp_ptr = ldl_kernel(env->tr.base + index);
310 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
311 }
312}
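/* The offsets used above follow the architectural TSS layout: in a 32-bit
 * TSS the ring-n stack pointer is a dword at 4 + 8*n with the SS selector
 * word immediately after it; in a 16-bit TSS the SP/SS pair is packed as
 * words at 2 + 4*n.  Hence index = (dpl * 4 + 2) << shift. */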
313
314/* XXX: merge with load_seg() */
315static void tss_load_seg(int seg_reg, int selector)
316{
317 uint32_t e1, e2;
318 int rpl, dpl, cpl;
319
320#ifdef VBOX
321 e1 = e2 = 0; /* gcc warning? */
322 cpl = env->hflags & HF_CPL_MASK;
323 /* Trying to load a selector with CPL=1? */
324 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
325 {
326 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
327 selector = selector & 0xfffc;
328 }
329#endif /* VBOX */
330
331 if ((selector & 0xfffc) != 0) {
332 if (load_segment(&e1, &e2, selector) != 0)
333 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
334 if (!(e2 & DESC_S_MASK))
335 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
336 rpl = selector & 3;
337 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
338 cpl = env->hflags & HF_CPL_MASK;
339 if (seg_reg == R_CS) {
340 if (!(e2 & DESC_CS_MASK))
341 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
342 /* XXX: is it correct ? */
343 if (dpl != rpl)
344 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
345 if ((e2 & DESC_C_MASK) && dpl > rpl)
346 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
347 } else if (seg_reg == R_SS) {
348 /* SS must be writable data */
349 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
350 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
351 if (dpl != cpl || dpl != rpl)
352 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
353 } else {
354 /* not readable code */
355 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
356 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
357 /* if data or non-conforming code, check the rights */
358 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
359 if (dpl < cpl || dpl < rpl)
360 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
361 }
362 }
363 if (!(e2 & DESC_P_MASK))
364 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
365 cpu_x86_load_seg_cache(env, seg_reg, selector,
366 get_seg_base(e1, e2),
367 get_seg_limit(e1, e2),
368 e2);
369 } else {
370 if (seg_reg == R_SS || seg_reg == R_CS)
371 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
372#ifdef VBOX
373# if 0
374 /** @todo: now we ignore loading 0 selectors, need to check what is correct once */
375 cpu_x86_load_seg_cache(env, seg_reg, selector,
376 0, 0, 0);
377# endif
378#endif /* VBOX */
379 }
380}
381
382#define SWITCH_TSS_JMP 0
383#define SWITCH_TSS_IRET 1
384#define SWITCH_TSS_CALL 2
385
386/* XXX: restore CPU state in registers (PowerPC case) */
387static void switch_tss(int tss_selector,
388 uint32_t e1, uint32_t e2, int source,
389 uint32_t next_eip)
390{
391 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
392 target_ulong tss_base;
393 uint32_t new_regs[8], new_segs[6];
394 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
395 uint32_t old_eflags, eflags_mask;
396 SegmentCache *dt;
397#ifndef VBOX
398 int index;
399#else
400 unsigned int index;
401#endif
402 target_ulong ptr;
403
404 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
405 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
406
407 /* if task gate, we read the TSS segment and we load it */
408 if (type == 5) {
409 if (!(e2 & DESC_P_MASK))
410 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
411 tss_selector = e1 >> 16;
412 if (tss_selector & 4)
413 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
414 if (load_segment(&e1, &e2, tss_selector) != 0)
415 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
416 if (e2 & DESC_S_MASK)
417 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
418 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
419 if ((type & 7) != 1)
420 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
421 }
422
423 if (!(e2 & DESC_P_MASK))
424 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
425
426 if (type & 8)
427 tss_limit_max = 103;
428 else
429 tss_limit_max = 43;
430 tss_limit = get_seg_limit(e1, e2);
431 tss_base = get_seg_base(e1, e2);
432 if ((tss_selector & 4) != 0 ||
433 tss_limit < tss_limit_max)
434 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
435 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
436 if (old_type & 8)
437 old_tss_limit_max = 103;
438 else
439 old_tss_limit_max = 43;
440
441 /* read all the registers from the new TSS */
442 if (type & 8) {
443 /* 32 bit */
444 new_cr3 = ldl_kernel(tss_base + 0x1c);
445 new_eip = ldl_kernel(tss_base + 0x20);
446 new_eflags = ldl_kernel(tss_base + 0x24);
447 for(i = 0; i < 8; i++)
448 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
449 for(i = 0; i < 6; i++)
450 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
451 new_ldt = lduw_kernel(tss_base + 0x60);
452 new_trap = ldl_kernel(tss_base + 0x64);
453 } else {
454 /* 16 bit */
455 new_cr3 = 0;
456 new_eip = lduw_kernel(tss_base + 0x0e);
457 new_eflags = lduw_kernel(tss_base + 0x10);
458 for(i = 0; i < 8; i++)
459 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
460 for(i = 0; i < 4; i++)
461 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
462 new_ldt = lduw_kernel(tss_base + 0x2a);
463 new_segs[R_FS] = 0;
464 new_segs[R_GS] = 0;
465 new_trap = 0;
466 }
467
468 /* NOTE: we must avoid memory exceptions during the task switch,
469 so we make dummy accesses before */
470 /* XXX: it can still fail in some cases, so a bigger hack is
471 necessary to validate the TLB after having done the accesses */
472
473 v1 = ldub_kernel(env->tr.base);
474 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
475 stb_kernel(env->tr.base, v1);
476 stb_kernel(env->tr.base + old_tss_limit_max, v2);
477
478 /* clear busy bit (it is restartable) */
479 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
480 target_ulong ptr;
481 uint32_t e2;
482 ptr = env->gdt.base + (env->tr.selector & ~7);
483 e2 = ldl_kernel(ptr + 4);
484 e2 &= ~DESC_TSS_BUSY_MASK;
485 stl_kernel(ptr + 4, e2);
486 }
487 old_eflags = compute_eflags();
488 if (source == SWITCH_TSS_IRET)
489 old_eflags &= ~NT_MASK;
490
491 /* save the current state in the old TSS */
492 if (type & 8) {
493 /* 32 bit */
494 stl_kernel(env->tr.base + 0x20, next_eip);
495 stl_kernel(env->tr.base + 0x24, old_eflags);
496 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
497 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
498 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
499 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
500 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
501 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
502 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
503 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
504 for(i = 0; i < 6; i++)
505 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
506#ifdef VBOX
507 /* Must store the ldt as it gets reloaded and might have been changed. */
508 stw_kernel(env->tr.base + 0x60, env->ldt.selector);
509#endif
510#if defined(VBOX) && defined(DEBUG)
511 printf("TSS 32 bits switch\n");
512 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
513#endif
514 } else {
515 /* 16 bit */
516 stw_kernel(env->tr.base + 0x0e, next_eip);
517 stw_kernel(env->tr.base + 0x10, old_eflags);
518 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
519 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
520 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
521 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
522 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
523 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
524 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
525 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
526 for(i = 0; i < 4; i++)
527 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
528#ifdef VBOX
529 /* Must store the ldt as it gets reloaded and might have been changed. */
530 stw_kernel(env->tr.base + 0x2a, env->ldt.selector);
531#endif
532 }
533
534 /* now if an exception occurs, it will occur in the next task
535 context */
536
537 if (source == SWITCH_TSS_CALL) {
538 stw_kernel(tss_base, env->tr.selector);
539 new_eflags |= NT_MASK;
540 }
541
542 /* set busy bit */
543 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
544 target_ulong ptr;
545 uint32_t e2;
546 ptr = env->gdt.base + (tss_selector & ~7);
547 e2 = ldl_kernel(ptr + 4);
548 e2 |= DESC_TSS_BUSY_MASK;
549 stl_kernel(ptr + 4, e2);
550 }
551
552 /* set the new CPU state */
553 /* from this point, any exception which occurs can give problems */
554 env->cr[0] |= CR0_TS_MASK;
555 env->hflags |= HF_TS_MASK;
556 env->tr.selector = tss_selector;
557 env->tr.base = tss_base;
558 env->tr.limit = tss_limit;
559 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
560
561 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
562 cpu_x86_update_cr3(env, new_cr3);
563 }
564
565 /* load all registers without an exception, then reload them with
566 possible exception */
567 env->eip = new_eip;
568 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
569 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
570 if (!(type & 8))
571 eflags_mask &= 0xffff;
572 load_eflags(new_eflags, eflags_mask);
573 /* XXX: what to do in 16 bit case ? */
574 EAX = new_regs[0];
575 ECX = new_regs[1];
576 EDX = new_regs[2];
577 EBX = new_regs[3];
578 ESP = new_regs[4];
579 EBP = new_regs[5];
580 ESI = new_regs[6];
581 EDI = new_regs[7];
582 if (new_eflags & VM_MASK) {
583 for(i = 0; i < 6; i++)
584 load_seg_vm(i, new_segs[i]);
585 /* in vm86, CPL is always 3 */
586 cpu_x86_set_cpl(env, 3);
587 } else {
588 /* CPL is set to the RPL of CS */
589 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
590 /* first just selectors as the rest may trigger exceptions */
591 for(i = 0; i < 6; i++)
592 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
593 }
594
595 env->ldt.selector = new_ldt & ~4;
596 env->ldt.base = 0;
597 env->ldt.limit = 0;
598 env->ldt.flags = 0;
599
600 /* load the LDT */
601 if (new_ldt & 4)
602 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
603
604 if ((new_ldt & 0xfffc) != 0) {
605 dt = &env->gdt;
606 index = new_ldt & ~7;
607 if ((index + 7) > dt->limit)
608 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
609 ptr = dt->base + index;
610 e1 = ldl_kernel(ptr);
611 e2 = ldl_kernel(ptr + 4);
612 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
613 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
614 if (!(e2 & DESC_P_MASK))
615 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
616 load_seg_cache_raw_dt(&env->ldt, e1, e2);
617 }
618
619 /* load the segments */
620 if (!(new_eflags & VM_MASK)) {
621 tss_load_seg(R_CS, new_segs[R_CS]);
622 tss_load_seg(R_SS, new_segs[R_SS]);
623 tss_load_seg(R_ES, new_segs[R_ES]);
624 tss_load_seg(R_DS, new_segs[R_DS]);
625 tss_load_seg(R_FS, new_segs[R_FS]);
626 tss_load_seg(R_GS, new_segs[R_GS]);
627 }
628
629 /* check that EIP is in the CS segment limits */
630 if (new_eip > env->segs[R_CS].limit) {
631 /* XXX: different exception if CALL ? */
632 raise_exception_err(EXCP0D_GPF, 0);
633 }
634
635#ifndef CONFIG_USER_ONLY
636 /* reset local breakpoints */
637 if (env->dr[7] & 0x55) {
638 for (i = 0; i < 4; i++) {
639 if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
640 hw_breakpoint_remove(env, i);
641 }
642 env->dr[7] &= ~0x55;
643 }
644#endif
645}
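/* For reference, the 32-bit TSS offsets used throughout switch_tss() are the
 * architectural ones: 0x1c CR3, 0x20 EIP, 0x24 EFLAGS, 0x28-0x44 the eight
 * general registers, 0x48-0x5c the six segment selectors, 0x60 the LDT
 * selector and 0x64 the T-bit/trap word.  The 16-bit TSS packs the same
 * state as words starting at 0x0e (IP) and has no CR3 or FS/GS fields. */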
646
647/* check if Port I/O is allowed in TSS */
648static inline void check_io(int addr, int size)
649{
650#ifndef VBOX
651 int io_offset, val, mask;
652#else
653 int val, mask;
654 unsigned int io_offset;
655#endif /* VBOX */
656
657 /* TSS must be a valid 32 bit one */
658 if (!(env->tr.flags & DESC_P_MASK) ||
659 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
660 env->tr.limit < 103)
661 goto fail;
662 io_offset = lduw_kernel(env->tr.base + 0x66);
663 io_offset += (addr >> 3);
664 /* Note: the check needs two bytes */
665 if ((io_offset + 1) > env->tr.limit)
666 goto fail;
667 val = lduw_kernel(env->tr.base + io_offset);
668 val >>= (addr & 7);
669 mask = (1 << size) - 1;
670 /* all bits must be zero to allow the I/O */
671 if ((val & mask) != 0) {
672 fail:
673 raise_exception_err(EXCP0D_GPF, 0);
674 }
675}
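/* The word at TSS offset 0x66 is the base of the I/O permission bitmap.
 * Each port owns one bit; an access of `size' bytes at port `addr' is
 * allowed only if all `size' bits are clear.  Worked example (illustrative
 * values): for addr = 0x3f9 and size = 2, the word at io_offset + 0x7f is
 * read and bits 1-2 of it must both be 0, otherwise #GP(0) is raised. */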
676
677#ifdef VBOX
678/* Keep in sync with gen_check_external_event() */
679void helper_check_external_event()
680{
681 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_EXIT
682 | CPU_INTERRUPT_EXTERNAL_TIMER
683 | CPU_INTERRUPT_EXTERNAL_DMA))
684 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
685 && (env->eflags & IF_MASK)
686 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
687 {
688 helper_external_event();
689 }
690
691}
692
693void helper_sync_seg(uint32_t reg)
694{
695 if (env->segs[reg].newselector)
696 sync_seg(env, reg, env->segs[reg].newselector);
697}
698#endif /* VBOX */
699
700void helper_check_iob(uint32_t t0)
701{
702 check_io(t0, 1);
703}
704
705void helper_check_iow(uint32_t t0)
706{
707 check_io(t0, 2);
708}
709
710void helper_check_iol(uint32_t t0)
711{
712 check_io(t0, 4);
713}
714
715void helper_outb(uint32_t port, uint32_t data)
716{
717 cpu_outb(env, port, data & 0xff);
718}
719
720target_ulong helper_inb(uint32_t port)
721{
722 return cpu_inb(env, port);
723}
724
725void helper_outw(uint32_t port, uint32_t data)
726{
727 cpu_outw(env, port, data & 0xffff);
728}
729
730target_ulong helper_inw(uint32_t port)
731{
732 return cpu_inw(env, port);
733}
734
735void helper_outl(uint32_t port, uint32_t data)
736{
737 cpu_outl(env, port, data);
738}
739
740target_ulong helper_inl(uint32_t port)
741{
742 return cpu_inl(env, port);
743}
744
745static inline unsigned int get_sp_mask(unsigned int e2)
746{
747 if (e2 & DESC_B_MASK)
748 return 0xffffffff;
749 else
750 return 0xffff;
751}
752
753#ifdef TARGET_X86_64
754#define SET_ESP(val, sp_mask)\
755do {\
756 if ((sp_mask) == 0xffff)\
757 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
758 else if ((sp_mask) == 0xffffffffLL)\
759 ESP = (uint32_t)(val);\
760 else\
761 ESP = (val);\
762} while (0)
763#else
764#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
765#endif
766
767/* in 64-bit machines, this can overflow. So this segment addition macro
768 * can be used to trim the value to 32 bits whenever needed */
769#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
770
771/* XXX: add an is_user flag to have proper security support */
772#define PUSHW(ssp, sp, sp_mask, val)\
773{\
774 sp -= 2;\
775 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
776}
777
778#define PUSHL(ssp, sp, sp_mask, val)\
779{\
780 sp -= 4;\
781 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
782}
783
784#define POPW(ssp, sp, sp_mask, val)\
785{\
786 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
787 sp += 2;\
788}
789
790#define POPL(ssp, sp, sp_mask, val)\
791{\
792 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
793 sp += 4;\
794}
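/* Note that these macros operate on a local copy of the stack pointer and
 * never write ESP themselves; callers such as do_interrupt_protected() only
 * commit the final value with SET_ESP() after all pushes have succeeded, so
 * a page fault taken mid-sequence leaves the guest ESP unchanged and the
 * exception delivery restartable. */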
795
796/* protected mode interrupt */
797static void do_interrupt_protected(int intno, int is_int, int error_code,
798 unsigned int next_eip, int is_hw)
799{
800 SegmentCache *dt;
801 target_ulong ptr, ssp;
802 int type, dpl, selector, ss_dpl, cpl;
803 int has_error_code, new_stack, shift;
804 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
805 uint32_t old_eip, sp_mask;
806
807#ifdef VBOX
808 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
809 cpu_loop_exit();
810#endif
811
812 has_error_code = 0;
813 if (!is_int && !is_hw) {
814 switch(intno) {
815 case 8:
816 case 10:
817 case 11:
818 case 12:
819 case 13:
820 case 14:
821 case 17:
822 has_error_code = 1;
823 break;
824 }
825 }
826 if (is_int)
827 old_eip = next_eip;
828 else
829 old_eip = env->eip;
830
831 dt = &env->idt;
832#ifndef VBOX
833 if (intno * 8 + 7 > dt->limit)
834#else
835 if ((unsigned)intno * 8 + 7 > dt->limit)
836#endif
837 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
838 ptr = dt->base + intno * 8;
839 e1 = ldl_kernel(ptr);
840 e2 = ldl_kernel(ptr + 4);
841 /* check gate type */
842 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
843 switch(type) {
844 case 5: /* task gate */
845 /* must do that check here to return the correct error code */
846 if (!(e2 & DESC_P_MASK))
847 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
848 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
849 if (has_error_code) {
850 int type;
851 uint32_t mask;
852 /* push the error code */
853 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
854 shift = type >> 3;
855 if (env->segs[R_SS].flags & DESC_B_MASK)
856 mask = 0xffffffff;
857 else
858 mask = 0xffff;
859 esp = (ESP - (2 << shift)) & mask;
860 ssp = env->segs[R_SS].base + esp;
861 if (shift)
862 stl_kernel(ssp, error_code);
863 else
864 stw_kernel(ssp, error_code);
865 SET_ESP(esp, mask);
866 }
867 return;
868 case 6: /* 286 interrupt gate */
869 case 7: /* 286 trap gate */
870 case 14: /* 386 interrupt gate */
871 case 15: /* 386 trap gate */
872 break;
873 default:
874 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
875 break;
876 }
877 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
878 cpl = env->hflags & HF_CPL_MASK;
879 /* check privilege if software int */
880 if (is_int && dpl < cpl)
881 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
882 /* check valid bit */
883 if (!(e2 & DESC_P_MASK))
884 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
885 selector = e1 >> 16;
886 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
887 if ((selector & 0xfffc) == 0)
888 raise_exception_err(EXCP0D_GPF, 0);
889
890 if (load_segment(&e1, &e2, selector) != 0)
891 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
892 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
893 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
894 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
895 if (dpl > cpl)
896 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
897 if (!(e2 & DESC_P_MASK))
898 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
899 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
900 /* to inner privilege */
901 get_ss_esp_from_tss(&ss, &esp, dpl);
902 if ((ss & 0xfffc) == 0)
903 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
904 if ((ss & 3) != dpl)
905 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
906 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
907 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
908 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
909 if (ss_dpl != dpl)
910 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
911 if (!(ss_e2 & DESC_S_MASK) ||
912 (ss_e2 & DESC_CS_MASK) ||
913 !(ss_e2 & DESC_W_MASK))
914 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
915 if (!(ss_e2 & DESC_P_MASK))
916#ifdef VBOX /* See page 3-477 of 253666.pdf */
917 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
918#else
919 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
920#endif
921 new_stack = 1;
922 sp_mask = get_sp_mask(ss_e2);
923 ssp = get_seg_base(ss_e1, ss_e2);
924#if defined(VBOX) && defined(DEBUG)
925 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
926#endif
927 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
928 /* to same privilege */
929 if (env->eflags & VM_MASK)
930 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
931 new_stack = 0;
932 sp_mask = get_sp_mask(env->segs[R_SS].flags);
933 ssp = env->segs[R_SS].base;
934 esp = ESP;
935 dpl = cpl;
936 } else {
937 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
938 new_stack = 0; /* avoid warning */
939 sp_mask = 0; /* avoid warning */
940 ssp = 0; /* avoid warning */
941 esp = 0; /* avoid warning */
942 }
943
944 shift = type >> 3;
945
946#if 0
947 /* XXX: check that enough room is available */
948 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
949 if (env->eflags & VM_MASK)
950 push_size += 8;
951 push_size <<= shift;
952#endif
953 if (shift == 1) {
954 if (new_stack) {
955 if (env->eflags & VM_MASK) {
956 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
957 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
958 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
959 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
960 }
961 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
962 PUSHL(ssp, esp, sp_mask, ESP);
963 }
964 PUSHL(ssp, esp, sp_mask, compute_eflags());
965 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
966 PUSHL(ssp, esp, sp_mask, old_eip);
967 if (has_error_code) {
968 PUSHL(ssp, esp, sp_mask, error_code);
969 }
970 } else {
971 if (new_stack) {
972 if (env->eflags & VM_MASK) {
973 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
974 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
975 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
976 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
977 }
978 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
979 PUSHW(ssp, esp, sp_mask, ESP);
980 }
981 PUSHW(ssp, esp, sp_mask, compute_eflags());
982 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
983 PUSHW(ssp, esp, sp_mask, old_eip);
984 if (has_error_code) {
985 PUSHW(ssp, esp, sp_mask, error_code);
986 }
987 }
988
989 if (new_stack) {
990 if (env->eflags & VM_MASK) {
991 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
992 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
993 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
994 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
995 }
996 ss = (ss & ~3) | dpl;
997 cpu_x86_load_seg_cache(env, R_SS, ss,
998 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
999 }
1000 SET_ESP(esp, sp_mask);
1001
1002 selector = (selector & ~3) | dpl;
1003 cpu_x86_load_seg_cache(env, R_CS, selector,
1004 get_seg_base(e1, e2),
1005 get_seg_limit(e1, e2),
1006 e2);
1007 cpu_x86_set_cpl(env, dpl);
1008 env->eip = offset;
1009
1010 /* interrupt gate clear IF mask */
1011 if ((type & 1) == 0) {
1012 env->eflags &= ~IF_MASK;
1013 }
1014#ifndef VBOX
1015 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1016#else
1017 /*
1018 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1019 * gets confused by seemingly changed EFLAGS. See #3491 and
1020 * public bug #2341.
1021 */
1022 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1023#endif
1024}
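/* Stack frame built above, from higher to lower addresses:
 *   GS, FS, DS, ES     only when entering from vm86 mode
 *   SS, ESP            only when switching to an inner-privilege stack
 *   EFLAGS, CS, EIP    always
 *   error code         only for exceptions that define one
 * 386 gates push dwords, 286 gates push words. */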
1025
1026#ifdef VBOX
1027
1028/* check if VME interrupt redirection is enabled in TSS */
1029DECLINLINE(bool) is_vme_irq_redirected(int intno)
1030{
1031 unsigned int io_offset, intredir_offset;
1032 unsigned char val, mask;
1033
1034 /* TSS must be a valid 32 bit one */
1035 if (!(env->tr.flags & DESC_P_MASK) ||
1036 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
1037 env->tr.limit < 103)
1038 goto fail;
1039 io_offset = lduw_kernel(env->tr.base + 0x66);
1040 /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */
1041 if (io_offset < 0x68 + 0x20)
1042 io_offset = 0x68 + 0x20;
1043 /* the virtual interrupt redirection bitmap is located below the io bitmap */
1044 intredir_offset = io_offset - 0x20;
1045
1046 intredir_offset += (intno >> 3);
1047 if ((intredir_offset) > env->tr.limit)
1048 goto fail;
1049
1050 val = ldub_kernel(env->tr.base + intredir_offset);
1051 mask = 1 << (unsigned char)(intno & 7);
1052
1053 /* bit set means no redirection. */
1054 if ((val & mask) != 0) {
1055 return false;
1056 }
1057 return true;
1058
1059fail:
1060 raise_exception_err(EXCP0D_GPF, 0);
1061 return true;
1062}
1063
1064/* V86 mode software interrupt with CR4.VME=1 */
1065static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
1066{
1067 target_ulong ptr, ssp;
1068 int selector;
1069 uint32_t offset, esp;
1070 uint32_t old_cs, old_eflags;
1071 uint32_t iopl;
1072
1073 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
1074
1075 if (!is_vme_irq_redirected(intno))
1076 {
1077 if (iopl == 3)
1078 {
1079 do_interrupt_protected(intno, 1, error_code, next_eip, 0);
1080 return;
1081 }
1082 else
1083 raise_exception_err(EXCP0D_GPF, 0);
1084 }
1085
1086 /* virtual mode idt is at linear address 0 */
1087 ptr = 0 + intno * 4;
1088 offset = lduw_kernel(ptr);
1089 selector = lduw_kernel(ptr + 2);
1090 esp = ESP;
1091 ssp = env->segs[R_SS].base;
1092 old_cs = env->segs[R_CS].selector;
1093
1094 old_eflags = compute_eflags();
1095 if (iopl < 3)
1096 {
1097 /* copy VIF into IF and set IOPL to 3 */
1098 if (env->eflags & VIF_MASK)
1099 old_eflags |= IF_MASK;
1100 else
1101 old_eflags &= ~IF_MASK;
1102
1103 old_eflags |= (3 << IOPL_SHIFT);
1104 }
1105
1106 /* XXX: use SS segment size ? */
1107 PUSHW(ssp, esp, 0xffff, old_eflags);
1108 PUSHW(ssp, esp, 0xffff, old_cs);
1109 PUSHW(ssp, esp, 0xffff, next_eip);
1110
1111 /* update processor state */
1112 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1113 env->eip = offset;
1114 env->segs[R_CS].selector = selector;
1115 env->segs[R_CS].base = (selector << 4);
1116 env->eflags &= ~(TF_MASK | RF_MASK);
1117
1118 if (iopl < 3)
1119 env->eflags &= ~VIF_MASK;
1120 else
1121 env->eflags &= ~IF_MASK;
1122}
1123
1124#endif /* VBOX */
1125
1126#ifdef TARGET_X86_64
1127
1128#define PUSHQ(sp, val)\
1129{\
1130 sp -= 8;\
1131 stq_kernel(sp, (val));\
1132}
1133
1134#define POPQ(sp, val)\
1135{\
1136 val = ldq_kernel(sp);\
1137 sp += 8;\
1138}
1139
1140static inline target_ulong get_rsp_from_tss(int level)
1141{
1142 int index;
1143
1144#if 0
1145 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1146 env->tr.base, env->tr.limit);
1147#endif
1148
1149 if (!(env->tr.flags & DESC_P_MASK))
1150 cpu_abort(env, "invalid tss");
1151 index = 8 * level + 4;
1152 if ((index + 7) > env->tr.limit)
1153 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1154 return ldq_kernel(env->tr.base + index);
1155}
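/* In the 64-bit TSS the RSP0-RSP2 fields start at offset 4 and the
 * IST1-IST7 entries at offset 36, each 8 bytes wide; hence
 * index = 8 * level + 4, with level = dpl for the plain ring stacks and
 * level = ist + 3 for IST stacks (as used by do_interrupt64() below). */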
1156
1157/* 64 bit interrupt */
1158static void do_interrupt64(int intno, int is_int, int error_code,
1159 target_ulong next_eip, int is_hw)
1160{
1161 SegmentCache *dt;
1162 target_ulong ptr;
1163 int type, dpl, selector, cpl, ist;
1164 int has_error_code, new_stack;
1165 uint32_t e1, e2, e3, ss;
1166 target_ulong old_eip, esp, offset;
1167
1168#ifdef VBOX
1169 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
1170 cpu_loop_exit();
1171#endif
1172
1173 has_error_code = 0;
1174 if (!is_int && !is_hw) {
1175 switch(intno) {
1176 case 8:
1177 case 10:
1178 case 11:
1179 case 12:
1180 case 13:
1181 case 14:
1182 case 17:
1183 has_error_code = 1;
1184 break;
1185 }
1186 }
1187 if (is_int)
1188 old_eip = next_eip;
1189 else
1190 old_eip = env->eip;
1191
1192 dt = &env->idt;
1193 if (intno * 16 + 15 > dt->limit)
1194 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1195 ptr = dt->base + intno * 16;
1196 e1 = ldl_kernel(ptr);
1197 e2 = ldl_kernel(ptr + 4);
1198 e3 = ldl_kernel(ptr + 8);
1199 /* check gate type */
1200 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1201 switch(type) {
1202 case 14: /* 386 interrupt gate */
1203 case 15: /* 386 trap gate */
1204 break;
1205 default:
1206 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1207 break;
1208 }
1209 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1210 cpl = env->hflags & HF_CPL_MASK;
1211 /* check privilege if software int */
1212 if (is_int && dpl < cpl)
1213 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1214 /* check valid bit */
1215 if (!(e2 & DESC_P_MASK))
1216 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1217 selector = e1 >> 16;
1218 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1219 ist = e2 & 7;
1220 if ((selector & 0xfffc) == 0)
1221 raise_exception_err(EXCP0D_GPF, 0);
1222
1223 if (load_segment(&e1, &e2, selector) != 0)
1224 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1225 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1226 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1227 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1228 if (dpl > cpl)
1229 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1230 if (!(e2 & DESC_P_MASK))
1231 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1232 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1233 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1234 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1235 /* to inner privilege */
1236 if (ist != 0)
1237 esp = get_rsp_from_tss(ist + 3);
1238 else
1239 esp = get_rsp_from_tss(dpl);
1240 esp &= ~0xfLL; /* align stack */
1241 ss = 0;
1242 new_stack = 1;
1243 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1244 /* to same privilege */
1245 if (env->eflags & VM_MASK)
1246 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1247 new_stack = 0;
1248 if (ist != 0)
1249 esp = get_rsp_from_tss(ist + 3);
1250 else
1251 esp = ESP;
1252 esp &= ~0xfLL; /* align stack */
1253 dpl = cpl;
1254 } else {
1255 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1256 new_stack = 0; /* avoid warning */
1257 esp = 0; /* avoid warning */
1258 }
1259
1260 PUSHQ(esp, env->segs[R_SS].selector);
1261 PUSHQ(esp, ESP);
1262 PUSHQ(esp, compute_eflags());
1263 PUSHQ(esp, env->segs[R_CS].selector);
1264 PUSHQ(esp, old_eip);
1265 if (has_error_code) {
1266 PUSHQ(esp, error_code);
1267 }
1268
1269 if (new_stack) {
1270 ss = 0 | dpl;
1271 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1272 }
1273 ESP = esp;
1274
1275 selector = (selector & ~3) | dpl;
1276 cpu_x86_load_seg_cache(env, R_CS, selector,
1277 get_seg_base(e1, e2),
1278 get_seg_limit(e1, e2),
1279 e2);
1280 cpu_x86_set_cpl(env, dpl);
1281 env->eip = offset;
1282
1283 /* interrupt gate clear IF mask */
1284 if ((type & 1) == 0) {
1285 env->eflags &= ~IF_MASK;
1286 }
1287#ifndef VBOX
1288 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1289#else /* VBOX */
1290 /*
1291 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1292 * gets confused by seemingly changed EFLAGS. See #3491 and
1293 * public bug #2341.
1294 */
1295 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1296#endif /* VBOX */
1297}
1298#endif
1299
1300#ifdef TARGET_X86_64
1301#if defined(CONFIG_USER_ONLY)
1302void helper_syscall(int next_eip_addend)
1303{
1304 env->exception_index = EXCP_SYSCALL;
1305 env->exception_next_eip = env->eip + next_eip_addend;
1306 cpu_loop_exit();
1307}
1308#else
1309void helper_syscall(int next_eip_addend)
1310{
1311 int selector;
1312
1313 if (!(env->efer & MSR_EFER_SCE)) {
1314 raise_exception_err(EXCP06_ILLOP, 0);
1315 }
1316 selector = (env->star >> 32) & 0xffff;
1317 if (env->hflags & HF_LMA_MASK) {
1318 int code64;
1319
1320 ECX = env->eip + next_eip_addend;
1321 env->regs[11] = compute_eflags();
1322
1323 code64 = env->hflags & HF_CS64_MASK;
1324
1325 cpu_x86_set_cpl(env, 0);
1326 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1327 0, 0xffffffff,
1328 DESC_G_MASK | DESC_P_MASK |
1329 DESC_S_MASK |
1330 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1331 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1332 0, 0xffffffff,
1333 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1334 DESC_S_MASK |
1335 DESC_W_MASK | DESC_A_MASK);
1336 env->eflags &= ~env->fmask;
1337 load_eflags(env->eflags, 0);
1338 if (code64)
1339 env->eip = env->lstar;
1340 else
1341 env->eip = env->cstar;
1342 } else {
1343 ECX = (uint32_t)(env->eip + next_eip_addend);
1344
1345 cpu_x86_set_cpl(env, 0);
1346 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1347 0, 0xffffffff,
1348 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1349 DESC_S_MASK |
1350 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1351 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1352 0, 0xffffffff,
1353 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1354 DESC_S_MASK |
1355 DESC_W_MASK | DESC_A_MASK);
1356 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1357 env->eip = (uint32_t)env->star;
1358 }
1359}
1360#endif
1361#endif
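/* SYSCALL above follows the architected MSR plumbing: the new CS selector
 * comes from STAR[47:32] (SS is that value + 8), the target RIP from LSTAR
 * in 64-bit mode or CSTAR in compatibility mode, RFLAGS is masked by the
 * FMASK bits, and RCX/R11 receive the return RIP and the old RFLAGS.
 * SYSRET below performs the inverse using the selector in STAR[63:48]. */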
1362
1363#ifdef TARGET_X86_64
1364void helper_sysret(int dflag)
1365{
1366 int cpl, selector;
1367
1368 if (!(env->efer & MSR_EFER_SCE)) {
1369 raise_exception_err(EXCP06_ILLOP, 0);
1370 }
1371 cpl = env->hflags & HF_CPL_MASK;
1372 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1373 raise_exception_err(EXCP0D_GPF, 0);
1374 }
1375 selector = (env->star >> 48) & 0xffff;
1376 if (env->hflags & HF_LMA_MASK) {
1377 if (dflag == 2) {
1378 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1379 0, 0xffffffff,
1380 DESC_G_MASK | DESC_P_MASK |
1381 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1382 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1383 DESC_L_MASK);
1384 env->eip = ECX;
1385 } else {
1386 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1387 0, 0xffffffff,
1388 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1389 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1390 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1391 env->eip = (uint32_t)ECX;
1392 }
1393 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1394 0, 0xffffffff,
1395 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1396 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1397 DESC_W_MASK | DESC_A_MASK);
1398 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1399 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1400 cpu_x86_set_cpl(env, 3);
1401 } else {
1402 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1403 0, 0xffffffff,
1404 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1405 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1406 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1407 env->eip = (uint32_t)ECX;
1408 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1409 0, 0xffffffff,
1410 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1411 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1412 DESC_W_MASK | DESC_A_MASK);
1413 env->eflags |= IF_MASK;
1414 cpu_x86_set_cpl(env, 3);
1415 }
1416#ifdef USE_KQEMU
1417 if (kqemu_is_ok(env)) {
1418 if (env->hflags & HF_LMA_MASK)
1419 CC_OP = CC_OP_EFLAGS;
1420 env->exception_index = -1;
1421 cpu_loop_exit();
1422 }
1423#endif
1424}
1425#endif
1426
1427#ifdef VBOX
1428/**
1429 * Checks and processes external VMM events.
1430 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1431 */
1432void helper_external_event(void)
1433{
1434# if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1435 uintptr_t uSP;
1436# ifdef RT_ARCH_AMD64
1437 __asm__ __volatile__("movq %%rsp, %0" : "=r" (uSP));
1438# else
1439 __asm__ __volatile__("movl %%esp, %0" : "=r" (uSP));
1440# endif
1441 AssertMsg(!(uSP & 15), ("xSP=%#p\n", uSP));
1442# endif
1443 /* Keep in sync with flags checked by gen_check_external_event() */
1444 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1445 {
1446 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1447 ~CPU_INTERRUPT_EXTERNAL_HARD);
1448 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1449 }
1450 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1451 {
1452 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1453 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1454 cpu_interrupt(env, CPU_INTERRUPT_EXIT);
1455 }
1456 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1457 {
1458 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1459 ~CPU_INTERRUPT_EXTERNAL_DMA);
1460 remR3DmaRun(env);
1461 }
1462 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1463 {
1464 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1465 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1466 remR3TimersRun(env);
1467 }
1468}
1469/* helper for recording call instruction addresses for later scanning */
1470void helper_record_call()
1471{
1472 if ( !(env->state & CPU_RAW_RING0)
1473 && (env->cr[0] & CR0_PG_MASK)
1474 && !(env->eflags & X86_EFL_IF))
1475 remR3RecordCall(env);
1476}
1477#endif /* VBOX */
1478
1479/* real mode interrupt */
1480static void do_interrupt_real(int intno, int is_int, int error_code,
1481 unsigned int next_eip)
1482{
1483 SegmentCache *dt;
1484 target_ulong ptr, ssp;
1485 int selector;
1486 uint32_t offset, esp;
1487 uint32_t old_cs, old_eip;
1488
1489 /* real mode (simpler !) */
1490 dt = &env->idt;
1491#ifndef VBOX
1492 if (intno * 4 + 3 > dt->limit)
1493#else
1494 if ((unsigned)intno * 4 + 3 > dt->limit)
1495#endif
1496 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1497 ptr = dt->base + intno * 4;
1498 offset = lduw_kernel(ptr);
1499 selector = lduw_kernel(ptr + 2);
1500 esp = ESP;
1501 ssp = env->segs[R_SS].base;
1502 if (is_int)
1503 old_eip = next_eip;
1504 else
1505 old_eip = env->eip;
1506 old_cs = env->segs[R_CS].selector;
1507 /* XXX: use SS segment size ? */
1508 PUSHW(ssp, esp, 0xffff, compute_eflags());
1509 PUSHW(ssp, esp, 0xffff, old_cs);
1510 PUSHW(ssp, esp, 0xffff, old_eip);
1511
1512 /* update processor state */
1513 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1514 env->eip = offset;
1515 env->segs[R_CS].selector = selector;
1516 env->segs[R_CS].base = (selector << 4);
1517 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1518}
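/* In real mode the IDT is a plain interrupt vector table at dt->base
 * (normally linear address 0): vector n occupies 4 bytes, a 16-bit offset
 * followed by a 16-bit segment, and only FLAGS, CS and IP are pushed,
 * exactly as done above. */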
1519
1520/* fake user mode interrupt */
1521void do_interrupt_user(int intno, int is_int, int error_code,
1522 target_ulong next_eip)
1523{
1524 SegmentCache *dt;
1525 target_ulong ptr;
1526 int dpl, cpl, shift;
1527 uint32_t e2;
1528
1529 dt = &env->idt;
1530 if (env->hflags & HF_LMA_MASK) {
1531 shift = 4;
1532 } else {
1533 shift = 3;
1534 }
1535 ptr = dt->base + (intno << shift);
1536 e2 = ldl_kernel(ptr + 4);
1537
1538 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1539 cpl = env->hflags & HF_CPL_MASK;
1540 /* check privilege if software int */
1541 if (is_int && dpl < cpl)
1542 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1543
1544 /* Since we emulate only user space, we cannot do more than
1545 exiting the emulation with the suitable exception and error
1546 code */
1547 if (is_int)
1548 EIP = next_eip;
1549}
1550
1551/*
1552 * Begin execution of an interrupt. is_int is TRUE if coming from
1553 * the int instruction. next_eip is the EIP value AFTER the interrupt
1554 * instruction. It is only relevant if is_int is TRUE.
1555 */
1556void do_interrupt(int intno, int is_int, int error_code,
1557 target_ulong next_eip, int is_hw)
1558{
1559 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1560 if ((env->cr[0] & CR0_PE_MASK)) {
1561 static int count;
1562 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1563 count, intno, error_code, is_int,
1564 env->hflags & HF_CPL_MASK,
1565 env->segs[R_CS].selector, EIP,
1566 (int)env->segs[R_CS].base + EIP,
1567 env->segs[R_SS].selector, ESP);
1568 if (intno == 0x0e) {
1569 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1570 } else {
1571 qemu_log(" EAX=" TARGET_FMT_lx, EAX);
1572 }
1573 qemu_log("\n");
1574 log_cpu_state(env, X86_DUMP_CCOP);
1575#if 0
1576 {
1577 int i;
1578 uint8_t *ptr;
1579 qemu_log(" code=");
1580 ptr = env->segs[R_CS].base + env->eip;
1581 for(i = 0; i < 16; i++) {
1582 qemu_log(" %02x", ldub(ptr + i));
1583 }
1584 qemu_log("\n");
1585 }
1586#endif
1587 count++;
1588 }
1589 }
1590#ifdef VBOX
1591 if (RT_UNLIKELY(env->state & CPU_EMULATE_SINGLE_STEP)) {
1592 if (is_int) {
1593 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv%s\n",
1594 intno, error_code, (RTGCPTR)env->eip, is_hw ? " hw" : "");
1595 } else {
1596 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv next=%#RGv%s\n",
1597 intno, error_code, (RTGCPTR)env->eip, (RTGCPTR)next_eip, is_hw ? " hw" : "");
1598 }
1599 }
1600#endif
1601 if (env->cr[0] & CR0_PE_MASK) {
1602#ifdef TARGET_X86_64
1603 if (env->hflags & HF_LMA_MASK) {
1604 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1605 } else
1606#endif
1607 {
1608#ifdef VBOX
1609 /* int xx *, v86 code and VME enabled? */
1610 if ( (env->eflags & VM_MASK)
1611 && (env->cr[4] & CR4_VME_MASK)
1612 && is_int
1613 && !is_hw
1614 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1615 )
1616 do_soft_interrupt_vme(intno, error_code, next_eip);
1617 else
1618#endif /* VBOX */
1619 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1620 }
1621 } else {
1622 do_interrupt_real(intno, is_int, error_code, next_eip);
1623 }
1624}
1625
1626/* This should come from sysemu.h - if we could include it here... */
1627void qemu_system_reset_request(void);
1628
1629/*
1630 * Check nested exceptions and change to double or triple fault if
1631 * needed. It should only be called if this is not an interrupt.
1632 * Returns the new exception number.
1633 */
1634static int check_exception(int intno, int *error_code)
1635{
1636 int first_contributory = env->old_exception == 0 ||
1637 (env->old_exception >= 10 &&
1638 env->old_exception <= 13);
1639 int second_contributory = intno == 0 ||
1640 (intno >= 10 && intno <= 13);
1641
1642 qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
1643 env->old_exception, intno);
1644
1645#if !defined(CONFIG_USER_ONLY)
1646 if (env->old_exception == EXCP08_DBLE) {
1647 if (env->hflags & HF_SVMI_MASK)
1648 helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */
1649
1650 qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
1651
1652# ifndef VBOX
1653 qemu_system_reset_request();
1654# else
1655 remR3RaiseRC(env->pVM, VINF_EM_RESET); /** @todo test + improve triple fault handling. */
1656# endif
1657 return EXCP_HLT;
1658 }
1659#endif
1660
1661 if ((first_contributory && second_contributory)
1662 || (env->old_exception == EXCP0E_PAGE &&
1663 (second_contributory || (intno == EXCP0E_PAGE)))) {
1664 intno = EXCP08_DBLE;
1665 *error_code = 0;
1666 }
1667
1668 if (second_contributory || (intno == EXCP0E_PAGE) ||
1669 (intno == EXCP08_DBLE))
1670 env->old_exception = intno;
1671
1672 return intno;
1673}
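/* The rule implemented above matches the architected double-fault logic:
 * vectors 0 and 10-13 are "contributory"; a contributory fault raised while
 * delivering another contributory fault, or any contributory or page fault
 * raised while delivering a page fault, becomes #DF (vector 8) with error
 * code 0, and a further fault while delivering #DF shuts the CPU down
 * (triple fault), which the code maps to a system reset. */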
1674
1675/*
1676 * Signal an interrupt. It is executed in the main CPU loop.
1677 * is_int is TRUE if coming from the int instruction. next_eip is the
1678 * EIP value AFTER the interrupt instruction. It is only relevant if
1679 * is_int is TRUE.
1680 */
1681static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
1682 int next_eip_addend)
1683{
1684#if defined(VBOX) && defined(DEBUG)
1685 Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, (RTGCPTR)env->eip + next_eip_addend));
1686#endif
1687 if (!is_int) {
1688 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1689 intno = check_exception(intno, &error_code);
1690 } else {
1691 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1692 }
1693
1694 env->exception_index = intno;
1695 env->error_code = error_code;
1696 env->exception_is_int = is_int;
1697 env->exception_next_eip = env->eip + next_eip_addend;
1698 cpu_loop_exit();
1699}
1700
1701/* shortcuts to generate exceptions */
1702
1703void raise_exception_err(int exception_index, int error_code)
1704{
1705 raise_interrupt(exception_index, 0, error_code, 0);
1706}
1707
1708void raise_exception(int exception_index)
1709{
1710 raise_interrupt(exception_index, 0, 0, 0);
1711}
1712
1713/* SMM support */
1714
1715#if defined(CONFIG_USER_ONLY)
1716
1717void do_smm_enter(void)
1718{
1719}
1720
1721void helper_rsm(void)
1722{
1723}
1724
1725#else
1726
1727#ifdef TARGET_X86_64
1728#define SMM_REVISION_ID 0x00020064
1729#else
1730#define SMM_REVISION_ID 0x00020000
1731#endif
1732
1733void do_smm_enter(void)
1734{
1735 target_ulong sm_state;
1736 SegmentCache *dt;
1737 int i, offset;
1738
1739 qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
1740 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1741
1742 env->hflags |= HF_SMM_MASK;
1743 cpu_smm_update(env);
1744
1745 sm_state = env->smbase + 0x8000;
1746
1747#ifdef TARGET_X86_64
1748 for(i = 0; i < 6; i++) {
1749 dt = &env->segs[i];
1750 offset = 0x7e00 + i * 16;
1751 stw_phys(sm_state + offset, dt->selector);
1752 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1753 stl_phys(sm_state + offset + 4, dt->limit);
1754 stq_phys(sm_state + offset + 8, dt->base);
1755 }
1756
1757 stq_phys(sm_state + 0x7e68, env->gdt.base);
1758 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1759
1760 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1761 stq_phys(sm_state + 0x7e78, env->ldt.base);
1762 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1763 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1764
1765 stq_phys(sm_state + 0x7e88, env->idt.base);
1766 stl_phys(sm_state + 0x7e84, env->idt.limit);
1767
1768 stw_phys(sm_state + 0x7e90, env->tr.selector);
1769 stq_phys(sm_state + 0x7e98, env->tr.base);
1770 stl_phys(sm_state + 0x7e94, env->tr.limit);
1771 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1772
1773 stq_phys(sm_state + 0x7ed0, env->efer);
1774
1775 stq_phys(sm_state + 0x7ff8, EAX);
1776 stq_phys(sm_state + 0x7ff0, ECX);
1777 stq_phys(sm_state + 0x7fe8, EDX);
1778 stq_phys(sm_state + 0x7fe0, EBX);
1779 stq_phys(sm_state + 0x7fd8, ESP);
1780 stq_phys(sm_state + 0x7fd0, EBP);
1781 stq_phys(sm_state + 0x7fc8, ESI);
1782 stq_phys(sm_state + 0x7fc0, EDI);
1783 for(i = 8; i < 16; i++)
1784 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1785 stq_phys(sm_state + 0x7f78, env->eip);
1786 stl_phys(sm_state + 0x7f70, compute_eflags());
1787 stl_phys(sm_state + 0x7f68, env->dr[6]);
1788 stl_phys(sm_state + 0x7f60, env->dr[7]);
1789
1790 stl_phys(sm_state + 0x7f48, env->cr[4]);
1791 stl_phys(sm_state + 0x7f50, env->cr[3]);
1792 stl_phys(sm_state + 0x7f58, env->cr[0]);
1793
1794 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1795 stl_phys(sm_state + 0x7f00, env->smbase);
1796#else
1797 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1798 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1799 stl_phys(sm_state + 0x7ff4, compute_eflags());
1800 stl_phys(sm_state + 0x7ff0, env->eip);
1801 stl_phys(sm_state + 0x7fec, EDI);
1802 stl_phys(sm_state + 0x7fe8, ESI);
1803 stl_phys(sm_state + 0x7fe4, EBP);
1804 stl_phys(sm_state + 0x7fe0, ESP);
1805 stl_phys(sm_state + 0x7fdc, EBX);
1806 stl_phys(sm_state + 0x7fd8, EDX);
1807 stl_phys(sm_state + 0x7fd4, ECX);
1808 stl_phys(sm_state + 0x7fd0, EAX);
1809 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1810 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1811
1812 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1813 stl_phys(sm_state + 0x7f64, env->tr.base);
1814 stl_phys(sm_state + 0x7f60, env->tr.limit);
1815 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1816
1817 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1818 stl_phys(sm_state + 0x7f80, env->ldt.base);
1819 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1820 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1821
1822 stl_phys(sm_state + 0x7f74, env->gdt.base);
1823 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1824
1825 stl_phys(sm_state + 0x7f58, env->idt.base);
1826 stl_phys(sm_state + 0x7f54, env->idt.limit);
1827
1828 for(i = 0; i < 6; i++) {
1829 dt = &env->segs[i];
1830 if (i < 3)
1831 offset = 0x7f84 + i * 12;
1832 else
1833 offset = 0x7f2c + (i - 3) * 12;
1834 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1835 stl_phys(sm_state + offset + 8, dt->base);
1836 stl_phys(sm_state + offset + 4, dt->limit);
1837 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1838 }
1839 stl_phys(sm_state + 0x7f14, env->cr[4]);
1840
1841 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1842 stl_phys(sm_state + 0x7ef8, env->smbase);
1843#endif
1844 /* init SMM cpu state */
1845
1846#ifdef TARGET_X86_64
1847 cpu_load_efer(env, 0);
1848#endif
1849 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1850 env->eip = 0x00008000;
1851 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1852 0xffffffff, 0);
1853 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1854 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1855 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1856 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1857 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1858
1859 cpu_x86_update_cr0(env,
1860 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1861 cpu_x86_update_cr4(env, 0);
1862 env->dr[7] = 0x00000400;
1863 CC_OP = CC_OP_EFLAGS;
1864}
1865
1866void helper_rsm(void)
1867{
1868#ifdef VBOX
1869 cpu_abort(env, "helper_rsm");
1870#else /* !VBOX */
1871 target_ulong sm_state;
1872 int i, offset;
1873 uint32_t val;
1874
1875 sm_state = env->smbase + 0x8000;
1876#ifdef TARGET_X86_64
1877 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1878
1879 for(i = 0; i < 6; i++) {
1880 offset = 0x7e00 + i * 16;
1881 cpu_x86_load_seg_cache(env, i,
1882 lduw_phys(sm_state + offset),
1883 ldq_phys(sm_state + offset + 8),
1884 ldl_phys(sm_state + offset + 4),
1885 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1886 }
1887
1888 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1889 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1890
1891 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1892 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1893 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1894 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1895
1896 env->idt.base = ldq_phys(sm_state + 0x7e88);
1897 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1898
1899 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1900 env->tr.base = ldq_phys(sm_state + 0x7e98);
1901 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1902 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1903
1904 EAX = ldq_phys(sm_state + 0x7ff8);
1905 ECX = ldq_phys(sm_state + 0x7ff0);
1906 EDX = ldq_phys(sm_state + 0x7fe8);
1907 EBX = ldq_phys(sm_state + 0x7fe0);
1908 ESP = ldq_phys(sm_state + 0x7fd8);
1909 EBP = ldq_phys(sm_state + 0x7fd0);
1910 ESI = ldq_phys(sm_state + 0x7fc8);
1911 EDI = ldq_phys(sm_state + 0x7fc0);
1912 for(i = 8; i < 16; i++)
1913 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1914 env->eip = ldq_phys(sm_state + 0x7f78);
1915 load_eflags(ldl_phys(sm_state + 0x7f70),
1916 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1917 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1918 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1919
1920 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1921 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1922 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1923
1924 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1925 if (val & 0x20000) {
1926 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1927 }
1928#else
1929 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1930 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1931 load_eflags(ldl_phys(sm_state + 0x7ff4),
1932 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1933 env->eip = ldl_phys(sm_state + 0x7ff0);
1934 EDI = ldl_phys(sm_state + 0x7fec);
1935 ESI = ldl_phys(sm_state + 0x7fe8);
1936 EBP = ldl_phys(sm_state + 0x7fe4);
1937 ESP = ldl_phys(sm_state + 0x7fe0);
1938 EBX = ldl_phys(sm_state + 0x7fdc);
1939 EDX = ldl_phys(sm_state + 0x7fd8);
1940 ECX = ldl_phys(sm_state + 0x7fd4);
1941 EAX = ldl_phys(sm_state + 0x7fd0);
1942 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1943 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1944
1945 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1946 env->tr.base = ldl_phys(sm_state + 0x7f64);
1947 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1948 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1949
1950 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1951 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1952 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1953 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1954
1955 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1956 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1957
1958 env->idt.base = ldl_phys(sm_state + 0x7f58);
1959 env->idt.limit = ldl_phys(sm_state + 0x7f54);
1960
1961 for(i = 0; i < 6; i++) {
1962 if (i < 3)
1963 offset = 0x7f84 + i * 12;
1964 else
1965 offset = 0x7f2c + (i - 3) * 12;
1966 cpu_x86_load_seg_cache(env, i,
1967 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1968 ldl_phys(sm_state + offset + 8),
1969 ldl_phys(sm_state + offset + 4),
1970 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1971 }
1972 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1973
1974 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1975 if (val & 0x20000) {
1976 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1977 }
1978#endif
1979 CC_OP = CC_OP_EFLAGS;
1980 env->hflags &= ~HF_SMM_MASK;
1981 cpu_smm_update(env);
1982
1983 qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
1984 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1985#endif /* !VBOX */
1986}
1987
1988#endif /* !CONFIG_USER_ONLY */
1989
1990
1991/* division, flags are undefined */
1992
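/* The dividend is taken from AX, DX:AX or EDX:EAX depending on the
   operand size, the divisor from t0. Both a zero divisor and a
   quotient that does not fit in the destination register raise #DE
   (EXCP00_DIVZ), as on real hardware; the signed variants check the
   quotient against the signed range instead. */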
1993void helper_divb_AL(target_ulong t0)
1994{
1995 unsigned int num, den, q, r;
1996
1997 num = (EAX & 0xffff);
1998 den = (t0 & 0xff);
1999 if (den == 0) {
2000 raise_exception(EXCP00_DIVZ);
2001 }
2002 q = (num / den);
2003 if (q > 0xff)
2004 raise_exception(EXCP00_DIVZ);
2005 q &= 0xff;
2006 r = (num % den) & 0xff;
2007 EAX = (EAX & ~0xffff) | (r << 8) | q;
2008}
2009
2010void helper_idivb_AL(target_ulong t0)
2011{
2012 int num, den, q, r;
2013
2014 num = (int16_t)EAX;
2015 den = (int8_t)t0;
2016 if (den == 0) {
2017 raise_exception(EXCP00_DIVZ);
2018 }
2019 q = (num / den);
2020 if (q != (int8_t)q)
2021 raise_exception(EXCP00_DIVZ);
2022 q &= 0xff;
2023 r = (num % den) & 0xff;
2024 EAX = (EAX & ~0xffff) | (r << 8) | q;
2025}
2026
2027void helper_divw_AX(target_ulong t0)
2028{
2029 unsigned int num, den, q, r;
2030
2031 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2032 den = (t0 & 0xffff);
2033 if (den == 0) {
2034 raise_exception(EXCP00_DIVZ);
2035 }
2036 q = (num / den);
2037 if (q > 0xffff)
2038 raise_exception(EXCP00_DIVZ);
2039 q &= 0xffff;
2040 r = (num % den) & 0xffff;
2041 EAX = (EAX & ~0xffff) | q;
2042 EDX = (EDX & ~0xffff) | r;
2043}
2044
2045void helper_idivw_AX(target_ulong t0)
2046{
2047 int num, den, q, r;
2048
2049 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2050 den = (int16_t)t0;
2051 if (den == 0) {
2052 raise_exception(EXCP00_DIVZ);
2053 }
2054 q = (num / den);
2055 if (q != (int16_t)q)
2056 raise_exception(EXCP00_DIVZ);
2057 q &= 0xffff;
2058 r = (num % den) & 0xffff;
2059 EAX = (EAX & ~0xffff) | q;
2060 EDX = (EDX & ~0xffff) | r;
2061}
2062
2063void helper_divl_EAX(target_ulong t0)
2064{
2065 unsigned int den, r;
2066 uint64_t num, q;
2067
2068 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2069 den = t0;
2070 if (den == 0) {
2071 raise_exception(EXCP00_DIVZ);
2072 }
2073 q = (num / den);
2074 r = (num % den);
2075 if (q > 0xffffffff)
2076 raise_exception(EXCP00_DIVZ);
2077 EAX = (uint32_t)q;
2078 EDX = (uint32_t)r;
2079}
2080
2081void helper_idivl_EAX(target_ulong t0)
2082{
2083 int den, r;
2084 int64_t num, q;
2085
2086 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2087 den = t0;
2088 if (den == 0) {
2089 raise_exception(EXCP00_DIVZ);
2090 }
2091 q = (num / den);
2092 r = (num % den);
2093 if (q != (int32_t)q)
2094 raise_exception(EXCP00_DIVZ);
2095 EAX = (uint32_t)q;
2096 EDX = (uint32_t)r;
2097}
2098
2099/* bcd */
2100
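/* AAM/AAD split or combine AL and AH around the given base (10 for the
   normal encodings) and leave the adjusted AL in CC_DST for the lazy
   SF/ZF/PF computation. AAA/AAS/DAA/DAS fetch the current flags via
   helper_cc_compute_all(), adjust AL (and AH for the unpacked forms)
   using AF/CF, and store the resulting flags in CC_SRC. */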
2101/* XXX: AAM with an immediate of 0 should raise #DE (divide error); this is not checked here */
2102void helper_aam(int base)
2103{
2104 int al, ah;
2105 al = EAX & 0xff;
2106 ah = al / base;
2107 al = al % base;
2108 EAX = (EAX & ~0xffff) | al | (ah << 8);
2109 CC_DST = al;
2110}
2111
2112void helper_aad(int base)
2113{
2114 int al, ah;
2115 al = EAX & 0xff;
2116 ah = (EAX >> 8) & 0xff;
2117 al = ((ah * base) + al) & 0xff;
2118 EAX = (EAX & ~0xffff) | al;
2119 CC_DST = al;
2120}
2121
2122void helper_aaa(void)
2123{
2124 int icarry;
2125 int al, ah, af;
2126 int eflags;
2127
2128 eflags = helper_cc_compute_all(CC_OP);
2129 af = eflags & CC_A;
2130 al = EAX & 0xff;
2131 ah = (EAX >> 8) & 0xff;
2132
2133 icarry = (al > 0xf9);
2134 if (((al & 0x0f) > 9 ) || af) {
2135 al = (al + 6) & 0x0f;
2136 ah = (ah + 1 + icarry) & 0xff;
2137 eflags |= CC_C | CC_A;
2138 } else {
2139 eflags &= ~(CC_C | CC_A);
2140 al &= 0x0f;
2141 }
2142 EAX = (EAX & ~0xffff) | al | (ah << 8);
2143 CC_SRC = eflags;
2144}
2145
2146void helper_aas(void)
2147{
2148 int icarry;
2149 int al, ah, af;
2150 int eflags;
2151
2152 eflags = helper_cc_compute_all(CC_OP);
2153 af = eflags & CC_A;
2154 al = EAX & 0xff;
2155 ah = (EAX >> 8) & 0xff;
2156
2157 icarry = (al < 6);
2158 if (((al & 0x0f) > 9 ) || af) {
2159 al = (al - 6) & 0x0f;
2160 ah = (ah - 1 - icarry) & 0xff;
2161 eflags |= CC_C | CC_A;
2162 } else {
2163 eflags &= ~(CC_C | CC_A);
2164 al &= 0x0f;
2165 }
2166 EAX = (EAX & ~0xffff) | al | (ah << 8);
2167 CC_SRC = eflags;
2168}
2169
2170void helper_daa(void)
2171{
2172 int al, af, cf;
2173 int eflags;
2174
2175 eflags = helper_cc_compute_all(CC_OP);
2176 cf = eflags & CC_C;
2177 af = eflags & CC_A;
2178 al = EAX & 0xff;
2179
2180 eflags = 0;
2181 if (((al & 0x0f) > 9 ) || af) {
2182 al = (al + 6) & 0xff;
2183 eflags |= CC_A;
2184 }
2185 if ((al > 0x9f) || cf) {
2186 al = (al + 0x60) & 0xff;
2187 eflags |= CC_C;
2188 }
2189 EAX = (EAX & ~0xff) | al;
2190 /* well, speed is not an issue here, so we compute the flags by hand */
2191 eflags |= (al == 0) << 6; /* zf */
2192 eflags |= parity_table[al]; /* pf */
2193 eflags |= (al & 0x80); /* sf */
2194 CC_SRC = eflags;
2195}
2196
2197void helper_das(void)
2198{
2199 int al, al1, af, cf;
2200 int eflags;
2201
2202 eflags = helper_cc_compute_all(CC_OP);
2203 cf = eflags & CC_C;
2204 af = eflags & CC_A;
2205 al = EAX & 0xff;
2206
2207 eflags = 0;
2208 al1 = al;
2209 if (((al & 0x0f) > 9 ) || af) {
2210 eflags |= CC_A;
2211 if (al < 6 || cf)
2212 eflags |= CC_C;
2213 al = (al - 6) & 0xff;
2214 }
2215 if ((al1 > 0x99) || cf) {
2216 al = (al - 0x60) & 0xff;
2217 eflags |= CC_C;
2218 }
2219 EAX = (EAX & ~0xff) | al;
2220 /* well, speed is not an issue here, so we compute the flags by hand */
2221 eflags |= (al == 0) << 6; /* zf */
2222 eflags |= parity_table[al]; /* pf */
2223 eflags |= (al & 0x80); /* sf */
2224 CC_SRC = eflags;
2225}
2226
2227void helper_into(int next_eip_addend)
2228{
2229 int eflags;
2230 eflags = helper_cc_compute_all(CC_OP);
2231 if (eflags & CC_O) {
2232 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2233 }
2234}
2235
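/* CMPXCHG8B: compare the 64-bit value at a0 with EDX:EAX. On a match
   ECX:EBX is stored and ZF is set; on a mismatch the old value is
   written back unchanged (keeping the locked read-modify-write access
   pattern), loaded into EDX:EAX, and ZF is cleared. The 64-bit-only
   CMPXCHG16B variant below additionally requires 16-byte alignment
   and raises #GP otherwise. */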
2236void helper_cmpxchg8b(target_ulong a0)
2237{
2238 uint64_t d;
2239 int eflags;
2240
2241 eflags = helper_cc_compute_all(CC_OP);
2242 d = ldq(a0);
2243 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2244 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2245 eflags |= CC_Z;
2246 } else {
2247 /* always do the store */
2248 stq(a0, d);
2249 EDX = (uint32_t)(d >> 32);
2250 EAX = (uint32_t)d;
2251 eflags &= ~CC_Z;
2252 }
2253 CC_SRC = eflags;
2254}
2255
2256#ifdef TARGET_X86_64
2257void helper_cmpxchg16b(target_ulong a0)
2258{
2259 uint64_t d0, d1;
2260 int eflags;
2261
2262 if ((a0 & 0xf) != 0)
2263 raise_exception(EXCP0D_GPF);
2264 eflags = helper_cc_compute_all(CC_OP);
2265 d0 = ldq(a0);
2266 d1 = ldq(a0 + 8);
2267 if (d0 == EAX && d1 == EDX) {
2268 stq(a0, EBX);
2269 stq(a0 + 8, ECX);
2270 eflags |= CC_Z;
2271 } else {
2272 /* always do the store */
2273 stq(a0, d0);
2274 stq(a0 + 8, d1);
2275 EDX = d1;
2276 EAX = d0;
2277 eflags &= ~CC_Z;
2278 }
2279 CC_SRC = eflags;
2280}
2281#endif
2282
2283void helper_single_step(void)
2284{
2285#ifndef CONFIG_USER_ONLY
2286 check_hw_breakpoints(env, 1);
2287 env->dr[6] |= DR6_BS;
2288#endif
2289 raise_exception(EXCP01_DB);
2290}
2291
2292void helper_cpuid(void)
2293{
2294 uint32_t eax, ebx, ecx, edx;
2295
2296 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2297
2298 cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
2299 EAX = eax;
2300 EBX = ebx;
2301 ECX = ecx;
2302 EDX = edx;
2303}
2304
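/* ENTER with a non-zero nesting level: copy level-1 frame pointers
   from the caller's frame onto the new stack and finally push the new
   frame pointer (t1), using 16- or 32-bit slots depending on the
   operand size. All stack accesses are masked with the SS address-size
   mask. */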
2305void helper_enter_level(int level, int data32, target_ulong t1)
2306{
2307 target_ulong ssp;
2308 uint32_t esp_mask, esp, ebp;
2309
2310 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2311 ssp = env->segs[R_SS].base;
2312 ebp = EBP;
2313 esp = ESP;
2314 if (data32) {
2315 /* 32 bit */
2316 esp -= 4;
2317 while (--level) {
2318 esp -= 4;
2319 ebp -= 4;
2320 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2321 }
2322 esp -= 4;
2323 stl(ssp + (esp & esp_mask), t1);
2324 } else {
2325 /* 16 bit */
2326 esp -= 2;
2327 while (--level) {
2328 esp -= 2;
2329 ebp -= 2;
2330 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2331 }
2332 esp -= 2;
2333 stw(ssp + (esp & esp_mask), t1);
2334 }
2335}
2336
2337#ifdef TARGET_X86_64
2338void helper_enter64_level(int level, int data64, target_ulong t1)
2339{
2340 target_ulong esp, ebp;
2341 ebp = EBP;
2342 esp = ESP;
2343
2344 if (data64) {
2345 /* 64 bit */
2346 esp -= 8;
2347 while (--level) {
2348 esp -= 8;
2349 ebp -= 8;
2350 stq(esp, ldq(ebp));
2351 }
2352 esp -= 8;
2353 stq(esp, t1);
2354 } else {
2355 /* 16 bit */
2356 esp -= 2;
2357 while (--level) {
2358 esp -= 2;
2359 ebp -= 2;
2360 stw(esp, lduw(ebp));
2361 }
2362 esp -= 2;
2363 stw(esp, t1);
2364 }
2365}
2366#endif
2367
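/* LLDT: load LDTR from a GDT descriptor. A null selector just clears
   the cached base and limit; a selector with the TI bit set, or a
   descriptor that is not an LDT system descriptor (S clear, type 2),
   raises #GP, and a not-present descriptor raises #NP. In long mode
   the descriptor is 16 bytes and the third dword supplies bits 63:32
   of the base. */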
2368void helper_lldt(int selector)
2369{
2370 SegmentCache *dt;
2371 uint32_t e1, e2;
2372#ifndef VBOX
2373 int index, entry_limit;
2374#else
2375 unsigned int index, entry_limit;
2376#endif
2377 target_ulong ptr;
2378
2379#ifdef VBOX
2380 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
2381 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
2382#endif
2383
2384 selector &= 0xffff;
2385 if ((selector & 0xfffc) == 0) {
2386 /* XXX: NULL selector case: invalid LDT */
2387 env->ldt.base = 0;
2388 env->ldt.limit = 0;
2389 } else {
2390 if (selector & 0x4)
2391 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2392 dt = &env->gdt;
2393 index = selector & ~7;
2394#ifdef TARGET_X86_64
2395 if (env->hflags & HF_LMA_MASK)
2396 entry_limit = 15;
2397 else
2398#endif
2399 entry_limit = 7;
2400 if ((index + entry_limit) > dt->limit)
2401 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2402 ptr = dt->base + index;
2403 e1 = ldl_kernel(ptr);
2404 e2 = ldl_kernel(ptr + 4);
2405 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2406 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2407 if (!(e2 & DESC_P_MASK))
2408 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2409#ifdef TARGET_X86_64
2410 if (env->hflags & HF_LMA_MASK) {
2411 uint32_t e3;
2412 e3 = ldl_kernel(ptr + 8);
2413 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2414 env->ldt.base |= (target_ulong)e3 << 32;
2415 } else
2416#endif
2417 {
2418 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2419 }
2420 }
2421 env->ldt.selector = selector;
2422#ifdef VBOX
2423 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2424 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2425#endif
2426}
2427
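/* LTR: same structure as LLDT, but the descriptor must be an available
   16- or 32-bit TSS (type 1 or 9); after loading, the busy bit is
   written back into the GDT entry. In long mode the upper half of the
   16-byte descriptor must have a zero type field. */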
2428void helper_ltr(int selector)
2429{
2430 SegmentCache *dt;
2431 uint32_t e1, e2;
2432#ifndef VBOX
2433 int index, type, entry_limit;
2434#else
2435 unsigned int index;
2436 int type, entry_limit;
2437#endif
2438 target_ulong ptr;
2439
2440#ifdef VBOX
2441 Log(("helper_ltr: old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2442 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2443 env->tr.flags, (RTSEL)(selector & 0xffff)));
2444#endif
2445 selector &= 0xffff;
2446 if ((selector & 0xfffc) == 0) {
2447 /* NULL selector case: invalid TR */
2448 env->tr.base = 0;
2449 env->tr.limit = 0;
2450 env->tr.flags = 0;
2451 } else {
2452 if (selector & 0x4)
2453 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2454 dt = &env->gdt;
2455 index = selector & ~7;
2456#ifdef TARGET_X86_64
2457 if (env->hflags & HF_LMA_MASK)
2458 entry_limit = 15;
2459 else
2460#endif
2461 entry_limit = 7;
2462 if ((index + entry_limit) > dt->limit)
2463 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2464 ptr = dt->base + index;
2465 e1 = ldl_kernel(ptr);
2466 e2 = ldl_kernel(ptr + 4);
2467 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2468 if ((e2 & DESC_S_MASK) ||
2469 (type != 1 && type != 9))
2470 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2471 if (!(e2 & DESC_P_MASK))
2472 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2473#ifdef TARGET_X86_64
2474 if (env->hflags & HF_LMA_MASK) {
2475 uint32_t e3, e4;
2476 e3 = ldl_kernel(ptr + 8);
2477 e4 = ldl_kernel(ptr + 12);
2478 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2479 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2480 load_seg_cache_raw_dt(&env->tr, e1, e2);
2481 env->tr.base |= (target_ulong)e3 << 32;
2482 } else
2483#endif
2484 {
2485 load_seg_cache_raw_dt(&env->tr, e1, e2);
2486 }
2487 e2 |= DESC_TSS_BUSY_MASK;
2488 stl_kernel(ptr + 4, e2);
2489 }
2490 env->tr.selector = selector;
2491#ifdef VBOX
2492 Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2493 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2494 env->tr.flags, (RTSEL)(selector & 0xffff)));
2495#endif
2496}
2497
2498/* only works in protected mode and not in VM86 mode. seg_reg must be != R_CS */
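/* A null selector only clears the segment cache, except for SS where it
   raises #GP (unless running 64-bit code at CPL != 3). Otherwise SS must
   be a writable data segment with RPL == DPL == CPL, segments loaded into
   the other registers must be readable, non-conforming ones must satisfy
   DPL >= CPL and DPL >= RPL, a not-present segment raises #SS (for SS) or
   #NP, and the accessed bit is set in the descriptor if it was clear. */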
2499void helper_load_seg(int seg_reg, int selector)
2500{
2501 uint32_t e1, e2;
2502 int cpl, dpl, rpl;
2503 SegmentCache *dt;
2504#ifndef VBOX
2505 int index;
2506#else
2507 unsigned int index;
2508#endif
2509 target_ulong ptr;
2510
2511 selector &= 0xffff;
2512 cpl = env->hflags & HF_CPL_MASK;
2513#ifdef VBOX
2514
2515 /* Trying to load a selector with CPL=1? */
2516 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2517 {
2518 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
2519 selector = selector & 0xfffc;
2520 }
2521#endif /* VBOX */
2522 if ((selector & 0xfffc) == 0) {
2523 /* null selector case */
2524 if (seg_reg == R_SS
2525#ifdef TARGET_X86_64
2526 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2527#endif
2528 )
2529 raise_exception_err(EXCP0D_GPF, 0);
2530 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2531 } else {
2532
2533 if (selector & 0x4)
2534 dt = &env->ldt;
2535 else
2536 dt = &env->gdt;
2537 index = selector & ~7;
2538 if ((index + 7) > dt->limit)
2539 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2540 ptr = dt->base + index;
2541 e1 = ldl_kernel(ptr);
2542 e2 = ldl_kernel(ptr + 4);
2543
2544 if (!(e2 & DESC_S_MASK))
2545 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2546 rpl = selector & 3;
2547 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2548 if (seg_reg == R_SS) {
2549 /* must be writable segment */
2550 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2551 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2552 if (rpl != cpl || dpl != cpl)
2553 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2554 } else {
2555 /* must be readable segment */
2556 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2557 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2558
2559 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2560 /* if not conforming code, test rights */
2561 if (dpl < cpl || dpl < rpl)
2562 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2563 }
2564 }
2565
2566 if (!(e2 & DESC_P_MASK)) {
2567 if (seg_reg == R_SS)
2568 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2569 else
2570 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2571 }
2572
2573 /* set the access bit if not already set */
2574 if (!(e2 & DESC_A_MASK)) {
2575 e2 |= DESC_A_MASK;
2576 stl_kernel(ptr + 4, e2);
2577 }
2578
2579 cpu_x86_load_seg_cache(env, seg_reg, selector,
2580 get_seg_base(e1, e2),
2581 get_seg_limit(e1, e2),
2582 e2);
2583#if 0
2584 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2585 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2586#endif
2587 }
2588}
2589
2590/* protected mode jump */
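/* Far JMP through a code-segment descriptor: a conforming target needs
   DPL <= CPL, a non-conforming one needs RPL <= CPL and DPL == CPL, and
   CPL is preserved by forcing it into the loaded selector. Gate
   descriptors are also accepted: task gates and TSS descriptors go
   through switch_tss(), while call gates supply the target CS:EIP from
   the gate, with the gate and target DPL checks applied before CS is
   loaded. */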
2591void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2592 int next_eip_addend)
2593{
2594 int gate_cs, type;
2595 uint32_t e1, e2, cpl, dpl, rpl, limit;
2596 target_ulong next_eip;
2597
2598#ifdef VBOX /** @todo Why do we do this? */
2599 e1 = e2 = 0;
2600#endif
2601 if ((new_cs & 0xfffc) == 0)
2602 raise_exception_err(EXCP0D_GPF, 0);
2603 if (load_segment(&e1, &e2, new_cs) != 0)
2604 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2605 cpl = env->hflags & HF_CPL_MASK;
2606 if (e2 & DESC_S_MASK) {
2607 if (!(e2 & DESC_CS_MASK))
2608 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2609 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2610 if (e2 & DESC_C_MASK) {
2611 /* conforming code segment */
2612 if (dpl > cpl)
2613 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2614 } else {
2615 /* non conforming code segment */
2616 rpl = new_cs & 3;
2617 if (rpl > cpl)
2618 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2619 if (dpl != cpl)
2620 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2621 }
2622 if (!(e2 & DESC_P_MASK))
2623 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2624 limit = get_seg_limit(e1, e2);
2625 if (new_eip > limit &&
2626 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2627 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2628 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2629 get_seg_base(e1, e2), limit, e2);
2630 EIP = new_eip;
2631 } else {
2632 /* jump to call or task gate */
2633 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2634 rpl = new_cs & 3;
2635 cpl = env->hflags & HF_CPL_MASK;
2636 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2637 switch(type) {
2638 case 1: /* 286 TSS */
2639 case 9: /* 386 TSS */
2640 case 5: /* task gate */
2641 if (dpl < cpl || dpl < rpl)
2642 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2643 next_eip = env->eip + next_eip_addend;
2644 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2645 CC_OP = CC_OP_EFLAGS;
2646 break;
2647 case 4: /* 286 call gate */
2648 case 12: /* 386 call gate */
2649 if ((dpl < cpl) || (dpl < rpl))
2650 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2651 if (!(e2 & DESC_P_MASK))
2652 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2653 gate_cs = e1 >> 16;
2654 new_eip = (e1 & 0xffff);
2655 if (type == 12)
2656 new_eip |= (e2 & 0xffff0000);
2657 if (load_segment(&e1, &e2, gate_cs) != 0)
2658 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2659 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2660 /* must be code segment */
2661 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2662 (DESC_S_MASK | DESC_CS_MASK)))
2663 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2664 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2665 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2666 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2667 if (!(e2 & DESC_P_MASK))
2668#ifdef VBOX /* See page 3-514 of 253666.pdf */
2669 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2670#else
2671 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2672#endif
2673 limit = get_seg_limit(e1, e2);
2674 if (new_eip > limit)
2675 raise_exception_err(EXCP0D_GPF, 0);
2676 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2677 get_seg_base(e1, e2), limit, e2);
2678 EIP = new_eip;
2679 break;
2680 default:
2681 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2682 break;
2683 }
2684 }
2685}
2686
2687/* real mode call */
2688void helper_lcall_real(int new_cs, target_ulong new_eip1,
2689 int shift, int next_eip)
2690{
2691 int new_eip;
2692 uint32_t esp, esp_mask;
2693 target_ulong ssp;
2694
2695 new_eip = new_eip1;
2696 esp = ESP;
2697 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2698 ssp = env->segs[R_SS].base;
2699 if (shift) {
2700 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2701 PUSHL(ssp, esp, esp_mask, next_eip);
2702 } else {
2703 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2704 PUSHW(ssp, esp, esp_mask, next_eip);
2705 }
2706
2707 SET_ESP(esp, esp_mask);
2708 env->eip = new_eip;
2709 env->segs[R_CS].selector = new_cs;
2710 env->segs[R_CS].base = (new_cs << 4);
2711}
2712
2713/* protected mode call */
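/* Far CALL: direct calls to a code segment push CS and the return EIP on
   the current stack (64-bit pushes when shift == 2). Through a call gate
   to a more privileged non-conforming segment (DPL < CPL) the new SS:ESP
   is fetched from the TSS, param_count parameters are copied from the old
   stack, and the old SS:ESP is pushed before CS:EIP; same-privilege gate
   calls keep the current stack. The gate type selects 16- vs 32-bit push
   size (shift = type >> 3). */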
2714void helper_lcall_protected(int new_cs, target_ulong new_eip,
2715 int shift, int next_eip_addend)
2716{
2717 int new_stack, i;
2718 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2719 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2720 uint32_t val, limit, old_sp_mask;
2721 target_ulong ssp, old_ssp, next_eip;
2722
2723#ifdef VBOX /** @todo Why do we do this? */
2724 e1 = e2 = 0;
2725#endif
2726 next_eip = env->eip + next_eip_addend;
2727 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2728 LOG_PCALL_STATE(env);
2729 if ((new_cs & 0xfffc) == 0)
2730 raise_exception_err(EXCP0D_GPF, 0);
2731 if (load_segment(&e1, &e2, new_cs) != 0)
2732 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2733 cpl = env->hflags & HF_CPL_MASK;
2734 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2735 if (e2 & DESC_S_MASK) {
2736 if (!(e2 & DESC_CS_MASK))
2737 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2738 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2739 if (e2 & DESC_C_MASK) {
2740 /* conforming code segment */
2741 if (dpl > cpl)
2742 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2743 } else {
2744 /* non conforming code segment */
2745 rpl = new_cs & 3;
2746 if (rpl > cpl)
2747 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2748 if (dpl != cpl)
2749 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2750 }
2751 if (!(e2 & DESC_P_MASK))
2752 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2753
2754#ifdef TARGET_X86_64
2755 /* XXX: check 16/32 bit cases in long mode */
2756 if (shift == 2) {
2757 target_ulong rsp;
2758 /* 64 bit case */
2759 rsp = ESP;
2760 PUSHQ(rsp, env->segs[R_CS].selector);
2761 PUSHQ(rsp, next_eip);
2762 /* from this point, not restartable */
2763 ESP = rsp;
2764 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2765 get_seg_base(e1, e2),
2766 get_seg_limit(e1, e2), e2);
2767 EIP = new_eip;
2768 } else
2769#endif
2770 {
2771 sp = ESP;
2772 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2773 ssp = env->segs[R_SS].base;
2774 if (shift) {
2775 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2776 PUSHL(ssp, sp, sp_mask, next_eip);
2777 } else {
2778 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2779 PUSHW(ssp, sp, sp_mask, next_eip);
2780 }
2781
2782 limit = get_seg_limit(e1, e2);
2783 if (new_eip > limit)
2784 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2785 /* from this point, not restartable */
2786 SET_ESP(sp, sp_mask);
2787 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2788 get_seg_base(e1, e2), limit, e2);
2789 EIP = new_eip;
2790 }
2791 } else {
2792 /* check gate type */
2793 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2794 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2795 rpl = new_cs & 3;
2796 switch(type) {
2797 case 1: /* available 286 TSS */
2798 case 9: /* available 386 TSS */
2799 case 5: /* task gate */
2800 if (dpl < cpl || dpl < rpl)
2801 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2802 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2803 CC_OP = CC_OP_EFLAGS;
2804 return;
2805 case 4: /* 286 call gate */
2806 case 12: /* 386 call gate */
2807 break;
2808 default:
2809 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2810 break;
2811 }
2812 shift = type >> 3;
2813
2814 if (dpl < cpl || dpl < rpl)
2815 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2816 /* check valid bit */
2817 if (!(e2 & DESC_P_MASK))
2818 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2819 selector = e1 >> 16;
2820 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2821 param_count = e2 & 0x1f;
2822 if ((selector & 0xfffc) == 0)
2823 raise_exception_err(EXCP0D_GPF, 0);
2824
2825 if (load_segment(&e1, &e2, selector) != 0)
2826 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2827 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2828 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2829 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2830 if (dpl > cpl)
2831 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2832 if (!(e2 & DESC_P_MASK))
2833 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2834
2835 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2836 /* to inner privilege */
2837 get_ss_esp_from_tss(&ss, &sp, dpl);
2838 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2839 ss, sp, param_count, ESP);
2840 if ((ss & 0xfffc) == 0)
2841 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2842 if ((ss & 3) != dpl)
2843 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2844 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2845 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2846 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2847 if (ss_dpl != dpl)
2848 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2849 if (!(ss_e2 & DESC_S_MASK) ||
2850 (ss_e2 & DESC_CS_MASK) ||
2851 !(ss_e2 & DESC_W_MASK))
2852 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2853 if (!(ss_e2 & DESC_P_MASK))
2854#ifdef VBOX /* See page 3-99 of 253666.pdf */
2855 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
2856#else
2857 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2858#endif
2859
2860 // push_size = ((param_count * 2) + 8) << shift;
2861
2862 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2863 old_ssp = env->segs[R_SS].base;
2864
2865 sp_mask = get_sp_mask(ss_e2);
2866 ssp = get_seg_base(ss_e1, ss_e2);
2867 if (shift) {
2868 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2869 PUSHL(ssp, sp, sp_mask, ESP);
2870 for(i = param_count - 1; i >= 0; i--) {
2871 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2872 PUSHL(ssp, sp, sp_mask, val);
2873 }
2874 } else {
2875 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2876 PUSHW(ssp, sp, sp_mask, ESP);
2877 for(i = param_count - 1; i >= 0; i--) {
2878 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2879 PUSHW(ssp, sp, sp_mask, val);
2880 }
2881 }
2882 new_stack = 1;
2883 } else {
2884 /* to same privilege */
2885 sp = ESP;
2886 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2887 ssp = env->segs[R_SS].base;
2888 // push_size = (4 << shift);
2889 new_stack = 0;
2890 }
2891
2892 if (shift) {
2893 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2894 PUSHL(ssp, sp, sp_mask, next_eip);
2895 } else {
2896 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2897 PUSHW(ssp, sp, sp_mask, next_eip);
2898 }
2899
2900 /* from this point, not restartable */
2901
2902 if (new_stack) {
2903 ss = (ss & ~3) | dpl;
2904 cpu_x86_load_seg_cache(env, R_SS, ss,
2905 ssp,
2906 get_seg_limit(ss_e1, ss_e2),
2907 ss_e2);
2908 }
2909
2910 selector = (selector & ~3) | dpl;
2911 cpu_x86_load_seg_cache(env, R_CS, selector,
2912 get_seg_base(e1, e2),
2913 get_seg_limit(e1, e2),
2914 e2);
2915 cpu_x86_set_cpl(env, dpl);
2916 SET_ESP(sp, sp_mask);
2917 EIP = offset;
2918 }
2919#ifdef USE_KQEMU
2920 if (kqemu_is_ok(env)) {
2921 env->exception_index = -1;
2922 cpu_loop_exit();
2923 }
2924#endif
2925}
2926
2927/* real and vm86 mode iret */
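/* IRET in real or VM86 mode: pop EIP, CS and EFLAGS with a 16-bit stack
   mask. Under VBox, VME (CR4.VME with IOPL < 3 in VM86 mode) is honoured:
   the popped IF bit is reflected into VIF instead of IF, and #GP is
   raised if that would enable interrupts while VIP is pending or if TF
   would become set. */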
2928void helper_iret_real(int shift)
2929{
2930 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2931 target_ulong ssp;
2932 int eflags_mask;
2933#ifdef VBOX
2934 bool fVME = false;
2935
2936 remR3TrapClear(env->pVM);
2937#endif /* VBOX */
2938
2939 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
2940 sp = ESP;
2941 ssp = env->segs[R_SS].base;
2942 if (shift == 1) {
2943 /* 32 bits */
2944 POPL(ssp, sp, sp_mask, new_eip);
2945 POPL(ssp, sp, sp_mask, new_cs);
2946 new_cs &= 0xffff;
2947 POPL(ssp, sp, sp_mask, new_eflags);
2948 } else {
2949 /* 16 bits */
2950 POPW(ssp, sp, sp_mask, new_eip);
2951 POPW(ssp, sp, sp_mask, new_cs);
2952 POPW(ssp, sp, sp_mask, new_eflags);
2953 }
2954#ifdef VBOX
2955 if ( (env->eflags & VM_MASK)
2956 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
2957 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
2958 {
2959 fVME = true;
2960 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
2961 /* if TF will be set -> #GP */
2962 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
2963 || (new_eflags & TF_MASK))
2964 raise_exception(EXCP0D_GPF);
2965 }
2966#endif /* VBOX */
2967 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2968 env->segs[R_CS].selector = new_cs;
2969 env->segs[R_CS].base = (new_cs << 4);
2970 env->eip = new_eip;
2971#ifdef VBOX
2972 if (fVME)
2973 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2974 else
2975#endif
2976 if (env->eflags & VM_MASK)
2977 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2978 else
2979 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2980 if (shift == 0)
2981 eflags_mask &= 0xffff;
2982 load_eflags(new_eflags, eflags_mask);
2983 env->hflags2 &= ~HF2_NMI_MASK;
2984#ifdef VBOX
2985 if (fVME)
2986 {
2987 if (new_eflags & IF_MASK)
2988 env->eflags |= VIF_MASK;
2989 else
2990 env->eflags &= ~VIF_MASK;
2991 }
2992#endif /* VBOX */
2993}
2994
2995static inline void validate_seg(int seg_reg, int cpl)
2996{
2997 int dpl;
2998 uint32_t e2;
2999
3000 /* XXX: on x86_64, we do not want to nullify FS and GS because
3001 they may still contain a valid base. I would be interested to
3002 know how a real x86_64 CPU behaves */
3003 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3004 (env->segs[seg_reg].selector & 0xfffc) == 0)
3005 return;
3006
3007 e2 = env->segs[seg_reg].flags;
3008 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3009 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3010 /* data or non conforming code segment */
3011 if (dpl < cpl) {
3012 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3013 }
3014 }
3015}
3016
3017/* protected mode iret */
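/* Common tail for RETF and IRET: pop CS:EIP (plus EFLAGS for IRET) and
   validate the code-segment descriptor. Returning to an outer privilege
   level also pops SS:ESP, reloads SS, and clears any data segment whose
   DPL is below the new CPL (validate_seg). An IRET whose popped EFLAGS
   has VM set re-enters virtual-8086 mode at CPL 3. For IRET the old CPL
   and IOPL decide which EFLAGS bits may actually change. */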
3018static inline void helper_ret_protected(int shift, int is_iret, int addend)
3019{
3020 uint32_t new_cs, new_eflags, new_ss;
3021 uint32_t new_es, new_ds, new_fs, new_gs;
3022 uint32_t e1, e2, ss_e1, ss_e2;
3023 int cpl, dpl, rpl, eflags_mask, iopl;
3024 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3025
3026#ifdef VBOX /** @todo Why do we do this? */
3027 ss_e1 = ss_e2 = e1 = e2 = 0;
3028#endif
3029
3030#ifdef TARGET_X86_64
3031 if (shift == 2)
3032 sp_mask = -1;
3033 else
3034#endif
3035 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3036 sp = ESP;
3037 ssp = env->segs[R_SS].base;
3038 new_eflags = 0; /* avoid warning */
3039#ifdef TARGET_X86_64
3040 if (shift == 2) {
3041 POPQ(sp, new_eip);
3042 POPQ(sp, new_cs);
3043 new_cs &= 0xffff;
3044 if (is_iret) {
3045 POPQ(sp, new_eflags);
3046 }
3047 } else
3048#endif
3049 if (shift == 1) {
3050 /* 32 bits */
3051 POPL(ssp, sp, sp_mask, new_eip);
3052 POPL(ssp, sp, sp_mask, new_cs);
3053 new_cs &= 0xffff;
3054 if (is_iret) {
3055 POPL(ssp, sp, sp_mask, new_eflags);
3056#if defined(VBOX) && defined(DEBUG)
3057 printf("iret: new CS %04X\n", new_cs);
3058 printf("iret: new EIP %08X\n", (uint32_t)new_eip);
3059 printf("iret: new EFLAGS %08X\n", new_eflags);
3060 printf("iret: EAX=%08x\n", (uint32_t)EAX);
3061#endif
3062 if (new_eflags & VM_MASK)
3063 goto return_to_vm86;
3064 }
3065#ifdef VBOX
3066 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
3067 {
3068#ifdef DEBUG
3069 printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc);
3070#endif
3071 new_cs = new_cs & 0xfffc;
3072 }
3073#endif
3074 } else {
3075 /* 16 bits */
3076 POPW(ssp, sp, sp_mask, new_eip);
3077 POPW(ssp, sp, sp_mask, new_cs);
3078 if (is_iret)
3079 POPW(ssp, sp, sp_mask, new_eflags);
3080 }
3081 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
3082 new_cs, new_eip, shift, addend);
3083 LOG_PCALL_STATE(env);
3084 if ((new_cs & 0xfffc) == 0)
3085 {
3086#if defined(VBOX) && defined(DEBUG)
3087 printf("(new_cs & 0xfffc) == 0\n");
3088#endif
3089 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3090 }
3091 if (load_segment(&e1, &e2, new_cs) != 0)
3092 {
3093#if defined(VBOX) && defined(DEBUG)
3094 printf("load_segment failed\n");
3095#endif
3096 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3097 }
3098 if (!(e2 & DESC_S_MASK) ||
3099 !(e2 & DESC_CS_MASK))
3100 {
3101#if defined(VBOX) && defined(DEBUG)
3102 printf("e2 mask %08x\n", e2);
3103#endif
3104 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3105 }
3106 cpl = env->hflags & HF_CPL_MASK;
3107 rpl = new_cs & 3;
3108 if (rpl < cpl)
3109 {
3110#if defined(VBOX) && defined(DEBUG)
3111 printf("rpl < cpl (%d vs %d)\n", rpl, cpl);
3112#endif
3113 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3114 }
3115 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3116 if (e2 & DESC_C_MASK) {
3117 if (dpl > rpl)
3118 {
3119#if defined(VBOX) && defined(DEBUG)
3120 printf("dpl > rpl (%d vs %d)\n", dpl, rpl);
3121#endif
3122 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3123 }
3124 } else {
3125 if (dpl != rpl)
3126 {
3127#if defined(VBOX) && defined(DEBUG)
3128 printf("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2);
3129#endif
3130 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3131 }
3132 }
3133 if (!(e2 & DESC_P_MASK))
3134 {
3135#if defined(VBOX) && defined(DEBUG)
3136 printf("DESC_P_MASK e2=%08x\n", e2);
3137#endif
3138 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3139 }
3140
3141 sp += addend;
3142 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
3143 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
3144 /* return to same privilege level */
3145 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3146 get_seg_base(e1, e2),
3147 get_seg_limit(e1, e2),
3148 e2);
3149 } else {
3150 /* return to different privilege level */
3151#ifdef TARGET_X86_64
3152 if (shift == 2) {
3153 POPQ(sp, new_esp);
3154 POPQ(sp, new_ss);
3155 new_ss &= 0xffff;
3156 } else
3157#endif
3158 if (shift == 1) {
3159 /* 32 bits */
3160 POPL(ssp, sp, sp_mask, new_esp);
3161 POPL(ssp, sp, sp_mask, new_ss);
3162 new_ss &= 0xffff;
3163 } else {
3164 /* 16 bits */
3165 POPW(ssp, sp, sp_mask, new_esp);
3166 POPW(ssp, sp, sp_mask, new_ss);
3167 }
3168 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
3169 new_ss, new_esp);
3170 if ((new_ss & 0xfffc) == 0) {
3171#ifdef TARGET_X86_64
3172 /* NULL ss is allowed in long mode if cpl != 3 */
3173 /* XXX: test CS64 ? */
3174 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
3175 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3176 0, 0xffffffff,
3177 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3178 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
3179 DESC_W_MASK | DESC_A_MASK);
3180 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
3181 } else
3182#endif
3183 {
3184 raise_exception_err(EXCP0D_GPF, 0);
3185 }
3186 } else {
3187 if ((new_ss & 3) != rpl)
3188 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3189 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
3190 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3191 if (!(ss_e2 & DESC_S_MASK) ||
3192 (ss_e2 & DESC_CS_MASK) ||
3193 !(ss_e2 & DESC_W_MASK))
3194 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3195 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3196 if (dpl != rpl)
3197 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3198 if (!(ss_e2 & DESC_P_MASK))
3199 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3200 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3201 get_seg_base(ss_e1, ss_e2),
3202 get_seg_limit(ss_e1, ss_e2),
3203 ss_e2);
3204 }
3205
3206 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3207 get_seg_base(e1, e2),
3208 get_seg_limit(e1, e2),
3209 e2);
3210 cpu_x86_set_cpl(env, rpl);
3211 sp = new_esp;
3212#ifdef TARGET_X86_64
3213 if (env->hflags & HF_CS64_MASK)
3214 sp_mask = -1;
3215 else
3216#endif
3217 sp_mask = get_sp_mask(ss_e2);
3218
3219 /* validate data segments */
3220 validate_seg(R_ES, rpl);
3221 validate_seg(R_DS, rpl);
3222 validate_seg(R_FS, rpl);
3223 validate_seg(R_GS, rpl);
3224
3225 sp += addend;
3226 }
3227 SET_ESP(sp, sp_mask);
3228 env->eip = new_eip;
3229 if (is_iret) {
3230 /* NOTE: 'cpl' is the _old_ CPL */
3231 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3232 if (cpl == 0)
3233#ifdef VBOX
3234 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
3235#else
3236 eflags_mask |= IOPL_MASK;
3237#endif
3238 iopl = (env->eflags >> IOPL_SHIFT) & 3;
3239 if (cpl <= iopl)
3240 eflags_mask |= IF_MASK;
3241 if (shift == 0)
3242 eflags_mask &= 0xffff;
3243 load_eflags(new_eflags, eflags_mask);
3244 }
3245 return;
3246
3247 return_to_vm86:
3248 POPL(ssp, sp, sp_mask, new_esp);
3249 POPL(ssp, sp, sp_mask, new_ss);
3250 POPL(ssp, sp, sp_mask, new_es);
3251 POPL(ssp, sp, sp_mask, new_ds);
3252 POPL(ssp, sp, sp_mask, new_fs);
3253 POPL(ssp, sp, sp_mask, new_gs);
3254
3255 /* modify processor state */
3256 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
3257 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
3258 load_seg_vm(R_CS, new_cs & 0xffff);
3259 cpu_x86_set_cpl(env, 3);
3260 load_seg_vm(R_SS, new_ss & 0xffff);
3261 load_seg_vm(R_ES, new_es & 0xffff);
3262 load_seg_vm(R_DS, new_ds & 0xffff);
3263 load_seg_vm(R_FS, new_fs & 0xffff);
3264 load_seg_vm(R_GS, new_gs & 0xffff);
3265
3266 env->eip = new_eip & 0xffff;
3267 ESP = new_esp;
3268}
3269
3270void helper_iret_protected(int shift, int next_eip)
3271{
3272 int tss_selector, type;
3273 uint32_t e1, e2;
3274
3275#ifdef VBOX
3276 e1 = e2 = 0; /** @todo Why do we do this? */
3277 remR3TrapClear(env->pVM);
3278#endif
3279
3280 /* specific case for TSS */
3281 if (env->eflags & NT_MASK) {
3282#ifdef TARGET_X86_64
3283 if (env->hflags & HF_LMA_MASK)
3284 raise_exception_err(EXCP0D_GPF, 0);
3285#endif
3286 tss_selector = lduw_kernel(env->tr.base + 0);
3287 if (tss_selector & 4)
3288 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3289 if (load_segment(&e1, &e2, tss_selector) != 0)
3290 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3291 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3292 /* NOTE: we check both segment and busy TSS */
3293 if (type != 3)
3294 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3295 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3296 } else {
3297 helper_ret_protected(shift, 1, 0);
3298 }
3299 env->hflags2 &= ~HF2_NMI_MASK;
3300#ifdef USE_KQEMU
3301 if (kqemu_is_ok(env)) {
3302 CC_OP = CC_OP_EFLAGS;
3303 env->exception_index = -1;
3304 cpu_loop_exit();
3305 }
3306#endif
3307}
3308
3309void helper_lret_protected(int shift, int addend)
3310{
3311 helper_ret_protected(shift, 0, addend);
3312#ifdef USE_KQEMU
3313 if (kqemu_is_ok(env)) {
3314 env->exception_index = -1;
3315 cpu_loop_exit();
3316 }
3317#endif
3318}
3319
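/* SYSENTER loads flat CS/SS from MSR_IA32_SYSENTER_CS (CS = sysenter_cs,
   SS = sysenter_cs + 8) and continues at sysenter_eip/sysenter_esp at
   CPL 0. SYSEXIT below reverses this, using +16/+24 (32-bit) or +32/+40
   (64-bit, dflag == 2) and returning to CPL 3 with EIP = EDX and
   ESP = ECX. Both raise #GP if sysenter_cs is zero. */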
3320void helper_sysenter(void)
3321{
3322 if (env->sysenter_cs == 0) {
3323 raise_exception_err(EXCP0D_GPF, 0);
3324 }
3325 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3326 cpu_x86_set_cpl(env, 0);
3327
3328#ifdef TARGET_X86_64
3329 if (env->hflags & HF_LMA_MASK) {
3330 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3331 0, 0xffffffff,
3332 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3333 DESC_S_MASK |
3334 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3335 } else
3336#endif
3337 {
3338 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3339 0, 0xffffffff,
3340 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3341 DESC_S_MASK |
3342 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3343 }
3344 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3345 0, 0xffffffff,
3346 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3347 DESC_S_MASK |
3348 DESC_W_MASK | DESC_A_MASK);
3349 ESP = env->sysenter_esp;
3350 EIP = env->sysenter_eip;
3351}
3352
3353void helper_sysexit(int dflag)
3354{
3355 int cpl;
3356
3357 cpl = env->hflags & HF_CPL_MASK;
3358 if (env->sysenter_cs == 0 || cpl != 0) {
3359 raise_exception_err(EXCP0D_GPF, 0);
3360 }
3361 cpu_x86_set_cpl(env, 3);
3362#ifdef TARGET_X86_64
3363 if (dflag == 2) {
3364 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3365 0, 0xffffffff,
3366 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3367 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3368 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3369 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3370 0, 0xffffffff,
3371 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3372 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3373 DESC_W_MASK | DESC_A_MASK);
3374 } else
3375#endif
3376 {
3377 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3378 0, 0xffffffff,
3379 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3380 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3381 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3382 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3383 0, 0xffffffff,
3384 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3385 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3386 DESC_W_MASK | DESC_A_MASK);
3387 }
3388 ESP = ECX;
3389 EIP = EDX;
3390#ifdef USE_KQEMU
3391 if (kqemu_is_ok(env)) {
3392 env->exception_index = -1;
3393 cpu_loop_exit();
3394 }
3395#endif
3396}
3397
3398#if defined(CONFIG_USER_ONLY)
3399target_ulong helper_read_crN(int reg)
3400{
3401 return 0;
3402}
3403
3404void helper_write_crN(int reg, target_ulong t0)
3405{
3406}
3407
3408void helper_movl_drN_T0(int reg, target_ulong t0)
3409{
3410}
3411#else
3412target_ulong helper_read_crN(int reg)
3413{
3414 target_ulong val;
3415
3416 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3417 switch(reg) {
3418 default:
3419 val = env->cr[reg];
3420 break;
3421 case 8:
3422 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3423 val = cpu_get_apic_tpr(env);
3424 } else {
3425 val = env->v_tpr;
3426 }
3427 break;
3428 }
3429 return val;
3430}
3431
3432void helper_write_crN(int reg, target_ulong t0)
3433{
3434 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3435 switch(reg) {
3436 case 0:
3437 cpu_x86_update_cr0(env, t0);
3438 break;
3439 case 3:
3440 cpu_x86_update_cr3(env, t0);
3441 break;
3442 case 4:
3443 cpu_x86_update_cr4(env, t0);
3444 break;
3445 case 8:
3446 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3447 cpu_set_apic_tpr(env, t0);
3448 }
3449 env->v_tpr = t0 & 0x0f;
3450 break;
3451 default:
3452 env->cr[reg] = t0;
3453 break;
3454 }
3455}
3456
3457void helper_movl_drN_T0(int reg, target_ulong t0)
3458{
3459 int i;
3460
3461 if (reg < 4) {
3462 hw_breakpoint_remove(env, reg);
3463 env->dr[reg] = t0;
3464 hw_breakpoint_insert(env, reg);
3465 } else if (reg == 7) {
3466 for (i = 0; i < 4; i++)
3467 hw_breakpoint_remove(env, i);
3468 env->dr[7] = t0;
3469 for (i = 0; i < 4; i++)
3470 hw_breakpoint_insert(env, i);
3471 } else
3472 env->dr[reg] = t0;
3473}
3474#endif
3475
3476void helper_lmsw(target_ulong t0)
3477{
3478 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
3479 if already set to one. */
3480 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3481 helper_write_crN(0, t0);
3482}
3483
3484void helper_clts(void)
3485{
3486 env->cr[0] &= ~CR0_TS_MASK;
3487 env->hflags &= ~HF_TS_MASK;
3488}
3489
3490void helper_invlpg(target_ulong addr)
3491{
3492 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3493 tlb_flush_page(env, addr);
3494}
3495
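/* RDTSC faults with #GP if CR4.TSD is set and CPL != 0; the value
   returned in EDX:EAX includes the VM's TSC offset. The VBox-only
   RDTSCP variant additionally returns MSR_K8_TSC_AUX in ECX. */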
3496void helper_rdtsc(void)
3497{
3498 uint64_t val;
3499
3500 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3501 raise_exception(EXCP0D_GPF);
3502 }
3503 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3504
3505 val = cpu_get_tsc(env) + env->tsc_offset;
3506 EAX = (uint32_t)(val);
3507 EDX = (uint32_t)(val >> 32);
3508}
3509
3510#ifdef VBOX
3511void helper_rdtscp(void)
3512{
3513 uint64_t val;
3514 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3515 raise_exception(EXCP0D_GPF);
3516 }
3517
3518 val = cpu_get_tsc(env);
3519 EAX = (uint32_t)(val);
3520 EDX = (uint32_t)(val >> 32);
3521 if (cpu_rdmsr(env, MSR_K8_TSC_AUX, &val) == 0)
3522 ECX = (uint32_t)(val);
3523 else
3524 ECX = 0;
3525}
3526#endif /* VBOX */
3527
3528void helper_rdpmc(void)
3529{
3530#ifdef VBOX
3531 /* If X86_CR4_PCE is *not* set, then CPL must be zero. */
3532 if (!(env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3533 raise_exception(EXCP0D_GPF);
3534 }
3535 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
3536 EAX = 0;
3537 EDX = 0;
3538#else /* !VBOX */
3539 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3540 raise_exception(EXCP0D_GPF);
3541 }
3542 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3543
3544 /* currently unimplemented */
3545 raise_exception_err(EXCP06_ILLOP, 0);
3546#endif /* !VBOX */
3547}
3548
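/* WRMSR/RDMSR: the MSR index is taken from ECX and the 64-bit value is
   passed in or returned via EDX:EAX. In the VBox build writes are
   forwarded to CPUM through cpu_wrmsr() and unhandled reads go through
   cpu_rdmsr(); in the plain QEMU build unknown MSRs are silently
   ignored on write and read as zero. */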
3549#if defined(CONFIG_USER_ONLY)
3550void helper_wrmsr(void)
3551{
3552}
3553
3554void helper_rdmsr(void)
3555{
3556}
3557#else
3558void helper_wrmsr(void)
3559{
3560 uint64_t val;
3561
3562 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3563
3564 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3565
3566 switch((uint32_t)ECX) {
3567 case MSR_IA32_SYSENTER_CS:
3568 env->sysenter_cs = val & 0xffff;
3569 break;
3570 case MSR_IA32_SYSENTER_ESP:
3571 env->sysenter_esp = val;
3572 break;
3573 case MSR_IA32_SYSENTER_EIP:
3574 env->sysenter_eip = val;
3575 break;
3576 case MSR_IA32_APICBASE:
3577# ifndef VBOX /* The CPUMSetGuestMsr call below does this now. */
3578 cpu_set_apic_base(env, val);
3579# endif
3580 break;
3581 case MSR_EFER:
3582 {
3583 uint64_t update_mask;
3584 update_mask = 0;
3585 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3586 update_mask |= MSR_EFER_SCE;
3587 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3588 update_mask |= MSR_EFER_LME;
3589 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3590 update_mask |= MSR_EFER_FFXSR;
3591 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3592 update_mask |= MSR_EFER_NXE;
3593 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3594 update_mask |= MSR_EFER_SVME;
3595 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3596 update_mask |= MSR_EFER_FFXSR;
3597 cpu_load_efer(env, (env->efer & ~update_mask) |
3598 (val & update_mask));
3599 }
3600 break;
3601 case MSR_STAR:
3602 env->star = val;
3603 break;
3604 case MSR_PAT:
3605 env->pat = val;
3606 break;
3607 case MSR_VM_HSAVE_PA:
3608 env->vm_hsave = val;
3609 break;
3610#ifdef TARGET_X86_64
3611 case MSR_LSTAR:
3612 env->lstar = val;
3613 break;
3614 case MSR_CSTAR:
3615 env->cstar = val;
3616 break;
3617 case MSR_FMASK:
3618 env->fmask = val;
3619 break;
3620 case MSR_FSBASE:
3621 env->segs[R_FS].base = val;
3622 break;
3623 case MSR_GSBASE:
3624 env->segs[R_GS].base = val;
3625 break;
3626 case MSR_KERNELGSBASE:
3627 env->kernelgsbase = val;
3628 break;
3629#endif
3630# ifndef VBOX
3631 case MSR_MTRRphysBase(0):
3632 case MSR_MTRRphysBase(1):
3633 case MSR_MTRRphysBase(2):
3634 case MSR_MTRRphysBase(3):
3635 case MSR_MTRRphysBase(4):
3636 case MSR_MTRRphysBase(5):
3637 case MSR_MTRRphysBase(6):
3638 case MSR_MTRRphysBase(7):
3639 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3640 break;
3641 case MSR_MTRRphysMask(0):
3642 case MSR_MTRRphysMask(1):
3643 case MSR_MTRRphysMask(2):
3644 case MSR_MTRRphysMask(3):
3645 case MSR_MTRRphysMask(4):
3646 case MSR_MTRRphysMask(5):
3647 case MSR_MTRRphysMask(6):
3648 case MSR_MTRRphysMask(7):
3649 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3650 break;
3651 case MSR_MTRRfix64K_00000:
3652 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3653 break;
3654 case MSR_MTRRfix16K_80000:
3655 case MSR_MTRRfix16K_A0000:
3656 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3657 break;
3658 case MSR_MTRRfix4K_C0000:
3659 case MSR_MTRRfix4K_C8000:
3660 case MSR_MTRRfix4K_D0000:
3661 case MSR_MTRRfix4K_D8000:
3662 case MSR_MTRRfix4K_E0000:
3663 case MSR_MTRRfix4K_E8000:
3664 case MSR_MTRRfix4K_F0000:
3665 case MSR_MTRRfix4K_F8000:
3666 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3667 break;
3668 case MSR_MTRRdefType:
3669 env->mtrr_deftype = val;
3670 break;
3671# endif /* !VBOX */
3672 default:
3673# ifndef VBOX
3674 /* XXX: exception ? */
3675# endif
3676 break;
3677 }
3678
3679# ifdef VBOX
3680 /* call CPUM. */
3681 if (cpu_wrmsr(env, (uint32_t)ECX, val) != 0)
3682 {
3683 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3684 }
3685# endif
3686}
3687
3688void helper_rdmsr(void)
3689{
3690 uint64_t val;
3691
3692 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3693
3694 switch((uint32_t)ECX) {
3695 case MSR_IA32_SYSENTER_CS:
3696 val = env->sysenter_cs;
3697 break;
3698 case MSR_IA32_SYSENTER_ESP:
3699 val = env->sysenter_esp;
3700 break;
3701 case MSR_IA32_SYSENTER_EIP:
3702 val = env->sysenter_eip;
3703 break;
3704 case MSR_IA32_APICBASE:
3705 val = cpu_get_apic_base(env);
3706 break;
3707 case MSR_EFER:
3708 val = env->efer;
3709 break;
3710 case MSR_STAR:
3711 val = env->star;
3712 break;
3713 case MSR_PAT:
3714 val = env->pat;
3715 break;
3716 case MSR_VM_HSAVE_PA:
3717 val = env->vm_hsave;
3718 break;
3719# ifndef VBOX /* forward to CPUMQueryGuestMsr. */
3720 case MSR_IA32_PERF_STATUS:
3721 /* tsc_increment_by_tick */
3722 val = 1000ULL;
3723 /* CPU multiplier */
3724 val |= (((uint64_t)4ULL) << 40);
3725 break;
3726# endif /* !VBOX */
3727#ifdef TARGET_X86_64
3728 case MSR_LSTAR:
3729 val = env->lstar;
3730 break;
3731 case MSR_CSTAR:
3732 val = env->cstar;
3733 break;
3734 case MSR_FMASK:
3735 val = env->fmask;
3736 break;
3737 case MSR_FSBASE:
3738 val = env->segs[R_FS].base;
3739 break;
3740 case MSR_GSBASE:
3741 val = env->segs[R_GS].base;
3742 break;
3743 case MSR_KERNELGSBASE:
3744 val = env->kernelgsbase;
3745 break;
3746#endif
3747#ifdef USE_KQEMU
3748 case MSR_QPI_COMMBASE:
3749 if (env->kqemu_enabled) {
3750 val = kqemu_comm_base;
3751 } else {
3752 val = 0;
3753 }
3754 break;
3755#endif
3756# ifndef VBOX
3757 case MSR_MTRRphysBase(0):
3758 case MSR_MTRRphysBase(1):
3759 case MSR_MTRRphysBase(2):
3760 case MSR_MTRRphysBase(3):
3761 case MSR_MTRRphysBase(4):
3762 case MSR_MTRRphysBase(5):
3763 case MSR_MTRRphysBase(6):
3764 case MSR_MTRRphysBase(7):
3765 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
3766 break;
3767 case MSR_MTRRphysMask(0):
3768 case MSR_MTRRphysMask(1):
3769 case MSR_MTRRphysMask(2):
3770 case MSR_MTRRphysMask(3):
3771 case MSR_MTRRphysMask(4):
3772 case MSR_MTRRphysMask(5):
3773 case MSR_MTRRphysMask(6):
3774 case MSR_MTRRphysMask(7):
3775 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
3776 break;
3777 case MSR_MTRRfix64K_00000:
3778 val = env->mtrr_fixed[0];
3779 break;
3780 case MSR_MTRRfix16K_80000:
3781 case MSR_MTRRfix16K_A0000:
3782 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
3783 break;
3784 case MSR_MTRRfix4K_C0000:
3785 case MSR_MTRRfix4K_C8000:
3786 case MSR_MTRRfix4K_D0000:
3787 case MSR_MTRRfix4K_D8000:
3788 case MSR_MTRRfix4K_E0000:
3789 case MSR_MTRRfix4K_E8000:
3790 case MSR_MTRRfix4K_F0000:
3791 case MSR_MTRRfix4K_F8000:
3792 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
3793 break;
3794 case MSR_MTRRdefType:
3795 val = env->mtrr_deftype;
3796 break;
3797 case MSR_MTRRcap:
3798 if (env->cpuid_features & CPUID_MTRR)
3799 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
3800 else
3801 /* XXX: exception ? */
3802 val = 0;
3803 break;
3804# endif /* !VBOX */
3805 default:
3806# ifndef VBOX
3807 /* XXX: exception ? */
3808 val = 0;
3809# else /* VBOX */
3810 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
3811 {
3812 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3813 val = 0;
3814 }
3815# endif /* VBOX */
3816 break;
3817 }
3818 EAX = (uint32_t)(val);
3819 EDX = (uint32_t)(val >> 32);
3820
3821# ifdef VBOX_STRICT
3822 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
3823 val = 0;
3824 AssertMsg(val == RT_MAKE_U64(EAX, EDX), ("idMsr=%#x val=%#llx eax:edx=%#llx\n", (uint32_t)ECX, val, RT_MAKE_U64(EAX, EDX)));
3825# endif
3826}
3827#endif
3828
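/* LSL, LAR, VERR and VERW never fault on a bad selector: they clear ZF
   (via CC_SRC) and return 0 / leave the operand untouched when the
   descriptor or privilege checks fail, and set ZF on success. LSL
   returns the expanded limit, LAR the access rights (e2 & 0x00f0ff00). */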
3829target_ulong helper_lsl(target_ulong selector1)
3830{
3831 unsigned int limit;
3832 uint32_t e1, e2, eflags, selector;
3833 int rpl, dpl, cpl, type;
3834
3835 selector = selector1 & 0xffff;
3836 eflags = helper_cc_compute_all(CC_OP);
3837 if (load_segment(&e1, &e2, selector) != 0)
3838 goto fail;
3839 rpl = selector & 3;
3840 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3841 cpl = env->hflags & HF_CPL_MASK;
3842 if (e2 & DESC_S_MASK) {
3843 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3844 /* conforming */
3845 } else {
3846 if (dpl < cpl || dpl < rpl)
3847 goto fail;
3848 }
3849 } else {
3850 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3851 switch(type) {
3852 case 1:
3853 case 2:
3854 case 3:
3855 case 9:
3856 case 11:
3857 break;
3858 default:
3859 goto fail;
3860 }
3861 if (dpl < cpl || dpl < rpl) {
3862 fail:
3863 CC_SRC = eflags & ~CC_Z;
3864 return 0;
3865 }
3866 }
3867 limit = get_seg_limit(e1, e2);
3868 CC_SRC = eflags | CC_Z;
3869 return limit;
3870}
3871
3872target_ulong helper_lar(target_ulong selector1)
3873{
3874 uint32_t e1, e2, eflags, selector;
3875 int rpl, dpl, cpl, type;
3876
3877 selector = selector1 & 0xffff;
3878 eflags = helper_cc_compute_all(CC_OP);
3879 if ((selector & 0xfffc) == 0)
3880 goto fail;
3881 if (load_segment(&e1, &e2, selector) != 0)
3882 goto fail;
3883 rpl = selector & 3;
3884 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3885 cpl = env->hflags & HF_CPL_MASK;
3886 if (e2 & DESC_S_MASK) {
3887 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3888 /* conforming */
3889 } else {
3890 if (dpl < cpl || dpl < rpl)
3891 goto fail;
3892 }
3893 } else {
3894 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3895 switch(type) {
3896 case 1:
3897 case 2:
3898 case 3:
3899 case 4:
3900 case 5:
3901 case 9:
3902 case 11:
3903 case 12:
3904 break;
3905 default:
3906 goto fail;
3907 }
3908 if (dpl < cpl || dpl < rpl) {
3909 fail:
3910 CC_SRC = eflags & ~CC_Z;
3911 return 0;
3912 }
3913 }
3914 CC_SRC = eflags | CC_Z;
3915 return e2 & 0x00f0ff00;
3916}
3917
3918void helper_verr(target_ulong selector1)
3919{
3920 uint32_t e1, e2, eflags, selector;
3921 int rpl, dpl, cpl;
3922
3923 selector = selector1 & 0xffff;
3924 eflags = helper_cc_compute_all(CC_OP);
3925 if ((selector & 0xfffc) == 0)
3926 goto fail;
3927 if (load_segment(&e1, &e2, selector) != 0)
3928 goto fail;
3929 if (!(e2 & DESC_S_MASK))
3930 goto fail;
3931 rpl = selector & 3;
3932 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3933 cpl = env->hflags & HF_CPL_MASK;
3934 if (e2 & DESC_CS_MASK) {
3935 if (!(e2 & DESC_R_MASK))
3936 goto fail;
3937 if (!(e2 & DESC_C_MASK)) {
3938 if (dpl < cpl || dpl < rpl)
3939 goto fail;
3940 }
3941 } else {
3942 if (dpl < cpl || dpl < rpl) {
3943 fail:
3944 CC_SRC = eflags & ~CC_Z;
3945 return;
3946 }
3947 }
3948 CC_SRC = eflags | CC_Z;
3949}
3950
3951void helper_verw(target_ulong selector1)
3952{
3953 uint32_t e1, e2, eflags, selector;
3954 int rpl, dpl, cpl;
3955
3956 selector = selector1 & 0xffff;
3957 eflags = helper_cc_compute_all(CC_OP);
3958 if ((selector & 0xfffc) == 0)
3959 goto fail;
3960 if (load_segment(&e1, &e2, selector) != 0)
3961 goto fail;
3962 if (!(e2 & DESC_S_MASK))
3963 goto fail;
3964 rpl = selector & 3;
3965 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3966 cpl = env->hflags & HF_CPL_MASK;
3967 if (e2 & DESC_CS_MASK) {
3968 goto fail;
3969 } else {
3970 if (dpl < cpl || dpl < rpl)
3971 goto fail;
3972 if (!(e2 & DESC_W_MASK)) {
3973 fail:
3974 CC_SRC = eflags & ~CC_Z;
3975 return;
3976 }
3977 }
3978 CC_SRC = eflags | CC_Z;
3979}
3980
3981/* x87 FPU helpers */
3982
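/* Record an x87 exception flag in the status word; if that exception is
   unmasked in the control word, also set the summary (ES) and busy (B) bits
   so a later FWAIT can deliver it through fpu_raise_exception(). */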
3983static void fpu_set_exception(int mask)
3984{
3985 env->fpus |= mask;
3986 if (env->fpus & (~env->fpuc & FPUC_EM))
3987 env->fpus |= FPUS_SE | FPUS_B;
3988}
3989
3990static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
3991{
3992 if (b == 0.0)
3993 fpu_set_exception(FPUS_ZE);
3994 return a / b;
3995}
3996
3997static void fpu_raise_exception(void)
3998{
3999 if (env->cr[0] & CR0_NE_MASK) {
4000 raise_exception(EXCP10_COPR);
4001 }
4002#if !defined(CONFIG_USER_ONLY)
4003 else {
4004 cpu_set_ferr(env);
4005 }
4006#endif
4007}
4008
4009void helper_flds_FT0(uint32_t val)
4010{
4011 union {
4012 float32 f;
4013 uint32_t i;
4014 } u;
4015 u.i = val;
4016 FT0 = float32_to_floatx(u.f, &env->fp_status);
4017}
4018
4019void helper_fldl_FT0(uint64_t val)
4020{
4021 union {
4022 float64 f;
4023 uint64_t i;
4024 } u;
4025 u.i = val;
4026 FT0 = float64_to_floatx(u.f, &env->fp_status);
4027}
4028
4029void helper_fildl_FT0(int32_t val)
4030{
4031 FT0 = int32_to_floatx(val, &env->fp_status);
4032}
4033
4034void helper_flds_ST0(uint32_t val)
4035{
4036 int new_fpstt;
4037 union {
4038 float32 f;
4039 uint32_t i;
4040 } u;
4041 new_fpstt = (env->fpstt - 1) & 7;
4042 u.i = val;
4043 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4044 env->fpstt = new_fpstt;
4045 env->fptags[new_fpstt] = 0; /* validate stack entry */
4046}
4047
4048void helper_fldl_ST0(uint64_t val)
4049{
4050 int new_fpstt;
4051 union {
4052 float64 f;
4053 uint64_t i;
4054 } u;
4055 new_fpstt = (env->fpstt - 1) & 7;
4056 u.i = val;
4057 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4058 env->fpstt = new_fpstt;
4059 env->fptags[new_fpstt] = 0; /* validate stack entry */
4060}
4061
4062void helper_fildl_ST0(int32_t val)
4063{
4064 int new_fpstt;
4065 new_fpstt = (env->fpstt - 1) & 7;
4066 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4067 env->fpstt = new_fpstt;
4068 env->fptags[new_fpstt] = 0; /* validate stack entry */
4069}
4070
4071void helper_fildll_ST0(int64_t val)
4072{
4073 int new_fpstt;
4074 new_fpstt = (env->fpstt - 1) & 7;
4075 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4076 env->fpstt = new_fpstt;
4077 env->fptags[new_fpstt] = 0; /* validate stack entry */
4078}
4079
4080#ifndef VBOX
4081uint32_t helper_fsts_ST0(void)
4082#else
4083RTCCUINTREG helper_fsts_ST0(void)
4084#endif
4085{
4086 union {
4087 float32 f;
4088 uint32_t i;
4089 } u;
4090 u.f = floatx_to_float32(ST0, &env->fp_status);
4091 return u.i;
4092}
4093
4094uint64_t helper_fstl_ST0(void)
4095{
4096 union {
4097 float64 f;
4098 uint64_t i;
4099 } u;
4100 u.f = floatx_to_float64(ST0, &env->fp_status);
4101 return u.i;
4102}
4103
4104#ifndef VBOX
4105int32_t helper_fist_ST0(void)
4106#else
4107RTCCINTREG helper_fist_ST0(void)
4108#endif
4109{
4110 int32_t val;
4111 val = floatx_to_int32(ST0, &env->fp_status);
4112 if (val != (int16_t)val)
4113 val = -32768;
4114 return val;
4115}
4116
4117#ifndef VBOX
4118int32_t helper_fistl_ST0(void)
4119#else
4120RTCCINTREG helper_fistl_ST0(void)
4121#endif
4122{
4123 int32_t val;
4124 val = floatx_to_int32(ST0, &env->fp_status);
4125 return val;
4126}
4127
4128int64_t helper_fistll_ST0(void)
4129{
4130 int64_t val;
4131 val = floatx_to_int64(ST0, &env->fp_status);
4132 return val;
4133}
4134
4135#ifndef VBOX
4136int32_t helper_fistt_ST0(void)
4137#else
4138RTCCINTREG helper_fistt_ST0(void)
4139#endif
4140{
4141 int32_t val;
4142 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4143 if (val != (int16_t)val)
4144 val = -32768;
4145 return val;
4146}
4147
4148#ifndef VBOX
4149int32_t helper_fisttl_ST0(void)
4150#else
4151RTCCINTREG helper_fisttl_ST0(void)
4152#endif
4153{
4154 int32_t val;
4155 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4156 return val;
4157}
4158
4159int64_t helper_fisttll_ST0(void)
4160{
4161 int64_t val;
4162 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4163 return val;
4164}
4165
4166void helper_fldt_ST0(target_ulong ptr)
4167{
4168 int new_fpstt;
4169 new_fpstt = (env->fpstt - 1) & 7;
4170 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4171 env->fpstt = new_fpstt;
4172 env->fptags[new_fpstt] = 0; /* validate stack entry */
4173}
4174
4175void helper_fstt_ST0(target_ulong ptr)
4176{
4177 helper_fstt(ST0, ptr);
4178}
4179
4180void helper_fpush(void)
4181{
4182 fpush();
4183}
4184
4185void helper_fpop(void)
4186{
4187 fpop();
4188}
4189
4190void helper_fdecstp(void)
4191{
4192 env->fpstt = (env->fpstt - 1) & 7;
4193 env->fpus &= (~0x4700);
4194}
4195
4196void helper_fincstp(void)
4197{
4198 env->fpstt = (env->fpstt + 1) & 7;
4199 env->fpus &= (~0x4700);
4200}
4201
4202/* FPU move */
4203
4204void helper_ffree_STN(int st_index)
4205{
4206 env->fptags[(env->fpstt + st_index) & 7] = 1;
4207}
4208
4209void helper_fmov_ST0_FT0(void)
4210{
4211 ST0 = FT0;
4212}
4213
4214void helper_fmov_FT0_STN(int st_index)
4215{
4216 FT0 = ST(st_index);
4217}
4218
4219void helper_fmov_ST0_STN(int st_index)
4220{
4221 ST0 = ST(st_index);
4222}
4223
4224void helper_fmov_STN_ST0(int st_index)
4225{
4226 ST(st_index) = ST0;
4227}
4228
4229void helper_fxchg_ST0_STN(int st_index)
4230{
4231 CPU86_LDouble tmp;
4232 tmp = ST(st_index);
4233 ST(st_index) = ST0;
4234 ST0 = tmp;
4235}
4236
4237/* FPU operations */
4238
4239static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
4240
4241void helper_fcom_ST0_FT0(void)
4242{
4243 int ret;
4244
4245 ret = floatx_compare(ST0, FT0, &env->fp_status);
4246 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4247}
4248
4249void helper_fucom_ST0_FT0(void)
4250{
4251 int ret;
4252
4253 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4254 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret+ 1];
4255}
4256
4257static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
4258
4259void helper_fcomi_ST0_FT0(void)
4260{
4261 int eflags;
4262 int ret;
4263
4264 ret = floatx_compare(ST0, FT0, &env->fp_status);
4265 eflags = helper_cc_compute_all(CC_OP);
4266 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4267 CC_SRC = eflags;
4268}
4269
4270void helper_fucomi_ST0_FT0(void)
4271{
4272 int eflags;
4273 int ret;
4274
4275 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4276 eflags = helper_cc_compute_all(CC_OP);
4277 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4278 CC_SRC = eflags;
4279}
4280
4281void helper_fadd_ST0_FT0(void)
4282{
4283 ST0 += FT0;
4284}
4285
4286void helper_fmul_ST0_FT0(void)
4287{
4288 ST0 *= FT0;
4289}
4290
4291void helper_fsub_ST0_FT0(void)
4292{
4293 ST0 -= FT0;
4294}
4295
4296void helper_fsubr_ST0_FT0(void)
4297{
4298 ST0 = FT0 - ST0;
4299}
4300
4301void helper_fdiv_ST0_FT0(void)
4302{
4303 ST0 = helper_fdiv(ST0, FT0);
4304}
4305
4306void helper_fdivr_ST0_FT0(void)
4307{
4308 ST0 = helper_fdiv(FT0, ST0);
4309}
4310
4311/* fp operations between STN and ST0 */
4312
4313void helper_fadd_STN_ST0(int st_index)
4314{
4315 ST(st_index) += ST0;
4316}
4317
4318void helper_fmul_STN_ST0(int st_index)
4319{
4320 ST(st_index) *= ST0;
4321}
4322
4323void helper_fsub_STN_ST0(int st_index)
4324{
4325 ST(st_index) -= ST0;
4326}
4327
4328void helper_fsubr_STN_ST0(int st_index)
4329{
4330 CPU86_LDouble *p;
4331 p = &ST(st_index);
4332 *p = ST0 - *p;
4333}
4334
4335void helper_fdiv_STN_ST0(int st_index)
4336{
4337 CPU86_LDouble *p;
4338 p = &ST(st_index);
4339 *p = helper_fdiv(*p, ST0);
4340}
4341
4342void helper_fdivr_STN_ST0(int st_index)
4343{
4344 CPU86_LDouble *p;
4345 p = &ST(st_index);
4346 *p = helper_fdiv(ST0, *p);
4347}
4348
4349/* misc FPU operations */
4350void helper_fchs_ST0(void)
4351{
4352 ST0 = floatx_chs(ST0);
4353}
4354
4355void helper_fabs_ST0(void)
4356{
4357 ST0 = floatx_abs(ST0);
4358}
4359
4360void helper_fld1_ST0(void)
4361{
4362 ST0 = f15rk[1];
4363}
4364
4365void helper_fldl2t_ST0(void)
4366{
4367 ST0 = f15rk[6];
4368}
4369
4370void helper_fldl2e_ST0(void)
4371{
4372 ST0 = f15rk[5];
4373}
4374
4375void helper_fldpi_ST0(void)
4376{
4377 ST0 = f15rk[2];
4378}
4379
4380void helper_fldlg2_ST0(void)
4381{
4382 ST0 = f15rk[3];
4383}
4384
4385void helper_fldln2_ST0(void)
4386{
4387 ST0 = f15rk[4];
4388}
4389
4390void helper_fldz_ST0(void)
4391{
4392 ST0 = f15rk[0];
4393}
4394
4395void helper_fldz_FT0(void)
4396{
4397 FT0 = f15rk[0];
4398}
4399
4400#ifndef VBOX
4401uint32_t helper_fnstsw(void)
4402#else
4403RTCCUINTREG helper_fnstsw(void)
4404#endif
4405{
4406 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4407}
4408
4409#ifndef VBOX
4410uint32_t helper_fnstcw(void)
4411#else
4412RTCCUINTREG helper_fnstcw(void)
4413#endif
4414{
4415 return env->fpuc;
4416}
4417
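/* Propagate the x87 control word into the softfloat status: the RC field
   selects the rounding mode and, when FLOATX80 is available, the PC field
   selects the 32/64/80-bit rounding precision. */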
4418static void update_fp_status(void)
4419{
4420 int rnd_type;
4421
4422 /* set rounding mode */
4423 switch(env->fpuc & RC_MASK) {
4424 default:
4425 case RC_NEAR:
4426 rnd_type = float_round_nearest_even;
4427 break;
4428 case RC_DOWN:
4429 rnd_type = float_round_down;
4430 break;
4431 case RC_UP:
4432 rnd_type = float_round_up;
4433 break;
4434 case RC_CHOP:
4435 rnd_type = float_round_to_zero;
4436 break;
4437 }
4438 set_float_rounding_mode(rnd_type, &env->fp_status);
4439#ifdef FLOATX80
4440 switch((env->fpuc >> 8) & 3) {
4441 case 0:
4442 rnd_type = 32;
4443 break;
4444 case 2:
4445 rnd_type = 64;
4446 break;
4447 case 3:
4448 default:
4449 rnd_type = 80;
4450 break;
4451 }
4452 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4453#endif
4454}
4455
4456void helper_fldcw(uint32_t val)
4457{
4458 env->fpuc = val;
4459 update_fp_status();
4460}
4461
4462void helper_fclex(void)
4463{
4464 env->fpus &= 0x7f00;
4465}
4466
4467void helper_fwait(void)
4468{
4469 if (env->fpus & FPUS_SE)
4470 fpu_raise_exception();
4471}
4472
4473void helper_fninit(void)
4474{
4475 env->fpus = 0;
4476 env->fpstt = 0;
4477 env->fpuc = 0x37f;
4478 env->fptags[0] = 1;
4479 env->fptags[1] = 1;
4480 env->fptags[2] = 1;
4481 env->fptags[3] = 1;
4482 env->fptags[4] = 1;
4483 env->fptags[5] = 1;
4484 env->fptags[6] = 1;
4485 env->fptags[7] = 1;
4486}
4487
4488/* BCD ops */
4489
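/* FBLD/FBSTP use the 10-byte packed BCD format: bytes 0..8 hold 18 decimal
   digits (two per byte, least significant first) and bit 7 of byte 9 holds
   the sign. */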
4490void helper_fbld_ST0(target_ulong ptr)
4491{
4492 CPU86_LDouble tmp;
4493 uint64_t val;
4494 unsigned int v;
4495 int i;
4496
4497 val = 0;
4498 for(i = 8; i >= 0; i--) {
4499 v = ldub(ptr + i);
4500 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4501 }
4502 tmp = val;
4503 if (ldub(ptr + 9) & 0x80)
4504 tmp = -tmp;
4505 fpush();
4506 ST0 = tmp;
4507}
4508
4509void helper_fbst_ST0(target_ulong ptr)
4510{
4511 int v;
4512 target_ulong mem_ref, mem_end;
4513 int64_t val;
4514
4515 val = floatx_to_int64(ST0, &env->fp_status);
4516 mem_ref = ptr;
4517 mem_end = mem_ref + 9;
4518 if (val < 0) {
4519 stb(mem_end, 0x80);
4520 val = -val;
4521 } else {
4522 stb(mem_end, 0x00);
4523 }
4524 while (mem_ref < mem_end) {
4525 if (val == 0)
4526 break;
4527 v = val % 100;
4528 val = val / 100;
4529 v = ((v / 10) << 4) | (v % 10);
4530 stb(mem_ref++, v);
4531 }
4532 while (mem_ref < mem_end) {
4533 stb(mem_ref++, 0);
4534 }
4535}
4536
4537void helper_f2xm1(void)
4538{
4539 ST0 = pow(2.0,ST0) - 1.0;
4540}
4541
4542void helper_fyl2x(void)
4543{
4544 CPU86_LDouble fptemp;
4545
4546 fptemp = ST0;
4547 if (fptemp>0.0){
4548 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4549 ST1 *= fptemp;
4550 fpop();
4551 } else {
4552 env->fpus &= (~0x4700);
4553 env->fpus |= 0x400;
4554 }
4555}
4556
4557void helper_fptan(void)
4558{
4559 CPU86_LDouble fptemp;
4560
4561 fptemp = ST0;
4562 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4563 env->fpus |= 0x400;
4564 } else {
4565 ST0 = tan(fptemp);
4566 fpush();
4567 ST0 = 1.0;
4568 env->fpus &= (~0x400); /* C2 <-- 0 */
4569 /* the above code is for |arg| < 2**52 only */
4570 }
4571}
4572
4573void helper_fpatan(void)
4574{
4575 CPU86_LDouble fptemp, fpsrcop;
4576
4577 fpsrcop = ST1;
4578 fptemp = ST0;
4579 ST1 = atan2(fpsrcop,fptemp);
4580 fpop();
4581}
4582
4583void helper_fxtract(void)
4584{
4585 CPU86_LDoubleU temp;
4586 unsigned int expdif;
4587
4588 temp.d = ST0;
4589 expdif = EXPD(temp) - EXPBIAS;
4590 /*DP exponent bias*/
4591 ST0 = expdif;
4592 fpush();
4593 BIASEXPONENT(temp);
4594 ST0 = temp.d;
4595}
4596
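/* FPREM1/FPREM compute the partial remainder ST0 - q * ST1. When the exponent
   difference is below 53 the reduction completes in one step: C2 is cleared
   and C0,C3,C1 receive the low three quotient bits. Otherwise only a partial
   reduction is done and C2 is set so the guest keeps looping. FPREM1 rounds
   the quotient to nearest (IEEE remainder), FPREM truncates it towards zero. */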
4597void helper_fprem1(void)
4598{
4599 CPU86_LDouble dblq, fpsrcop, fptemp;
4600 CPU86_LDoubleU fpsrcop1, fptemp1;
4601 int expdif;
4602 signed long long int q;
4603
4604#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
4605 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4606#else
4607 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4608#endif
4609 ST0 = 0.0 / 0.0; /* NaN */
4610 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4611 return;
4612 }
4613
4614 fpsrcop = ST0;
4615 fptemp = ST1;
4616 fpsrcop1.d = fpsrcop;
4617 fptemp1.d = fptemp;
4618 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4619
4620 if (expdif < 0) {
4621 /* optimisation? taken from the AMD docs */
4622 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4623 /* ST0 is unchanged */
4624 return;
4625 }
4626
4627 if (expdif < 53) {
4628 dblq = fpsrcop / fptemp;
4629 /* round dblq towards nearest integer */
4630 dblq = rint(dblq);
4631 ST0 = fpsrcop - fptemp * dblq;
4632
4633 /* convert dblq to q by truncating towards zero */
4634 if (dblq < 0.0)
4635 q = (signed long long int)(-dblq);
4636 else
4637 q = (signed long long int)dblq;
4638
4639 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4640 /* (C0,C3,C1) <-- (q2,q1,q0) */
4641 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4642 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4643 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4644 } else {
4645 env->fpus |= 0x400; /* C2 <-- 1 */
4646 fptemp = pow(2.0, expdif - 50);
4647 fpsrcop = (ST0 / ST1) / fptemp;
4648 /* fpsrcop = integer obtained by chopping */
4649 fpsrcop = (fpsrcop < 0.0) ?
4650 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4651 ST0 -= (ST1 * fpsrcop * fptemp);
4652 }
4653}
4654
4655void helper_fprem(void)
4656{
4657 CPU86_LDouble dblq, fpsrcop, fptemp;
4658 CPU86_LDoubleU fpsrcop1, fptemp1;
4659 int expdif;
4660 signed long long int q;
4661
4662#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
4663 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4664#else
4665 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4666#endif
4667 ST0 = 0.0 / 0.0; /* NaN */
4668 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4669 return;
4670 }
4671
4672 fpsrcop = (CPU86_LDouble)ST0;
4673 fptemp = (CPU86_LDouble)ST1;
4674 fpsrcop1.d = fpsrcop;
4675 fptemp1.d = fptemp;
4676 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4677
4678 if (expdif < 0) {
4679 /* optimisation? taken from the AMD docs */
4680 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4681 /* ST0 is unchanged */
4682 return;
4683 }
4684
4685 if ( expdif < 53 ) {
4686 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4687 /* round dblq towards zero */
4688 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4689 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4690
4691 /* convert dblq to q by truncating towards zero */
4692 if (dblq < 0.0)
4693 q = (signed long long int)(-dblq);
4694 else
4695 q = (signed long long int)dblq;
4696
4697 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4698 /* (C0,C3,C1) <-- (q2,q1,q0) */
4699 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4700 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4701 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4702 } else {
4703 int N = 32 + (expdif % 32); /* as per AMD docs */
4704 env->fpus |= 0x400; /* C2 <-- 1 */
4705 fptemp = pow(2.0, (double)(expdif - N));
4706 fpsrcop = (ST0 / ST1) / fptemp;
4707 /* fpsrcop = integer obtained by chopping */
4708 fpsrcop = (fpsrcop < 0.0) ?
4709 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4710 ST0 -= (ST1 * fpsrcop * fptemp);
4711 }
4712}
4713
4714void helper_fyl2xp1(void)
4715{
4716 CPU86_LDouble fptemp;
4717
4718 fptemp = ST0;
4719 if ((fptemp+1.0)>0.0) {
4720 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4721 ST1 *= fptemp;
4722 fpop();
4723 } else {
4724 env->fpus &= (~0x4700);
4725 env->fpus |= 0x400;
4726 }
4727}
4728
4729void helper_fsqrt(void)
4730{
4731 CPU86_LDouble fptemp;
4732
4733 fptemp = ST0;
4734 if (fptemp<0.0) {
4735 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4736 env->fpus |= 0x400;
4737 }
4738 ST0 = sqrt(fptemp);
4739}
4740
4741void helper_fsincos(void)
4742{
4743 CPU86_LDouble fptemp;
4744
4745 fptemp = ST0;
4746 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4747 env->fpus |= 0x400;
4748 } else {
4749 ST0 = sin(fptemp);
4750 fpush();
4751 ST0 = cos(fptemp);
4752 env->fpus &= (~0x400); /* C2 <-- 0 */
4753 /* the above code is for |arg| < 2**63 only */
4754 }
4755}
4756
4757void helper_frndint(void)
4758{
4759 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4760}
4761
4762void helper_fscale(void)
4763{
4764 ST0 = ldexp (ST0, (int)(ST1));
4765}
4766
4767void helper_fsin(void)
4768{
4769 CPU86_LDouble fptemp;
4770
4771 fptemp = ST0;
4772 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4773 env->fpus |= 0x400;
4774 } else {
4775 ST0 = sin(fptemp);
4776 env->fpus &= (~0x400); /* C2 <-- 0 */
4777 /* the above code is for |arg| < 2**53 only */
4778 }
4779}
4780
4781void helper_fcos(void)
4782{
4783 CPU86_LDouble fptemp;
4784
4785 fptemp = ST0;
4786 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4787 env->fpus |= 0x400;
4788 } else {
4789 ST0 = cos(fptemp);
4790 env->fpus &= (~0x400); /* C2 <-- 0 */
4791        /* the above code is for |arg| < 2**63 only */
4792 }
4793}
4794
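/* FXAM: classify ST0 via the C3,C2,C0 condition bits (zero, denormal,
   normal, infinity or NaN) and mirror its sign bit in C1. */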
4795void helper_fxam_ST0(void)
4796{
4797 CPU86_LDoubleU temp;
4798 int expdif;
4799
4800 temp.d = ST0;
4801
4802 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4803 if (SIGND(temp))
4804 env->fpus |= 0x200; /* C1 <-- 1 */
4805
4806 /* XXX: test fptags too */
4807 expdif = EXPD(temp);
4808 if (expdif == MAXEXPD) {
4809#ifdef USE_X86LDOUBLE
4810 if (MANTD(temp) == 0x8000000000000000ULL)
4811#else
4812 if (MANTD(temp) == 0)
4813#endif
4814 env->fpus |= 0x500 /*Infinity*/;
4815 else
4816 env->fpus |= 0x100 /*NaN*/;
4817 } else if (expdif == 0) {
4818 if (MANTD(temp) == 0)
4819 env->fpus |= 0x4000 /*Zero*/;
4820 else
4821 env->fpus |= 0x4400 /*Denormal*/;
4822 } else {
4823 env->fpus |= 0x400;
4824 }
4825}
4826
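/* FNSTENV: store the FPU environment - control word, status word with the
   current top of stack folded in, and the full 2-bit tag word rebuilt from
   fptags[] and the register contents - using the 28-byte 32-bit layout or
   the 14-byte 16-bit layout. Instruction/operand pointers are stored as 0. */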
4827void helper_fstenv(target_ulong ptr, int data32)
4828{
4829 int fpus, fptag, exp, i;
4830 uint64_t mant;
4831 CPU86_LDoubleU tmp;
4832
4833 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4834 fptag = 0;
4835 for (i=7; i>=0; i--) {
4836 fptag <<= 2;
4837 if (env->fptags[i]) {
4838 fptag |= 3;
4839 } else {
4840 tmp.d = env->fpregs[i].d;
4841 exp = EXPD(tmp);
4842 mant = MANTD(tmp);
4843 if (exp == 0 && mant == 0) {
4844 /* zero */
4845 fptag |= 1;
4846 } else if (exp == 0 || exp == MAXEXPD
4847#ifdef USE_X86LDOUBLE
4848 || (mant & (1LL << 63)) == 0
4849#endif
4850 ) {
4851 /* NaNs, infinity, denormal */
4852 fptag |= 2;
4853 }
4854 }
4855 }
4856 if (data32) {
4857 /* 32 bit */
4858 stl(ptr, env->fpuc);
4859 stl(ptr + 4, fpus);
4860 stl(ptr + 8, fptag);
4861 stl(ptr + 12, 0); /* fpip */
4862 stl(ptr + 16, 0); /* fpcs */
4863 stl(ptr + 20, 0); /* fpoo */
4864 stl(ptr + 24, 0); /* fpos */
4865 } else {
4866 /* 16 bit */
4867 stw(ptr, env->fpuc);
4868 stw(ptr + 2, fpus);
4869 stw(ptr + 4, fptag);
4870 stw(ptr + 6, 0);
4871 stw(ptr + 8, 0);
4872 stw(ptr + 10, 0);
4873 stw(ptr + 12, 0);
4874 }
4875}
4876
4877void helper_fldenv(target_ulong ptr, int data32)
4878{
4879 int i, fpus, fptag;
4880
4881 if (data32) {
4882 env->fpuc = lduw(ptr);
4883 fpus = lduw(ptr + 4);
4884 fptag = lduw(ptr + 8);
4885 }
4886 else {
4887 env->fpuc = lduw(ptr);
4888 fpus = lduw(ptr + 2);
4889 fptag = lduw(ptr + 4);
4890 }
4891 env->fpstt = (fpus >> 11) & 7;
4892 env->fpus = fpus & ~0x3800;
4893 for(i = 0;i < 8; i++) {
4894 env->fptags[i] = ((fptag & 3) == 3);
4895 fptag >>= 2;
4896 }
4897}
4898
4899void helper_fsave(target_ulong ptr, int data32)
4900{
4901 CPU86_LDouble tmp;
4902 int i;
4903
4904 helper_fstenv(ptr, data32);
4905
4906 ptr += (14 << data32);
4907 for(i = 0;i < 8; i++) {
4908 tmp = ST(i);
4909 helper_fstt(tmp, ptr);
4910 ptr += 10;
4911 }
4912
4913 /* fninit */
4914 env->fpus = 0;
4915 env->fpstt = 0;
4916 env->fpuc = 0x37f;
4917 env->fptags[0] = 1;
4918 env->fptags[1] = 1;
4919 env->fptags[2] = 1;
4920 env->fptags[3] = 1;
4921 env->fptags[4] = 1;
4922 env->fptags[5] = 1;
4923 env->fptags[6] = 1;
4924 env->fptags[7] = 1;
4925}
4926
4927void helper_frstor(target_ulong ptr, int data32)
4928{
4929 CPU86_LDouble tmp;
4930 int i;
4931
4932 helper_fldenv(ptr, data32);
4933 ptr += (14 << data32);
4934
4935 for(i = 0;i < 8; i++) {
4936 tmp = helper_fldt(ptr);
4937 ST(i) = tmp;
4938 ptr += 10;
4939 }
4940}
4941
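/* FXSAVE: write the extended save image. Offsets used below: 0x00 FCW,
   0x02 FSW, 0x04 abridged one-bit-per-register tag (1 = valid), 0x18 MXCSR,
   0x20 the eight ST registers at a 16-byte stride, 0xa0 the XMM registers.
   SSE state is only written when CR4.OSFXSR is set, and the XMM registers
   are skipped for "fast FXSAVE" (EFER.FFXSR set, CPL 0, long mode). */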
4942void helper_fxsave(target_ulong ptr, int data64)
4943{
4944 int fpus, fptag, i, nb_xmm_regs;
4945 CPU86_LDouble tmp;
4946 target_ulong addr;
4947
4948 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4949 fptag = 0;
4950 for(i = 0; i < 8; i++) {
4951 fptag |= (env->fptags[i] << i);
4952 }
4953 stw(ptr, env->fpuc);
4954 stw(ptr + 2, fpus);
4955 stw(ptr + 4, fptag ^ 0xff);
4956#ifdef TARGET_X86_64
4957 if (data64) {
4958 stq(ptr + 0x08, 0); /* rip */
4959 stq(ptr + 0x10, 0); /* rdp */
4960 } else
4961#endif
4962 {
4963 stl(ptr + 0x08, 0); /* eip */
4964 stl(ptr + 0x0c, 0); /* sel */
4965 stl(ptr + 0x10, 0); /* dp */
4966 stl(ptr + 0x14, 0); /* sel */
4967 }
4968
4969 addr = ptr + 0x20;
4970 for(i = 0;i < 8; i++) {
4971 tmp = ST(i);
4972 helper_fstt(tmp, addr);
4973 addr += 16;
4974 }
4975
4976 if (env->cr[4] & CR4_OSFXSR_MASK) {
4977 /* XXX: finish it */
4978 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
4979 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
4980 if (env->hflags & HF_CS64_MASK)
4981 nb_xmm_regs = 16;
4982 else
4983 nb_xmm_regs = 8;
4984 addr = ptr + 0xa0;
4985 /* Fast FXSAVE leaves out the XMM registers */
4986 if (!(env->efer & MSR_EFER_FFXSR)
4987 || (env->hflags & HF_CPL_MASK)
4988 || !(env->hflags & HF_LMA_MASK)) {
4989 for(i = 0; i < nb_xmm_regs; i++) {
4990 stq(addr, env->xmm_regs[i].XMM_Q(0));
4991 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
4992 addr += 16;
4993 }
4994 }
4995 }
4996}
4997
4998void helper_fxrstor(target_ulong ptr, int data64)
4999{
5000 int i, fpus, fptag, nb_xmm_regs;
5001 CPU86_LDouble tmp;
5002 target_ulong addr;
5003
5004 env->fpuc = lduw(ptr);
5005 fpus = lduw(ptr + 2);
5006 fptag = lduw(ptr + 4);
5007 env->fpstt = (fpus >> 11) & 7;
5008 env->fpus = fpus & ~0x3800;
5009 fptag ^= 0xff;
5010 for(i = 0;i < 8; i++) {
5011 env->fptags[i] = ((fptag >> i) & 1);
5012 }
5013
5014 addr = ptr + 0x20;
5015 for(i = 0;i < 8; i++) {
5016 tmp = helper_fldt(addr);
5017 ST(i) = tmp;
5018 addr += 16;
5019 }
5020
5021 if (env->cr[4] & CR4_OSFXSR_MASK) {
5022 /* XXX: finish it */
5023 env->mxcsr = ldl(ptr + 0x18);
5024 //ldl(ptr + 0x1c);
5025 if (env->hflags & HF_CS64_MASK)
5026 nb_xmm_regs = 16;
5027 else
5028 nb_xmm_regs = 8;
5029 addr = ptr + 0xa0;
5030        /* Fast FXRSTOR leaves out the XMM registers */
5031 if (!(env->efer & MSR_EFER_FFXSR)
5032 || (env->hflags & HF_CPL_MASK)
5033 || !(env->hflags & HF_LMA_MASK)) {
5034 for(i = 0; i < nb_xmm_regs; i++) {
5035#if !defined(VBOX) || __GNUC__ < 4
5036 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5037 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5038#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5039# if 1
5040 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5041 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5042 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5043 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5044# else
5045 /* this works fine on Mac OS X, gcc 4.0.1 */
5046 uint64_t u64 = ldq(addr);
5047            env->xmm_regs[i].XMM_Q(0) = u64;
5048            u64 = ldq(addr + 8);
5049 env->xmm_regs[i].XMM_Q(1) = u64;
5050# endif
5051#endif
5052 addr += 16;
5053 }
5054 }
5055 }
5056}
5057
5058#ifndef USE_X86LDOUBLE
5059
5060void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5061{
5062 CPU86_LDoubleU temp;
5063 int e;
5064
5065 temp.d = f;
5066 /* mantissa */
5067 *pmant = (MANTD(temp) << 11) | (1LL << 63);
5068 /* exponent + sign */
5069 e = EXPD(temp) - EXPBIAS + 16383;
5070 e |= SIGND(temp) >> 16;
5071 *pexp = e;
5072}
5073
5074CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5075{
5076 CPU86_LDoubleU temp;
5077 int e;
5078 uint64_t ll;
5079
5080 /* XXX: handle overflow ? */
5081 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
5082 e |= (upper >> 4) & 0x800; /* sign */
5083 ll = (mant >> 11) & ((1LL << 52) - 1);
5084#ifdef __arm__
5085 temp.l.upper = (e << 20) | (ll >> 32);
5086 temp.l.lower = ll;
5087#else
5088 temp.ll = ll | ((uint64_t)e << 52);
5089#endif
5090 return temp.d;
5091}
5092
5093#else
5094
5095void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5096{
5097 CPU86_LDoubleU temp;
5098
5099 temp.d = f;
5100 *pmant = temp.l.lower;
5101 *pexp = temp.l.upper;
5102}
5103
5104CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5105{
5106 CPU86_LDoubleU temp;
5107
5108 temp.l.upper = upper;
5109 temp.l.lower = mant;
5110 return temp.d;
5111}
5112#endif
5113
5114#ifdef TARGET_X86_64
5115
5116//#define DEBUG_MULDIV
5117
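/* 128-bit arithmetic helpers for the 64-bit MUL/IMUL/DIV/IDIV helpers below.
   div64() divides the 128-bit value {*phigh:*plow} by b with a 64-step
   shift-and-subtract loop, leaving the quotient in *plow and the remainder
   in *phigh; a nonzero return means the quotient does not fit in 64 bits and
   the callers raise the divide error. */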
5118static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
5119{
5120 *plow += a;
5121 /* carry test */
5122 if (*plow < a)
5123 (*phigh)++;
5124 *phigh += b;
5125}
5126
5127static void neg128(uint64_t *plow, uint64_t *phigh)
5128{
5129 *plow = ~ *plow;
5130 *phigh = ~ *phigh;
5131 add128(plow, phigh, 1, 0);
5132}
5133
5134/* return TRUE if overflow */
5135static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5136{
5137 uint64_t q, r, a1, a0;
5138 int i, qb, ab;
5139
5140 a0 = *plow;
5141 a1 = *phigh;
5142 if (a1 == 0) {
5143 q = a0 / b;
5144 r = a0 % b;
5145 *plow = q;
5146 *phigh = r;
5147 } else {
5148 if (a1 >= b)
5149 return 1;
5150 /* XXX: use a better algorithm */
5151 for(i = 0; i < 64; i++) {
5152 ab = a1 >> 63;
5153 a1 = (a1 << 1) | (a0 >> 63);
5154 if (ab || a1 >= b) {
5155 a1 -= b;
5156 qb = 1;
5157 } else {
5158 qb = 0;
5159 }
5160 a0 = (a0 << 1) | qb;
5161 }
5162#if defined(DEBUG_MULDIV)
5163 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5164 *phigh, *plow, b, a0, a1);
5165#endif
5166 *plow = a0;
5167 *phigh = a1;
5168 }
5169 return 0;
5170}
5171
5172/* return TRUE if overflow */
5173static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5174{
5175 int sa, sb;
5176 sa = ((int64_t)*phigh < 0);
5177 if (sa)
5178 neg128(plow, phigh);
5179 sb = (b < 0);
5180 if (sb)
5181 b = -b;
5182 if (div64(plow, phigh, b) != 0)
5183 return 1;
5184 if (sa ^ sb) {
5185 if (*plow > (1ULL << 63))
5186 return 1;
5187 *plow = - *plow;
5188 } else {
5189 if (*plow >= (1ULL << 63))
5190 return 1;
5191 }
5192 if (sa)
5193 *phigh = - *phigh;
5194 return 0;
5195}
5196
5197void helper_mulq_EAX_T0(target_ulong t0)
5198{
5199 uint64_t r0, r1;
5200
5201 mulu64(&r0, &r1, EAX, t0);
5202 EAX = r0;
5203 EDX = r1;
5204 CC_DST = r0;
5205 CC_SRC = r1;
5206}
5207
5208void helper_imulq_EAX_T0(target_ulong t0)
5209{
5210 uint64_t r0, r1;
5211
5212 muls64(&r0, &r1, EAX, t0);
5213 EAX = r0;
5214 EDX = r1;
5215 CC_DST = r0;
5216 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5217}
5218
5219target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
5220{
5221 uint64_t r0, r1;
5222
5223 muls64(&r0, &r1, t0, t1);
5224 CC_DST = r0;
5225 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5226 return r0;
5227}
5228
5229void helper_divq_EAX(target_ulong t0)
5230{
5231 uint64_t r0, r1;
5232 if (t0 == 0) {
5233 raise_exception(EXCP00_DIVZ);
5234 }
5235 r0 = EAX;
5236 r1 = EDX;
5237 if (div64(&r0, &r1, t0))
5238 raise_exception(EXCP00_DIVZ);
5239 EAX = r0;
5240 EDX = r1;
5241}
5242
5243void helper_idivq_EAX(target_ulong t0)
5244{
5245 uint64_t r0, r1;
5246 if (t0 == 0) {
5247 raise_exception(EXCP00_DIVZ);
5248 }
5249 r0 = EAX;
5250 r1 = EDX;
5251 if (idiv64(&r0, &r1, t0))
5252 raise_exception(EXCP00_DIVZ);
5253 EAX = r0;
5254 EDX = r1;
5255}
5256#endif
5257
5258static void do_hlt(void)
5259{
5260 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
5261 env->halted = 1;
5262 env->exception_index = EXCP_HLT;
5263 cpu_loop_exit();
5264}
5265
5266void helper_hlt(int next_eip_addend)
5267{
5268 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
5269 EIP += next_eip_addend;
5270
5271 do_hlt();
5272}
5273
5274void helper_monitor(target_ulong ptr)
5275{
5276#ifdef VBOX
5277 if ((uint32_t)ECX > 1)
5278 raise_exception(EXCP0D_GPF);
5279#else /* !VBOX */
5280 if ((uint32_t)ECX != 0)
5281 raise_exception(EXCP0D_GPF);
5282#endif /* !VBOX */
5283 /* XXX: store address ? */
5284 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
5285}
5286
5287void helper_mwait(int next_eip_addend)
5288{
5289 if ((uint32_t)ECX != 0)
5290 raise_exception(EXCP0D_GPF);
5291#ifdef VBOX
5292 helper_hlt(next_eip_addend);
5293#else /* !VBOX */
5294 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5295 EIP += next_eip_addend;
5296
5297 /* XXX: not complete but not completely erroneous */
5298 if (env->cpu_index != 0 || env->next_cpu != NULL) {
5299 /* more than one CPU: do not sleep because another CPU may
5300 wake this one */
5301 } else {
5302 do_hlt();
5303 }
5304#endif /* !VBOX */
5305}
5306
5307void helper_debug(void)
5308{
5309 env->exception_index = EXCP_DEBUG;
5310 cpu_loop_exit();
5311}
5312
5313void helper_raise_interrupt(int intno, int next_eip_addend)
5314{
5315 raise_interrupt(intno, 1, 0, next_eip_addend);
5316}
5317
5318void helper_raise_exception(int exception_index)
5319{
5320 raise_exception(exception_index);
5321}
5322
5323void helper_cli(void)
5324{
5325 env->eflags &= ~IF_MASK;
5326}
5327
5328void helper_sti(void)
5329{
5330 env->eflags |= IF_MASK;
5331}
5332
5333#ifdef VBOX
5334void helper_cli_vme(void)
5335{
5336 env->eflags &= ~VIF_MASK;
5337}
5338
5339void helper_sti_vme(void)
5340{
5341 /* First check, then change eflags according to the AMD manual */
5342 if (env->eflags & VIP_MASK) {
5343 raise_exception(EXCP0D_GPF);
5344 }
5345 env->eflags |= VIF_MASK;
5346}
5347#endif /* VBOX */
5348
5349#if 0
5350/* vm86plus instructions */
5351void helper_cli_vm(void)
5352{
5353 env->eflags &= ~VIF_MASK;
5354}
5355
5356void helper_sti_vm(void)
5357{
5358 env->eflags |= VIF_MASK;
5359 if (env->eflags & VIP_MASK) {
5360 raise_exception(EXCP0D_GPF);
5361 }
5362}
5363#endif
5364
5365void helper_set_inhibit_irq(void)
5366{
5367 env->hflags |= HF_INHIBIT_IRQ_MASK;
5368}
5369
5370void helper_reset_inhibit_irq(void)
5371{
5372 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5373}
5374
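/* BOUND: check the signed index against the lower/upper bounds pair stored
   at a0 and raise #BR (EXCP05_BOUND) when it falls outside [low, high]. */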
5375void helper_boundw(target_ulong a0, int v)
5376{
5377 int low, high;
5378 low = ldsw(a0);
5379 high = ldsw(a0 + 2);
5380 v = (int16_t)v;
5381 if (v < low || v > high) {
5382 raise_exception(EXCP05_BOUND);
5383 }
5384}
5385
5386void helper_boundl(target_ulong a0, int v)
5387{
5388 int low, high;
5389 low = ldl(a0);
5390 high = ldl(a0 + 4);
5391 if (v < low || v > high) {
5392 raise_exception(EXCP05_BOUND);
5393 }
5394}
5395
5396static float approx_rsqrt(float a)
5397{
5398 return 1.0 / sqrt(a);
5399}
5400
5401static float approx_rcp(float a)
5402{
5403 return 1.0 / a;
5404}
5405
5406#if !defined(CONFIG_USER_ONLY)
5407
5408#define MMUSUFFIX _mmu
5409
5410#define SHIFT 0
5411#include "softmmu_template.h"
5412
5413#define SHIFT 1
5414#include "softmmu_template.h"
5415
5416#define SHIFT 2
5417#include "softmmu_template.h"
5418
5419#define SHIFT 3
5420#include "softmmu_template.h"
5421
5422#endif
5423
5424#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
5425/* This code assumes that real physical addresses always fit into a host CPU
5426   register, which is wrong in general but true for our current use cases. */
5427RTCCUINTREG REGPARM __ldb_vbox_phys(RTCCUINTREG addr)
5428{
5429 return remR3PhysReadS8(addr);
5430}
5431RTCCUINTREG REGPARM __ldub_vbox_phys(RTCCUINTREG addr)
5432{
5433 return remR3PhysReadU8(addr);
5434}
5435void REGPARM __stb_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5436{
5437 remR3PhysWriteU8(addr, val);
5438}
5439RTCCUINTREG REGPARM __ldw_vbox_phys(RTCCUINTREG addr)
5440{
5441 return remR3PhysReadS16(addr);
5442}
5443RTCCUINTREG REGPARM __lduw_vbox_phys(RTCCUINTREG addr)
5444{
5445 return remR3PhysReadU16(addr);
5446}
5447void REGPARM __stw_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5448{
5449 remR3PhysWriteU16(addr, val);
5450}
5451RTCCUINTREG REGPARM __ldl_vbox_phys(RTCCUINTREG addr)
5452{
5453 return remR3PhysReadS32(addr);
5454}
5455RTCCUINTREG REGPARM __ldul_vbox_phys(RTCCUINTREG addr)
5456{
5457 return remR3PhysReadU32(addr);
5458}
5459void REGPARM __stl_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5460{
5461 remR3PhysWriteU32(addr, val);
5462}
5463uint64_t REGPARM __ldq_vbox_phys(RTCCUINTREG addr)
5464{
5465 return remR3PhysReadU64(addr);
5466}
5467void REGPARM __stq_vbox_phys(RTCCUINTREG addr, uint64_t val)
5468{
5469 remR3PhysWriteU64(addr, val);
5470}
5471#endif /* VBOX */
5472
5473#if !defined(CONFIG_USER_ONLY)
5474/* try to fill the TLB and return an exception if error. If retaddr is
5475 NULL, it means that the function was called in C code (i.e. not
5476 from generated code or from helper.c) */
5477/* XXX: fix it to restore all registers */
5478void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5479{
5480 TranslationBlock *tb;
5481 int ret;
5482 unsigned long pc;
5483 CPUX86State *saved_env;
5484
5485 /* XXX: hack to restore env in all cases, even if not called from
5486 generated code */
5487 saved_env = env;
5488 env = cpu_single_env;
5489
5490 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
5491 if (ret) {
5492 if (retaddr) {
5493 /* now we have a real cpu fault */
5494 pc = (unsigned long)retaddr;
5495 tb = tb_find_pc(pc);
5496 if (tb) {
5497 /* the PC is inside the translated code. It means that we have
5498 a virtual CPU fault */
5499 cpu_restore_state(tb, env, pc, NULL);
5500 }
5501 }
5502 raise_exception_err(env->exception_index, env->error_code);
5503 }
5504 env = saved_env;
5505}
5506#endif
5507
5508#ifdef VBOX
5509
5510/**
5511 * Correctly computes the eflags.
5512 * @returns eflags.
5513 * @param env1 CPU environment.
5514 */
5515uint32_t raw_compute_eflags(CPUX86State *env1)
5516{
5517 CPUX86State *savedenv = env;
5518 uint32_t efl;
5519 env = env1;
5520 efl = compute_eflags();
5521 env = savedenv;
5522 return efl;
5523}
5524
5525/**
5526 * Reads byte from virtual address in guest memory area.
5527 * XXX: is it working for any addresses? swapped out pages?
5528 * @returns read data byte.
5529 * @param env1 CPU environment.
5530 * @param pvAddr GC Virtual address.
5531 */
5532uint8_t read_byte(CPUX86State *env1, target_ulong addr)
5533{
5534 CPUX86State *savedenv = env;
5535 uint8_t u8;
5536 env = env1;
5537 u8 = ldub_kernel(addr);
5538 env = savedenv;
5539 return u8;
5540}
5541
5542/**
5543 * Reads a 16-bit word from a virtual address in the guest memory area.
5544 * XXX: is it working for any addresses? swapped out pages?
5545 * @returns read data word.
5546 * @param env1 CPU environment.
5547 * @param pvAddr GC Virtual address.
5548 */
5549uint16_t read_word(CPUX86State *env1, target_ulong addr)
5550{
5551 CPUX86State *savedenv = env;
5552 uint16_t u16;
5553 env = env1;
5554 u16 = lduw_kernel(addr);
5555 env = savedenv;
5556 return u16;
5557}
5558
5559/**
5560 * Reads a 32-bit dword from a virtual address in the guest memory area.
5561 * XXX: is it working for any addresses? swapped out pages?
5562 * @returns read data dword.
5563 * @param env1 CPU environment.
5564 * @param pvAddr GC Virtual address.
5565 */
5566uint32_t read_dword(CPUX86State *env1, target_ulong addr)
5567{
5568 CPUX86State *savedenv = env;
5569 uint32_t u32;
5570 env = env1;
5571 u32 = ldl_kernel(addr);
5572 env = savedenv;
5573 return u32;
5574}
5575
5576/**
5577 * Writes a byte to a virtual address in the guest memory area.
5578 * XXX: is it working for any addresses? swapped out pages?
5580 * @param env1 CPU environment.
5581 * @param pvAddr GC Virtual address.
5582 * @param val byte value
5583 */
5584void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
5585{
5586 CPUX86State *savedenv = env;
5587 env = env1;
5588 stb(addr, val);
5589 env = savedenv;
5590}
5591
5592void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5593{
5594 CPUX86State *savedenv = env;
5595 env = env1;
5596 stw(addr, val);
5597 env = savedenv;
5598}
5599
5600void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5601{
5602 CPUX86State *savedenv = env;
5603 env = env1;
5604 stl(addr, val);
5605 env = savedenv;
5606}
5607
5608/**
5609 * Correctly loads selector into segment register with updating internal
5610 * qemu data/caches.
5611 * @param env1 CPU environment.
5612 * @param seg_reg Segment register.
5613 * @param selector Selector to load.
5614 */
5615void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5616{
5617 CPUX86State *savedenv = env;
5618#ifdef FORCE_SEGMENT_SYNC
5619 jmp_buf old_buf;
5620#endif
5621
5622 env = env1;
5623
5624 if ( env->eflags & X86_EFL_VM
5625 || !(env->cr[0] & X86_CR0_PE))
5626 {
5627 load_seg_vm(seg_reg, selector);
5628
5629 env = savedenv;
5630
5631 /* Successful sync. */
5632 env1->segs[seg_reg].newselector = 0;
5633 }
5634 else
5635 {
5636        /* For some reason this works even without saving/restoring the jump buffer,
5637           and as this code is time critical, let's not do that. */
5638#ifdef FORCE_SEGMENT_SYNC
5639 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5640#endif
5641 if (setjmp(env1->jmp_env) == 0)
5642 {
5643 if (seg_reg == R_CS)
5644 {
5645 uint32_t e1, e2;
5646 e1 = e2 = 0;
5647 load_segment(&e1, &e2, selector);
5648 cpu_x86_load_seg_cache(env, R_CS, selector,
5649 get_seg_base(e1, e2),
5650 get_seg_limit(e1, e2),
5651 e2);
5652 }
5653 else
5654 helper_load_seg(seg_reg, selector);
5655            /* We used to use tss_load_seg(seg_reg, selector); which, for some reason, ignored
5656               loading 0 selectors, which in turn led to subtle problems like #3588 */
5657
5658 env = savedenv;
5659
5660 /* Successful sync. */
5661 env1->segs[seg_reg].newselector = 0;
5662 }
5663 else
5664 {
5665 env = savedenv;
5666
5667 /* Postpone sync until the guest uses the selector. */
5668 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5669 env1->segs[seg_reg].newselector = selector;
5670 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5671 env1->exception_index = -1;
5672 env1->error_code = 0;
5673 env1->old_exception = -1;
5674 }
5675#ifdef FORCE_SEGMENT_SYNC
5676 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5677#endif
5678 }
5679
5680}
5681
5682DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5683{
5684 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
5685}
5686
5687
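/* Translate the instruction at the current CS:EIP into a temporary TB and
   execute it, repeating while EIP is unchanged (REP prefixes) unless an
   external interrupt becomes pending; the TB is then invalidated and freed.
   If the instruction inhibits interrupts (e.g. STI, MOV SS), the following
   instruction is emulated as well. */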
5688int emulate_single_instr(CPUX86State *env1)
5689{
5690 TranslationBlock *tb;
5691 TranslationBlock *current;
5692 int flags;
5693 uint8_t *tc_ptr;
5694 target_ulong old_eip;
5695
5696 /* ensures env is loaded! */
5697 CPUX86State *savedenv = env;
5698 env = env1;
5699
5700 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
5701
5702 current = env->current_tb;
5703 env->current_tb = NULL;
5704 flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
5705
5706 /*
5707 * Translate only one instruction.
5708 */
5709 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
5710 tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
5711 env->segs[R_CS].base, flags, 0);
5712
5713 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
5714
5715
5716 /* tb_link_phys: */
5717 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
5718 tb->jmp_next[0] = NULL;
5719 tb->jmp_next[1] = NULL;
5720 Assert(tb->jmp_next[0] == NULL);
5721 Assert(tb->jmp_next[1] == NULL);
5722 if (tb->tb_next_offset[0] != 0xffff)
5723 tb_reset_jump(tb, 0);
5724 if (tb->tb_next_offset[1] != 0xffff)
5725 tb_reset_jump(tb, 1);
5726
5727 /*
5728 * Execute it using emulation
5729 */
5730 old_eip = env->eip;
5731 env->current_tb = tb;
5732
5733 /*
5734     * EIP remains the same for repeated (REP-prefixed) instructions; it is unclear why qemu
5735     * doesn't do a jump inside the generated code. Perhaps not a very safe hack.
5736 */
5737 while(old_eip == env->eip)
5738 {
5739 tc_ptr = tb->tc_ptr;
5740
5741#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
5742 int fake_ret;
5743 tcg_qemu_tb_exec(tc_ptr, fake_ret);
5744#else
5745 tcg_qemu_tb_exec(tc_ptr);
5746#endif
5747 /*
5748 * Exit once we detect an external interrupt and interrupts are enabled
5749 */
5750 if( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT|CPU_INTERRUPT_EXTERNAL_TIMER)) ||
5751 ( (env->eflags & IF_MASK) &&
5752 !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
5753 (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) ) )
5754 {
5755 break;
5756 }
5757 }
5758 env->current_tb = current;
5759
5760 tb_phys_invalidate(tb, -1);
5761 tb_free(tb);
5762/*
5763 Assert(tb->tb_next_offset[0] == 0xffff);
5764 Assert(tb->tb_next_offset[1] == 0xffff);
5765 Assert(tb->tb_next[0] == 0xffff);
5766 Assert(tb->tb_next[1] == 0xffff);
5767 Assert(tb->jmp_next[0] == NULL);
5768 Assert(tb->jmp_next[1] == NULL);
5769 Assert(tb->jmp_first == NULL); */
5770
5771 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
5772
5773 /*
5774 * Execute the next instruction when we encounter instruction fusing.
5775 */
5776 if (env->hflags & HF_INHIBIT_IRQ_MASK)
5777 {
5778 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
5779 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5780 emulate_single_instr(env);
5781 }
5782
5783 env = savedenv;
5784 return 0;
5785}
5786
5787/**
5788 * Correctly loads a new ldtr selector.
5789 *
5790 * @param env1 CPU environment.
5791 * @param selector Selector to load.
5792 */
5793void sync_ldtr(CPUX86State *env1, int selector)
5794{
5795 CPUX86State *saved_env = env;
5796 if (setjmp(env1->jmp_env) == 0)
5797 {
5798 env = env1;
5799 helper_lldt(selector);
5800 env = saved_env;
5801 }
5802 else
5803 {
5804 env = saved_env;
5805#ifdef VBOX_STRICT
5806 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
5807#endif
5808 }
5809}
5810
5811int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
5812 uint32_t *esp_ptr, int dpl)
5813{
5814 int type, index, shift;
5815
5816 CPUX86State *savedenv = env;
5817 env = env1;
5818
5819 if (!(env->tr.flags & DESC_P_MASK))
5820 cpu_abort(env, "invalid tss");
5821 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
5822 if ((type & 7) != 1)
5823 cpu_abort(env, "invalid tss type %d", type);
5824 shift = type >> 3;
5825 index = (dpl * 4 + 2) << shift;
5826 if (index + (4 << shift) - 1 > env->tr.limit)
5827 {
5828 env = savedenv;
5829 return 0;
5830 }
5831 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
5832
5833 if (shift == 0) {
5834 *esp_ptr = lduw_kernel(env->tr.base + index);
5835 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
5836 } else {
5837 *esp_ptr = ldl_kernel(env->tr.base + index);
5838 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
5839 }
5840
5841 env = savedenv;
5842 return 1;
5843}
5844
5845//*****************************************************************************
5846// Needs to be at the bottom of the file (overriding macros)
5847
5848static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
5849{
5850 return *(CPU86_LDouble *)ptr;
5851}
5852
5853static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5854{
5855 *(CPU86_LDouble *)ptr = f;
5856}
5857
5858#undef stw
5859#undef stl
5860#undef stq
5861#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
5862#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
5863#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
5864
5865//*****************************************************************************
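/* Write the guest FPU/SSE state into the raw state buffer at ptr: FXSAVE
   layout when the guest CPU reports CPUID_FXSR (XMM registers only if
   CR4.OSFXSR is set), otherwise the legacy X86FPUSTATE layout. This relies
   on the raw-pointer store macros redefined above, so it must stay below
   them. */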
5866void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
5867{
5868 int fpus, fptag, i, nb_xmm_regs;
5869 CPU86_LDouble tmp;
5870 uint8_t *addr;
5871 int data64 = !!(env->hflags & HF_LMA_MASK);
5872
5873 if (env->cpuid_features & CPUID_FXSR)
5874 {
5875 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5876 fptag = 0;
5877 for(i = 0; i < 8; i++) {
5878 fptag |= (env->fptags[i] << i);
5879 }
5880 stw(ptr, env->fpuc);
5881 stw(ptr + 2, fpus);
5882 stw(ptr + 4, fptag ^ 0xff);
5883
5884 addr = ptr + 0x20;
5885 for(i = 0;i < 8; i++) {
5886 tmp = ST(i);
5887 helper_fstt_raw(tmp, addr);
5888 addr += 16;
5889 }
5890
5891 if (env->cr[4] & CR4_OSFXSR_MASK) {
5892 /* XXX: finish it */
5893 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5894 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5895 nb_xmm_regs = 8 << data64;
5896 addr = ptr + 0xa0;
5897 for(i = 0; i < nb_xmm_regs; i++) {
5898#if __GNUC__ < 4
5899 stq(addr, env->xmm_regs[i].XMM_Q(0));
5900 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5901#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5902 stl(addr, env->xmm_regs[i].XMM_L(0));
5903 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
5904 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
5905 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
5906#endif
5907 addr += 16;
5908 }
5909 }
5910 }
5911 else
5912 {
5913 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
5914 int fptag;
5915
5916 fp->FCW = env->fpuc;
5917 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5918 fptag = 0;
5919 for (i=7; i>=0; i--) {
5920 fptag <<= 2;
5921 if (env->fptags[i]) {
5922 fptag |= 3;
5923 } else {
5924 /* the FPU automatically computes it */
5925 }
5926 }
5927 fp->FTW = fptag;
5928
5929 for(i = 0;i < 8; i++) {
5930 tmp = ST(i);
5931 helper_fstt_raw(tmp, &fp->regs[i].au8[0]);
5932 }
5933 }
5934}
5935
5936//*****************************************************************************
5937#undef lduw
5938#undef ldl
5939#undef ldq
5940#define lduw(a) *(uint16_t *)(a)
5941#define ldl(a) *(uint32_t *)(a)
5942#define ldq(a) *(uint64_t *)(a)
5943//*****************************************************************************
5944void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
5945{
5946 int i, fpus, fptag, nb_xmm_regs;
5947 CPU86_LDouble tmp;
5948 uint8_t *addr;
5949 int data64 = !!(env->hflags & HF_LMA_MASK); /* don't use HF_CS64_MASK here as cs hasn't been synced when this function is called. */
5950
5951 if (env->cpuid_features & CPUID_FXSR)
5952 {
5953 env->fpuc = lduw(ptr);
5954 fpus = lduw(ptr + 2);
5955 fptag = lduw(ptr + 4);
5956 env->fpstt = (fpus >> 11) & 7;
5957 env->fpus = fpus & ~0x3800;
5958 fptag ^= 0xff;
5959 for(i = 0;i < 8; i++) {
5960 env->fptags[i] = ((fptag >> i) & 1);
5961 }
5962
5963 addr = ptr + 0x20;
5964 for(i = 0;i < 8; i++) {
5965 tmp = helper_fldt_raw(addr);
5966 ST(i) = tmp;
5967 addr += 16;
5968 }
5969
5970 if (env->cr[4] & CR4_OSFXSR_MASK) {
5971 /* XXX: finish it, endianness */
5972 env->mxcsr = ldl(ptr + 0x18);
5973 //ldl(ptr + 0x1c);
5974 nb_xmm_regs = 8 << data64;
5975 addr = ptr + 0xa0;
5976 for(i = 0; i < nb_xmm_regs; i++) {
5977#if HC_ARCH_BITS == 32
5978 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
5979 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5980 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5981 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5982 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5983#else
5984 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5985 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5986#endif
5987 addr += 16;
5988 }
5989 }
5990 }
5991 else
5992 {
5993 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
5994 int fptag, j;
5995
5996 env->fpuc = fp->FCW;
5997 env->fpstt = (fp->FSW >> 11) & 7;
5998 env->fpus = fp->FSW & ~0x3800;
5999 fptag = fp->FTW;
6000 for(i = 0;i < 8; i++) {
6001 env->fptags[i] = ((fptag & 3) == 3);
6002 fptag >>= 2;
6003 }
6004 j = env->fpstt;
6005 for(i = 0;i < 8; i++) {
6006 tmp = helper_fldt_raw(&fp->regs[i].au8[0]);
6007 ST(i) = tmp;
6008 }
6009 }
6010}
6011//*****************************************************************************
6012//*****************************************************************************
6013
6014#endif /* VBOX */
6015
6016/* Secure Virtual Machine helpers */
6017
6018#if defined(CONFIG_USER_ONLY)
6019
6020void helper_vmrun(int aflag, int next_eip_addend)
6021{
6022}
6023void helper_vmmcall(void)
6024{
6025}
6026void helper_vmload(int aflag)
6027{
6028}
6029void helper_vmsave(int aflag)
6030{
6031}
6032void helper_stgi(void)
6033{
6034}
6035void helper_clgi(void)
6036{
6037}
6038void helper_skinit(void)
6039{
6040}
6041void helper_invlpga(int aflag)
6042{
6043}
6044void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6045{
6046}
6047void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6048{
6049}
6050
6051void helper_svm_check_io(uint32_t port, uint32_t param,
6052 uint32_t next_eip_addend)
6053{
6054}
6055#else
6056
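/* Store/load a hidden segment register to/from a vmcb_seg entry. The 16-bit
   attrib field packs the descriptor attributes: flags bits 8..15 go into the
   low byte and flags bits 20..23 into attrib bits 8..11; svm_load_seg()
   reverses this packing. */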
6057static inline void svm_save_seg(target_phys_addr_t addr,
6058 const SegmentCache *sc)
6059{
6060 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6061 sc->selector);
6062 stq_phys(addr + offsetof(struct vmcb_seg, base),
6063 sc->base);
6064 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6065 sc->limit);
6066 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6067 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6068}
6069
6070static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6071{
6072 unsigned int flags;
6073
6074 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6075 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6076 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6077 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6078 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6079}
6080
6081static inline void svm_load_seg_cache(target_phys_addr_t addr,
6082 CPUState *env, int seg_reg)
6083{
6084 SegmentCache sc1, *sc = &sc1;
6085 svm_load_seg(addr, sc);
6086 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6087 sc->base, sc->limit, sc->flags);
6088}
6089
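/* VMRUN: save the host state into the hsave page, then load the guest state
   from the VMCB pointed to by RAX (or EAX outside 64-bit mode), caching the
   intercept bitmaps in the CPU state, set GIF, and finally inject a pending
   event from EVENTINJ if the VMCB requests one before resuming the guest. */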
6090void helper_vmrun(int aflag, int next_eip_addend)
6091{
6092 target_ulong addr;
6093 uint32_t event_inj;
6094 uint32_t int_ctl;
6095
6096 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
6097
6098 if (aflag == 2)
6099 addr = EAX;
6100 else
6101 addr = (uint32_t)EAX;
6102
6103 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
6104
6105 env->vm_vmcb = addr;
6106
6107 /* save the current CPU state in the hsave page */
6108 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6109 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6110
6111 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6112 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6113
6114 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
6115 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
6116 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
6117 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
6118 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
6119 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
6120
6121 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
6122 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
6123
6124 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
6125 &env->segs[R_ES]);
6126 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
6127 &env->segs[R_CS]);
6128 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
6129 &env->segs[R_SS]);
6130 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
6131 &env->segs[R_DS]);
6132
6133 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
6134 EIP + next_eip_addend);
6135 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
6136 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
6137
6138 /* load the interception bitmaps so we do not need to access the
6139 vmcb in svm mode */
6140 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
6141 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
6142 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
6143 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
6144 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
6145 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
6146
6147 /* enable intercepts */
6148 env->hflags |= HF_SVMI_MASK;
6149
6150 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
6151
6152 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
6153 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
6154
6155 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
6156 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
6157
6158 /* clear exit_info_2 so we behave like the real hardware */
6159 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6160
6161 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
6162 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
6163 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
6164 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
6165 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6166 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6167 if (int_ctl & V_INTR_MASKING_MASK) {
6168 env->v_tpr = int_ctl & V_TPR_MASK;
6169 env->hflags2 |= HF2_VINTR_MASK;
6170 if (env->eflags & IF_MASK)
6171 env->hflags2 |= HF2_HIF_MASK;
6172 }
6173
6174 cpu_load_efer(env,
6175 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6176 env->eflags = 0;
6177 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
6178 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6179 CC_OP = CC_OP_EFLAGS;
6180
6181 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
6182 env, R_ES);
6183 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6184 env, R_CS);
6185 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6186 env, R_SS);
6187 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6188 env, R_DS);
6189
6190 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
6191 env->eip = EIP;
6192 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
6193 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
6194 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
6195 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6196 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
6197
6198 /* FIXME: guest state consistency checks */
6199
6200 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6201 case TLB_CONTROL_DO_NOTHING:
6202 break;
6203 case TLB_CONTROL_FLUSH_ALL_ASID:
6204 /* FIXME: this is not 100% correct but should work for now */
6205 tlb_flush(env, 1);
6206 break;
6207 }
6208
6209 env->hflags2 |= HF2_GIF_MASK;
6210
6211 if (int_ctl & V_IRQ_MASK) {
6212 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
6213 }
6214
6215 /* maybe we need to inject an event */
6216 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
6217 if (event_inj & SVM_EVTINJ_VALID) {
6218 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
6219 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
6220 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
6221 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
6222
6223 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
6224 /* FIXME: need to implement valid_err */
6225 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
6226 case SVM_EVTINJ_TYPE_INTR:
6227 env->exception_index = vector;
6228 env->error_code = event_inj_err;
6229 env->exception_is_int = 0;
6230 env->exception_next_eip = -1;
6231 qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
6232 /* XXX: is it always correct ? */
6233 do_interrupt(vector, 0, 0, 0, 1);
6234 break;
6235 case SVM_EVTINJ_TYPE_NMI:
6236 env->exception_index = EXCP02_NMI;
6237 env->error_code = event_inj_err;
6238 env->exception_is_int = 0;
6239 env->exception_next_eip = EIP;
6240 qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
6241 cpu_loop_exit();
6242 break;
6243 case SVM_EVTINJ_TYPE_EXEPT:
6244 env->exception_index = vector;
6245 env->error_code = event_inj_err;
6246 env->exception_is_int = 0;
6247 env->exception_next_eip = -1;
6248 qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
6249 cpu_loop_exit();
6250 break;
6251 case SVM_EVTINJ_TYPE_SOFT:
6252 env->exception_index = vector;
6253 env->error_code = event_inj_err;
6254 env->exception_is_int = 1;
6255 env->exception_next_eip = EIP;
6256 qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
6257 cpu_loop_exit();
6258 break;
6259 }
6260 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
6261 }
6262}
6263
6264void helper_vmmcall(void)
6265{
6266 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
6267 raise_exception(EXCP06_ILLOP);
6268}
6269
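/* VMLOAD: load the extra guest state (FS, GS, TR, LDTR plus the
   syscall/sysenter MSRs) from the VMCB whose physical address is in rAX.
   aflag == 2 means a 64-bit address size; otherwise EAX is zero extended. */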
6270void helper_vmload(int aflag)
6271{
6272 target_ulong addr;
6273 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
6274
6275 if (aflag == 2)
6276 addr = EAX;
6277 else
6278 addr = (uint32_t)EAX;
6279
6280 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6281 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6282 env->segs[R_FS].base);
6283
6284 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
6285 env, R_FS);
6286 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
6287 env, R_GS);
6288 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
6289 &env->tr);
6290 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
6291 &env->ldt);
6292
6293#ifdef TARGET_X86_64
6294 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
6295 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
6296 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
6297 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
6298#endif
6299 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
6300 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
6301 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
6302 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
6303}
6304
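/* VMSAVE: the counterpart of VMLOAD above; store the same set of segment
   registers and MSRs back into the VMCB addressed by rAX. */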
6305void helper_vmsave(int aflag)
6306{
6307 target_ulong addr;
6308 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
6309
6310 if (aflag == 2)
6311 addr = EAX;
6312 else
6313 addr = (uint32_t)EAX;
6314
6315 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6316 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6317 env->segs[R_FS].base);
6318
6319 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
6320 &env->segs[R_FS]);
6321 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
6322 &env->segs[R_GS]);
6323 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
6324 &env->tr);
6325 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
6326 &env->ldt);
6327
6328#ifdef TARGET_X86_64
6329 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
6330 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
6331 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
6332 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
6333#endif
6334 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
6335 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
6336 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
6337 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
6338}
6339
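/* STGI/CLGI set and clear the global interrupt flag, tracked here as
   HF2_GIF_MASK in hflags2; interrupt delivery elsewhere in the emulator is
   expected to be gated on this bit. */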
6340void helper_stgi(void)
6341{
6342 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
6343 env->hflags2 |= HF2_GIF_MASK;
6344}
6345
6346void helper_clgi(void)
6347{
6348 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
6349 env->hflags2 &= ~HF2_GIF_MASK;
6350}
6351
6352void helper_skinit(void)
6353{
6354 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
6355 /* XXX: not implemented */
6356 raise_exception(EXCP06_ILLOP);
6357}
6358
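/* INVLPGA: invalidate the TLB entry for the virtual address in rAX.  The
   ASID (in ECX on real hardware) is not consulted here, so the page is
   always flushed. */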
6359void helper_invlpga(int aflag)
6360{
6361 target_ulong addr;
6362 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6363
6364 if (aflag == 2)
6365 addr = EAX;
6366 else
6367 addr = (uint32_t)EAX;
6368
6369 /* XXX: could use the ASID to see whether the
6370 flush is actually needed */
6371 tlb_flush_page(env, addr);
6372}
6373
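/* Check whether an SVM_EXIT_* event is intercepted by the guest's VMCB and,
   if so, trigger a #VMEXIT through helper_vmexit().  The CR/DR/exception
   intercept masks were cached in env by vmrun; MSR accesses additionally
   consult the MSR permission bitmap at control.msrpm_base_pa, which holds
   two bits (read/write) per MSR, e.g. MSR 0xc0000080 (EFER) lands at byte
   (8192 + 0x80) * 2 / 8 = 2080 of that bitmap. */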
6374void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6375{
6376 if (likely(!(env->hflags & HF_SVMI_MASK)))
6377 return;
6378#ifndef VBOX
6379 switch(type) {
6380 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
6381 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
6382 helper_vmexit(type, param);
6383 }
6384 break;
6385 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
6386 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
6387 helper_vmexit(type, param);
6388 }
6389 break;
6390 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
6391 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
6392 helper_vmexit(type, param);
6393 }
6394 break;
6395 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
6396 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
6397 helper_vmexit(type, param);
6398 }
6399 break;
6400 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
6401 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
6402 helper_vmexit(type, param);
6403 }
6404 break;
6405 case SVM_EXIT_MSR:
6406 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
6407 /* FIXME: this should be read in at vmrun (faster this way?) */
6408 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
6409 uint32_t t0, t1;
6410 switch((uint32_t)ECX) {
6411 case 0 ... 0x1fff:
6412 t0 = (ECX * 2) % 8;
6413 t1 = (ECX * 2) / 8;
6414 break;
6415 case 0xc0000000 ... 0xc0001fff:
6416 t0 = (8192 + ECX - 0xc0000000) * 2;
6417 t1 = (t0 / 8);
6418 t0 %= 8;
6419 break;
6420 case 0xc0010000 ... 0xc0011fff:
6421 t0 = (16384 + ECX - 0xc0010000) * 2;
6422 t1 = (t0 / 8);
6423 t0 %= 8;
6424 break;
6425 default:
6426 helper_vmexit(type, param);
6427 t0 = 0;
6428 t1 = 0;
6429 break;
6430 }
6431 if (ldub_phys(addr + t1) & ((1 << param) << t0))
6432 helper_vmexit(type, param);
6433 }
6434 break;
6435 default:
6436 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
6437 helper_vmexit(type, param);
6438 }
6439 break;
6440 }
6441#else /* VBOX */
6442 AssertMsgFailed(("We shouldn't be here, HWACCM supported differently!"));
6443#endif /* VBOX */
6444}
6445
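/* IN/OUT intercept check: look up the port in the I/O permission map at
   control.iopm_base_pa (one bit per port).  Bits 4-6 of param give the
   access size in bytes, so the mask covers every port byte touched by the
   access.  On a hit, exit_info_2 is set to the address of the next
   instruction and a #VMEXIT(SVM_EXIT_IOIO) is raised. */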
6446void helper_svm_check_io(uint32_t port, uint32_t param,
6447 uint32_t next_eip_addend)
6448{
6449 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
6450 /* FIXME: this should be read in at vmrun (faster this way?) */
6451 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
6452 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
6453 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
6454 /* next EIP */
6455 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
6456 env->eip + next_eip_addend);
6457 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
6458 }
6459 }
6460}
6461
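/* #VMEXIT: write the current guest state back into the VMCB, record the
   exit code and exit_info_1, drop all intercepts and reload the host state
   that vmrun stashed in vm_hsave, then longjmp back to the main loop with
   cpu_loop_exit() so execution resumes at the host's rIP. */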
6462/* Note: currently only 32 bits of exit_code are used */
6463void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6464{
6465 uint32_t int_ctl;
6466
6467 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6468 exit_code, exit_info_1,
6469 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6470 EIP);
6471
6472 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
6473 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6474 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6475 } else {
6476 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6477 }
6478
6479 /* Save the VM state in the vmcb */
6480 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6481 &env->segs[R_ES]);
6482 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6483 &env->segs[R_CS]);
6484 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6485 &env->segs[R_SS]);
6486 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6487 &env->segs[R_DS]);
6488
6489 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6490 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6491
6492 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6493 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6494
6495 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6496 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6497 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6498 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6499 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6500
6501 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6502 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6503 int_ctl |= env->v_tpr & V_TPR_MASK;
6504 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6505 int_ctl |= V_IRQ_MASK;
6506 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6507
6508 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6509 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6510 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6511 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6512 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6513 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6514 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6515
6516 /* Reload the host state from vm_hsave */
6517 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6518 env->hflags &= ~HF_SVMI_MASK;
6519 env->intercept = 0;
6520 env->intercept_exceptions = 0;
6521 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6522 env->tsc_offset = 0;
6523
6524 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6525 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6526
6527 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6528 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6529
6530 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6531 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6532 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6533 /* we need to set the efer after the crs so the hidden flags get
6534 set properly */
6535 cpu_load_efer(env,
6536 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6537 env->eflags = 0;
6538 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6539 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6540 CC_OP = CC_OP_EFLAGS;
6541
6542 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6543 env, R_ES);
6544 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6545 env, R_CS);
6546 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6547 env, R_SS);
6548 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6549 env, R_DS);
6550
6551 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6552 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6553 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6554
6555 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6556 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6557
6558 /* other setups */
6559 cpu_x86_set_cpl(env, 0);
6560 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6561 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6562
6563 env->hflags2 &= ~HF2_GIF_MASK;
6564 /* FIXME: Resets the current ASID register to zero (host ASID). */
6565
6566 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6567
6568 /* Clears the TSC_OFFSET inside the processor. */
6569
6570 /* If the host is in PAE mode, the processor reloads the host's PDPEs
6571 from the page table indicated by the host's CR3. If the PDPEs contain
6572 illegal state, the processor causes a shutdown. */
6573
6574 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6575 env->cr[0] |= CR0_PE_MASK;
6576 env->eflags &= ~VM_MASK;
6577
6578 /* Disables all breakpoints in the host DR7 register. */
6579
6580 /* Checks the reloaded host state for consistency. */
6581
6582 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
6583 host's code segment or non-canonical (in the case of long mode), a
6584 #GP fault is delivered inside the host. */
6585
6586 /* remove any pending exception */
6587 env->exception_index = -1;
6588 env->error_code = 0;
6589 env->old_exception = -1;
6590
6591 cpu_loop_exit();
6592}
6593
6594#endif
6595
6596/* MMX/SSE */
6597/* XXX: optimize by storing fpstt and fptags in the static cpu state */
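/* In the fptags[] array a value of 1 marks an x87 register as empty and 0 as
   valid.  helper_enter_mmx() marks all eight registers valid and resets the
   stack top (fpstt), as MMX instructions require, while helper_emms() marks
   them all empty again; both touch the tags four bytes at a time. */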
6598void helper_enter_mmx(void)
6599{
6600 env->fpstt = 0;
6601 *(uint32_t *)(env->fptags) = 0;
6602 *(uint32_t *)(env->fptags + 4) = 0;
6603}
6604
6605void helper_emms(void)
6606{
6607 /* set to empty state */
6608 *(uint32_t *)(env->fptags) = 0x01010101;
6609 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6610}
6611
6612/* XXX: suppress */
6613void helper_movq(void *d, void *s)
6614{
6615 *(uint64_t *)d = *(uint64_t *)s;
6616}
6617
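/* Generate the vector and per-size helpers by multiple inclusion: ops_sse.h
   is pulled in twice, once for the 64-bit MMX variants (SHIFT 0) and once
   for the 128-bit SSE variants (SHIFT 1), and helper_template.h is
   instantiated for byte, word, long and (on x86-64) quad operands to produce
   the size-suffixed helpers, including the flag computation routines used
   below. */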
6618#define SHIFT 0
6619#include "ops_sse.h"
6620
6621#define SHIFT 1
6622#include "ops_sse.h"
6623
6624#define SHIFT 0
6625#include "helper_template.h"
6626#undef SHIFT
6627
6628#define SHIFT 1
6629#include "helper_template.h"
6630#undef SHIFT
6631
6632#define SHIFT 2
6633#include "helper_template.h"
6634#undef SHIFT
6635
6636#ifdef TARGET_X86_64
6637
6638#define SHIFT 3
6639#include "helper_template.h"
6640#undef SHIFT
6641
6642#endif
6643
6644/* bit operations */
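/* helper_bsf() returns the index of the least significant set bit and
   helper_bsr() the index of the most significant one, e.g. bsf(0x12) == 1
   and bsr(0x12) == 4.  Neither loop terminates for t0 == 0, so the
   translated code is expected to deal with the zero-input case (ZF) before
   calling these helpers. */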
6645target_ulong helper_bsf(target_ulong t0)
6646{
6647 int count;
6648 target_ulong res;
6649
6650 res = t0;
6651 count = 0;
6652 while ((res & 1) == 0) {
6653 count++;
6654 res >>= 1;
6655 }
6656 return count;
6657}
6658
6659target_ulong helper_bsr(target_ulong t0)
6660{
6661 int count;
6662 target_ulong res, mask;
6663
6664 res = t0;
6665 count = TARGET_LONG_BITS - 1;
6666 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
6667 while ((res & mask) == 0) {
6668 count--;
6669 res <<= 1;
6670 }
6671 return count;
6672}
6673
6674
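/* Lazy condition code evaluation: CC_OP remembers which operation last set
   the flags while CC_SRC/CC_DST hold its operands/result.
   helper_cc_compute_all() rebuilds every arithmetic EFLAGS bit on demand and
   helper_cc_compute_c() just the carry flag, dispatching to the
   size-specific compute routines generated from helper_template.h above.
   With CC_OP_EFLAGS the flags are already explicit, so CC_SRC is returned
   as is. */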
6675static int compute_all_eflags(void)
6676{
6677 return CC_SRC;
6678}
6679
6680static int compute_c_eflags(void)
6681{
6682 return CC_SRC & CC_C;
6683}
6684
6685uint32_t helper_cc_compute_all(int op)
6686{
6687 switch (op) {
6688 default: /* should never happen */ return 0;
6689
6690 case CC_OP_EFLAGS: return compute_all_eflags();
6691
6692 case CC_OP_MULB: return compute_all_mulb();
6693 case CC_OP_MULW: return compute_all_mulw();
6694 case CC_OP_MULL: return compute_all_mull();
6695
6696 case CC_OP_ADDB: return compute_all_addb();
6697 case CC_OP_ADDW: return compute_all_addw();
6698 case CC_OP_ADDL: return compute_all_addl();
6699
6700 case CC_OP_ADCB: return compute_all_adcb();
6701 case CC_OP_ADCW: return compute_all_adcw();
6702 case CC_OP_ADCL: return compute_all_adcl();
6703
6704 case CC_OP_SUBB: return compute_all_subb();
6705 case CC_OP_SUBW: return compute_all_subw();
6706 case CC_OP_SUBL: return compute_all_subl();
6707
6708 case CC_OP_SBBB: return compute_all_sbbb();
6709 case CC_OP_SBBW: return compute_all_sbbw();
6710 case CC_OP_SBBL: return compute_all_sbbl();
6711
6712 case CC_OP_LOGICB: return compute_all_logicb();
6713 case CC_OP_LOGICW: return compute_all_logicw();
6714 case CC_OP_LOGICL: return compute_all_logicl();
6715
6716 case CC_OP_INCB: return compute_all_incb();
6717 case CC_OP_INCW: return compute_all_incw();
6718 case CC_OP_INCL: return compute_all_incl();
6719
6720 case CC_OP_DECB: return compute_all_decb();
6721 case CC_OP_DECW: return compute_all_decw();
6722 case CC_OP_DECL: return compute_all_decl();
6723
6724 case CC_OP_SHLB: return compute_all_shlb();
6725 case CC_OP_SHLW: return compute_all_shlw();
6726 case CC_OP_SHLL: return compute_all_shll();
6727
6728 case CC_OP_SARB: return compute_all_sarb();
6729 case CC_OP_SARW: return compute_all_sarw();
6730 case CC_OP_SARL: return compute_all_sarl();
6731
6732#ifdef TARGET_X86_64
6733 case CC_OP_MULQ: return compute_all_mulq();
6734
6735 case CC_OP_ADDQ: return compute_all_addq();
6736
6737 case CC_OP_ADCQ: return compute_all_adcq();
6738
6739 case CC_OP_SUBQ: return compute_all_subq();
6740
6741 case CC_OP_SBBQ: return compute_all_sbbq();
6742
6743 case CC_OP_LOGICQ: return compute_all_logicq();
6744
6745 case CC_OP_INCQ: return compute_all_incq();
6746
6747 case CC_OP_DECQ: return compute_all_decq();
6748
6749 case CC_OP_SHLQ: return compute_all_shlq();
6750
6751 case CC_OP_SARQ: return compute_all_sarq();
6752#endif
6753 }
6754}
6755
6756uint32_t helper_cc_compute_c(int op)
6757{
6758 switch (op) {
6759 default: /* should never happen */ return 0;
6760
6761 case CC_OP_EFLAGS: return compute_c_eflags();
6762
6763 case CC_OP_MULB: return compute_c_mull();
6764 case CC_OP_MULW: return compute_c_mull();
6765 case CC_OP_MULL: return compute_c_mull();
6766
6767 case CC_OP_ADDB: return compute_c_addb();
6768 case CC_OP_ADDW: return compute_c_addw();
6769 case CC_OP_ADDL: return compute_c_addl();
6770
6771 case CC_OP_ADCB: return compute_c_adcb();
6772 case CC_OP_ADCW: return compute_c_adcw();
6773 case CC_OP_ADCL: return compute_c_adcl();
6774
6775 case CC_OP_SUBB: return compute_c_subb();
6776 case CC_OP_SUBW: return compute_c_subw();
6777 case CC_OP_SUBL: return compute_c_subl();
6778
6779 case CC_OP_SBBB: return compute_c_sbbb();
6780 case CC_OP_SBBW: return compute_c_sbbw();
6781 case CC_OP_SBBL: return compute_c_sbbl();
6782
6783 case CC_OP_LOGICB: return compute_c_logicb();
6784 case CC_OP_LOGICW: return compute_c_logicw();
6785 case CC_OP_LOGICL: return compute_c_logicl();
6786
6787 case CC_OP_INCB: return compute_c_incl();
6788 case CC_OP_INCW: return compute_c_incl();
6789 case CC_OP_INCL: return compute_c_incl();
6790
6791 case CC_OP_DECB: return compute_c_incl();
6792 case CC_OP_DECW: return compute_c_incl();
6793 case CC_OP_DECL: return compute_c_incl();
6794
6795 case CC_OP_SHLB: return compute_c_shlb();
6796 case CC_OP_SHLW: return compute_c_shlw();
6797 case CC_OP_SHLL: return compute_c_shll();
6798
6799 case CC_OP_SARB: return compute_c_sarl();
6800 case CC_OP_SARW: return compute_c_sarl();
6801 case CC_OP_SARL: return compute_c_sarl();
6802
6803#ifdef TARGET_X86_64
6804 case CC_OP_MULQ: return compute_c_mull();
6805
6806 case CC_OP_ADDQ: return compute_c_addq();
6807
6808 case CC_OP_ADCQ: return compute_c_adcq();
6809
6810 case CC_OP_SUBQ: return compute_c_subq();
6811
6812 case CC_OP_SBBQ: return compute_c_sbbq();
6813
6814 case CC_OP_LOGICQ: return compute_c_logicq();
6815
6816 case CC_OP_INCQ: return compute_c_incl();
6817
6818 case CC_OP_DECQ: return compute_c_incl();
6819
6820 case CC_OP_SHLQ: return compute_c_shlq();
6821
6822 case CC_OP_SARQ: return compute_c_sarl();
6823#endif
6824 }
6825}