VirtualBox

source: vbox/trunk/src/recompiler/target-i386/op_helper.c@ 36125

Last change on this file since 36125 was 36125, checked in by vboxsync, 14 years ago

recompiler: Removing traces of attempts at making the recompiler compile with the microsoft compiler. (untested)

  • Property svn:eol-style set to native
File size: 193.1 KB
 
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29
30#define CPU_NO_GLOBAL_REGS
31#include "exec.h"
32#include "host-utils.h"
33
34#ifdef VBOX
35#include "qemu-common.h"
36#include <math.h>
37#include "tcg.h"
38#endif
39//#define DEBUG_PCALL
40
41#if 0
42#define raise_exception_err(a, b)\
43do {\
44 if (logfile)\
45 fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
46 (raise_exception_err)(a, b);\
47} while (0)
48#endif
49
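/* PF reflects the parity of the low byte of a result: CC_P when that byte
   contains an even number of set bits, 0 otherwise. This table precomputes
   the flag value for every possible byte. */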
50const uint8_t parity_table[256] = {
51 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
52 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
53 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
54 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
55 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
56 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
57 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
58 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
59 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
60 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
61 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
62 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
63 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
66 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
67 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
70 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
73 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
74 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
75 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
76 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
77 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
78 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
79 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
81 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
82 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
83};
84
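/* RCL/RCR rotate through CF, so the effective count is the rotate count
   modulo 17 for 16-bit operands and modulo 9 for 8-bit operands. These two
   tables precompute that reduction for counts 0..31. */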
85/* modulo 17 table */
86const uint8_t rclw_table[32] = {
87 0, 1, 2, 3, 4, 5, 6, 7,
88 8, 9,10,11,12,13,14,15,
89 16, 0, 1, 2, 3, 4, 5, 6,
90 7, 8, 9,10,11,12,13,14,
91};
92
93/* modulo 9 table */
94const uint8_t rclb_table[32] = {
95 0, 1, 2, 3, 4, 5, 6, 7,
96 8, 0, 1, 2, 3, 4, 5, 6,
97 7, 8, 0, 1, 2, 3, 4, 5,
98 6, 7, 8, 0, 1, 2, 3, 4,
99};
100
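/* The x87 constants loaded by FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E and
   FLDL2T. */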
101const CPU86_LDouble f15rk[7] =
102{
103 0.00000000000000000000L,
104 1.00000000000000000000L,
105 3.14159265358979323851L, /*pi*/
106 0.30102999566398119523L, /*lg2*/
107 0.69314718055994530943L, /*ln2*/
108 1.44269504088896340739L, /*l2e*/
109 3.32192809488736234781L, /*l2t*/
110};
111
112/* broken thread support */
113
114spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
115
116void helper_lock(void)
117{
118 spin_lock(&global_cpu_lock);
119}
120
121void helper_unlock(void)
122{
123 spin_unlock(&global_cpu_lock);
124}
125
126void helper_write_eflags(target_ulong t0, uint32_t update_mask)
127{
128 load_eflags(t0, update_mask);
129}
130
131target_ulong helper_read_eflags(void)
132{
133 uint32_t eflags;
134 eflags = cc_table[CC_OP].compute_all();
135 eflags |= (DF & DF_MASK);
136 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
137 return eflags;
138}
139
140#ifdef VBOX
141void helper_write_eflags_vme(target_ulong t0)
142{
143 unsigned int new_eflags = t0;
144
145 assert(env->eflags & (1<<VM_SHIFT));
146
147 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
148 /* if TF will be set -> #GP */
149 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
150 || (new_eflags & TF_MASK)) {
151 raise_exception(EXCP0D_GPF);
152 } else {
153 load_eflags(new_eflags,
154 (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
155
156 if (new_eflags & IF_MASK) {
157 env->eflags |= VIF_MASK;
158 } else {
159 env->eflags &= ~VIF_MASK;
160 }
161 }
162}
163
164target_ulong helper_read_eflags_vme(void)
165{
166 uint32_t eflags;
167 eflags = cc_table[CC_OP].compute_all();
168 eflags |= (DF & DF_MASK);
169 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
170 if (env->eflags & VIF_MASK)
171 eflags |= IF_MASK;
172 else
173 eflags &= ~IF_MASK;
174
175 /* According to AMD manual, should be read with IOPL == 3 */
176 eflags |= (3 << IOPL_SHIFT);
177
178 /* We only use helper_read_eflags_vme() in 16-bits mode */
179 return eflags & 0xffff;
180}
181
182void helper_dump_state()
183{
184 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
185 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
186 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
187 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
188 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
189 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
190 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
191}
192#endif
193
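/* Fetch the two 32-bit halves (e1 = low dword, e2 = high dword) of the
   descriptor referenced by 'selector' from the GDT or LDT (bit 2 of the
   selector picks the table), after checking the descriptor table limit. */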
194/* return non-zero on error */
195static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
196 int selector)
197{
198 SegmentCache *dt;
199 int index;
200 target_ulong ptr;
201
202#ifdef VBOX
203 /* Trying to load a selector with CPL=1? */
204 if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
205 {
206 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
207 selector = selector & 0xfffc;
208 }
209#endif
210
211 if (selector & 0x4)
212 dt = &env->ldt;
213 else
214 dt = &env->gdt;
215 index = selector & ~7;
216 if ((index + 7) > dt->limit)
217 return -1;
218 ptr = dt->base + index;
219 *e1_ptr = ldl_kernel(ptr);
220 *e2_ptr = ldl_kernel(ptr + 4);
221 return 0;
222}
223
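/* Raw descriptor decoding: the limit is bits 0-15 of e1 combined with bits
   16-19 of e2 (scaled to 4K granularity when the G bit is set), and the base
   is assembled from bits 16-31 of e1 plus bits 0-7 and 24-31 of e2. */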
224static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
225{
226 unsigned int limit;
227 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
228 if (e2 & DESC_G_MASK)
229 limit = (limit << 12) | 0xfff;
230 return limit;
231}
232
233static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
234{
235 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
236}
237
238static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
239{
240 sc->base = get_seg_base(e1, e2);
241 sc->limit = get_seg_limit(e1, e2);
242 sc->flags = e2;
243}
244
245/* init the segment cache in vm86 mode. */
246static inline void load_seg_vm(int seg, int selector)
247{
248 selector &= 0xffff;
249#ifdef VBOX
250 /* flags must be 0xf3; expand-up read/write accessed data segment with DPL=3. (VT-x) */
251 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK;
252 flags |= (3 << DESC_DPL_SHIFT);
253
254 cpu_x86_load_seg_cache(env, seg, selector,
255 (selector << 4), 0xffff, flags);
256#else
257 cpu_x86_load_seg_cache(env, seg, selector,
258 (selector << 4), 0xffff, 0);
259#endif
260}
261
262static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
263 uint32_t *esp_ptr, int dpl)
264{
265#ifndef VBOX
266 int type, index, shift;
267#else
268 unsigned int type, index, shift;
269#endif
270
271#if 0
272 {
273 int i;
274 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
275 for(i=0;i<env->tr.limit;i++) {
276 printf("%02x ", env->tr.base[i]);
277 if ((i & 7) == 7) printf("\n");
278 }
279 printf("\n");
280 }
281#endif
282
283 if (!(env->tr.flags & DESC_P_MASK))
284 cpu_abort(env, "invalid tss");
285 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
286 if ((type & 7) != 1)
287 cpu_abort(env, "invalid tss type");
288 shift = type >> 3;
289 index = (dpl * 4 + 2) << shift;
290 if (index + (4 << shift) - 1 > env->tr.limit)
291 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
292 if (shift == 0) {
293 *esp_ptr = lduw_kernel(env->tr.base + index);
294 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
295 } else {
296 *esp_ptr = ldl_kernel(env->tr.base + index);
297 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
298 }
299}
300
301/* XXX: merge with load_seg() */
302static void tss_load_seg(int seg_reg, int selector)
303{
304 uint32_t e1, e2;
305 int rpl, dpl, cpl;
306
307#ifdef VBOX
308 e1 = e2 = 0;
309 cpl = env->hflags & HF_CPL_MASK;
310 /* Trying to load a selector with CPL=1? */
311 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
312 {
313 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
314 selector = selector & 0xfffc;
315 }
316#endif
317
318 if ((selector & 0xfffc) != 0) {
319 if (load_segment(&e1, &e2, selector) != 0)
320 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
321 if (!(e2 & DESC_S_MASK))
322 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
323 rpl = selector & 3;
324 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
325 cpl = env->hflags & HF_CPL_MASK;
326 if (seg_reg == R_CS) {
327 if (!(e2 & DESC_CS_MASK))
328 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
329 /* XXX: is it correct ? */
330 if (dpl != rpl)
331 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
332 if ((e2 & DESC_C_MASK) && dpl > rpl)
333 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
334 } else if (seg_reg == R_SS) {
335 /* SS must be writable data */
336 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
337 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
338 if (dpl != cpl || dpl != rpl)
339 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
340 } else {
341 /* not readable code */
342 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
343 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
344 /* if data or non-conforming code, check the rights */
345 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
346 if (dpl < cpl || dpl < rpl)
347 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
348 }
349 }
350 if (!(e2 & DESC_P_MASK))
351 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
352 cpu_x86_load_seg_cache(env, seg_reg, selector,
353 get_seg_base(e1, e2),
354 get_seg_limit(e1, e2),
355 e2);
356 } else {
357 if (seg_reg == R_SS || seg_reg == R_CS)
358 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
359#ifdef VBOX
360#if 0
361 /** @todo: now we ignore loading 0 selectors, need to check what is correct once */
362 cpu_x86_load_seg_cache(env, seg_reg, selector,
363 0, 0, 0);
364#endif
365#endif
366 }
367}
368
369#define SWITCH_TSS_JMP 0
370#define SWITCH_TSS_IRET 1
371#define SWITCH_TSS_CALL 2
372
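/* Hardware task switch: validate the new TSS, save the outgoing register
   state into the current TSS, update the busy bits in the GDT, then load
   CR3, EIP, EFLAGS, the general registers, LDT and segment selectors from
   the new TSS; faults raised after the TR update are delivered in the new
   task's context. */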
373/* XXX: restore CPU state in registers (PowerPC case) */
374static void switch_tss(int tss_selector,
375 uint32_t e1, uint32_t e2, int source,
376 uint32_t next_eip)
377{
378 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
379 target_ulong tss_base;
380 uint32_t new_regs[8], new_segs[6];
381 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
382 uint32_t old_eflags, eflags_mask;
383 SegmentCache *dt;
384#ifndef VBOX
385 int index;
386#else
387 unsigned int index;
388#endif
389 target_ulong ptr;
390
391 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
392#ifdef DEBUG_PCALL
393 if (loglevel & CPU_LOG_PCALL)
394 fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
395#endif
396
397#if defined(VBOX) && defined(DEBUG)
398 printf("switch_tss %x %x %x %d %08x\n", tss_selector, e1, e2, source, next_eip);
399#endif
400
401 /* if task gate, we read the TSS segment and we load it */
402 if (type == 5) {
403 if (!(e2 & DESC_P_MASK))
404 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
405 tss_selector = e1 >> 16;
406 if (tss_selector & 4)
407 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
408 if (load_segment(&e1, &e2, tss_selector) != 0)
409 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
410 if (e2 & DESC_S_MASK)
411 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
412 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
413 if ((type & 7) != 1)
414 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
415 }
416
417 if (!(e2 & DESC_P_MASK))
418 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
419
420 if (type & 8)
421 tss_limit_max = 103;
422 else
423 tss_limit_max = 43;
424 tss_limit = get_seg_limit(e1, e2);
425 tss_base = get_seg_base(e1, e2);
426 if ((tss_selector & 4) != 0 ||
427 tss_limit < tss_limit_max)
428 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
429 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
430 if (old_type & 8)
431 old_tss_limit_max = 103;
432 else
433 old_tss_limit_max = 43;
434
435 /* read all the registers from the new TSS */
436 if (type & 8) {
437 /* 32 bit */
438 new_cr3 = ldl_kernel(tss_base + 0x1c);
439 new_eip = ldl_kernel(tss_base + 0x20);
440 new_eflags = ldl_kernel(tss_base + 0x24);
441 for(i = 0; i < 8; i++)
442 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
443 for(i = 0; i < 6; i++)
444 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
445 new_ldt = lduw_kernel(tss_base + 0x60);
446 new_trap = ldl_kernel(tss_base + 0x64);
447 } else {
448 /* 16 bit */
449 new_cr3 = 0;
450 new_eip = lduw_kernel(tss_base + 0x0e);
451 new_eflags = lduw_kernel(tss_base + 0x10);
452 for(i = 0; i < 8; i++)
453 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
454 for(i = 0; i < 4; i++)
455 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
456 new_ldt = lduw_kernel(tss_base + 0x2a);
457 new_segs[R_FS] = 0;
458 new_segs[R_GS] = 0;
459 new_trap = 0;
460 }
461
462 /* NOTE: we must avoid memory exceptions during the task switch,
463 so we make dummy accesses before */
464 /* XXX: it can still fail in some cases, so a bigger hack is
465 necessary to validate the TLB after having done the accesses */
466
467 v1 = ldub_kernel(env->tr.base);
468 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
469 stb_kernel(env->tr.base, v1);
470 stb_kernel(env->tr.base + old_tss_limit_max, v2);
471
472 /* clear busy bit (it is restartable) */
473 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
474 target_ulong ptr;
475 uint32_t e2;
476 ptr = env->gdt.base + (env->tr.selector & ~7);
477 e2 = ldl_kernel(ptr + 4);
478 e2 &= ~DESC_TSS_BUSY_MASK;
479 stl_kernel(ptr + 4, e2);
480 }
481 old_eflags = compute_eflags();
482 if (source == SWITCH_TSS_IRET)
483 old_eflags &= ~NT_MASK;
484
485 /* save the current state in the old TSS */
486 if (type & 8) {
487 /* 32 bit */
488 stl_kernel(env->tr.base + 0x20, next_eip);
489 stl_kernel(env->tr.base + 0x24, old_eflags);
490 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
491 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
492 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
493 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
494 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
495 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
496 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
497 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
498 for(i = 0; i < 6; i++)
499 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
500#ifdef VBOX
501 /* Must store the ldt as it gets reloaded and might have been changed. */
502 stw_kernel(env->tr.base + 0x60, env->ldt.selector);
503#endif
504#if defined(VBOX) && defined(DEBUG)
505 printf("TSS 32 bits switch\n");
506 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
507#endif
508 } else {
509 /* 16 bit */
510 stw_kernel(env->tr.base + 0x0e, next_eip);
511 stw_kernel(env->tr.base + 0x10, old_eflags);
512 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
513 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
514 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
515 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
516 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
517 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
518 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
519 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
520 for(i = 0; i < 4; i++)
521 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
522#ifdef VBOX
523 /* Must store the ldt as it gets reloaded and might have been changed. */
524 stw_kernel(env->tr.base + 0x2a, env->ldt.selector);
525#endif
526 }
527
528 /* now if an exception occurs, it will occur in the next task
529 context */
530
531 if (source == SWITCH_TSS_CALL) {
532 stw_kernel(tss_base, env->tr.selector);
533 new_eflags |= NT_MASK;
534 }
535
536 /* set busy bit */
537 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
538 target_ulong ptr;
539 uint32_t e2;
540 ptr = env->gdt.base + (tss_selector & ~7);
541 e2 = ldl_kernel(ptr + 4);
542 e2 |= DESC_TSS_BUSY_MASK;
543 stl_kernel(ptr + 4, e2);
544 }
545
546 /* set the new CPU state */
547 /* from this point, any exception which occurs can give problems */
548 env->cr[0] |= CR0_TS_MASK;
549 env->hflags |= HF_TS_MASK;
550 env->tr.selector = tss_selector;
551 env->tr.base = tss_base;
552 env->tr.limit = tss_limit;
553 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
554
555 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
556 cpu_x86_update_cr3(env, new_cr3);
557 }
558
559 /* load all registers without an exception, then reload them with
560 possible exception */
561 env->eip = new_eip;
562 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
563 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
564 if (!(type & 8))
565 eflags_mask &= 0xffff;
566 load_eflags(new_eflags, eflags_mask);
567 /* XXX: what to do in 16 bit case ? */
568 EAX = new_regs[0];
569 ECX = new_regs[1];
570 EDX = new_regs[2];
571 EBX = new_regs[3];
572 ESP = new_regs[4];
573 EBP = new_regs[5];
574 ESI = new_regs[6];
575 EDI = new_regs[7];
576 if (new_eflags & VM_MASK) {
577 for(i = 0; i < 6; i++)
578 load_seg_vm(i, new_segs[i]);
579 /* in vm86, CPL is always 3 */
580 cpu_x86_set_cpl(env, 3);
581 } else {
582 /* CPL is set to the RPL of CS */
583 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
584 /* first just selectors as the rest may trigger exceptions */
585 for(i = 0; i < 6; i++)
586 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
587 }
588
589 env->ldt.selector = new_ldt & ~4;
590 env->ldt.base = 0;
591 env->ldt.limit = 0;
592 env->ldt.flags = 0;
593
594 /* load the LDT */
595 if (new_ldt & 4)
596 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
597
598 if ((new_ldt & 0xfffc) != 0) {
599 dt = &env->gdt;
600 index = new_ldt & ~7;
601 if ((index + 7) > dt->limit)
602 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
603 ptr = dt->base + index;
604 e1 = ldl_kernel(ptr);
605 e2 = ldl_kernel(ptr + 4);
606 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
607 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
608 if (!(e2 & DESC_P_MASK))
609 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
610 load_seg_cache_raw_dt(&env->ldt, e1, e2);
611 }
612
613 /* load the segments */
614 if (!(new_eflags & VM_MASK)) {
615 tss_load_seg(R_CS, new_segs[R_CS]);
616 tss_load_seg(R_SS, new_segs[R_SS]);
617 tss_load_seg(R_ES, new_segs[R_ES]);
618 tss_load_seg(R_DS, new_segs[R_DS]);
619 tss_load_seg(R_FS, new_segs[R_FS]);
620 tss_load_seg(R_GS, new_segs[R_GS]);
621 }
622
623 /* check that EIP is in the CS segment limits */
624 if (new_eip > env->segs[R_CS].limit) {
625 /* XXX: different exception if CALL ? */
626 raise_exception_err(EXCP0D_GPF, 0);
627 }
628}
629
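/* The 32-bit TSS ends with an I/O permission bitmap; the 16-bit word at
   offset 0x66 gives the bitmap's offset within the TSS. An access of 'size'
   bytes at port 'addr' is allowed only if every corresponding bitmap bit is
   clear. */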
630/* check if Port I/O is allowed in TSS */
631static inline void check_io(int addr, int size)
632{
633#ifndef VBOX
634 int io_offset, val, mask;
635#else
636 int val, mask;
637 unsigned int io_offset;
638#endif /* VBOX */
639 /* TSS must be a valid 32 bit one */
640 if (!(env->tr.flags & DESC_P_MASK) ||
641 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
642 env->tr.limit < 103)
643 goto fail;
644 io_offset = lduw_kernel(env->tr.base + 0x66);
645 io_offset += (addr >> 3);
646 /* Note: the check needs two bytes */
647 if ((io_offset + 1) > env->tr.limit)
648 goto fail;
649 val = lduw_kernel(env->tr.base + io_offset);
650 val >>= (addr & 7);
651 mask = (1 << size) - 1;
652 /* all bits must be zero to allow the I/O */
653 if ((val & mask) != 0) {
654 fail:
655 raise_exception_err(EXCP0D_GPF, 0);
656 }
657}
658
659#ifdef VBOX
660/* Keep in sync with gen_check_external_event() */
661void helper_check_external_event()
662{
663 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_EXIT
664 | CPU_INTERRUPT_EXTERNAL_TIMER
665 | CPU_INTERRUPT_EXTERNAL_DMA))
666 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
667 && (env->eflags & IF_MASK)
668 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
669 {
670 helper_external_event();
671 }
672
673}
674
675void helper_sync_seg(uint32_t reg)
676{
677 if (env->segs[reg].newselector)
678 sync_seg(env, reg, env->segs[reg].newselector);
679}
680#endif
681
682void helper_check_iob(uint32_t t0)
683{
684 check_io(t0, 1);
685}
686
687void helper_check_iow(uint32_t t0)
688{
689 check_io(t0, 2);
690}
691
692void helper_check_iol(uint32_t t0)
693{
694 check_io(t0, 4);
695}
696
697void helper_outb(uint32_t port, uint32_t data)
698{
699 cpu_outb(env, port, data & 0xff);
700}
701
702target_ulong helper_inb(uint32_t port)
703{
704 return cpu_inb(env, port);
705}
706
707void helper_outw(uint32_t port, uint32_t data)
708{
709 cpu_outw(env, port, data & 0xffff);
710}
711
712target_ulong helper_inw(uint32_t port)
713{
714 return cpu_inw(env, port);
715}
716
717void helper_outl(uint32_t port, uint32_t data)
718{
719 cpu_outl(env, port, data);
720}
721
722target_ulong helper_inl(uint32_t port)
723{
724 return cpu_inl(env, port);
725}
726
727static inline unsigned int get_sp_mask(unsigned int e2)
728{
729 if (e2 & DESC_B_MASK)
730 return 0xffffffff;
731 else
732 return 0xffff;
733}
734
735#ifdef TARGET_X86_64
736#define SET_ESP(val, sp_mask)\
737do {\
738 if ((sp_mask) == 0xffff)\
739 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
740 else if ((sp_mask) == 0xffffffffLL)\
741 ESP = (uint32_t)(val);\
742 else\
743 ESP = (val);\
744} while (0)
745#else
746#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
747#endif
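/* SET_ESP replaces only the bits of ESP selected by sp_mask, so a 16-bit
   stack segment leaves the upper half of ESP untouched. */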
748
749/* in 64-bit machines, this can overflow. So this segment addition macro
750 * can be used to trim the value to 32-bit whenever needed */
751#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
752
753/* XXX: add an is_user flag to have proper security support */
754#define PUSHW(ssp, sp, sp_mask, val)\
755{\
756 sp -= 2;\
757 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
758}
759
760#define PUSHL(ssp, sp, sp_mask, val)\
761{\
762 sp -= 4;\
763 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
764}
765
766#define POPW(ssp, sp, sp_mask, val)\
767{\
768 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
769 sp += 2;\
770}
771
772#define POPL(ssp, sp, sp_mask, val)\
773{\
774 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
775 sp += 4;\
776}
777
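/* The PUSHW/PUSHL/POPW/POPL macros operate on a local stack-pointer
   variable; the callers below only write it back (via SET_ESP or directly)
   once all the pushes have completed, so a fault part-way through building a
   frame leaves ESP unchanged. */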
778/* protected mode interrupt */
779static void do_interrupt_protected(int intno, int is_int, int error_code,
780 unsigned int next_eip, int is_hw)
781{
782 SegmentCache *dt;
783 target_ulong ptr, ssp;
784 int type, dpl, selector, ss_dpl, cpl;
785 int has_error_code, new_stack, shift;
786 uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
787 uint32_t old_eip, sp_mask;
788
789#ifdef VBOX
790 ss = ss_e1 = ss_e2 = 0;
791 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
792 cpu_loop_exit();
793#endif
794
795 has_error_code = 0;
796 if (!is_int && !is_hw) {
797 switch(intno) {
798 case 8:
799 case 10:
800 case 11:
801 case 12:
802 case 13:
803 case 14:
804 case 17:
805 has_error_code = 1;
806 break;
807 }
808 }
809 if (is_int)
810 old_eip = next_eip;
811 else
812 old_eip = env->eip;
813
814 dt = &env->idt;
815#ifndef VBOX
816 if (intno * 8 + 7 > dt->limit)
817#else
818 if ((unsigned)intno * 8 + 7 > dt->limit)
819#endif
820 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
821 ptr = dt->base + intno * 8;
822 e1 = ldl_kernel(ptr);
823 e2 = ldl_kernel(ptr + 4);
824 /* check gate type */
825 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
826 switch(type) {
827 case 5: /* task gate */
828 /* must do that check here to return the correct error code */
829 if (!(e2 & DESC_P_MASK))
830 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
831 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
832 if (has_error_code) {
833 int type;
834 uint32_t mask;
835 /* push the error code */
836 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
837 shift = type >> 3;
838 if (env->segs[R_SS].flags & DESC_B_MASK)
839 mask = 0xffffffff;
840 else
841 mask = 0xffff;
842 esp = (ESP - (2 << shift)) & mask;
843 ssp = env->segs[R_SS].base + esp;
844 if (shift)
845 stl_kernel(ssp, error_code);
846 else
847 stw_kernel(ssp, error_code);
848 SET_ESP(esp, mask);
849 }
850 return;
851 case 6: /* 286 interrupt gate */
852 case 7: /* 286 trap gate */
853 case 14: /* 386 interrupt gate */
854 case 15: /* 386 trap gate */
855 break;
856 default:
857 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
858 break;
859 }
860 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
861 cpl = env->hflags & HF_CPL_MASK;
862 /* check privilege if software int */
863 if (is_int && dpl < cpl)
864 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
865 /* check valid bit */
866 if (!(e2 & DESC_P_MASK))
867 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
868 selector = e1 >> 16;
869 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
870 if ((selector & 0xfffc) == 0)
871 raise_exception_err(EXCP0D_GPF, 0);
872
873 if (load_segment(&e1, &e2, selector) != 0)
874 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
875 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
876 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
877 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
878 if (dpl > cpl)
879 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
880 if (!(e2 & DESC_P_MASK))
881 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
882 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
883 /* to inner privilege */
884 get_ss_esp_from_tss(&ss, &esp, dpl);
885 if ((ss & 0xfffc) == 0)
886 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
887 if ((ss & 3) != dpl)
888 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
889 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
890 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
891 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
892 if (ss_dpl != dpl)
893 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
894 if (!(ss_e2 & DESC_S_MASK) ||
895 (ss_e2 & DESC_CS_MASK) ||
896 !(ss_e2 & DESC_W_MASK))
897 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
898 if (!(ss_e2 & DESC_P_MASK))
899#ifdef VBOX /* See page 3-477 of 253666.pdf */
900 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
901#else
902 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
903#endif
904 new_stack = 1;
905 sp_mask = get_sp_mask(ss_e2);
906 ssp = get_seg_base(ss_e1, ss_e2);
907#if defined(VBOX) && defined(DEBUG)
908 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
909#endif
910 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
911 /* to same privilege */
912 if (env->eflags & VM_MASK)
913 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
914 new_stack = 0;
915 sp_mask = get_sp_mask(env->segs[R_SS].flags);
916 ssp = env->segs[R_SS].base;
917 esp = ESP;
918 dpl = cpl;
919 } else {
920 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
921 new_stack = 0; /* avoid warning */
922 sp_mask = 0; /* avoid warning */
923 ssp = 0; /* avoid warning */
924 esp = 0; /* avoid warning */
925 }
926
927 shift = type >> 3;
928
929#if 0
930 /* XXX: check that enough room is available */
931 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
932 if (env->eflags & VM_MASK)
933 push_size += 8;
934 push_size <<= shift;
935#endif
936 if (shift == 1) {
937 if (new_stack) {
938 if (env->eflags & VM_MASK) {
939 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
940 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
941 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
942 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
943 }
944 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
945 PUSHL(ssp, esp, sp_mask, ESP);
946 }
947 PUSHL(ssp, esp, sp_mask, compute_eflags());
948 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
949 PUSHL(ssp, esp, sp_mask, old_eip);
950 if (has_error_code) {
951 PUSHL(ssp, esp, sp_mask, error_code);
952 }
953 } else {
954 if (new_stack) {
955 if (env->eflags & VM_MASK) {
956 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
957 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
958 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
959 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
960 }
961 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
962 PUSHW(ssp, esp, sp_mask, ESP);
963 }
964 PUSHW(ssp, esp, sp_mask, compute_eflags());
965 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
966 PUSHW(ssp, esp, sp_mask, old_eip);
967 if (has_error_code) {
968 PUSHW(ssp, esp, sp_mask, error_code);
969 }
970 }
971
972 if (new_stack) {
973 if (env->eflags & VM_MASK) {
974 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
975 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
976 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
977 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
978 }
979 ss = (ss & ~3) | dpl;
980 cpu_x86_load_seg_cache(env, R_SS, ss,
981 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
982 }
983 SET_ESP(esp, sp_mask);
984
985 selector = (selector & ~3) | dpl;
986 cpu_x86_load_seg_cache(env, R_CS, selector,
987 get_seg_base(e1, e2),
988 get_seg_limit(e1, e2),
989 e2);
990 cpu_x86_set_cpl(env, dpl);
991 env->eip = offset;
992
993 /* interrupt gates clear the IF mask */
994 if ((type & 1) == 0) {
995 env->eflags &= ~IF_MASK;
996 }
997#ifndef VBOX
998 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
999#else
1000 /*
1001 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1002 * gets confused by seemingly changed EFLAGS. See #3491 and
1003 * public bug #2341.
1004 */
1005 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1006#endif
1007}
1008
1009#ifdef VBOX
1010
1011/* check if VME interrupt redirection is enabled in TSS */
1012DECLINLINE(bool) is_vme_irq_redirected(int intno)
1013{
1014 unsigned int io_offset, intredir_offset;
1015 unsigned char val, mask;
1016
1017 /* TSS must be a valid 32 bit one */
1018 if (!(env->tr.flags & DESC_P_MASK) ||
1019 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
1020 env->tr.limit < 103)
1021 goto fail;
1022 io_offset = lduw_kernel(env->tr.base + 0x66);
1023 /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */
1024 if (io_offset < 0x68 + 0x20)
1025 io_offset = 0x68 + 0x20;
1026 /* the virtual interrupt redirection bitmap is located below the io bitmap */
1027 intredir_offset = io_offset - 0x20;
1028
1029 intredir_offset += (intno >> 3);
1030 if ((intredir_offset) > env->tr.limit)
1031 goto fail;
1032
1033 val = ldub_kernel(env->tr.base + intredir_offset);
1034 mask = 1 << (unsigned char)(intno & 7);
1035
1036 /* bit set means no redirection. */
1037 if ((val & mask) != 0) {
1038 return false;
1039 }
1040 return true;
1041
1042fail:
1043 raise_exception_err(EXCP0D_GPF, 0);
1044 return true;
1045}
1046
1047/* V86 mode software interrupt with CR4.VME=1 */
1048static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
1049{
1050 target_ulong ptr, ssp;
1051 int selector;
1052 uint32_t offset, esp;
1053 uint32_t old_cs, old_eflags;
1054 uint32_t iopl;
1055
1056 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
1057
1058 if (!is_vme_irq_redirected(intno))
1059 {
1060 if (iopl == 3)
1061 {
1062 do_interrupt_protected(intno, 1, error_code, next_eip, 0);
1063 return;
1064 }
1065 else
1066 raise_exception_err(EXCP0D_GPF, 0);
1067 }
1068
1069 /* virtual mode idt is at linear address 0 */
1070 ptr = 0 + intno * 4;
1071 offset = lduw_kernel(ptr);
1072 selector = lduw_kernel(ptr + 2);
1073 esp = ESP;
1074 ssp = env->segs[R_SS].base;
1075 old_cs = env->segs[R_CS].selector;
1076
1077 old_eflags = compute_eflags();
1078 if (iopl < 3)
1079 {
1080 /* copy VIF into IF and set IOPL to 3 */
1081 if (env->eflags & VIF_MASK)
1082 old_eflags |= IF_MASK;
1083 else
1084 old_eflags &= ~IF_MASK;
1085
1086 old_eflags |= (3 << IOPL_SHIFT);
1087 }
1088
1089 /* XXX: use SS segment size ? */
1090 PUSHW(ssp, esp, 0xffff, old_eflags);
1091 PUSHW(ssp, esp, 0xffff, old_cs);
1092 PUSHW(ssp, esp, 0xffff, next_eip);
1093
1094 /* update processor state */
1095 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1096 env->eip = offset;
1097 env->segs[R_CS].selector = selector;
1098 env->segs[R_CS].base = (selector << 4);
1099 env->eflags &= ~(TF_MASK | RF_MASK);
1100
1101 if (iopl < 3)
1102 env->eflags &= ~VIF_MASK;
1103 else
1104 env->eflags &= ~IF_MASK;
1105}
1106
1107#endif /* VBOX */
1108
1109#ifdef TARGET_X86_64
1110
1111#define PUSHQ(sp, val)\
1112{\
1113 sp -= 8;\
1114 stq_kernel(sp, (val));\
1115}
1116
1117#define POPQ(sp, val)\
1118{\
1119 val = ldq_kernel(sp);\
1120 sp += 8;\
1121}
1122
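/* In the 64-bit TSS, RSP0-RSP2 live at offsets 4, 12 and 20 and IST1-IST7
   start at offset 36, so 'level' n maps to RSPn for n <= 2 and to IST(n-3)
   above that. */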
1123static inline target_ulong get_rsp_from_tss(int level)
1124{
1125 int index;
1126
1127#if 0
1128 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1129 env->tr.base, env->tr.limit);
1130#endif
1131
1132 if (!(env->tr.flags & DESC_P_MASK))
1133 cpu_abort(env, "invalid tss");
1134 index = 8 * level + 4;
1135 if ((index + 7) > env->tr.limit)
1136 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1137 return ldq_kernel(env->tr.base + index);
1138}
1139
1140/* 64 bit interrupt */
1141static void do_interrupt64(int intno, int is_int, int error_code,
1142 target_ulong next_eip, int is_hw)
1143{
1144 SegmentCache *dt;
1145 target_ulong ptr;
1146 int type, dpl, selector, cpl, ist;
1147 int has_error_code, new_stack;
1148 uint32_t e1, e2, e3, ss;
1149 target_ulong old_eip, esp, offset;
1150
1151#ifdef VBOX
1152 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
1153 cpu_loop_exit();
1154#endif
1155
1156 has_error_code = 0;
1157 if (!is_int && !is_hw) {
1158 switch(intno) {
1159 case 8:
1160 case 10:
1161 case 11:
1162 case 12:
1163 case 13:
1164 case 14:
1165 case 17:
1166 has_error_code = 1;
1167 break;
1168 }
1169 }
1170 if (is_int)
1171 old_eip = next_eip;
1172 else
1173 old_eip = env->eip;
1174
1175 dt = &env->idt;
1176 if (intno * 16 + 15 > dt->limit)
1177 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1178 ptr = dt->base + intno * 16;
1179 e1 = ldl_kernel(ptr);
1180 e2 = ldl_kernel(ptr + 4);
1181 e3 = ldl_kernel(ptr + 8);
1182 /* check gate type */
1183 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1184 switch(type) {
1185 case 14: /* 386 interrupt gate */
1186 case 15: /* 386 trap gate */
1187 break;
1188 default:
1189 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1190 break;
1191 }
1192 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1193 cpl = env->hflags & HF_CPL_MASK;
1194 /* check privilege if software int */
1195 if (is_int && dpl < cpl)
1196 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1197 /* check valid bit */
1198 if (!(e2 & DESC_P_MASK))
1199 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1200 selector = e1 >> 16;
1201 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1202 ist = e2 & 7;
1203 if ((selector & 0xfffc) == 0)
1204 raise_exception_err(EXCP0D_GPF, 0);
1205
1206 if (load_segment(&e1, &e2, selector) != 0)
1207 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1208 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1209 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1210 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1211 if (dpl > cpl)
1212 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1213 if (!(e2 & DESC_P_MASK))
1214 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1215 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1216 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1217 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1218 /* to inner privilege */
1219 if (ist != 0)
1220 esp = get_rsp_from_tss(ist + 3);
1221 else
1222 esp = get_rsp_from_tss(dpl);
1223 esp &= ~0xfLL; /* align stack */
1224 ss = 0;
1225 new_stack = 1;
1226 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1227 /* to same privilege */
1228 if (env->eflags & VM_MASK)
1229 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1230 new_stack = 0;
1231 if (ist != 0)
1232 esp = get_rsp_from_tss(ist + 3);
1233 else
1234 esp = ESP;
1235 esp &= ~0xfLL; /* align stack */
1236 dpl = cpl;
1237 } else {
1238 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1239 new_stack = 0; /* avoid warning */
1240 esp = 0; /* avoid warning */
1241 }
1242
1243 PUSHQ(esp, env->segs[R_SS].selector);
1244 PUSHQ(esp, ESP);
1245 PUSHQ(esp, compute_eflags());
1246 PUSHQ(esp, env->segs[R_CS].selector);
1247 PUSHQ(esp, old_eip);
1248 if (has_error_code) {
1249 PUSHQ(esp, error_code);
1250 }
1251
1252 if (new_stack) {
1253 ss = 0 | dpl;
1254 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1255 }
1256 ESP = esp;
1257
1258 selector = (selector & ~3) | dpl;
1259 cpu_x86_load_seg_cache(env, R_CS, selector,
1260 get_seg_base(e1, e2),
1261 get_seg_limit(e1, e2),
1262 e2);
1263 cpu_x86_set_cpl(env, dpl);
1264 env->eip = offset;
1265
1266 /* interrupt gates clear the IF mask */
1267 if ((type & 1) == 0) {
1268 env->eflags &= ~IF_MASK;
1269 }
1270
1271#ifndef VBOX
1272 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1273#else
1274 /*
1275 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1276 * gets confused by seemingly changed EFLAGS. See #3491 and
1277 * public bug #2341.
1278 */
1279 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1280#endif
1281}
1282#endif
1283
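/* SYSCALL loads CS from bits 47:32 of MSR_STAR (SS is that selector + 8);
   SYSRET uses bits 63:48. In long mode the return RIP is saved in RCX and
   RFLAGS in R11, and the entry point comes from LSTAR (64-bit code) or
   CSTAR (compatibility mode). */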
1284#if defined(CONFIG_USER_ONLY)
1285void helper_syscall(int next_eip_addend)
1286{
1287 env->exception_index = EXCP_SYSCALL;
1288 env->exception_next_eip = env->eip + next_eip_addend;
1289 cpu_loop_exit();
1290}
1291#else
1292void helper_syscall(int next_eip_addend)
1293{
1294 int selector;
1295
1296 if (!(env->efer & MSR_EFER_SCE)) {
1297 raise_exception_err(EXCP06_ILLOP, 0);
1298 }
1299 selector = (env->star >> 32) & 0xffff;
1300#ifdef TARGET_X86_64
1301 if (env->hflags & HF_LMA_MASK) {
1302 int code64;
1303
1304 ECX = env->eip + next_eip_addend;
1305 env->regs[11] = compute_eflags();
1306
1307 code64 = env->hflags & HF_CS64_MASK;
1308
1309 cpu_x86_set_cpl(env, 0);
1310 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1311 0, 0xffffffff,
1312 DESC_G_MASK | DESC_P_MASK |
1313 DESC_S_MASK |
1314 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1315 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1316 0, 0xffffffff,
1317 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1318 DESC_S_MASK |
1319 DESC_W_MASK | DESC_A_MASK);
1320 env->eflags &= ~env->fmask;
1321 load_eflags(env->eflags, 0);
1322 if (code64)
1323 env->eip = env->lstar;
1324 else
1325 env->eip = env->cstar;
1326 } else
1327#endif
1328 {
1329 ECX = (uint32_t)(env->eip + next_eip_addend);
1330
1331 cpu_x86_set_cpl(env, 0);
1332 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1333 0, 0xffffffff,
1334 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1335 DESC_S_MASK |
1336 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1337 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1338 0, 0xffffffff,
1339 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1340 DESC_S_MASK |
1341 DESC_W_MASK | DESC_A_MASK);
1342 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1343 env->eip = (uint32_t)env->star;
1344 }
1345}
1346#endif
1347
1348void helper_sysret(int dflag)
1349{
1350 int cpl, selector;
1351
1352 if (!(env->efer & MSR_EFER_SCE)) {
1353 raise_exception_err(EXCP06_ILLOP, 0);
1354 }
1355 cpl = env->hflags & HF_CPL_MASK;
1356 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1357 raise_exception_err(EXCP0D_GPF, 0);
1358 }
1359 selector = (env->star >> 48) & 0xffff;
1360#ifdef TARGET_X86_64
1361 if (env->hflags & HF_LMA_MASK) {
1362 if (dflag == 2) {
1363 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1364 0, 0xffffffff,
1365 DESC_G_MASK | DESC_P_MASK |
1366 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1367 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1368 DESC_L_MASK);
1369 env->eip = ECX;
1370 } else {
1371 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1372 0, 0xffffffff,
1373 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1374 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1375 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1376 env->eip = (uint32_t)ECX;
1377 }
1378 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1379 0, 0xffffffff,
1380 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1381 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1382 DESC_W_MASK | DESC_A_MASK);
1383 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1384 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1385 cpu_x86_set_cpl(env, 3);
1386 } else
1387#endif
1388 {
1389 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1390 0, 0xffffffff,
1391 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1392 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1393 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1394 env->eip = (uint32_t)ECX;
1395 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1396 0, 0xffffffff,
1397 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1398 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1399 DESC_W_MASK | DESC_A_MASK);
1400 env->eflags |= IF_MASK;
1401 cpu_x86_set_cpl(env, 3);
1402 }
1403#ifdef USE_KQEMU
1404 if (kqemu_is_ok(env)) {
1405 if (env->hflags & HF_LMA_MASK)
1406 CC_OP = CC_OP_EFLAGS;
1407 env->exception_index = -1;
1408 cpu_loop_exit();
1409 }
1410#endif
1411}
1412
1413#ifdef VBOX
1414/**
1415 * Checks and processes external VMM events.
1416 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1417 */
1418void helper_external_event(void)
1419{
1420#if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1421 uintptr_t uSP;
1422# ifdef RT_ARCH_AMD64
1423 __asm__ __volatile__("movq %%rsp, %0" : "=r" (uSP));
1424# else
1425 __asm__ __volatile__("movl %%esp, %0" : "=r" (uSP));
1426# endif
1427 AssertMsg(!(uSP & 15), ("xSP=%#p\n", uSP));
1428#endif
1429 /* Keep in sync with flags checked by gen_check_external_event() */
1430 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1431 {
1432 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1433 ~CPU_INTERRUPT_EXTERNAL_HARD);
1434 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1435 }
1436 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1437 {
1438 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1439 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1440 cpu_interrupt(env, CPU_INTERRUPT_EXIT);
1441 }
1442 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1443 {
1444 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1445 ~CPU_INTERRUPT_EXTERNAL_DMA);
1446 remR3DmaRun(env);
1447 }
1448 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1449 {
1450 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1451 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1452 remR3TimersRun(env);
1453 }
1454}
1455/* helper for recording call instruction addresses for later scanning */
1456void helper_record_call()
1457{
1458 if ( !(env->state & CPU_RAW_RING0)
1459 && (env->cr[0] & CR0_PG_MASK)
1460 && !(env->eflags & X86_EFL_IF))
1461 remR3RecordCall(env);
1462}
1463#endif /* VBOX */
1464
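/* In real mode the IDT base points at the interrupt vector table of 4-byte
   entries (16-bit offset followed by 16-bit segment); the handler is entered
   with FLAGS, CS and IP pushed on the stack and IF/TF/AC/RF cleared. */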
1465/* real mode interrupt */
1466static void do_interrupt_real(int intno, int is_int, int error_code,
1467 unsigned int next_eip)
1468{
1469 SegmentCache *dt;
1470 target_ulong ptr, ssp;
1471 int selector;
1472 uint32_t offset, esp;
1473 uint32_t old_cs, old_eip;
1474
1475 /* real mode (simpler !) */
1476 dt = &env->idt;
1477#ifndef VBOX
1478 if (intno * 4 + 3 > dt->limit)
1479#else
1480 if ((unsigned)intno * 4 + 3 > dt->limit)
1481#endif
1482 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1483 ptr = dt->base + intno * 4;
1484 offset = lduw_kernel(ptr);
1485 selector = lduw_kernel(ptr + 2);
1486 esp = ESP;
1487 ssp = env->segs[R_SS].base;
1488 if (is_int)
1489 old_eip = next_eip;
1490 else
1491 old_eip = env->eip;
1492 old_cs = env->segs[R_CS].selector;
1493 /* XXX: use SS segment size ? */
1494 PUSHW(ssp, esp, 0xffff, compute_eflags());
1495 PUSHW(ssp, esp, 0xffff, old_cs);
1496 PUSHW(ssp, esp, 0xffff, old_eip);
1497
1498 /* update processor state */
1499 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1500 env->eip = offset;
1501 env->segs[R_CS].selector = selector;
1502 env->segs[R_CS].base = (selector << 4);
1503 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1504}
1505
1506/* fake user mode interrupt */
1507void do_interrupt_user(int intno, int is_int, int error_code,
1508 target_ulong next_eip)
1509{
1510 SegmentCache *dt;
1511 target_ulong ptr;
1512 int dpl, cpl, shift;
1513 uint32_t e2;
1514
1515 dt = &env->idt;
1516 if (env->hflags & HF_LMA_MASK) {
1517 shift = 4;
1518 } else {
1519 shift = 3;
1520 }
1521 ptr = dt->base + (intno << shift);
1522 e2 = ldl_kernel(ptr + 4);
1523
1524 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1525 cpl = env->hflags & HF_CPL_MASK;
1526 /* check privilege if software int */
1527 if (is_int && dpl < cpl)
1528 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1529
1530 /* Since we emulate only user space, we cannot do more than
1531 exit the emulation with the suitable exception and error
1532 code */
1533 if (is_int)
1534 EIP = next_eip;
1535}
1536
1537/*
1538 * Begin execution of an interruption. is_int is TRUE if coming from
1539 * the int instruction. next_eip is the EIP value AFTER the interrupt
1540 * instruction. It is only relevant if is_int is TRUE.
1541 */
1542void do_interrupt(int intno, int is_int, int error_code,
1543 target_ulong next_eip, int is_hw)
1544{
1545 if (RT_UNLIKELY(env->state & CPU_EMULATE_SINGLE_STEP)) {
1546 if (is_int) {
1547 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv%s\n",
1548 intno, error_code, (RTGCPTR)env->eip, is_hw ? " hw" : "");
1549 } else {
1550 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv next=%#RGv%s\n",
1551 intno, error_code, (RTGCPTR)env->eip, (RTGCPTR)next_eip, is_hw ? " hw" : "");
1552 }
1553 }
1554
1555 if (loglevel & CPU_LOG_INT) {
1556 if ((env->cr[0] & CR0_PE_MASK)) {
1557 static int count;
1558 fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1559 count, intno, error_code, is_int,
1560 env->hflags & HF_CPL_MASK,
1561 env->segs[R_CS].selector, EIP,
1562 (int)env->segs[R_CS].base + EIP,
1563 env->segs[R_SS].selector, ESP);
1564 if (intno == 0x0e) {
1565 fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
1566 } else {
1567 fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
1568 }
1569 fprintf(logfile, "\n");
1570 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1571#if 0
1572 {
1573 int i;
1574 uint8_t *ptr;
1575 fprintf(logfile, " code=");
1576 ptr = env->segs[R_CS].base + env->eip;
1577 for(i = 0; i < 16; i++) {
1578 fprintf(logfile, " %02x", ldub(ptr + i));
1579 }
1580 fprintf(logfile, "\n");
1581 }
1582#endif
1583 count++;
1584 }
1585 }
1586 if (env->cr[0] & CR0_PE_MASK) {
1587#ifdef TARGET_X86_64
1588 if (env->hflags & HF_LMA_MASK) {
1589 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1590 } else
1591#endif
1592 {
1593#ifdef VBOX
1594 /* int xx *, v86 code and VME enabled? */
1595 if ( (env->eflags & VM_MASK)
1596 && (env->cr[4] & CR4_VME_MASK)
1597 && is_int
1598 && !is_hw
1599 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1600 )
1601 do_soft_interrupt_vme(intno, error_code, next_eip);
1602 else
1603#endif /* VBOX */
1604 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1605 }
1606 } else {
1607 do_interrupt_real(intno, is_int, error_code, next_eip);
1608 }
1609}
1610
1611/*
1612 * Check nested exceptions and change to double or triple fault if
1613 * needed. It should only be called if this is not an interrupt.
1614 * Returns the new exception number.
1615 */
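/* Vectors 0 (#DE) and 10-13 (#TS, #NP, #SS, #GP) are the contributory
   exceptions: two contributory faults in a row, or a contributory/page fault
   raised while delivering a page fault, escalate to #DF (vector 8), and a
   further fault while delivering #DF is a triple fault. */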
1616static int check_exception(int intno, int *error_code)
1617{
1618 int first_contributory = env->old_exception == 0 ||
1619 (env->old_exception >= 10 &&
1620 env->old_exception <= 13);
1621 int second_contributory = intno == 0 ||
1622 (intno >= 10 && intno <= 13);
1623
1624 if (loglevel & CPU_LOG_INT)
1625 fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
1626 env->old_exception, intno);
1627
1628 if (env->old_exception == EXCP08_DBLE)
1629 cpu_abort(env, "triple fault");
1630
1631 if ((first_contributory && second_contributory)
1632 || (env->old_exception == EXCP0E_PAGE &&
1633 (second_contributory || (intno == EXCP0E_PAGE)))) {
1634 intno = EXCP08_DBLE;
1635 *error_code = 0;
1636 }
1637
1638 if (second_contributory || (intno == EXCP0E_PAGE) ||
1639 (intno == EXCP08_DBLE))
1640 env->old_exception = intno;
1641
1642 return intno;
1643}
1644
1645/*
1646 * Signal an interruption. It is executed in the main CPU loop.
1647 * is_int is TRUE if coming from the int instruction. next_eip is the
1648 * EIP value AFTER the interrupt instruction. It is only relevant if
1649 * is_int is TRUE.
1650 */
1651void raise_interrupt(int intno, int is_int, int error_code,
1652 int next_eip_addend)
1653{
1654#if defined(VBOX) && defined(DEBUG)
1655 Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, (RTGCPTR)env->eip + next_eip_addend));
1656#endif
1657 if (!is_int) {
1658 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1659 intno = check_exception(intno, &error_code);
1660 } else {
1661 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1662 }
1663
1664 env->exception_index = intno;
1665 env->error_code = error_code;
1666 env->exception_is_int = is_int;
1667 env->exception_next_eip = env->eip + next_eip_addend;
1668 cpu_loop_exit();
1669}
1670
1671/* shortcuts to generate exceptions */
1672
1673void (raise_exception_err)(int exception_index, int error_code)
1674{
1675 raise_interrupt(exception_index, 0, error_code, 0);
1676}
1677
1678void raise_exception(int exception_index)
1679{
1680 raise_interrupt(exception_index, 0, 0, 0);
1681}
1682
1683/* SMM support */
1684
1685#if defined(CONFIG_USER_ONLY)
1686
1687void do_smm_enter(void)
1688{
1689}
1690
1691void helper_rsm(void)
1692{
1693}
1694
1695#else
1696
1697#ifdef TARGET_X86_64
1698#define SMM_REVISION_ID 0x00020064
1699#else
1700#define SMM_REVISION_ID 0x00020000
1701#endif
1702
1703void do_smm_enter(void)
1704{
1705 target_ulong sm_state;
1706 SegmentCache *dt;
1707 int i, offset;
1708
1709 if (loglevel & CPU_LOG_INT) {
1710 fprintf(logfile, "SMM: enter\n");
1711 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1712 }
1713
1714 env->hflags |= HF_SMM_MASK;
1715 cpu_smm_update(env);
1716
1717 sm_state = env->smbase + 0x8000;
1718
1719#ifdef TARGET_X86_64
1720 for(i = 0; i < 6; i++) {
1721 dt = &env->segs[i];
1722 offset = 0x7e00 + i * 16;
1723 stw_phys(sm_state + offset, dt->selector);
1724 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1725 stl_phys(sm_state + offset + 4, dt->limit);
1726 stq_phys(sm_state + offset + 8, dt->base);
1727 }
1728
1729 stq_phys(sm_state + 0x7e68, env->gdt.base);
1730 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1731
1732 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1733 stq_phys(sm_state + 0x7e78, env->ldt.base);
1734 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1735 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1736
1737 stq_phys(sm_state + 0x7e88, env->idt.base);
1738 stl_phys(sm_state + 0x7e84, env->idt.limit);
1739
1740 stw_phys(sm_state + 0x7e90, env->tr.selector);
1741 stq_phys(sm_state + 0x7e98, env->tr.base);
1742 stl_phys(sm_state + 0x7e94, env->tr.limit);
1743 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1744
1745 stq_phys(sm_state + 0x7ed0, env->efer);
1746
1747 stq_phys(sm_state + 0x7ff8, EAX);
1748 stq_phys(sm_state + 0x7ff0, ECX);
1749 stq_phys(sm_state + 0x7fe8, EDX);
1750 stq_phys(sm_state + 0x7fe0, EBX);
1751 stq_phys(sm_state + 0x7fd8, ESP);
1752 stq_phys(sm_state + 0x7fd0, EBP);
1753 stq_phys(sm_state + 0x7fc8, ESI);
1754 stq_phys(sm_state + 0x7fc0, EDI);
1755 for(i = 8; i < 16; i++)
1756 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1757 stq_phys(sm_state + 0x7f78, env->eip);
1758 stl_phys(sm_state + 0x7f70, compute_eflags());
1759 stl_phys(sm_state + 0x7f68, env->dr[6]);
1760 stl_phys(sm_state + 0x7f60, env->dr[7]);
1761
1762 stl_phys(sm_state + 0x7f48, env->cr[4]);
1763 stl_phys(sm_state + 0x7f50, env->cr[3]);
1764 stl_phys(sm_state + 0x7f58, env->cr[0]);
1765
1766 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1767 stl_phys(sm_state + 0x7f00, env->smbase);
1768#else
1769 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1770 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1771 stl_phys(sm_state + 0x7ff4, compute_eflags());
1772 stl_phys(sm_state + 0x7ff0, env->eip);
1773 stl_phys(sm_state + 0x7fec, EDI);
1774 stl_phys(sm_state + 0x7fe8, ESI);
1775 stl_phys(sm_state + 0x7fe4, EBP);
1776 stl_phys(sm_state + 0x7fe0, ESP);
1777 stl_phys(sm_state + 0x7fdc, EBX);
1778 stl_phys(sm_state + 0x7fd8, EDX);
1779 stl_phys(sm_state + 0x7fd4, ECX);
1780 stl_phys(sm_state + 0x7fd0, EAX);
1781 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1782 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1783
1784 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1785 stl_phys(sm_state + 0x7f64, env->tr.base);
1786 stl_phys(sm_state + 0x7f60, env->tr.limit);
1787 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1788
1789 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1790 stl_phys(sm_state + 0x7f80, env->ldt.base);
1791 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1792 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1793
1794 stl_phys(sm_state + 0x7f74, env->gdt.base);
1795 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1796
1797 stl_phys(sm_state + 0x7f58, env->idt.base);
1798 stl_phys(sm_state + 0x7f54, env->idt.limit);
1799
1800 for(i = 0; i < 6; i++) {
1801 dt = &env->segs[i];
1802 if (i < 3)
1803 offset = 0x7f84 + i * 12;
1804 else
1805 offset = 0x7f2c + (i - 3) * 12;
1806 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1807 stl_phys(sm_state + offset + 8, dt->base);
1808 stl_phys(sm_state + offset + 4, dt->limit);
1809 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1810 }
1811 stl_phys(sm_state + 0x7f14, env->cr[4]);
1812
1813 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1814 stl_phys(sm_state + 0x7ef8, env->smbase);
1815#endif
1816 /* init SMM cpu state */
1817
1818#ifdef TARGET_X86_64
1819 cpu_load_efer(env, 0);
1820#endif
1821 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1822 env->eip = 0x00008000;
1823 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1824 0xffffffff, 0);
1825 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1826 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1827 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1828 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1829 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1830
1831 cpu_x86_update_cr0(env,
1832 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1833 cpu_x86_update_cr4(env, 0);
1834 env->dr[7] = 0x00000400;
1835 CC_OP = CC_OP_EFLAGS;
1836}
1837
1838void helper_rsm(void)
1839{
1840#ifdef VBOX
1841 cpu_abort(env, "helper_rsm");
1842#else /* !VBOX */
1845 target_ulong sm_state;
1846 int i, offset;
1847 uint32_t val;
1848
1849 sm_state = env->smbase + 0x8000;
1850#ifdef TARGET_X86_64
1851 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1852
1853 for(i = 0; i < 6; i++) {
1854 offset = 0x7e00 + i * 16;
1855 cpu_x86_load_seg_cache(env, i,
1856 lduw_phys(sm_state + offset),
1857 ldq_phys(sm_state + offset + 8),
1858 ldl_phys(sm_state + offset + 4),
1859 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1860 }
1861
1862 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1863 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1864
1865 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1866 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1867 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1868 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1869
1870 env->idt.base = ldq_phys(sm_state + 0x7e88);
1871 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1872
1873 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1874 env->tr.base = ldq_phys(sm_state + 0x7e98);
1875 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1876 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1877
1878 EAX = ldq_phys(sm_state + 0x7ff8);
1879 ECX = ldq_phys(sm_state + 0x7ff0);
1880 EDX = ldq_phys(sm_state + 0x7fe8);
1881 EBX = ldq_phys(sm_state + 0x7fe0);
1882 ESP = ldq_phys(sm_state + 0x7fd8);
1883 EBP = ldq_phys(sm_state + 0x7fd0);
1884 ESI = ldq_phys(sm_state + 0x7fc8);
1885 EDI = ldq_phys(sm_state + 0x7fc0);
1886 for(i = 8; i < 16; i++)
1887 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1888 env->eip = ldq_phys(sm_state + 0x7f78);
1889 load_eflags(ldl_phys(sm_state + 0x7f70),
1890 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1891 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1892 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1893
1894 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1895 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1896 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1897
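    /* Note: bit 17 of the SMM revision ID advertises SMBASE relocation
       support; only in that case is the SMBASE value saved in the state
       map taken over (same check in the 32-bit branch below). */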
1898 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1899 if (val & 0x20000) {
1900 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1901 }
1902#else
1903 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1904 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1905 load_eflags(ldl_phys(sm_state + 0x7ff4),
1906 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1907 env->eip = ldl_phys(sm_state + 0x7ff0);
1908 EDI = ldl_phys(sm_state + 0x7fec);
1909 ESI = ldl_phys(sm_state + 0x7fe8);
1910 EBP = ldl_phys(sm_state + 0x7fe4);
1911 ESP = ldl_phys(sm_state + 0x7fe0);
1912 EBX = ldl_phys(sm_state + 0x7fdc);
1913 EDX = ldl_phys(sm_state + 0x7fd8);
1914 ECX = ldl_phys(sm_state + 0x7fd4);
1915 EAX = ldl_phys(sm_state + 0x7fd0);
1916 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1917 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1918
1919 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1920 env->tr.base = ldl_phys(sm_state + 0x7f64);
1921 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1922 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1923
1924 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1925 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1926 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1927 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1928
1929 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1930 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1931
1932 env->idt.base = ldl_phys(sm_state + 0x7f58);
1933 env->idt.limit = ldl_phys(sm_state + 0x7f54);
1934
1935 for(i = 0; i < 6; i++) {
1936 if (i < 3)
1937 offset = 0x7f84 + i * 12;
1938 else
1939 offset = 0x7f2c + (i - 3) * 12;
1940 cpu_x86_load_seg_cache(env, i,
1941 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1942 ldl_phys(sm_state + offset + 8),
1943 ldl_phys(sm_state + offset + 4),
1944 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1945 }
1946 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1947
1948 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1949 if (val & 0x20000) {
1950 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1951 }
1952#endif
1953 CC_OP = CC_OP_EFLAGS;
1954 env->hflags &= ~HF_SMM_MASK;
1955 cpu_smm_update(env);
1956
1957 if (loglevel & CPU_LOG_INT) {
1958 fprintf(logfile, "SMM: after RSM\n");
1959 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1960 }
1961#endif /* !VBOX */
1962}
1963
1964#endif /* !CONFIG_USER_ONLY */
1965
1966
1967/* division, flags are undefined */
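/* Note: on x86 both a zero divisor and a quotient that does not fit the
   destination raise #DE, which is why EXCP00_DIVZ is reused for the
   overflow checks in the helpers below. */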
1968
1969void helper_divb_AL(target_ulong t0)
1970{
1971 unsigned int num, den, q, r;
1972
1973 num = (EAX & 0xffff);
1974 den = (t0 & 0xff);
1975 if (den == 0) {
1976 raise_exception(EXCP00_DIVZ);
1977 }
1978 q = (num / den);
1979 if (q > 0xff)
1980 raise_exception(EXCP00_DIVZ);
1981 q &= 0xff;
1982 r = (num % den) & 0xff;
1983 EAX = (EAX & ~0xffff) | (r << 8) | q;
1984}
1985
1986void helper_idivb_AL(target_ulong t0)
1987{
1988 int num, den, q, r;
1989
1990 num = (int16_t)EAX;
1991 den = (int8_t)t0;
1992 if (den == 0) {
1993 raise_exception(EXCP00_DIVZ);
1994 }
1995 q = (num / den);
1996 if (q != (int8_t)q)
1997 raise_exception(EXCP00_DIVZ);
1998 q &= 0xff;
1999 r = (num % den) & 0xff;
2000 EAX = (EAX & ~0xffff) | (r << 8) | q;
2001}
2002
2003void helper_divw_AX(target_ulong t0)
2004{
2005 unsigned int num, den, q, r;
2006
2007 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2008 den = (t0 & 0xffff);
2009 if (den == 0) {
2010 raise_exception(EXCP00_DIVZ);
2011 }
2012 q = (num / den);
2013 if (q > 0xffff)
2014 raise_exception(EXCP00_DIVZ);
2015 q &= 0xffff;
2016 r = (num % den) & 0xffff;
2017 EAX = (EAX & ~0xffff) | q;
2018 EDX = (EDX & ~0xffff) | r;
2019}
2020
2021void helper_idivw_AX(target_ulong t0)
2022{
2023 int num, den, q, r;
2024
2025 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2026 den = (int16_t)t0;
2027 if (den == 0) {
2028 raise_exception(EXCP00_DIVZ);
2029 }
2030 q = (num / den);
2031 if (q != (int16_t)q)
2032 raise_exception(EXCP00_DIVZ);
2033 q &= 0xffff;
2034 r = (num % den) & 0xffff;
2035 EAX = (EAX & ~0xffff) | q;
2036 EDX = (EDX & ~0xffff) | r;
2037}
2038
2039void helper_divl_EAX(target_ulong t0)
2040{
2041 unsigned int den, r;
2042 uint64_t num, q;
2043
2044 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2045 den = t0;
2046 if (den == 0) {
2047 raise_exception(EXCP00_DIVZ);
2048 }
2049 q = (num / den);
2050 r = (num % den);
2051 if (q > 0xffffffff)
2052 raise_exception(EXCP00_DIVZ);
2053 EAX = (uint32_t)q;
2054 EDX = (uint32_t)r;
2055}
2056
2057void helper_idivl_EAX(target_ulong t0)
2058{
2059 int den, r;
2060 int64_t num, q;
2061
2062 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2063 den = t0;
2064 if (den == 0) {
2065 raise_exception(EXCP00_DIVZ);
2066 }
2067 q = (num / den);
2068 r = (num % den);
2069 if (q != (int32_t)q)
2070 raise_exception(EXCP00_DIVZ);
2071 EAX = (uint32_t)q;
2072 EDX = (uint32_t)r;
2073}
2074
2075/* bcd */
2076
2077/* XXX: exception */
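/* AAM: AH = AL / base, AL = AL % base (base is the immediate operand,
   10 by default). The XXX above presumably refers to the missing #DE
   check for base == 0. */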
2078void helper_aam(int base)
2079{
2080 int al, ah;
2081 al = EAX & 0xff;
2082 ah = al / base;
2083 al = al % base;
2084 EAX = (EAX & ~0xffff) | al | (ah << 8);
2085 CC_DST = al;
2086}
2087
2088void helper_aad(int base)
2089{
2090 int al, ah;
2091 al = EAX & 0xff;
2092 ah = (EAX >> 8) & 0xff;
2093 al = ((ah * base) + al) & 0xff;
2094 EAX = (EAX & ~0xffff) | al;
2095 CC_DST = al;
2096}
2097
2098void helper_aaa(void)
2099{
2100 int icarry;
2101 int al, ah, af;
2102 int eflags;
2103
2104 eflags = cc_table[CC_OP].compute_all();
2105 af = eflags & CC_A;
2106 al = EAX & 0xff;
2107 ah = (EAX >> 8) & 0xff;
2108
2109 icarry = (al > 0xf9);
2110 if (((al & 0x0f) > 9 ) || af) {
2111 al = (al + 6) & 0x0f;
2112 ah = (ah + 1 + icarry) & 0xff;
2113 eflags |= CC_C | CC_A;
2114 } else {
2115 eflags &= ~(CC_C | CC_A);
2116 al &= 0x0f;
2117 }
2118 EAX = (EAX & ~0xffff) | al | (ah << 8);
2119 CC_SRC = eflags;
2120 FORCE_RET();
2121}
2122
2123void helper_aas(void)
2124{
2125 int icarry;
2126 int al, ah, af;
2127 int eflags;
2128
2129 eflags = cc_table[CC_OP].compute_all();
2130 af = eflags & CC_A;
2131 al = EAX & 0xff;
2132 ah = (EAX >> 8) & 0xff;
2133
2134 icarry = (al < 6);
2135 if (((al & 0x0f) > 9 ) || af) {
2136 al = (al - 6) & 0x0f;
2137 ah = (ah - 1 - icarry) & 0xff;
2138 eflags |= CC_C | CC_A;
2139 } else {
2140 eflags &= ~(CC_C | CC_A);
2141 al &= 0x0f;
2142 }
2143 EAX = (EAX & ~0xffff) | al | (ah << 8);
2144 CC_SRC = eflags;
2145 FORCE_RET();
2146}
2147
2148void helper_daa(void)
2149{
2150 int al, af, cf;
2151 int eflags;
2152
2153 eflags = cc_table[CC_OP].compute_all();
2154 cf = eflags & CC_C;
2155 af = eflags & CC_A;
2156 al = EAX & 0xff;
2157
2158 eflags = 0;
2159 if (((al & 0x0f) > 9 ) || af) {
2160 al = (al + 6) & 0xff;
2161 eflags |= CC_A;
2162 }
2163 if ((al > 0x9f) || cf) {
2164 al = (al + 0x60) & 0xff;
2165 eflags |= CC_C;
2166 }
2167 EAX = (EAX & ~0xff) | al;
2168 /* well, speed is not an issue here, so we compute the flags by hand */
2169 eflags |= (al == 0) << 6; /* zf */
2170 eflags |= parity_table[al]; /* pf */
2171 eflags |= (al & 0x80); /* sf */
2172 CC_SRC = eflags;
2173 FORCE_RET();
2174}
2175
2176void helper_das(void)
2177{
2178 int al, al1, af, cf;
2179 int eflags;
2180
2181 eflags = cc_table[CC_OP].compute_all();
2182 cf = eflags & CC_C;
2183 af = eflags & CC_A;
2184 al = EAX & 0xff;
2185
2186 eflags = 0;
2187 al1 = al;
2188 if (((al & 0x0f) > 9 ) || af) {
2189 eflags |= CC_A;
2190 if (al < 6 || cf)
2191 eflags |= CC_C;
2192 al = (al - 6) & 0xff;
2193 }
2194 if ((al1 > 0x99) || cf) {
2195 al = (al - 0x60) & 0xff;
2196 eflags |= CC_C;
2197 }
2198 EAX = (EAX & ~0xff) | al;
2199 /* well, speed is not an issue here, so we compute the flags by hand */
2200 eflags |= (al == 0) << 6; /* zf */
2201 eflags |= parity_table[al]; /* pf */
2202 eflags |= (al & 0x80); /* sf */
2203 CC_SRC = eflags;
2204 FORCE_RET();
2205}
2206
2207void helper_into(int next_eip_addend)
2208{
2209 int eflags;
2210 eflags = cc_table[CC_OP].compute_all();
2211 if (eflags & CC_O) {
2212 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2213 }
2214}
2215
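/* CMPXCHG8B: compare the 64-bit value at a0 with EDX:EAX; if equal, store
   ECX:EBX and set ZF, otherwise load the memory value into EDX:EAX and
   clear ZF. The store is done in both cases to mimic the unconditional
   locked write-back of the real instruction. */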
2216void helper_cmpxchg8b(target_ulong a0)
2217{
2218 uint64_t d;
2219 int eflags;
2220
2221 eflags = cc_table[CC_OP].compute_all();
2222 d = ldq(a0);
2223 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2224 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2225 eflags |= CC_Z;
2226 } else {
2227 /* always do the store */
2228 stq(a0, d);
2229 EDX = (uint32_t)(d >> 32);
2230 EAX = (uint32_t)d;
2231 eflags &= ~CC_Z;
2232 }
2233 CC_SRC = eflags;
2234}
2235
2236#ifdef TARGET_X86_64
2237void helper_cmpxchg16b(target_ulong a0)
2238{
2239 uint64_t d0, d1;
2240 int eflags;
2241
2242 if ((a0 & 0xf) != 0)
2243 raise_exception(EXCP0D_GPF);
2244 eflags = cc_table[CC_OP].compute_all();
2245 d0 = ldq(a0);
2246 d1 = ldq(a0 + 8);
2247 if (d0 == EAX && d1 == EDX) {
2248 stq(a0, EBX);
2249 stq(a0 + 8, ECX);
2250 eflags |= CC_Z;
2251 } else {
2252 /* always do the store */
2253 stq(a0, d0);
2254 stq(a0 + 8, d1);
2255 EDX = d1;
2256 EAX = d0;
2257 eflags &= ~CC_Z;
2258 }
2259 CC_SRC = eflags;
2260}
2261#endif
2262
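/* Single step trap: set DR6.BS (bit 14, 0x4000) and deliver #DB. */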
2263void helper_single_step(void)
2264{
2265 env->dr[6] |= 0x4000;
2266 raise_exception(EXCP01_SSTP);
2267}
2268
2269void helper_cpuid(void)
2270{
2271#ifndef VBOX
2272 uint32_t index;
2273
2274 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2275
2276 index = (uint32_t)EAX;
2277 /* test if maximum index reached */
2278 if (index & 0x80000000) {
2279 if (index > env->cpuid_xlevel)
2280 index = env->cpuid_level;
2281 } else {
2282 if (index > env->cpuid_level)
2283 index = env->cpuid_level;
2284 }
2285
2286 switch(index) {
2287 case 0:
2288 EAX = env->cpuid_level;
2289 EBX = env->cpuid_vendor1;
2290 EDX = env->cpuid_vendor2;
2291 ECX = env->cpuid_vendor3;
2292 break;
2293 case 1:
2294 EAX = env->cpuid_version;
2295 EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2296 ECX = env->cpuid_ext_features;
2297 EDX = env->cpuid_features;
2298 break;
2299 case 2:
2300 /* cache info: needed for Pentium Pro compatibility */
2301 EAX = 1;
2302 EBX = 0;
2303 ECX = 0;
2304 EDX = 0x2c307d;
2305 break;
2306 case 4:
2307 /* cache info: needed for Core compatibility */
2308 switch (ECX) {
2309 case 0: /* L1 dcache info */
2310 EAX = 0x0000121;
2311 EBX = 0x1c0003f;
2312 ECX = 0x000003f;
2313 EDX = 0x0000001;
2314 break;
2315 case 1: /* L1 icache info */
2316 EAX = 0x0000122;
2317 EBX = 0x1c0003f;
2318 ECX = 0x000003f;
2319 EDX = 0x0000001;
2320 break;
2321 case 2: /* L2 cache info */
2322 EAX = 0x0000143;
2323 EBX = 0x3c0003f;
2324 ECX = 0x0000fff;
2325 EDX = 0x0000001;
2326 break;
2327 default: /* end of info */
2328 EAX = 0;
2329 EBX = 0;
2330 ECX = 0;
2331 EDX = 0;
2332 break;
2333 }
2334
2335 break;
2336 case 5:
2337 /* mwait info: needed for Core compatibility */
2338 EAX = 0; /* Smallest monitor-line size in bytes */
2339 EBX = 0; /* Largest monitor-line size in bytes */
2340 ECX = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2341 EDX = 0;
2342 break;
2343 case 6:
2344 /* Thermal and Power Leaf */
2345 EAX = 0;
2346 EBX = 0;
2347 ECX = 0;
2348 EDX = 0;
2349 break;
2350 case 9:
2351 /* Direct Cache Access Information Leaf */
2352 EAX = 0; /* Bits 0-31 in DCA_CAP MSR */
2353 EBX = 0;
2354 ECX = 0;
2355 EDX = 0;
2356 break;
2357 case 0xA:
2358 /* Architectural Performance Monitoring Leaf */
2359 EAX = 0;
2360 EBX = 0;
2361 ECX = 0;
2362 EDX = 0;
2363 break;
2364 case 0x80000000:
2365 EAX = env->cpuid_xlevel;
2366 EBX = env->cpuid_vendor1;
2367 EDX = env->cpuid_vendor2;
2368 ECX = env->cpuid_vendor3;
2369 break;
2370 case 0x80000001:
2371 EAX = env->cpuid_features;
2372 EBX = 0;
2373 ECX = env->cpuid_ext3_features;
2374 EDX = env->cpuid_ext2_features;
2375 break;
2376 case 0x80000002:
2377 case 0x80000003:
2378 case 0x80000004:
2379 EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2380 EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2381 ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2382 EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2383 break;
2384 case 0x80000005:
2385 /* cache info (L1 cache) */
2386 EAX = 0x01ff01ff;
2387 EBX = 0x01ff01ff;
2388 ECX = 0x40020140;
2389 EDX = 0x40020140;
2390 break;
2391 case 0x80000006:
2392 /* cache info (L2 cache) */
2393 EAX = 0;
2394 EBX = 0x42004200;
2395 ECX = 0x02008140;
2396 EDX = 0;
2397 break;
2398 case 0x80000008:
2399 /* virtual & phys address size in low 2 bytes. */
2400/* XXX: This value must match the one used in the MMU code. */
2401 if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
2402 /* 64 bit processor */
2403#if defined(USE_KQEMU)
2404 EAX = 0x00003020; /* 48 bits virtual, 32 bits physical */
2405#else
2406/* XXX: The physical address space is limited to 42 bits in exec.c. */
2407 EAX = 0x00003028; /* 48 bits virtual, 40 bits physical */
2408#endif
2409 } else {
2410#if defined(USE_KQEMU)
2411 EAX = 0x00000020; /* 32 bits physical */
2412#else
2413 if (env->cpuid_features & CPUID_PSE36)
2414 EAX = 0x00000024; /* 36 bits physical */
2415 else
2416 EAX = 0x00000020; /* 32 bits physical */
2417#endif
2418 }
2419 EBX = 0;
2420 ECX = 0;
2421 EDX = 0;
2422 break;
2423 case 0x8000000A:
2424 EAX = 0x00000001;
2425 EBX = 0;
2426 ECX = 0;
2427 EDX = 0;
2428 break;
2429 default:
2430 /* reserved values: zero */
2431 EAX = 0;
2432 EBX = 0;
2433 ECX = 0;
2434 EDX = 0;
2435 break;
2436 }
2437#else /* VBOX */
2438 remR3CpuId(env, EAX, &EAX, &EBX, &ECX, &EDX);
2439#endif /* VBOX */
2440}
2441
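/* Helper for ENTER with a non-zero nesting level: copies level-1 frame
   pointers from the outer frame onto the new stack and finally pushes the
   new frame pointer value (t1). */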
2442void helper_enter_level(int level, int data32, target_ulong t1)
2443{
2444 target_ulong ssp;
2445 uint32_t esp_mask, esp, ebp;
2446
2447 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2448 ssp = env->segs[R_SS].base;
2449 ebp = EBP;
2450 esp = ESP;
2451 if (data32) {
2452 /* 32 bit */
2453 esp -= 4;
2454 while (--level) {
2455 esp -= 4;
2456 ebp -= 4;
2457 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2458 }
2459 esp -= 4;
2460 stl(ssp + (esp & esp_mask), t1);
2461 } else {
2462 /* 16 bit */
2463 esp -= 2;
2464 while (--level) {
2465 esp -= 2;
2466 ebp -= 2;
2467 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2468 }
2469 esp -= 2;
2470 stw(ssp + (esp & esp_mask), t1);
2471 }
2472}
2473
2474#ifdef TARGET_X86_64
2475void helper_enter64_level(int level, int data64, target_ulong t1)
2476{
2477 target_ulong esp, ebp;
2478 ebp = EBP;
2479 esp = ESP;
2480
2481 if (data64) {
2482 /* 64 bit */
2483 esp -= 8;
2484 while (--level) {
2485 esp -= 8;
2486 ebp -= 8;
2487 stq(esp, ldq(ebp));
2488 }
2489 esp -= 8;
2490 stq(esp, t1);
2491 } else {
2492 /* 16 bit */
2493 esp -= 2;
2494 while (--level) {
2495 esp -= 2;
2496 ebp -= 2;
2497 stw(esp, lduw(ebp));
2498 }
2499 esp -= 2;
2500 stw(esp, t1);
2501 }
2502}
2503#endif
2504
2505void helper_lldt(int selector)
2506{
2507 SegmentCache *dt;
2508 uint32_t e1, e2;
2509#ifndef VBOX
2510 int index, entry_limit;
2511#else
2512 unsigned int index, entry_limit;
2513#endif
2514 target_ulong ptr;
2515
2516#ifdef VBOX
2517 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
2518 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
2519#endif
2520
2521 selector &= 0xffff;
2522 if ((selector & 0xfffc) == 0) {
2523 /* XXX: NULL selector case: invalid LDT */
2524 env->ldt.base = 0;
2525 env->ldt.limit = 0;
2526 } else {
2527 if (selector & 0x4)
2528 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2529 dt = &env->gdt;
2530 index = selector & ~7;
2531#ifdef TARGET_X86_64
2532 if (env->hflags & HF_LMA_MASK)
2533 entry_limit = 15;
2534 else
2535#endif
2536 entry_limit = 7;
2537 if ((index + entry_limit) > dt->limit)
2538 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2539 ptr = dt->base + index;
2540 e1 = ldl_kernel(ptr);
2541 e2 = ldl_kernel(ptr + 4);
2542 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2543 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2544 if (!(e2 & DESC_P_MASK))
2545 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2546#ifdef TARGET_X86_64
2547 if (env->hflags & HF_LMA_MASK) {
2548 uint32_t e3;
2549 e3 = ldl_kernel(ptr + 8);
2550 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2551 env->ldt.base |= (target_ulong)e3 << 32;
2552 } else
2553#endif
2554 {
2555 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2556 }
2557 }
2558 env->ldt.selector = selector;
2559#ifdef VBOX
2560 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2561 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2562#endif
2563}
2564
2565void helper_ltr(int selector)
2566{
2567 SegmentCache *dt;
2568 uint32_t e1, e2;
2569#ifndef VBOX
2570 int index, type, entry_limit;
2571#else
2572 unsigned int index;
2573 int type, entry_limit;
2574#endif
2575 target_ulong ptr;
2576
2577#ifdef VBOX
2578 Log(("helper_ltr: old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2579 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2580 env->tr.flags, (RTSEL)(selector & 0xffff)));
2581#endif
2582 selector &= 0xffff;
2583 if ((selector & 0xfffc) == 0) {
2584 /* NULL selector case: invalid TR */
2585 env->tr.base = 0;
2586 env->tr.limit = 0;
2587 env->tr.flags = 0;
2588 } else {
2589 if (selector & 0x4)
2590 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2591 dt = &env->gdt;
2592 index = selector & ~7;
2593#ifdef TARGET_X86_64
2594 if (env->hflags & HF_LMA_MASK)
2595 entry_limit = 15;
2596 else
2597#endif
2598 entry_limit = 7;
2599 if ((index + entry_limit) > dt->limit)
2600 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2601 ptr = dt->base + index;
2602 e1 = ldl_kernel(ptr);
2603 e2 = ldl_kernel(ptr + 4);
2604 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2605 if ((e2 & DESC_S_MASK) ||
2606 (type != 1 && type != 9))
2607 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2608 if (!(e2 & DESC_P_MASK))
2609 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2610#ifdef TARGET_X86_64
2611 if (env->hflags & HF_LMA_MASK) {
2612 uint32_t e3, e4;
2613 e3 = ldl_kernel(ptr + 8);
2614 e4 = ldl_kernel(ptr + 12);
2615 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2616 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2617 load_seg_cache_raw_dt(&env->tr, e1, e2);
2618 env->tr.base |= (target_ulong)e3 << 32;
2619 } else
2620#endif
2621 {
2622 load_seg_cache_raw_dt(&env->tr, e1, e2);
2623 }
2624 e2 |= DESC_TSS_BUSY_MASK;
2625 stl_kernel(ptr + 4, e2);
2626 }
2627 env->tr.selector = selector;
2628#ifdef VBOX
2629 Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2630 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2631 env->tr.flags, (RTSEL)(selector & 0xffff)));
2632#endif
2633}
2634
2635/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2636void helper_load_seg(int seg_reg, int selector)
2637{
2638 uint32_t e1, e2;
2639 int cpl, dpl, rpl;
2640 SegmentCache *dt;
2641#ifndef VBOX
2642 int index;
2643#else
2644 unsigned int index;
2645#endif
2646 target_ulong ptr;
2647
2648 selector &= 0xffff;
2649 cpl = env->hflags & HF_CPL_MASK;
2650
2651#ifdef VBOX
2652 /* Trying to load a selector with CPL=1? */
2653 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2654 {
2655 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
2656 selector = selector & 0xfffc;
2657 }
2658#endif
2659 if ((selector & 0xfffc) == 0) {
2660 /* null selector case */
2661 if (seg_reg == R_SS
2662#ifdef TARGET_X86_64
2663 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2664#endif
2665 )
2666 raise_exception_err(EXCP0D_GPF, 0);
2667 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2668 } else {
2669
2670 if (selector & 0x4)
2671 dt = &env->ldt;
2672 else
2673 dt = &env->gdt;
2674 index = selector & ~7;
2675 if ((index + 7) > dt->limit)
2676 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2677 ptr = dt->base + index;
2678 e1 = ldl_kernel(ptr);
2679 e2 = ldl_kernel(ptr + 4);
2680
2681 if (!(e2 & DESC_S_MASK))
2682 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2683 rpl = selector & 3;
2684 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2685 if (seg_reg == R_SS) {
2686 /* must be writable segment */
2687 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2688 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2689 if (rpl != cpl || dpl != cpl)
2690 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2691 } else {
2692 /* must be readable segment */
2693 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2694 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2695
2696 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2697 /* if not conforming code, test rights */
2698 if (dpl < cpl || dpl < rpl)
2699 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2700 }
2701 }
2702
2703 if (!(e2 & DESC_P_MASK)) {
2704 if (seg_reg == R_SS)
2705 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2706 else
2707 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2708 }
2709
2710 /* set the access bit if not already set */
2711 if (!(e2 & DESC_A_MASK)) {
2712 e2 |= DESC_A_MASK;
2713 stl_kernel(ptr + 4, e2);
2714 }
2715
2716 cpu_x86_load_seg_cache(env, seg_reg, selector,
2717 get_seg_base(e1, e2),
2718 get_seg_limit(e1, e2),
2719 e2);
2720#if 0
2721 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2722 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2723#endif
2724 }
2725}
2726
2727/* protected mode jump */
2728void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2729 int next_eip_addend)
2730{
2731 int gate_cs, type;
2732 uint32_t e1, e2, cpl, dpl, rpl, limit;
2733 target_ulong next_eip;
2734
2735#ifdef VBOX
2736 e1 = e2 = 0;
2737#endif
2738 if ((new_cs & 0xfffc) == 0)
2739 raise_exception_err(EXCP0D_GPF, 0);
2740 if (load_segment(&e1, &e2, new_cs) != 0)
2741 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2742 cpl = env->hflags & HF_CPL_MASK;
2743 if (e2 & DESC_S_MASK) {
2744 if (!(e2 & DESC_CS_MASK))
2745 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2746 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2747 if (e2 & DESC_C_MASK) {
2748 /* conforming code segment */
2749 if (dpl > cpl)
2750 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2751 } else {
2752 /* non conforming code segment */
2753 rpl = new_cs & 3;
2754 if (rpl > cpl)
2755 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2756 if (dpl != cpl)
2757 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2758 }
2759 if (!(e2 & DESC_P_MASK))
2760 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2761 limit = get_seg_limit(e1, e2);
2762 if (new_eip > limit &&
2763 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2764 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2765 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2766 get_seg_base(e1, e2), limit, e2);
2767 EIP = new_eip;
2768 } else {
2769 /* jump to call or task gate */
2770 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2771 rpl = new_cs & 3;
2772 cpl = env->hflags & HF_CPL_MASK;
2773 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2774 switch(type) {
2775 case 1: /* 286 TSS */
2776 case 9: /* 386 TSS */
2777 case 5: /* task gate */
2778 if (dpl < cpl || dpl < rpl)
2779 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2780 next_eip = env->eip + next_eip_addend;
2781 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2782 CC_OP = CC_OP_EFLAGS;
2783 break;
2784 case 4: /* 286 call gate */
2785 case 12: /* 386 call gate */
2786 if ((dpl < cpl) || (dpl < rpl))
2787 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2788 if (!(e2 & DESC_P_MASK))
2789 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2790 gate_cs = e1 >> 16;
2791 new_eip = (e1 & 0xffff);
2792 if (type == 12)
2793 new_eip |= (e2 & 0xffff0000);
2794 if (load_segment(&e1, &e2, gate_cs) != 0)
2795 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2796 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2797 /* must be code segment */
2798 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2799 (DESC_S_MASK | DESC_CS_MASK)))
2800 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2801 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2802 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2803 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2804 if (!(e2 & DESC_P_MASK))
2805#ifdef VBOX /* See page 3-514 of 253666.pdf */
2806 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2807#else
2808 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2809#endif
2810 limit = get_seg_limit(e1, e2);
2811 if (new_eip > limit)
2812 raise_exception_err(EXCP0D_GPF, 0);
2813 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2814 get_seg_base(e1, e2), limit, e2);
2815 EIP = new_eip;
2816 break;
2817 default:
2818 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2819 break;
2820 }
2821 }
2822}
2823
2824/* real mode call */
2825void helper_lcall_real(int new_cs, target_ulong new_eip1,
2826 int shift, int next_eip)
2827{
2828 int new_eip;
2829 uint32_t esp, esp_mask;
2830 target_ulong ssp;
2831
2832 new_eip = new_eip1;
2833 esp = ESP;
2834 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2835 ssp = env->segs[R_SS].base;
2836 if (shift) {
2837 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2838 PUSHL(ssp, esp, esp_mask, next_eip);
2839 } else {
2840 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2841 PUSHW(ssp, esp, esp_mask, next_eip);
2842 }
2843
2844 SET_ESP(esp, esp_mask);
2845 env->eip = new_eip;
2846 env->segs[R_CS].selector = new_cs;
2847 env->segs[R_CS].base = (new_cs << 4);
2848}
2849
2850/* protected mode call */
2851void helper_lcall_protected(int new_cs, target_ulong new_eip,
2852 int shift, int next_eip_addend)
2853{
2854 int new_stack, i;
2855 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2856 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2857 uint32_t val, limit, old_sp_mask;
2858 target_ulong ssp, old_ssp, next_eip;
2859
2860#ifdef VBOX
2861 ss = ss_e1 = ss_e2 = e1 = e2 = 0;
2862#endif
2863 next_eip = env->eip + next_eip_addend;
2864#ifdef DEBUG_PCALL
2865 if (loglevel & CPU_LOG_PCALL) {
2866 fprintf(logfile, "lcall %04x:%08x s=%d\n",
2867 new_cs, (uint32_t)new_eip, shift);
2868 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2869 }
2870#endif
2871 if ((new_cs & 0xfffc) == 0)
2872 raise_exception_err(EXCP0D_GPF, 0);
2873 if (load_segment(&e1, &e2, new_cs) != 0)
2874 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2875 cpl = env->hflags & HF_CPL_MASK;
2876#ifdef DEBUG_PCALL
2877 if (loglevel & CPU_LOG_PCALL) {
2878 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2879 }
2880#endif
2881 if (e2 & DESC_S_MASK) {
2882 if (!(e2 & DESC_CS_MASK))
2883 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2884 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2885 if (e2 & DESC_C_MASK) {
2886 /* conforming code segment */
2887 if (dpl > cpl)
2888 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2889 } else {
2890 /* non conforming code segment */
2891 rpl = new_cs & 3;
2892 if (rpl > cpl)
2893 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2894 if (dpl != cpl)
2895 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2896 }
2897 if (!(e2 & DESC_P_MASK))
2898 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2899
2900#ifdef TARGET_X86_64
2901 /* XXX: check 16/32 bit cases in long mode */
2902 if (shift == 2) {
2903 target_ulong rsp;
2904 /* 64 bit case */
2905 rsp = ESP;
2906 PUSHQ(rsp, env->segs[R_CS].selector);
2907 PUSHQ(rsp, next_eip);
2908 /* from this point, not restartable */
2909 ESP = rsp;
2910 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2911 get_seg_base(e1, e2),
2912 get_seg_limit(e1, e2), e2);
2913 EIP = new_eip;
2914 } else
2915#endif
2916 {
2917 sp = ESP;
2918 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2919 ssp = env->segs[R_SS].base;
2920 if (shift) {
2921 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2922 PUSHL(ssp, sp, sp_mask, next_eip);
2923 } else {
2924 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2925 PUSHW(ssp, sp, sp_mask, next_eip);
2926 }
2927
2928 limit = get_seg_limit(e1, e2);
2929 if (new_eip > limit)
2930 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2931 /* from this point, not restartable */
2932 SET_ESP(sp, sp_mask);
2933 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2934 get_seg_base(e1, e2), limit, e2);
2935 EIP = new_eip;
2936 }
2937 } else {
2938 /* check gate type */
2939 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2940 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2941 rpl = new_cs & 3;
2942 switch(type) {
2943 case 1: /* available 286 TSS */
2944 case 9: /* available 386 TSS */
2945 case 5: /* task gate */
2946 if (dpl < cpl || dpl < rpl)
2947 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2948 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2949 CC_OP = CC_OP_EFLAGS;
2950 return;
2951 case 4: /* 286 call gate */
2952 case 12: /* 386 call gate */
2953 break;
2954 default:
2955 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2956 break;
2957 }
2958 shift = type >> 3;
2959
2960 if (dpl < cpl || dpl < rpl)
2961 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2962 /* check valid bit */
2963 if (!(e2 & DESC_P_MASK))
2964 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2965 selector = e1 >> 16;
2966 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2967 param_count = e2 & 0x1f;
2968 if ((selector & 0xfffc) == 0)
2969 raise_exception_err(EXCP0D_GPF, 0);
2970
2971 if (load_segment(&e1, &e2, selector) != 0)
2972 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2973 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2974 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2975 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2976 if (dpl > cpl)
2977 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2978 if (!(e2 & DESC_P_MASK))
2979 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2980
2981 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2982 /* to inner privilege */
2983 get_ss_esp_from_tss(&ss, &sp, dpl);
2984#ifdef DEBUG_PCALL
2985 if (loglevel & CPU_LOG_PCALL)
2986 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2987 ss, sp, param_count, ESP);
2988#endif
2989 if ((ss & 0xfffc) == 0)
2990 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2991 if ((ss & 3) != dpl)
2992 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2993 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2994 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2995 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2996 if (ss_dpl != dpl)
2997 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2998 if (!(ss_e2 & DESC_S_MASK) ||
2999 (ss_e2 & DESC_CS_MASK) ||
3000 !(ss_e2 & DESC_W_MASK))
3001 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3002 if (!(ss_e2 & DESC_P_MASK))
3003#ifdef VBOX /* See page 3-99 of 253666.pdf */
3004 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
3005#else
3006 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3007#endif
3008
3009 // push_size = ((param_count * 2) + 8) << shift;
3010
3011 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
3012 old_ssp = env->segs[R_SS].base;
3013
3014 sp_mask = get_sp_mask(ss_e2);
3015 ssp = get_seg_base(ss_e1, ss_e2);
3016 if (shift) {
3017 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
3018 PUSHL(ssp, sp, sp_mask, ESP);
3019 for(i = param_count - 1; i >= 0; i--) {
3020 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
3021 PUSHL(ssp, sp, sp_mask, val);
3022 }
3023 } else {
3024 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
3025 PUSHW(ssp, sp, sp_mask, ESP);
3026 for(i = param_count - 1; i >= 0; i--) {
3027 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
3028 PUSHW(ssp, sp, sp_mask, val);
3029 }
3030 }
3031 new_stack = 1;
3032 } else {
3033 /* to same privilege */
3034 sp = ESP;
3035 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3036 ssp = env->segs[R_SS].base;
3037 // push_size = (4 << shift);
3038 new_stack = 0;
3039 }
3040
3041 if (shift) {
3042 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
3043 PUSHL(ssp, sp, sp_mask, next_eip);
3044 } else {
3045 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
3046 PUSHW(ssp, sp, sp_mask, next_eip);
3047 }
3048
3049 /* from this point, not restartable */
3050
3051 if (new_stack) {
3052 ss = (ss & ~3) | dpl;
3053 cpu_x86_load_seg_cache(env, R_SS, ss,
3054 ssp,
3055 get_seg_limit(ss_e1, ss_e2),
3056 ss_e2);
3057 }
3058
3059 selector = (selector & ~3) | dpl;
3060 cpu_x86_load_seg_cache(env, R_CS, selector,
3061 get_seg_base(e1, e2),
3062 get_seg_limit(e1, e2),
3063 e2);
3064 cpu_x86_set_cpl(env, dpl);
3065 SET_ESP(sp, sp_mask);
3066 EIP = offset;
3067 }
3068#ifdef USE_KQEMU
3069 if (kqemu_is_ok(env)) {
3070 env->exception_index = -1;
3071 cpu_loop_exit();
3072 }
3073#endif
3074}
3075
3076/* real and vm86 mode iret */
3077void helper_iret_real(int shift)
3078{
3079 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
3080 target_ulong ssp;
3081 int eflags_mask;
3082#ifdef VBOX
3083 bool fVME = false;
3084
3085 remR3TrapClear(env->pVM);
3086#endif /* VBOX */
3087
3088 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
3089 sp = ESP;
3090 ssp = env->segs[R_SS].base;
3091 if (shift == 1) {
3092 /* 32 bits */
3093 POPL(ssp, sp, sp_mask, new_eip);
3094 POPL(ssp, sp, sp_mask, new_cs);
3095 new_cs &= 0xffff;
3096 POPL(ssp, sp, sp_mask, new_eflags);
3097 } else {
3098 /* 16 bits */
3099 POPW(ssp, sp, sp_mask, new_eip);
3100 POPW(ssp, sp, sp_mask, new_cs);
3101 POPW(ssp, sp, sp_mask, new_eflags);
3102 }
3103#ifdef VBOX
3104 if ( (env->eflags & VM_MASK)
3105 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
3106 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
3107 {
3108 fVME = true;
3109 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
3110 /* if TF will be set -> #GP */
3111 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
3112 || (new_eflags & TF_MASK))
3113 raise_exception(EXCP0D_GPF);
3114 }
3115#endif /* VBOX */
3116 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
3117 env->segs[R_CS].selector = new_cs;
3118 env->segs[R_CS].base = (new_cs << 4);
3119 env->eip = new_eip;
3120#ifdef VBOX
3121 if (fVME)
3122 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3123 else
3124#endif
3125 if (env->eflags & VM_MASK)
3126 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
3127 else
3128 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
3129 if (shift == 0)
3130 eflags_mask &= 0xffff;
3131 load_eflags(new_eflags, eflags_mask);
3132 env->hflags2 &= ~HF2_NMI_MASK;
3133#ifdef VBOX
3134 if (fVME)
3135 {
3136 if (new_eflags & IF_MASK)
3137 env->eflags |= VIF_MASK;
3138 else
3139 env->eflags &= ~VIF_MASK;
3140 }
3141#endif /* VBOX */
3142}
3143
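/* On a return to an outer privilege level, data segment registers whose DPL
   is below the new CPL must be nullified (see the IRET/RET descriptions);
   FS/GS holding a null selector are left alone, see the XXX note below. */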
3144static inline void validate_seg(int seg_reg, int cpl)
3145{
3146 int dpl;
3147 uint32_t e2;
3148
3149 /* XXX: on x86_64, we do not want to nullify FS and GS because
3150 they may still contain a valid base. I would be interested to
3151 know how a real x86_64 CPU behaves */
3152 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3153 (env->segs[seg_reg].selector & 0xfffc) == 0)
3154 return;
3155
3156 e2 = env->segs[seg_reg].flags;
3157 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3158 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3159 /* data or non conforming code segment */
3160 if (dpl < cpl) {
3161 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3162 }
3163 }
3164}
3165
3166/* protected mode iret */
3167static inline void helper_ret_protected(int shift, int is_iret, int addend)
3168{
3169 uint32_t new_cs, new_eflags, new_ss;
3170 uint32_t new_es, new_ds, new_fs, new_gs;
3171 uint32_t e1, e2, ss_e1, ss_e2;
3172 int cpl, dpl, rpl, eflags_mask, iopl;
3173 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3174
3175#ifdef VBOX
3176 ss_e1 = ss_e2 = e1 = e2 = 0;
3177#endif
3178
3179#ifdef TARGET_X86_64
3180 if (shift == 2)
3181 sp_mask = -1;
3182 else
3183#endif
3184 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3185 sp = ESP;
3186 ssp = env->segs[R_SS].base;
3187 new_eflags = 0; /* avoid warning */
3188#ifdef TARGET_X86_64
3189 if (shift == 2) {
3190 POPQ(sp, new_eip);
3191 POPQ(sp, new_cs);
3192 new_cs &= 0xffff;
3193 if (is_iret) {
3194 POPQ(sp, new_eflags);
3195 }
3196 } else
3197#endif
3198 if (shift == 1) {
3199 /* 32 bits */
3200 POPL(ssp, sp, sp_mask, new_eip);
3201 POPL(ssp, sp, sp_mask, new_cs);
3202 new_cs &= 0xffff;
3203 if (is_iret) {
3204 POPL(ssp, sp, sp_mask, new_eflags);
3205#if defined(VBOX) && defined(DEBUG)
3206 printf("iret: new CS %04X\n", new_cs);
3207 printf("iret: new EIP %08X\n", (uint32_t)new_eip);
3208 printf("iret: new EFLAGS %08X\n", new_eflags);
3209 printf("iret: EAX=%08x\n", (uint32_t)EAX);
3210#endif
3211 if (new_eflags & VM_MASK)
3212 goto return_to_vm86;
3213 }
3214#ifdef VBOX
3215 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
3216 {
3217#ifdef DEBUG
3218 printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc);
3219#endif
3220 new_cs = new_cs & 0xfffc;
3221 }
3222#endif
3223 } else {
3224 /* 16 bits */
3225 POPW(ssp, sp, sp_mask, new_eip);
3226 POPW(ssp, sp, sp_mask, new_cs);
3227 if (is_iret)
3228 POPW(ssp, sp, sp_mask, new_eflags);
3229 }
3230#ifdef DEBUG_PCALL
3231 if (loglevel & CPU_LOG_PCALL) {
3232 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
3233 new_cs, new_eip, shift, addend);
3234 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
3235 }
3236#endif
3237 if ((new_cs & 0xfffc) == 0)
3238 {
3239#if defined(VBOX) && defined(DEBUG)
3240 printf("(new_cs & 0xfffc) == 0\n");
3241#endif
3242 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3243 }
3244 if (load_segment(&e1, &e2, new_cs) != 0)
3245 {
3246#if defined(VBOX) && defined(DEBUG)
3247 printf("load_segment failed\n");
3248#endif
3249 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3250 }
3251 if (!(e2 & DESC_S_MASK) ||
3252 !(e2 & DESC_CS_MASK))
3253 {
3254#if defined(VBOX) && defined(DEBUG)
3255 printf("e2 mask %08x\n", e2);
3256#endif
3257 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3258 }
3259 cpl = env->hflags & HF_CPL_MASK;
3260 rpl = new_cs & 3;
3261 if (rpl < cpl)
3262 {
3263#if defined(VBOX) && defined(DEBUG)
3264 printf("rpl < cpl (%d vs %d)\n", rpl, cpl);
3265#endif
3266 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3267 }
3268 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3269 if (e2 & DESC_C_MASK) {
3270 if (dpl > rpl)
3271 {
3272#if defined(VBOX) && defined(DEBUG)
3273 printf("dpl > rpl (%d vs %d)\n", dpl, rpl);
3274#endif
3275 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3276 }
3277 } else {
3278 if (dpl != rpl)
3279 {
3280#if defined(VBOX) && defined(DEBUG)
3281 printf("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2);
3282#endif
3283 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3284 }
3285 }
3286 if (!(e2 & DESC_P_MASK))
3287 {
3288#if defined(VBOX) && defined(DEBUG)
3289 printf("DESC_P_MASK e2=%08x\n", e2);
3290#endif
3291 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3292 }
3293
3294 sp += addend;
3295 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
3296 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
3297 /* return to same privilege level */
3298 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3299 get_seg_base(e1, e2),
3300 get_seg_limit(e1, e2),
3301 e2);
3302 } else {
3303 /* return to different privilege level */
3304#ifdef TARGET_X86_64
3305 if (shift == 2) {
3306 POPQ(sp, new_esp);
3307 POPQ(sp, new_ss);
3308 new_ss &= 0xffff;
3309 } else
3310#endif
3311 if (shift == 1) {
3312 /* 32 bits */
3313 POPL(ssp, sp, sp_mask, new_esp);
3314 POPL(ssp, sp, sp_mask, new_ss);
3315 new_ss &= 0xffff;
3316 } else {
3317 /* 16 bits */
3318 POPW(ssp, sp, sp_mask, new_esp);
3319 POPW(ssp, sp, sp_mask, new_ss);
3320 }
3321#ifdef DEBUG_PCALL
3322 if (loglevel & CPU_LOG_PCALL) {
3323 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
3324 new_ss, new_esp);
3325 }
3326#endif
3327 if ((new_ss & 0xfffc) == 0) {
3328#ifdef TARGET_X86_64
3329 /* NULL ss is allowed in long mode if cpl != 3 */
3330 /* XXX: test CS64 ? */
3331 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
3332 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3333 0, 0xffffffff,
3334 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3335 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
3336 DESC_W_MASK | DESC_A_MASK);
3337 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
3338 } else
3339#endif
3340 {
3341 raise_exception_err(EXCP0D_GPF, 0);
3342 }
3343 } else {
3344 if ((new_ss & 3) != rpl)
3345 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3346 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
3347 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3348 if (!(ss_e2 & DESC_S_MASK) ||
3349 (ss_e2 & DESC_CS_MASK) ||
3350 !(ss_e2 & DESC_W_MASK))
3351 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3352 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3353 if (dpl != rpl)
3354 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3355 if (!(ss_e2 & DESC_P_MASK))
3356 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3357 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3358 get_seg_base(ss_e1, ss_e2),
3359 get_seg_limit(ss_e1, ss_e2),
3360 ss_e2);
3361 }
3362
3363 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3364 get_seg_base(e1, e2),
3365 get_seg_limit(e1, e2),
3366 e2);
3367 cpu_x86_set_cpl(env, rpl);
3368 sp = new_esp;
3369#ifdef TARGET_X86_64
3370 if (env->hflags & HF_CS64_MASK)
3371 sp_mask = -1;
3372 else
3373#endif
3374 sp_mask = get_sp_mask(ss_e2);
3375
3376 /* validate data segments */
3377 validate_seg(R_ES, rpl);
3378 validate_seg(R_DS, rpl);
3379 validate_seg(R_FS, rpl);
3380 validate_seg(R_GS, rpl);
3381
3382 sp += addend;
3383 }
3384 SET_ESP(sp, sp_mask);
3385 env->eip = new_eip;
3386 if (is_iret) {
3387 /* NOTE: 'cpl' is the _old_ CPL */
3388 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3389 if (cpl == 0)
3390#ifdef VBOX
3391 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
3392#else
3393 eflags_mask |= IOPL_MASK;
3394#endif
3395 iopl = (env->eflags >> IOPL_SHIFT) & 3;
3396 if (cpl <= iopl)
3397 eflags_mask |= IF_MASK;
3398 if (shift == 0)
3399 eflags_mask &= 0xffff;
3400 load_eflags(new_eflags, eflags_mask);
3401 }
3402 return;
3403
3404 return_to_vm86:
3405 POPL(ssp, sp, sp_mask, new_esp);
3406 POPL(ssp, sp, sp_mask, new_ss);
3407 POPL(ssp, sp, sp_mask, new_es);
3408 POPL(ssp, sp, sp_mask, new_ds);
3409 POPL(ssp, sp, sp_mask, new_fs);
3410 POPL(ssp, sp, sp_mask, new_gs);
3411
3412 /* modify processor state */
3413 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
3414 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
3415 load_seg_vm(R_CS, new_cs & 0xffff);
3416 cpu_x86_set_cpl(env, 3);
3417 load_seg_vm(R_SS, new_ss & 0xffff);
3418 load_seg_vm(R_ES, new_es & 0xffff);
3419 load_seg_vm(R_DS, new_ds & 0xffff);
3420 load_seg_vm(R_FS, new_fs & 0xffff);
3421 load_seg_vm(R_GS, new_gs & 0xffff);
3422
3423 env->eip = new_eip & 0xffff;
3424 ESP = new_esp;
3425}
3426
3427void helper_iret_protected(int shift, int next_eip)
3428{
3429 int tss_selector, type;
3430 uint32_t e1, e2;
3431
3432#ifdef VBOX
3433 e1 = e2 = 0;
3434 remR3TrapClear(env->pVM);
3435#endif
3436
3437 /* specific case for TSS */
3438 if (env->eflags & NT_MASK) {
3439#ifdef TARGET_X86_64
3440 if (env->hflags & HF_LMA_MASK)
3441 raise_exception_err(EXCP0D_GPF, 0);
3442#endif
3443 tss_selector = lduw_kernel(env->tr.base + 0);
3444 if (tss_selector & 4)
3445 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3446 if (load_segment(&e1, &e2, tss_selector) != 0)
3447 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3448 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3449 /* NOTE: we check both segment and busy TSS */
3450 if (type != 3)
3451 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3452 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3453 } else {
3454 helper_ret_protected(shift, 1, 0);
3455 }
3456 env->hflags2 &= ~HF2_NMI_MASK;
3457#ifdef USE_KQEMU
3458 if (kqemu_is_ok(env)) {
3459 CC_OP = CC_OP_EFLAGS;
3460 env->exception_index = -1;
3461 cpu_loop_exit();
3462 }
3463#endif
3464}
3465
3466void helper_lret_protected(int shift, int addend)
3467{
3468 helper_ret_protected(shift, 0, addend);
3469#ifdef USE_KQEMU
3470 if (kqemu_is_ok(env)) {
3471 env->exception_index = -1;
3472 cpu_loop_exit();
3473 }
3474#endif
3475}
3476
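/* SYSENTER: force CPL 0 and load flat CS/SS derived from IA32_SYSENTER_CS
   (CS = MSR value, SS = MSR value + 8); ESP and EIP come from the
   SYSENTER_ESP/EIP MSRs. */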
3477void helper_sysenter(void)
3478{
3479 if (env->sysenter_cs == 0) {
3480 raise_exception_err(EXCP0D_GPF, 0);
3481 }
3482 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3483 cpu_x86_set_cpl(env, 0);
3484
3485#ifdef TARGET_X86_64
3486 if (env->hflags & HF_LMA_MASK) {
3487 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3488 0, 0xffffffff,
3489 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3490 DESC_S_MASK |
3491 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3492 } else
3493#endif
3494 {
3495 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3496 0, 0xffffffff,
3497 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3498 DESC_S_MASK |
3499 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3500 }
3501 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3502 0, 0xffffffff,
3503 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3504 DESC_S_MASK |
3505 DESC_W_MASK | DESC_A_MASK);
3506 ESP = env->sysenter_esp;
3507 EIP = env->sysenter_eip;
3508}
3509
3510void helper_sysexit(int dflag)
3511{
3512 int cpl;
3513
3514 cpl = env->hflags & HF_CPL_MASK;
3515 if (env->sysenter_cs == 0 || cpl != 0) {
3516 raise_exception_err(EXCP0D_GPF, 0);
3517 }
3518 cpu_x86_set_cpl(env, 3);
3519#ifdef TARGET_X86_64
3520 if (dflag == 2) {
3521 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3522 0, 0xffffffff,
3523 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3524 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3525 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3526 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3527 0, 0xffffffff,
3528 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3529 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3530 DESC_W_MASK | DESC_A_MASK);
3531 } else
3532#endif
3533 {
3534 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3535 0, 0xffffffff,
3536 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3537 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3538 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3539 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3540 0, 0xffffffff,
3541 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3542 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3543 DESC_W_MASK | DESC_A_MASK);
3544 }
3545 ESP = ECX;
3546 EIP = EDX;
3547#ifdef USE_KQEMU
3548 if (kqemu_is_ok(env)) {
3549 env->exception_index = -1;
3550 cpu_loop_exit();
3551 }
3552#endif
3553}
3554
3555#if defined(CONFIG_USER_ONLY)
3556target_ulong helper_read_crN(int reg)
3557{
3558 return 0;
3559}
3560
3561void helper_write_crN(int reg, target_ulong t0)
3562{
3563}
3564#else
3565target_ulong helper_read_crN(int reg)
3566{
3567 target_ulong val;
3568
3569 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3570 switch(reg) {
3571 default:
3572 val = env->cr[reg];
3573 break;
3574 case 8:
3575 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3576 val = cpu_get_apic_tpr(env);
3577 } else {
3578 val = env->v_tpr;
3579 }
3580 break;
3581 }
3582 return val;
3583}
3584
3585void helper_write_crN(int reg, target_ulong t0)
3586{
3587 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3588 switch(reg) {
3589 case 0:
3590 cpu_x86_update_cr0(env, t0);
3591 break;
3592 case 3:
3593 cpu_x86_update_cr3(env, t0);
3594 break;
3595 case 4:
3596 cpu_x86_update_cr4(env, t0);
3597 break;
3598 case 8:
3599 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3600 cpu_set_apic_tpr(env, t0);
3601 }
3602 env->v_tpr = t0 & 0x0f;
3603 break;
3604 default:
3605 env->cr[reg] = t0;
3606 break;
3607 }
3608}
3609#endif
3610
3611void helper_lmsw(target_ulong t0)
3612{
3613 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
3614 if already set to one. */
3615 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3616 helper_write_crN(0, t0);
3617}
3618
3619void helper_clts(void)
3620{
3621 env->cr[0] &= ~CR0_TS_MASK;
3622 env->hflags &= ~HF_TS_MASK;
3623}
3624
3625/* XXX: do more */
3626void helper_movl_drN_T0(int reg, target_ulong t0)
3627{
3628 env->dr[reg] = t0;
3629}
3630
3631void helper_invlpg(target_ulong addr)
3632{
3633 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3634 tlb_flush_page(env, addr);
3635}
3636
3637void helper_rdtsc(void)
3638{
3639 uint64_t val;
3640
3641 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3642 raise_exception(EXCP0D_GPF);
3643 }
3644 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3645
3646 val = cpu_get_tsc(env) + env->tsc_offset;
3647 EAX = (uint32_t)(val);
3648 EDX = (uint32_t)(val >> 32);
3649}
3650
3651#ifdef VBOX
3652void helper_rdtscp(void)
3653{
3654 uint64_t val;
3655 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3656 raise_exception(EXCP0D_GPF);
3657 }
3658
3659 val = cpu_get_tsc(env);
3660 EAX = (uint32_t)(val);
3661 EDX = (uint32_t)(val >> 32);
3662 if (cpu_rdmsr(env, MSR_K8_TSC_AUX, &val) == 0)
3663 ECX = (uint32_t)(val);
3664 else
3665 ECX = 0;
3666}
3667#endif
3668
3669void helper_rdpmc(void)
3670{
3671#ifdef VBOX
3672 /* If X86_CR4_PCE is *not* set, then CPL must be zero. */
3673 if (!(env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3674 raise_exception(EXCP0D_GPF);
3675 }
3676 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
3677 EAX = 0;
3678 EDX = 0;
3679#else
3680 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3681 raise_exception(EXCP0D_GPF);
3682 }
3683 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3684
3685 /* currently unimplemented */
3686 raise_exception_err(EXCP06_ILLOP, 0);
3687#endif
3688}
3689
3690#if defined(CONFIG_USER_ONLY)
3691void helper_wrmsr(void)
3692{
3693}
3694
3695void helper_rdmsr(void)
3696{
3697}
3698#else
3699void helper_wrmsr(void)
3700{
3701 uint64_t val;
3702
3703 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3704
3705 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3706
3707 switch((uint32_t)ECX) {
3708 case MSR_IA32_SYSENTER_CS:
3709 env->sysenter_cs = val & 0xffff;
3710 break;
3711 case MSR_IA32_SYSENTER_ESP:
3712 env->sysenter_esp = val;
3713 break;
3714 case MSR_IA32_SYSENTER_EIP:
3715 env->sysenter_eip = val;
3716 break;
3717 case MSR_IA32_APICBASE:
3718#ifndef VBOX /* The CPUMSetGuestMsr call below does this now. */
3719 cpu_set_apic_base(env, val);
3720#endif
3721 break;
3722 case MSR_EFER:
3723 {
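            /* Only EFER bits whose corresponding features are advertised by
               CPUID may be changed; all other bits keep their old value. */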
3724 uint64_t update_mask;
3725 update_mask = 0;
3726 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3727 update_mask |= MSR_EFER_SCE;
3728 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3729 update_mask |= MSR_EFER_LME;
3730 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3731 update_mask |= MSR_EFER_FFXSR;
3732 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3733 update_mask |= MSR_EFER_NXE;
3734 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3735 update_mask |= MSR_EFER_SVME;
3736 cpu_load_efer(env, (env->efer & ~update_mask) |
3737 (val & update_mask));
3738 }
3739 break;
3740 case MSR_STAR:
3741 env->star = val;
3742 break;
3743 case MSR_PAT:
3744 env->pat = val;
3745 break;
3746 case MSR_VM_HSAVE_PA:
3747 env->vm_hsave = val;
3748 break;
3749#ifdef TARGET_X86_64
3750 case MSR_LSTAR:
3751 env->lstar = val;
3752 break;
3753 case MSR_CSTAR:
3754 env->cstar = val;
3755 break;
3756 case MSR_FMASK:
3757 env->fmask = val;
3758 break;
3759 case MSR_FSBASE:
3760 env->segs[R_FS].base = val;
3761 break;
3762 case MSR_GSBASE:
3763 env->segs[R_GS].base = val;
3764 break;
3765 case MSR_KERNELGSBASE:
3766 env->kernelgsbase = val;
3767 break;
3768#endif
3769 default:
3770#ifndef VBOX
3771 /* XXX: exception ? */
3772#endif
3773 break;
3774 }
3775
3776#ifdef VBOX
3777 /* call CPUM. */
3778 if (cpu_wrmsr(env, (uint32_t)ECX, val) != 0)
3779 {
3780 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3781 }
3782#endif
3783}
3784
3785void helper_rdmsr(void)
3786{
3787 uint64_t val;
3788 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3789
3790 switch((uint32_t)ECX) {
3791 case MSR_IA32_SYSENTER_CS:
3792 val = env->sysenter_cs;
3793 break;
3794 case MSR_IA32_SYSENTER_ESP:
3795 val = env->sysenter_esp;
3796 break;
3797 case MSR_IA32_SYSENTER_EIP:
3798 val = env->sysenter_eip;
3799 break;
3800 case MSR_IA32_APICBASE:
3801 val = cpu_get_apic_base(env);
3802 break;
3803 case MSR_EFER:
3804 val = env->efer;
3805 break;
3806 case MSR_STAR:
3807 val = env->star;
3808 break;
3809 case MSR_PAT:
3810 val = env->pat;
3811 break;
3812 case MSR_VM_HSAVE_PA:
3813 val = env->vm_hsave;
3814 break;
3815#ifndef VBOX /* forward to CPUMQueryGuestMsr. */
3816 case MSR_IA32_PERF_STATUS:
3817 /* tsc_increment_by_tick */
3818 val = 1000ULL;
3819 /* CPU multiplier */
3820 val |= ((uint64_t)4ULL << 40);
3821 break;
3822#endif
3823#ifdef TARGET_X86_64
3824 case MSR_LSTAR:
3825 val = env->lstar;
3826 break;
3827 case MSR_CSTAR:
3828 val = env->cstar;
3829 break;
3830 case MSR_FMASK:
3831 val = env->fmask;
3832 break;
3833 case MSR_FSBASE:
3834 val = env->segs[R_FS].base;
3835 break;
3836 case MSR_GSBASE:
3837 val = env->segs[R_GS].base;
3838 break;
3839 case MSR_KERNELGSBASE:
3840 val = env->kernelgsbase;
3841 break;
3842#endif
3843#ifdef USE_KQEMU
3844 case MSR_QPI_COMMBASE:
3845 if (env->kqemu_enabled) {
3846 val = kqemu_comm_base;
3847 } else {
3848 val = 0;
3849 }
3850 break;
3851#endif
3852 default:
3853#ifndef VBOX
3854 /* XXX: exception ? */
3855 val = 0;
3856#else /* VBOX */
3857 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
3858 {
3859 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3860 val = 0;
3861 }
3862#endif
3863 break;
3864 }
3865 EAX = (uint32_t)(val);
3866 EDX = (uint32_t)(val >> 32);
3867
3868#ifdef VBOX_STRICT
3869 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
3870 val = 0;
3871 AssertMsg(val == RT_MAKE_U64(EAX, EDX), ("idMsr=%#x val=%#llx eax:edx=%#llx\n", (uint32_t)ECX, val, RT_MAKE_U64(EAX, EDX)));
3872#endif
3873}
3874#endif
3875
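/* LSL: the system descriptor types accepted below are 1/3 (available/busy
   16-bit TSS), 2 (LDT) and 9/11 (available/busy 32-bit TSS); anything else
   clears ZF and returns 0. */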
3876target_ulong helper_lsl(target_ulong selector1)
3877{
3878 unsigned int limit;
3879 uint32_t e1, e2, eflags, selector;
3880 int rpl, dpl, cpl, type;
3881
3882 selector = selector1 & 0xffff;
3883 eflags = cc_table[CC_OP].compute_all();
3884 if (load_segment(&e1, &e2, selector) != 0)
3885 goto fail;
3886 rpl = selector & 3;
3887 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3888 cpl = env->hflags & HF_CPL_MASK;
3889 if (e2 & DESC_S_MASK) {
3890 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3891 /* conforming */
3892 } else {
3893 if (dpl < cpl || dpl < rpl)
3894 goto fail;
3895 }
3896 } else {
3897 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3898 switch(type) {
3899 case 1:
3900 case 2:
3901 case 3:
3902 case 9:
3903 case 11:
3904 break;
3905 default:
3906 goto fail;
3907 }
3908 if (dpl < cpl || dpl < rpl) {
3909 fail:
3910 CC_SRC = eflags & ~CC_Z;
3911 return 0;
3912 }
3913 }
3914 limit = get_seg_limit(e1, e2);
3915 CC_SRC = eflags | CC_Z;
3916 return limit;
3917}
3918
3919target_ulong helper_lar(target_ulong selector1)
3920{
3921 uint32_t e1, e2, eflags, selector;
3922 int rpl, dpl, cpl, type;
3923
3924 selector = selector1 & 0xffff;
3925 eflags = cc_table[CC_OP].compute_all();
3926 if ((selector & 0xfffc) == 0)
3927 goto fail;
3928 if (load_segment(&e1, &e2, selector) != 0)
3929 goto fail;
3930 rpl = selector & 3;
3931 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3932 cpl = env->hflags & HF_CPL_MASK;
3933 if (e2 & DESC_S_MASK) {
3934 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3935 /* conforming */
3936 } else {
3937 if (dpl < cpl || dpl < rpl)
3938 goto fail;
3939 }
3940 } else {
3941 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3942 switch(type) {
3943 case 1:
3944 case 2:
3945 case 3:
3946 case 4:
3947 case 5:
3948 case 9:
3949 case 11:
3950 case 12:
3951 break;
3952 default:
3953 goto fail;
3954 }
3955 if (dpl < cpl || dpl < rpl) {
3956 fail:
3957 CC_SRC = eflags & ~CC_Z;
3958 return 0;
3959 }
3960 }
3961 CC_SRC = eflags | CC_Z;
3962 return e2 & 0x00f0ff00;
3963}
3964
3965void helper_verr(target_ulong selector1)
3966{
3967 uint32_t e1, e2, eflags, selector;
3968 int rpl, dpl, cpl;
3969
3970 selector = selector1 & 0xffff;
3971 eflags = cc_table[CC_OP].compute_all();
3972 if ((selector & 0xfffc) == 0)
3973 goto fail;
3974 if (load_segment(&e1, &e2, selector) != 0)
3975 goto fail;
3976 if (!(e2 & DESC_S_MASK))
3977 goto fail;
3978 rpl = selector & 3;
3979 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3980 cpl = env->hflags & HF_CPL_MASK;
3981 if (e2 & DESC_CS_MASK) {
3982 if (!(e2 & DESC_R_MASK))
3983 goto fail;
3984 if (!(e2 & DESC_C_MASK)) {
3985 if (dpl < cpl || dpl < rpl)
3986 goto fail;
3987 }
3988 } else {
3989 if (dpl < cpl || dpl < rpl) {
3990 fail:
3991 CC_SRC = eflags & ~CC_Z;
3992 return;
3993 }
3994 }
3995 CC_SRC = eflags | CC_Z;
3996}
3997
3998void helper_verw(target_ulong selector1)
3999{
4000 uint32_t e1, e2, eflags, selector;
4001 int rpl, dpl, cpl;
4002
4003 selector = selector1 & 0xffff;
4004 eflags = cc_table[CC_OP].compute_all();
4005 if ((selector & 0xfffc) == 0)
4006 goto fail;
4007 if (load_segment(&e1, &e2, selector) != 0)
4008 goto fail;
4009 if (!(e2 & DESC_S_MASK))
4010 goto fail;
4011 rpl = selector & 3;
4012 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4013 cpl = env->hflags & HF_CPL_MASK;
4014 if (e2 & DESC_CS_MASK) {
4015 goto fail;
4016 } else {
4017 if (dpl < cpl || dpl < rpl)
4018 goto fail;
4019 if (!(e2 & DESC_W_MASK)) {
4020 fail:
4021 CC_SRC = eflags & ~CC_Z;
4022 return;
4023 }
4024 }
4025 CC_SRC = eflags | CC_Z;
4026}
4027
4028/* x87 FPU helpers */
4029
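/* Sets exception bits in the FPU status word; if any of them is unmasked in the control word, the error-summary and busy flags are raised as well. */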
4030static void fpu_set_exception(int mask)
4031{
4032 env->fpus |= mask;
4033 if (env->fpus & (~env->fpuc & FPUC_EM))
4034 env->fpus |= FPUS_SE | FPUS_B;
4035}
4036
4037static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4038{
4039 if (b == 0.0)
4040 fpu_set_exception(FPUS_ZE);
4041 return a / b;
4042}
4043
4044void fpu_raise_exception(void)
4045{
4046 if (env->cr[0] & CR0_NE_MASK) {
4047 raise_exception(EXCP10_COPR);
4048 }
4049#if !defined(CONFIG_USER_ONLY)
4050 else {
4051 cpu_set_ferr(env);
4052 }
4053#endif
4054}
4055
4056void helper_flds_FT0(uint32_t val)
4057{
4058 union {
4059 float32 f;
4060 uint32_t i;
4061 } u;
4062 u.i = val;
4063 FT0 = float32_to_floatx(u.f, &env->fp_status);
4064}
4065
4066void helper_fldl_FT0(uint64_t val)
4067{
4068 union {
4069 float64 f;
4070 uint64_t i;
4071 } u;
4072 u.i = val;
4073 FT0 = float64_to_floatx(u.f, &env->fp_status);
4074}
4075
4076void helper_fildl_FT0(int32_t val)
4077{
4078 FT0 = int32_to_floatx(val, &env->fp_status);
4079}
4080
4081void helper_flds_ST0(uint32_t val)
4082{
4083 int new_fpstt;
4084 union {
4085 float32 f;
4086 uint32_t i;
4087 } u;
4088 new_fpstt = (env->fpstt - 1) & 7;
4089 u.i = val;
4090 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4091 env->fpstt = new_fpstt;
4092 env->fptags[new_fpstt] = 0; /* validate stack entry */
4093}
4094
4095void helper_fldl_ST0(uint64_t val)
4096{
4097 int new_fpstt;
4098 union {
4099 float64 f;
4100 uint64_t i;
4101 } u;
4102 new_fpstt = (env->fpstt - 1) & 7;
4103 u.i = val;
4104 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4105 env->fpstt = new_fpstt;
4106 env->fptags[new_fpstt] = 0; /* validate stack entry */
4107}
4108
4109void helper_fildl_ST0(int32_t val)
4110{
4111 int new_fpstt;
4112 new_fpstt = (env->fpstt - 1) & 7;
4113 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4114 env->fpstt = new_fpstt;
4115 env->fptags[new_fpstt] = 0; /* validate stack entry */
4116}
4117
4118void helper_fildll_ST0(int64_t val)
4119{
4120 int new_fpstt;
4121 new_fpstt = (env->fpstt - 1) & 7;
4122 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4123 env->fpstt = new_fpstt;
4124 env->fptags[new_fpstt] = 0; /* validate stack entry */
4125}
4126
4127#ifndef VBOX
4128uint32_t helper_fsts_ST0(void)
4129#else
4130RTCCUINTREG helper_fsts_ST0(void)
4131#endif
4132{
4133 union {
4134 float32 f;
4135 uint32_t i;
4136 } u;
4137 u.f = floatx_to_float32(ST0, &env->fp_status);
4138 return u.i;
4139}
4140
4141uint64_t helper_fstl_ST0(void)
4142{
4143 union {
4144 float64 f;
4145 uint64_t i;
4146 } u;
4147 u.f = floatx_to_float64(ST0, &env->fp_status);
4148 return u.i;
4149}
4150#ifndef VBOX
4151int32_t helper_fist_ST0(void)
4152#else
4153RTCCINTREG helper_fist_ST0(void)
4154#endif
4155{
4156 int32_t val;
4157 val = floatx_to_int32(ST0, &env->fp_status);
4158 if (val != (int16_t)val)
4159 val = -32768;
4160 return val;
4161}
4162
4163#ifndef VBOX
4164int32_t helper_fistl_ST0(void)
4165#else
4166RTCCINTREG helper_fistl_ST0(void)
4167#endif
4168{
4169 int32_t val;
4170 val = floatx_to_int32(ST0, &env->fp_status);
4171 return val;
4172}
4173
4174int64_t helper_fistll_ST0(void)
4175{
4176 int64_t val;
4177 val = floatx_to_int64(ST0, &env->fp_status);
4178 return val;
4179}
4180
4181#ifndef VBOX
4182int32_t helper_fistt_ST0(void)
4183#else
4184RTCCINTREG helper_fistt_ST0(void)
4185#endif
4186{
4187 int32_t val;
4188 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4189 if (val != (int16_t)val)
4190 val = -32768;
4191 return val;
4192}
4193
4194#ifndef VBOX
4195int32_t helper_fisttl_ST0(void)
4196#else
4197RTCCINTREG helper_fisttl_ST0(void)
4198#endif
4199{
4200 int32_t val;
4201 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4202 return val;
4203}
4204
4205int64_t helper_fisttll_ST0(void)
4206{
4207 int64_t val;
4208 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4209 return val;
4210}
4211
4212void helper_fldt_ST0(target_ulong ptr)
4213{
4214 int new_fpstt;
4215 new_fpstt = (env->fpstt - 1) & 7;
4216 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4217 env->fpstt = new_fpstt;
4218 env->fptags[new_fpstt] = 0; /* validate stack entry */
4219}
4220
4221void helper_fstt_ST0(target_ulong ptr)
4222{
4223 helper_fstt(ST0, ptr);
4224}
4225
4226void helper_fpush(void)
4227{
4228 fpush();
4229}
4230
4231void helper_fpop(void)
4232{
4233 fpop();
4234}
4235
4236void helper_fdecstp(void)
4237{
4238 env->fpstt = (env->fpstt - 1) & 7;
4239 env->fpus &= (~0x4700);
4240}
4241
4242void helper_fincstp(void)
4243{
4244 env->fpstt = (env->fpstt + 1) & 7;
4245 env->fpus &= (~0x4700);
4246}
4247
4248/* FPU move */
4249
4250void helper_ffree_STN(int st_index)
4251{
4252 env->fptags[(env->fpstt + st_index) & 7] = 1;
4253}
4254
4255void helper_fmov_ST0_FT0(void)
4256{
4257 ST0 = FT0;
4258}
4259
4260void helper_fmov_FT0_STN(int st_index)
4261{
4262 FT0 = ST(st_index);
4263}
4264
4265void helper_fmov_ST0_STN(int st_index)
4266{
4267 ST0 = ST(st_index);
4268}
4269
4270void helper_fmov_STN_ST0(int st_index)
4271{
4272 ST(st_index) = ST0;
4273}
4274
4275void helper_fxchg_ST0_STN(int st_index)
4276{
4277 CPU86_LDouble tmp;
4278 tmp = ST(st_index);
4279 ST(st_index) = ST0;
4280 ST0 = tmp;
4281}
4282
4283/* FPU operations */
4284
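/* Maps the floatx_compare() result (-1 less, 0 equal, 1 greater, 2 unordered), indexed as ret + 1, onto the x87 condition codes C0/C2/C3. */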
4285static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
4286
4287void helper_fcom_ST0_FT0(void)
4288{
4289 int ret;
4290
4291 ret = floatx_compare(ST0, FT0, &env->fp_status);
4292 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4293 FORCE_RET();
4294}
4295
4296void helper_fucom_ST0_FT0(void)
4297{
4298 int ret;
4299
4300 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4301 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret+ 1];
4302 FORCE_RET();
4303}
4304
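/* Same mapping for FCOMI/FUCOMI, but into the EFLAGS CF/ZF/PF bits instead of the x87 condition codes. */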
4305static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
4306
4307void helper_fcomi_ST0_FT0(void)
4308{
4309 int eflags;
4310 int ret;
4311
4312 ret = floatx_compare(ST0, FT0, &env->fp_status);
4313 eflags = cc_table[CC_OP].compute_all();
4314 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4315 CC_SRC = eflags;
4316 FORCE_RET();
4317}
4318
4319void helper_fucomi_ST0_FT0(void)
4320{
4321 int eflags;
4322 int ret;
4323
4324 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4325 eflags = cc_table[CC_OP].compute_all();
4326 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4327 CC_SRC = eflags;
4328 FORCE_RET();
4329}
4330
4331void helper_fadd_ST0_FT0(void)
4332{
4333 ST0 += FT0;
4334}
4335
4336void helper_fmul_ST0_FT0(void)
4337{
4338 ST0 *= FT0;
4339}
4340
4341void helper_fsub_ST0_FT0(void)
4342{
4343 ST0 -= FT0;
4344}
4345
4346void helper_fsubr_ST0_FT0(void)
4347{
4348 ST0 = FT0 - ST0;
4349}
4350
4351void helper_fdiv_ST0_FT0(void)
4352{
4353 ST0 = helper_fdiv(ST0, FT0);
4354}
4355
4356void helper_fdivr_ST0_FT0(void)
4357{
4358 ST0 = helper_fdiv(FT0, ST0);
4359}
4360
4361/* fp operations between STN and ST0 */
4362
4363void helper_fadd_STN_ST0(int st_index)
4364{
4365 ST(st_index) += ST0;
4366}
4367
4368void helper_fmul_STN_ST0(int st_index)
4369{
4370 ST(st_index) *= ST0;
4371}
4372
4373void helper_fsub_STN_ST0(int st_index)
4374{
4375 ST(st_index) -= ST0;
4376}
4377
4378void helper_fsubr_STN_ST0(int st_index)
4379{
4380 CPU86_LDouble *p;
4381 p = &ST(st_index);
4382 *p = ST0 - *p;
4383}
4384
4385void helper_fdiv_STN_ST0(int st_index)
4386{
4387 CPU86_LDouble *p;
4388 p = &ST(st_index);
4389 *p = helper_fdiv(*p, ST0);
4390}
4391
4392void helper_fdivr_STN_ST0(int st_index)
4393{
4394 CPU86_LDouble *p;
4395 p = &ST(st_index);
4396 *p = helper_fdiv(ST0, *p);
4397}
4398
4399/* misc FPU operations */
4400void helper_fchs_ST0(void)
4401{
4402 ST0 = floatx_chs(ST0);
4403}
4404
4405void helper_fabs_ST0(void)
4406{
4407 ST0 = floatx_abs(ST0);
4408}
4409
4410void helper_fld1_ST0(void)
4411{
4412 ST0 = f15rk[1];
4413}
4414
4415void helper_fldl2t_ST0(void)
4416{
4417 ST0 = f15rk[6];
4418}
4419
4420void helper_fldl2e_ST0(void)
4421{
4422 ST0 = f15rk[5];
4423}
4424
4425void helper_fldpi_ST0(void)
4426{
4427 ST0 = f15rk[2];
4428}
4429
4430void helper_fldlg2_ST0(void)
4431{
4432 ST0 = f15rk[3];
4433}
4434
4435void helper_fldln2_ST0(void)
4436{
4437 ST0 = f15rk[4];
4438}
4439
4440void helper_fldz_ST0(void)
4441{
4442 ST0 = f15rk[0];
4443}
4444
4445void helper_fldz_FT0(void)
4446{
4447 FT0 = f15rk[0];
4448}
4449
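/* FNSTSW: returns the status word with the current top-of-stack (fpstt) merged into the TOP field (bits 11..13). */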
4450#ifndef VBOX
4451uint32_t helper_fnstsw(void)
4452#else
4453RTCCUINTREG helper_fnstsw(void)
4454#endif
4455{
4456 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4457}
4458
4459#ifndef VBOX
4460uint32_t helper_fnstcw(void)
4461#else
4462RTCCUINTREG helper_fnstcw(void)
4463#endif
4464{
4465 return env->fpuc;
4466}
4467
4468static void update_fp_status(void)
4469{
4470 int rnd_type;
4471
4472 /* set rounding mode */
4473 switch(env->fpuc & RC_MASK) {
4474 default:
4475 case RC_NEAR:
4476 rnd_type = float_round_nearest_even;
4477 break;
4478 case RC_DOWN:
4479 rnd_type = float_round_down;
4480 break;
4481 case RC_UP:
4482 rnd_type = float_round_up;
4483 break;
4484 case RC_CHOP:
4485 rnd_type = float_round_to_zero;
4486 break;
4487 }
4488 set_float_rounding_mode(rnd_type, &env->fp_status);
4489#ifdef FLOATX80
4490 switch((env->fpuc >> 8) & 3) {
4491 case 0:
4492 rnd_type = 32;
4493 break;
4494 case 2:
4495 rnd_type = 64;
4496 break;
4497 case 3:
4498 default:
4499 rnd_type = 80;
4500 break;
4501 }
4502 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4503#endif
4504}
4505
4506void helper_fldcw(uint32_t val)
4507{
4508 env->fpuc = val;
4509 update_fp_status();
4510}
4511
4512void helper_fclex(void)
4513{
4514 env->fpus &= 0x7f00;
4515}
4516
4517void helper_fwait(void)
4518{
4519 if (env->fpus & FPUS_SE)
4520 fpu_raise_exception();
4521 FORCE_RET();
4522}
4523
4524void helper_fninit(void)
4525{
4526 env->fpus = 0;
4527 env->fpstt = 0;
4528 env->fpuc = 0x37f;
4529 env->fptags[0] = 1;
4530 env->fptags[1] = 1;
4531 env->fptags[2] = 1;
4532 env->fptags[3] = 1;
4533 env->fptags[4] = 1;
4534 env->fptags[5] = 1;
4535 env->fptags[6] = 1;
4536 env->fptags[7] = 1;
4537}
4538
4539/* BCD ops */
4540
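/* FBLD/FBSTP use the 80-bit packed BCD format: bytes 0..8 hold 18 BCD digits (two per byte, least significant first) and byte 9 holds the sign in its top bit. */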
4541void helper_fbld_ST0(target_ulong ptr)
4542{
4543 CPU86_LDouble tmp;
4544 uint64_t val;
4545 unsigned int v;
4546 int i;
4547
4548 val = 0;
4549 for(i = 8; i >= 0; i--) {
4550 v = ldub(ptr + i);
4551 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4552 }
4553 tmp = val;
4554 if (ldub(ptr + 9) & 0x80)
4555 tmp = -tmp;
4556 fpush();
4557 ST0 = tmp;
4558}
4559
4560void helper_fbst_ST0(target_ulong ptr)
4561{
4562 int v;
4563 target_ulong mem_ref, mem_end;
4564 int64_t val;
4565
4566 val = floatx_to_int64(ST0, &env->fp_status);
4567 mem_ref = ptr;
4568 mem_end = mem_ref + 9;
4569 if (val < 0) {
4570 stb(mem_end, 0x80);
4571 val = -val;
4572 } else {
4573 stb(mem_end, 0x00);
4574 }
4575 while (mem_ref < mem_end) {
4576 if (val == 0)
4577 break;
4578 v = val % 100;
4579 val = val / 100;
4580 v = ((v / 10) << 4) | (v % 10);
4581 stb(mem_ref++, v);
4582 }
4583 while (mem_ref < mem_end) {
4584 stb(mem_ref++, 0);
4585 }
4586}
4587
4588void helper_f2xm1(void)
4589{
4590 ST0 = pow(2.0,ST0) - 1.0;
4591}
4592
4593void helper_fyl2x(void)
4594{
4595 CPU86_LDouble fptemp;
4596
4597 fptemp = ST0;
4598 if (fptemp>0.0){
4599 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4600 ST1 *= fptemp;
4601 fpop();
4602 } else {
4603 env->fpus &= (~0x4700);
4604 env->fpus |= 0x400;
4605 }
4606}
4607
4608void helper_fptan(void)
4609{
4610 CPU86_LDouble fptemp;
4611
4612 fptemp = ST0;
4613 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4614 env->fpus |= 0x400;
4615 } else {
4616 ST0 = tan(fptemp);
4617 fpush();
4618 ST0 = 1.0;
4619 env->fpus &= (~0x400); /* C2 <-- 0 */
4620 /* the above code is for |arg| < 2**52 only */
4621 }
4622}
4623
4624void helper_fpatan(void)
4625{
4626 CPU86_LDouble fptemp, fpsrcop;
4627
4628 fpsrcop = ST1;
4629 fptemp = ST0;
4630 ST1 = atan2(fpsrcop,fptemp);
4631 fpop();
4632}
4633
4634void helper_fxtract(void)
4635{
4636 CPU86_LDoubleU temp;
4637 unsigned int expdif;
4638
4639 temp.d = ST0;
4640 expdif = EXPD(temp) - EXPBIAS;
4641 /*DP exponent bias*/
4642 ST0 = expdif;
4643 fpush();
4644 BIASEXPONENT(temp);
4645 ST0 = temp.d;
4646}
4647
4648void helper_fprem1(void)
4649{
4650 CPU86_LDouble dblq, fpsrcop, fptemp;
4651 CPU86_LDoubleU fpsrcop1, fptemp1;
4652 int expdif;
4653 signed long long int q;
4654
4655#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
4656 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4657#else
4658 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4659#endif
4660 ST0 = 0.0 / 0.0; /* NaN */
4661 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4662 return;
4663 }
4664
4665 fpsrcop = ST0;
4666 fptemp = ST1;
4667 fpsrcop1.d = fpsrcop;
4668 fptemp1.d = fptemp;
4669 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4670
4671 if (expdif < 0) {
4672 /* optimisation? taken from the AMD docs */
4673 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4674 /* ST0 is unchanged */
4675 return;
4676 }
4677
4678 if (expdif < 53) {
4679 dblq = fpsrcop / fptemp;
4680 /* round dblq towards nearest integer */
4681 dblq = rint(dblq);
4682 ST0 = fpsrcop - fptemp * dblq;
4683
4684 /* convert dblq to q by truncating towards zero */
4685 if (dblq < 0.0)
4686 q = (signed long long int)(-dblq);
4687 else
4688 q = (signed long long int)dblq;
4689
4690 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4691 /* (C0,C3,C1) <-- (q2,q1,q0) */
4692 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4693 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4694 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4695 } else {
4696 env->fpus |= 0x400; /* C2 <-- 1 */
4697 fptemp = pow(2.0, expdif - 50);
4698 fpsrcop = (ST0 / ST1) / fptemp;
4699 /* fpsrcop = integer obtained by chopping */
4700 fpsrcop = (fpsrcop < 0.0) ?
4701 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4702 ST0 -= (ST1 * fpsrcop * fptemp);
4703 }
4704}
4705
4706void helper_fprem(void)
4707{
4708 CPU86_LDouble dblq, fpsrcop, fptemp;
4709 CPU86_LDoubleU fpsrcop1, fptemp1;
4710 int expdif;
4711 signed long long int q;
4712
4713#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
4714 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4715#else
4716 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4717#endif
4718 ST0 = 0.0 / 0.0; /* NaN */
4719 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4720 return;
4721 }
4722
4723 fpsrcop = (CPU86_LDouble)ST0;
4724 fptemp = (CPU86_LDouble)ST1;
4725 fpsrcop1.d = fpsrcop;
4726 fptemp1.d = fptemp;
4727 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4728
4729 if (expdif < 0) {
4730 /* optimisation? taken from the AMD docs */
4731 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4732 /* ST0 is unchanged */
4733 return;
4734 }
4735
4736 if ( expdif < 53 ) {
4737 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4738 /* round dblq towards zero */
4739 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4740 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4741
4742 /* convert dblq to q by truncating towards zero */
4743 if (dblq < 0.0)
4744 q = (signed long long int)(-dblq);
4745 else
4746 q = (signed long long int)dblq;
4747
4748 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4749 /* (C0,C3,C1) <-- (q2,q1,q0) */
4750 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4751 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4752 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4753 } else {
4754 int N = 32 + (expdif % 32); /* as per AMD docs */
4755 env->fpus |= 0x400; /* C2 <-- 1 */
4756 fptemp = pow(2.0, (double)(expdif - N));
4757 fpsrcop = (ST0 / ST1) / fptemp;
4758 /* fpsrcop = integer obtained by chopping */
4759 fpsrcop = (fpsrcop < 0.0) ?
4760 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4761 ST0 -= (ST1 * fpsrcop * fptemp);
4762 }
4763}
4764
4765void helper_fyl2xp1(void)
4766{
4767 CPU86_LDouble fptemp;
4768
4769 fptemp = ST0;
4770 if ((fptemp+1.0)>0.0) {
4771 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4772 ST1 *= fptemp;
4773 fpop();
4774 } else {
4775 env->fpus &= (~0x4700);
4776 env->fpus |= 0x400;
4777 }
4778}
4779
4780void helper_fsqrt(void)
4781{
4782 CPU86_LDouble fptemp;
4783
4784 fptemp = ST0;
4785 if (fptemp<0.0) {
4786 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4787 env->fpus |= 0x400;
4788 }
4789 ST0 = sqrt(fptemp);
4790}
4791
4792void helper_fsincos(void)
4793{
4794 CPU86_LDouble fptemp;
4795
4796 fptemp = ST0;
4797 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4798 env->fpus |= 0x400;
4799 } else {
4800 ST0 = sin(fptemp);
4801 fpush();
4802 ST0 = cos(fptemp);
4803 env->fpus &= (~0x400); /* C2 <-- 0 */
4804 /* the above code is for |arg| < 2**63 only */
4805 }
4806}
4807
4808void helper_frndint(void)
4809{
4810 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4811}
4812
4813void helper_fscale(void)
4814{
4815 ST0 = ldexp (ST0, (int)(ST1));
4816}
4817
4818void helper_fsin(void)
4819{
4820 CPU86_LDouble fptemp;
4821
4822 fptemp = ST0;
4823 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4824 env->fpus |= 0x400;
4825 } else {
4826 ST0 = sin(fptemp);
4827 env->fpus &= (~0x400); /* C2 <-- 0 */
4828 /* the above code is for |arg| < 2**53 only */
4829 }
4830}
4831
4832void helper_fcos(void)
4833{
4834 CPU86_LDouble fptemp;
4835
4836 fptemp = ST0;
4837 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4838 env->fpus |= 0x400;
4839 } else {
4840 ST0 = cos(fptemp);
4841 env->fpus &= (~0x400); /* C2 <-- 0 */
 4842 /* the above code is for |arg| < 2**63 only */
4843 }
4844}
4845
4846void helper_fxam_ST0(void)
4847{
4848 CPU86_LDoubleU temp;
4849 int expdif;
4850
4851 temp.d = ST0;
4852
4853 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4854 if (SIGND(temp))
4855 env->fpus |= 0x200; /* C1 <-- 1 */
4856
4857 /* XXX: test fptags too */
4858 expdif = EXPD(temp);
4859 if (expdif == MAXEXPD) {
4860#ifdef USE_X86LDOUBLE
4861 if (MANTD(temp) == 0x8000000000000000ULL)
4862#else
4863 if (MANTD(temp) == 0)
4864#endif
4865 env->fpus |= 0x500 /*Infinity*/;
4866 else
4867 env->fpus |= 0x100 /*NaN*/;
4868 } else if (expdif == 0) {
4869 if (MANTD(temp) == 0)
4870 env->fpus |= 0x4000 /*Zero*/;
4871 else
4872 env->fpus |= 0x4400 /*Denormal*/;
4873 } else {
4874 env->fpus |= 0x400;
4875 }
4876}
4877
4878void helper_fstenv(target_ulong ptr, int data32)
4879{
4880 int fpus, fptag, exp, i;
4881 uint64_t mant;
4882 CPU86_LDoubleU tmp;
4883
4884 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4885 fptag = 0;
4886 for (i=7; i>=0; i--) {
4887 fptag <<= 2;
4888 if (env->fptags[i]) {
4889 fptag |= 3;
4890 } else {
4891 tmp.d = env->fpregs[i].d;
4892 exp = EXPD(tmp);
4893 mant = MANTD(tmp);
4894 if (exp == 0 && mant == 0) {
4895 /* zero */
4896 fptag |= 1;
4897 } else if (exp == 0 || exp == MAXEXPD
4898#ifdef USE_X86LDOUBLE
4899 || (mant & (1LL << 63)) == 0
4900#endif
4901 ) {
4902 /* NaNs, infinity, denormal */
4903 fptag |= 2;
4904 }
4905 }
4906 }
4907 if (data32) {
4908 /* 32 bit */
4909 stl(ptr, env->fpuc);
4910 stl(ptr + 4, fpus);
4911 stl(ptr + 8, fptag);
4912 stl(ptr + 12, 0); /* fpip */
4913 stl(ptr + 16, 0); /* fpcs */
4914 stl(ptr + 20, 0); /* fpoo */
4915 stl(ptr + 24, 0); /* fpos */
4916 } else {
4917 /* 16 bit */
4918 stw(ptr, env->fpuc);
4919 stw(ptr + 2, fpus);
4920 stw(ptr + 4, fptag);
4921 stw(ptr + 6, 0);
4922 stw(ptr + 8, 0);
4923 stw(ptr + 10, 0);
4924 stw(ptr + 12, 0);
4925 }
4926}
4927
4928void helper_fldenv(target_ulong ptr, int data32)
4929{
4930 int i, fpus, fptag;
4931
4932 if (data32) {
4933 env->fpuc = lduw(ptr);
4934 fpus = lduw(ptr + 4);
4935 fptag = lduw(ptr + 8);
4936 }
4937 else {
4938 env->fpuc = lduw(ptr);
4939 fpus = lduw(ptr + 2);
4940 fptag = lduw(ptr + 4);
4941 }
4942 env->fpstt = (fpus >> 11) & 7;
4943 env->fpus = fpus & ~0x3800;
4944 for(i = 0;i < 8; i++) {
4945 env->fptags[i] = ((fptag & 3) == 3);
4946 fptag >>= 2;
4947 }
4948}
4949
4950void helper_fsave(target_ulong ptr, int data32)
4951{
4952 CPU86_LDouble tmp;
4953 int i;
4954
4955 helper_fstenv(ptr, data32);
4956
4957 ptr += (14 << data32);
4958 for(i = 0;i < 8; i++) {
4959 tmp = ST(i);
4960 helper_fstt(tmp, ptr);
4961 ptr += 10;
4962 }
4963
4964 /* fninit */
4965 env->fpus = 0;
4966 env->fpstt = 0;
4967 env->fpuc = 0x37f;
4968 env->fptags[0] = 1;
4969 env->fptags[1] = 1;
4970 env->fptags[2] = 1;
4971 env->fptags[3] = 1;
4972 env->fptags[4] = 1;
4973 env->fptags[5] = 1;
4974 env->fptags[6] = 1;
4975 env->fptags[7] = 1;
4976}
4977
4978void helper_frstor(target_ulong ptr, int data32)
4979{
4980 CPU86_LDouble tmp;
4981 int i;
4982
4983 helper_fldenv(ptr, data32);
4984 ptr += (14 << data32);
4985
4986 for(i = 0;i < 8; i++) {
4987 tmp = helper_fldt(ptr);
4988 ST(i) = tmp;
4989 ptr += 10;
4990 }
4991}
4992
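/* FXSAVE image layout used below: FCW/FSW/FTW at 0x00, MXCSR at 0x18, the eight x87 registers at 0x20 (16 bytes each), XMM registers at 0xa0. */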
4993void helper_fxsave(target_ulong ptr, int data64)
4994{
4995 int fpus, fptag, i, nb_xmm_regs;
4996 CPU86_LDouble tmp;
4997 target_ulong addr;
4998
4999 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5000 fptag = 0;
5001 for(i = 0; i < 8; i++) {
5002 fptag |= (env->fptags[i] << i);
5003 }
5004 stw(ptr, env->fpuc);
5005 stw(ptr + 2, fpus);
5006 stw(ptr + 4, fptag ^ 0xff);
5007#ifdef TARGET_X86_64
5008 if (data64) {
5009 stq(ptr + 0x08, 0); /* rip */
5010 stq(ptr + 0x10, 0); /* rdp */
5011 } else
5012#endif
5013 {
5014 stl(ptr + 0x08, 0); /* eip */
5015 stl(ptr + 0x0c, 0); /* sel */
5016 stl(ptr + 0x10, 0); /* dp */
5017 stl(ptr + 0x14, 0); /* sel */
5018 }
5019
5020 addr = ptr + 0x20;
5021 for(i = 0;i < 8; i++) {
5022 tmp = ST(i);
5023 helper_fstt(tmp, addr);
5024 addr += 16;
5025 }
5026
5027 if (env->cr[4] & CR4_OSFXSR_MASK) {
5028 /* XXX: finish it */
5029 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5030 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5031 if (env->hflags & HF_CS64_MASK)
5032 nb_xmm_regs = 16;
5033 else
5034 nb_xmm_regs = 8;
5035 addr = ptr + 0xa0;
5036 for(i = 0; i < nb_xmm_regs; i++) {
5037 stq(addr, env->xmm_regs[i].XMM_Q(0));
5038 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5039 addr += 16;
5040 }
5041 }
5042}
5043
5044void helper_fxrstor(target_ulong ptr, int data64)
5045{
5046 int i, fpus, fptag, nb_xmm_regs;
5047 CPU86_LDouble tmp;
5048 target_ulong addr;
5049
5050 env->fpuc = lduw(ptr);
5051 fpus = lduw(ptr + 2);
5052 fptag = lduw(ptr + 4);
5053 env->fpstt = (fpus >> 11) & 7;
5054 env->fpus = fpus & ~0x3800;
5055 fptag ^= 0xff;
5056 for(i = 0;i < 8; i++) {
5057 env->fptags[i] = ((fptag >> i) & 1);
5058 }
5059
5060 addr = ptr + 0x20;
5061 for(i = 0;i < 8; i++) {
5062 tmp = helper_fldt(addr);
5063 ST(i) = tmp;
5064 addr += 16;
5065 }
5066
5067 if (env->cr[4] & CR4_OSFXSR_MASK) {
5068 /* XXX: finish it */
5069 env->mxcsr = ldl(ptr + 0x18);
5070 //ldl(ptr + 0x1c);
5071 if (env->hflags & HF_CS64_MASK)
5072 nb_xmm_regs = 16;
5073 else
5074 nb_xmm_regs = 8;
5075 addr = ptr + 0xa0;
5076 for(i = 0; i < nb_xmm_regs; i++) {
5077#if !defined(VBOX) || __GNUC__ < 4
5078 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5079 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5080#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5081# if 1
5082 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5083 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5084 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5085 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5086# else
5087 /* this works fine on Mac OS X, gcc 4.0.1 */
5088 uint64_t u64 = ldq(addr);
 5089 env->xmm_regs[i].XMM_Q(0) = u64;
 5090 u64 = ldq(addr + 8);
5091 env->xmm_regs[i].XMM_Q(1) = u64;
5092# endif
5093#endif
5094 addr += 16;
5095 }
5096 }
5097}
5098
5099#ifndef USE_X86LDOUBLE
5100
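/* Without host 80-bit long double support the FPU registers are kept as 64-bit doubles; these conversions shift the 52-bit mantissa up to bits 11..62, set the explicit integer bit, and rebias the exponent from 1023 to 16383. */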
5101void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5102{
5103 CPU86_LDoubleU temp;
5104 int e;
5105
5106 temp.d = f;
5107 /* mantissa */
5108 *pmant = (MANTD(temp) << 11) | (1LL << 63);
5109 /* exponent + sign */
5110 e = EXPD(temp) - EXPBIAS + 16383;
5111 e |= SIGND(temp) >> 16;
5112 *pexp = e;
5113}
5114
5115CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5116{
5117 CPU86_LDoubleU temp;
5118 int e;
5119 uint64_t ll;
5120
5121 /* XXX: handle overflow ? */
5122 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
5123 e |= (upper >> 4) & 0x800; /* sign */
5124 ll = (mant >> 11) & ((1LL << 52) - 1);
5125#ifdef __arm__
5126 temp.l.upper = (e << 20) | (ll >> 32);
5127 temp.l.lower = ll;
5128#else
5129 temp.ll = ll | ((uint64_t)e << 52);
5130#endif
5131 return temp.d;
5132}
5133
5134#else
5135
5136void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5137{
5138 CPU86_LDoubleU temp;
5139
5140 temp.d = f;
5141 *pmant = temp.l.lower;
5142 *pexp = temp.l.upper;
5143}
5144
5145CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5146{
5147 CPU86_LDoubleU temp;
5148
5149 temp.l.upper = upper;
5150 temp.l.lower = mant;
5151 return temp.d;
5152}
5153#endif
5154
5155#ifdef TARGET_X86_64
5156
5157//#define DEBUG_MULDIV
5158
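/* 128-bit arithmetic helpers for the 64-bit MUL/IMUL/DIV/IDIV instructions; the low-half addition detects carry by checking for unsigned wrap-around. */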
5159static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
5160{
5161 *plow += a;
5162 /* carry test */
5163 if (*plow < a)
5164 (*phigh)++;
5165 *phigh += b;
5166}
5167
5168static void neg128(uint64_t *plow, uint64_t *phigh)
5169{
5170 *plow = ~ *plow;
5171 *phigh = ~ *phigh;
5172 add128(plow, phigh, 1, 0);
5173}
5174
5175/* return TRUE if overflow */
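/* 128/64 -> 64-bit unsigned division by bit-wise shift-and-subtract; overflows when the high half of the dividend is >= the divisor. */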
5176static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5177{
5178 uint64_t q, r, a1, a0;
5179 int i, qb, ab;
5180
5181 a0 = *plow;
5182 a1 = *phigh;
5183 if (a1 == 0) {
5184 q = a0 / b;
5185 r = a0 % b;
5186 *plow = q;
5187 *phigh = r;
5188 } else {
5189 if (a1 >= b)
5190 return 1;
5191 /* XXX: use a better algorithm */
5192 for(i = 0; i < 64; i++) {
5193 ab = a1 >> 63;
5194 a1 = (a1 << 1) | (a0 >> 63);
5195 if (ab || a1 >= b) {
5196 a1 -= b;
5197 qb = 1;
5198 } else {
5199 qb = 0;
5200 }
5201 a0 = (a0 << 1) | qb;
5202 }
5203#if defined(DEBUG_MULDIV)
5204 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5205 *phigh, *plow, b, a0, a1);
5206#endif
5207 *plow = a0;
5208 *phigh = a1;
5209 }
5210 return 0;
5211}
5212
5213/* return TRUE if overflow */
5214static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5215{
5216 int sa, sb;
5217 sa = ((int64_t)*phigh < 0);
5218 if (sa)
5219 neg128(plow, phigh);
5220 sb = (b < 0);
5221 if (sb)
5222 b = -b;
5223 if (div64(plow, phigh, b) != 0)
5224 return 1;
5225 if (sa ^ sb) {
5226 if (*plow > (1ULL << 63))
5227 return 1;
5228 *plow = - *plow;
5229 } else {
5230 if (*plow >= (1ULL << 63))
5231 return 1;
5232 }
5233 if (sa)
5234 *phigh = - *phigh;
5235 return 0;
5236}
5237
5238void helper_mulq_EAX_T0(target_ulong t0)
5239{
5240 uint64_t r0, r1;
5241
5242 mulu64(&r0, &r1, EAX, t0);
5243 EAX = r0;
5244 EDX = r1;
5245 CC_DST = r0;
5246 CC_SRC = r1;
5247}
5248
5249void helper_imulq_EAX_T0(target_ulong t0)
5250{
5251 uint64_t r0, r1;
5252
5253 muls64(&r0, &r1, EAX, t0);
5254 EAX = r0;
5255 EDX = r1;
5256 CC_DST = r0;
5257 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5258}
5259
5260target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
5261{
5262 uint64_t r0, r1;
5263
5264 muls64(&r0, &r1, t0, t1);
5265 CC_DST = r0;
5266 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5267 return r0;
5268}
5269
5270void helper_divq_EAX(target_ulong t0)
5271{
5272 uint64_t r0, r1;
5273 if (t0 == 0) {
5274 raise_exception(EXCP00_DIVZ);
5275 }
5276 r0 = EAX;
5277 r1 = EDX;
5278 if (div64(&r0, &r1, t0))
5279 raise_exception(EXCP00_DIVZ);
5280 EAX = r0;
5281 EDX = r1;
5282}
5283
5284void helper_idivq_EAX(target_ulong t0)
5285{
5286 uint64_t r0, r1;
5287 if (t0 == 0) {
5288 raise_exception(EXCP00_DIVZ);
5289 }
5290 r0 = EAX;
5291 r1 = EDX;
5292 if (idiv64(&r0, &r1, t0))
5293 raise_exception(EXCP00_DIVZ);
5294 EAX = r0;
5295 EDX = r1;
5296}
5297#endif
5298
5299static void do_hlt(void)
5300{
5301 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
5302 env->halted = 1;
5303 env->exception_index = EXCP_HLT;
5304 cpu_loop_exit();
5305}
5306
5307void helper_hlt(int next_eip_addend)
5308{
5309 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
5310 EIP += next_eip_addend;
5311
5312 do_hlt();
5313}
5314
5315void helper_monitor(target_ulong ptr)
5316{
5317#ifdef VBOX
5318 if ((uint32_t)ECX > 1)
5319 raise_exception(EXCP0D_GPF);
5320#else
5321 if ((uint32_t)ECX != 0)
5322 raise_exception(EXCP0D_GPF);
5323#endif
5324 /* XXX: store address ? */
5325 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
5326}
5327
5328void helper_mwait(int next_eip_addend)
5329{
5330 if ((uint32_t)ECX != 0)
5331 raise_exception(EXCP0D_GPF);
5332#ifdef VBOX
5333 helper_hlt(next_eip_addend);
5334#else
5335 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5336 EIP += next_eip_addend;
5337
5338 /* XXX: not complete but not completely erroneous */
5339 if (env->cpu_index != 0 || env->next_cpu != NULL) {
5340 /* more than one CPU: do not sleep because another CPU may
5341 wake this one */
5342 } else {
5343 do_hlt();
5344 }
5345#endif
5346}
5347
5348void helper_debug(void)
5349{
5350 env->exception_index = EXCP_DEBUG;
5351 cpu_loop_exit();
5352}
5353
5354void helper_raise_interrupt(int intno, int next_eip_addend)
5355{
5356 raise_interrupt(intno, 1, 0, next_eip_addend);
5357}
5358
5359void helper_raise_exception(int exception_index)
5360{
5361 raise_exception(exception_index);
5362}
5363
5364void helper_cli(void)
5365{
5366 env->eflags &= ~IF_MASK;
5367}
5368
5369void helper_sti(void)
5370{
5371 env->eflags |= IF_MASK;
5372}
5373
5374#ifdef VBOX
5375void helper_cli_vme(void)
5376{
5377 env->eflags &= ~VIF_MASK;
5378}
5379
5380void helper_sti_vme(void)
5381{
5382 /* First check, then change eflags according to the AMD manual */
5383 if (env->eflags & VIP_MASK) {
5384 raise_exception(EXCP0D_GPF);
5385 }
5386 env->eflags |= VIF_MASK;
5387}
5388#endif
5389
5390#if 0
5391/* vm86plus instructions */
5392void helper_cli_vm(void)
5393{
5394 env->eflags &= ~VIF_MASK;
5395}
5396
5397void helper_sti_vm(void)
5398{
5399 env->eflags |= VIF_MASK;
5400 if (env->eflags & VIP_MASK) {
5401 raise_exception(EXCP0D_GPF);
5402 }
5403}
5404#endif
5405
5406void helper_set_inhibit_irq(void)
5407{
5408 env->hflags |= HF_INHIBIT_IRQ_MASK;
5409}
5410
5411void helper_reset_inhibit_irq(void)
5412{
5413 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5414}
5415
5416void helper_boundw(target_ulong a0, int v)
5417{
5418 int low, high;
5419 low = ldsw(a0);
5420 high = ldsw(a0 + 2);
5421 v = (int16_t)v;
5422 if (v < low || v > high) {
5423 raise_exception(EXCP05_BOUND);
5424 }
5425 FORCE_RET();
5426}
5427
5428void helper_boundl(target_ulong a0, int v)
5429{
5430 int low, high;
5431 low = ldl(a0);
5432 high = ldl(a0 + 4);
5433 if (v < low || v > high) {
5434 raise_exception(EXCP05_BOUND);
5435 }
5436 FORCE_RET();
5437}
5438
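/* Software approximations, presumably used by the SSE reciprocal(-square-root) estimate instructions; they are computed at full precision rather than the hardware's reduced precision. */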
5439static float approx_rsqrt(float a)
5440{
5441 return 1.0 / sqrt(a);
5442}
5443
5444static float approx_rcp(float a)
5445{
5446 return 1.0 / a;
5447}
5448
5449#if !defined(CONFIG_USER_ONLY)
5450
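/* Instantiate the softmmu load/store helpers for 1-, 2-, 4- and 8-byte accesses. */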
5451#define MMUSUFFIX _mmu
5452
5453#define SHIFT 0
5454#include "softmmu_template.h"
5455
5456#define SHIFT 1
5457#include "softmmu_template.h"
5458
5459#define SHIFT 2
5460#include "softmmu_template.h"
5461
5462#define SHIFT 3
5463#include "softmmu_template.h"
5464
5465#endif
5466
5467#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
5468/* This code assumes that real physical addresses always fit into a host CPU register,
5469 which is wrong in general but true for our current use cases. */
5470RTCCUINTREG REGPARM __ldb_vbox_phys(RTCCUINTREG addr)
5471{
5472 return remR3PhysReadS8(addr);
5473}
5474RTCCUINTREG REGPARM __ldub_vbox_phys(RTCCUINTREG addr)
5475{
5476 return remR3PhysReadU8(addr);
5477}
5478void REGPARM __stb_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5479{
5480 remR3PhysWriteU8(addr, val);
5481}
5482RTCCUINTREG REGPARM __ldw_vbox_phys(RTCCUINTREG addr)
5483{
5484 return remR3PhysReadS16(addr);
5485}
5486RTCCUINTREG REGPARM __lduw_vbox_phys(RTCCUINTREG addr)
5487{
5488 return remR3PhysReadU16(addr);
5489}
5490void REGPARM __stw_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5491{
5492 remR3PhysWriteU16(addr, val);
5493}
5494RTCCUINTREG REGPARM __ldl_vbox_phys(RTCCUINTREG addr)
5495{
5496 return remR3PhysReadS32(addr);
5497}
5498RTCCUINTREG REGPARM __ldul_vbox_phys(RTCCUINTREG addr)
5499{
5500 return remR3PhysReadU32(addr);
5501}
5502void REGPARM __stl_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5503{
5504 remR3PhysWriteU32(addr, val);
5505}
5506uint64_t REGPARM __ldq_vbox_phys(RTCCUINTREG addr)
5507{
5508 return remR3PhysReadU64(addr);
5509}
5510void REGPARM __stq_vbox_phys(RTCCUINTREG addr, uint64_t val)
5511{
5512 remR3PhysWriteU64(addr, val);
5513}
5514#endif
5515
5516/* try to fill the TLB and return an exception if error. If retaddr is
5517 NULL, it means that the function was called in C code (i.e. not
5518 from generated code or from helper.c) */
5519/* XXX: fix it to restore all registers */
5520void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5521{
5522 TranslationBlock *tb;
5523 int ret;
5524 unsigned long pc;
5525 CPUX86State *saved_env;
5526
5527 /* XXX: hack to restore env in all cases, even if not called from
5528 generated code */
5529 saved_env = env;
5530 env = cpu_single_env;
5531
5532 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
5533 if (ret) {
5534 if (retaddr) {
5535 /* now we have a real cpu fault */
5536 pc = (unsigned long)retaddr;
5537 tb = tb_find_pc(pc);
5538 if (tb) {
5539 /* the PC is inside the translated code. It means that we have
5540 a virtual CPU fault */
5541 cpu_restore_state(tb, env, pc, NULL);
5542 }
5543 }
5544 raise_exception_err(env->exception_index, env->error_code);
5545 }
5546 env = saved_env;
5547}
5548
5549#ifdef VBOX
5550
5551/**
5552 * Correctly computes the eflags.
5553 * @returns eflags.
5554 * @param env1 CPU environment.
5555 */
5556uint32_t raw_compute_eflags(CPUX86State *env1)
5557{
5558 CPUX86State *savedenv = env;
5559 uint32_t efl;
5560 env = env1;
5561 efl = compute_eflags();
5562 env = savedenv;
5563 return efl;
5564}
5565
5566/**
5567 * Reads byte from virtual address in guest memory area.
5568 * XXX: is it working for any addresses? swapped out pages?
5569 * @returns read data byte.
5570 * @param env1 CPU environment.
5571 * @param addr GC Virtual address.
5572 */
5573uint8_t read_byte(CPUX86State *env1, target_ulong addr)
5574{
5575 CPUX86State *savedenv = env;
5576 uint8_t u8;
5577 env = env1;
5578 u8 = ldub_kernel(addr);
5579 env = savedenv;
5580 return u8;
5581}
5582
5583/**
5584 * Reads a 16-bit word from a virtual address in the guest memory area.
5585 * XXX: is it working for any addresses? swapped out pages?
5586 * @returns read data word.
5587 * @param env1 CPU environment.
5588 * @param addr GC Virtual address.
5589 */
5590uint16_t read_word(CPUX86State *env1, target_ulong addr)
5591{
5592 CPUX86State *savedenv = env;
5593 uint16_t u16;
5594 env = env1;
5595 u16 = lduw_kernel(addr);
5596 env = savedenv;
5597 return u16;
5598}
5599
5600/**
5601 * Reads a 32-bit dword from a virtual address in the guest memory area.
5602 * XXX: is it working for any addresses? swapped out pages?
5603 * @returns read data dword.
5604 * @param env1 CPU environment.
5605 * @param addr GC Virtual address.
5606 */
5607uint32_t read_dword(CPUX86State *env1, target_ulong addr)
5608{
5609 CPUX86State *savedenv = env;
5610 uint32_t u32;
5611 env = env1;
5612 u32 = ldl_kernel(addr);
5613 env = savedenv;
5614 return u32;
5615}
5616
5617/**
5618 * Writes byte to virtual address in guest memory area.
5619 * XXX: is it working for any addresses? swapped out pages?
5621 * @param env1 CPU environment.
5622 * @param addr GC Virtual address.
5623 * @param val byte value
5624 */
5625void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
5626{
5627 CPUX86State *savedenv = env;
5628 env = env1;
5629 stb(addr, val);
5630 env = savedenv;
5631}
5632
5633void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5634{
5635 CPUX86State *savedenv = env;
5636 env = env1;
5637 stw(addr, val);
5638 env = savedenv;
5639}
5640
5641void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5642{
5643 CPUX86State *savedenv = env;
5644 env = env1;
5645 stl(addr, val);
5646 env = savedenv;
5647}
5648
5649/**
5650 * Correctly loads selector into segment register with updating internal
5651 * qemu data/caches.
5652 * @param env1 CPU environment.
5653 * @param seg_reg Segment register.
5654 * @param selector Selector to load.
5655 */
5656void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5657{
5658 CPUX86State *savedenv = env;
5659#ifdef FORCE_SEGMENT_SYNC
5660 jmp_buf old_buf;
5661#endif
5662
5663 env = env1;
5664
5665 if ( env->eflags & X86_EFL_VM
5666 || !(env->cr[0] & X86_CR0_PE))
5667 {
5668 load_seg_vm(seg_reg, selector);
5669
5670 env = savedenv;
5671
5672 /* Successful sync. */
5673 env1->segs[seg_reg].newselector = 0;
5674 }
5675 else
5676 {
5677 /* For some reason this works even without saving/restoring the jump buffer, and since this
5678 code is time critical - let's not do that. */
5679#ifdef FORCE_SEGMENT_SYNC
5680 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5681#endif
5682 if (setjmp(env1->jmp_env) == 0)
5683 {
5684 if (seg_reg == R_CS)
5685 {
5686 uint32_t e1, e2;
5687 e1 = e2 = 0;
5688 load_segment(&e1, &e2, selector);
5689 cpu_x86_load_seg_cache(env, R_CS, selector,
5690 get_seg_base(e1, e2),
5691 get_seg_limit(e1, e2),
5692 e2);
5693 }
5694 else
5695 helper_load_seg(seg_reg, selector);
5696 /* We used to use tss_load_seg(seg_reg, selector); which, for some reason, ignored
5697 loading 0 selectors, which in turn led to subtle problems like #3588. */
5698
5699 env = savedenv;
5700
5701 /* Successful sync. */
5702 env1->segs[seg_reg].newselector = 0;
5703 }
5704 else
5705 {
5706 env = savedenv;
5707
5708 /* Postpone sync until the guest uses the selector. */
5709 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5710 env1->segs[seg_reg].newselector = selector;
5711 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5712 env1->exception_index = -1;
5713 env1->error_code = 0;
5714 env1->old_exception = -1;
5715 }
5716#ifdef FORCE_SEGMENT_SYNC
5717 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5718#endif
5719 }
5720
5721}
5722
5723DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5724{
5725 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
5726}
5727
5728
5729int emulate_single_instr(CPUX86State *env1)
5730{
5731 TranslationBlock *tb;
5732 TranslationBlock *current;
5733 int flags;
5734 uint8_t *tc_ptr;
5735 target_ulong old_eip;
5736
5737 /* ensures env is loaded! */
5738 CPUX86State *savedenv = env;
5739 env = env1;
5740
5741 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
5742
5743 current = env->current_tb;
5744 env->current_tb = NULL;
5745 flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
5746
5747 /*
5748 * Translate only one instruction.
5749 */
5750 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
5751 tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
5752 env->segs[R_CS].base, flags, 0);
5753
5754 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
5755
5756
5757 /* tb_link_phys: */
5758 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
5759 tb->jmp_next[0] = NULL;
5760 tb->jmp_next[1] = NULL;
5761 Assert(tb->jmp_next[0] == NULL);
5762 Assert(tb->jmp_next[1] == NULL);
5763 if (tb->tb_next_offset[0] != 0xffff)
5764 tb_reset_jump(tb, 0);
5765 if (tb->tb_next_offset[1] != 0xffff)
5766 tb_reset_jump(tb, 1);
5767
5768 /*
5769 * Execute it using emulation
5770 */
5771 old_eip = env->eip;
5772 env->current_tb = tb;
5773
5774 /*
5775 * eip remains the same for repeated instructions; no idea why qemu doesn't do a jump inside the generated code.
5776 * Perhaps not a very safe hack.
5777 */
5778 while(old_eip == env->eip)
5779 {
5780 tc_ptr = tb->tc_ptr;
5781
5782#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
5783 int fake_ret;
5784 tcg_qemu_tb_exec(tc_ptr, fake_ret);
5785#else
5786 tcg_qemu_tb_exec(tc_ptr);
5787#endif
5788 /*
5789 * Exit once we detect an external interrupt and interrupts are enabled
5790 */
5791 if( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT|CPU_INTERRUPT_EXTERNAL_TIMER)) ||
5792 ( (env->eflags & IF_MASK) &&
5793 !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
5794 (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) ) )
5795 {
5796 break;
5797 }
5798 }
5799 env->current_tb = current;
5800
5801 tb_phys_invalidate(tb, -1);
5802 tb_free(tb);
5803/*
5804 Assert(tb->tb_next_offset[0] == 0xffff);
5805 Assert(tb->tb_next_offset[1] == 0xffff);
5806 Assert(tb->tb_next[0] == 0xffff);
5807 Assert(tb->tb_next[1] == 0xffff);
5808 Assert(tb->jmp_next[0] == NULL);
5809 Assert(tb->jmp_next[1] == NULL);
5810 Assert(tb->jmp_first == NULL); */
5811
5812 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
5813
5814 /*
5815 * Execute the next instruction when we encounter instruction fusing.
5816 */
5817 if (env->hflags & HF_INHIBIT_IRQ_MASK)
5818 {
5819 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
5820 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5821 emulate_single_instr(env);
5822 }
5823
5824 env = savedenv;
5825 return 0;
5826}
5827
5828/**
5829 * Correctly loads a new ldtr selector.
5830 *
5831 * @param env1 CPU environment.
5832 * @param selector Selector to load.
5833 */
5834void sync_ldtr(CPUX86State *env1, int selector)
5835{
5836 CPUX86State *saved_env = env;
5837 if (setjmp(env1->jmp_env) == 0)
5838 {
5839 env = env1;
5840 helper_lldt(selector);
5841 env = saved_env;
5842 }
5843 else
5844 {
5845 env = saved_env;
5846#ifdef VBOX_STRICT
5847 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
5848#endif
5849 }
5850}
5851
5852int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
5853 uint32_t *esp_ptr, int dpl)
5854{
5855 int type, index, shift;
5856
5857 CPUX86State *savedenv = env;
5858 env = env1;
5859
5860 if (!(env->tr.flags & DESC_P_MASK))
5861 cpu_abort(env, "invalid tss");
5862 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
5863 if ((type & 7) != 1)
5864 cpu_abort(env, "invalid tss type %d", type);
5865 shift = type >> 3;
5866 index = (dpl * 4 + 2) << shift;
5867 if (index + (4 << shift) - 1 > env->tr.limit)
5868 {
5869 env = savedenv;
5870 return 0;
5871 }
5872 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
5873
5874 if (shift == 0) {
5875 *esp_ptr = lduw_kernel(env->tr.base + index);
5876 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
5877 } else {
5878 *esp_ptr = ldl_kernel(env->tr.base + index);
5879 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
5880 }
5881
5882 env = savedenv;
5883 return 1;
5884}
5885
5886//*****************************************************************************
5887// Needs to be at the bottom of the file (overriding macros)
5888
5889static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
5890{
5891 return *(CPU86_LDouble *)ptr;
5892}
5893
5894static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5895{
5896 *(CPU86_LDouble *)ptr = f;
5897}
5898
5899#undef stw
5900#undef stl
5901#undef stq
5902#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
5903#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
5904#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
5905
5906//*****************************************************************************
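/* Stores the recompiler's FPU (and, when FXSR is available, SSE) state into the raw FXSAVE/FSAVE image at ptr. */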
5907void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
5908{
5909 int fpus, fptag, i, nb_xmm_regs;
5910 CPU86_LDouble tmp;
5911 uint8_t *addr;
5912 int data64 = !!(env->hflags & HF_LMA_MASK);
5913
5914 if (env->cpuid_features & CPUID_FXSR)
5915 {
5916 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5917 fptag = 0;
5918 for(i = 0; i < 8; i++) {
5919 fptag |= (env->fptags[i] << i);
5920 }
5921 stw(ptr, env->fpuc);
5922 stw(ptr + 2, fpus);
5923 stw(ptr + 4, fptag ^ 0xff);
5924
5925 addr = ptr + 0x20;
5926 for(i = 0;i < 8; i++) {
5927 tmp = ST(i);
5928 helper_fstt_raw(tmp, addr);
5929 addr += 16;
5930 }
5931
5932 if (env->cr[4] & CR4_OSFXSR_MASK) {
5933 /* XXX: finish it */
5934 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5935 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5936 nb_xmm_regs = 8 << data64;
5937 addr = ptr + 0xa0;
5938 for(i = 0; i < nb_xmm_regs; i++) {
5939#if __GNUC__ < 4
5940 stq(addr, env->xmm_regs[i].XMM_Q(0));
5941 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5942#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5943 stl(addr, env->xmm_regs[i].XMM_L(0));
5944 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
5945 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
5946 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
5947#endif
5948 addr += 16;
5949 }
5950 }
5951 }
5952 else
5953 {
5954 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
5955 int fptag;
5956
5957 fp->FCW = env->fpuc;
5958 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5959 fptag = 0;
5960 for (i=7; i>=0; i--) {
5961 fptag <<= 2;
5962 if (env->fptags[i]) {
5963 fptag |= 3;
5964 } else {
5965 /* the FPU automatically computes it */
5966 }
5967 }
5968 fp->FTW = fptag;
5969
5970 for(i = 0;i < 8; i++) {
5971 tmp = ST(i);
5972 helper_fstt_raw(tmp, &fp->regs[i].au8[0]);
5973 }
5974 }
5975}
5976
5977//*****************************************************************************
5978#undef lduw
5979#undef ldl
5980#undef ldq
5981#define lduw(a) *(uint16_t *)(a)
5982#define ldl(a) *(uint32_t *)(a)
5983#define ldq(a) *(uint64_t *)(a)
5984//*****************************************************************************
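/* Loads the FPU (and, when FXSR is available, SSE) state from the raw FXSAVE/FSAVE image at ptr back into the recompiler state. */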
5985void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
5986{
5987 int i, fpus, fptag, nb_xmm_regs;
5988 CPU86_LDouble tmp;
5989 uint8_t *addr;
5990 int data64 = !!(env->hflags & HF_LMA_MASK); /* don't use HF_CS64_MASK here as cs hasn't been synced when this function is called. */
5991
5992 if (env->cpuid_features & CPUID_FXSR)
5993 {
5994 env->fpuc = lduw(ptr);
5995 fpus = lduw(ptr + 2);
5996 fptag = lduw(ptr + 4);
5997 env->fpstt = (fpus >> 11) & 7;
5998 env->fpus = fpus & ~0x3800;
5999 fptag ^= 0xff;
6000 for(i = 0;i < 8; i++) {
6001 env->fptags[i] = ((fptag >> i) & 1);
6002 }
6003
6004 addr = ptr + 0x20;
6005 for(i = 0;i < 8; i++) {
6006 tmp = helper_fldt_raw(addr);
6007 ST(i) = tmp;
6008 addr += 16;
6009 }
6010
6011 if (env->cr[4] & CR4_OSFXSR_MASK) {
6012 /* XXX: finish it, endianness */
6013 env->mxcsr = ldl(ptr + 0x18);
6014 //ldl(ptr + 0x1c);
6015 nb_xmm_regs = 8 << data64;
6016 addr = ptr + 0xa0;
6017 for(i = 0; i < nb_xmm_regs; i++) {
6018#if HC_ARCH_BITS == 32
6019 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
6020 env->xmm_regs[i].XMM_L(0) = ldl(addr);
6021 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
6022 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
6023 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
6024#else
6025 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
6026 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
6027#endif
6028 addr += 16;
6029 }
6030 }
6031 }
6032 else
6033 {
6034 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6035 int fptag, j;
6036
6037 env->fpuc = fp->FCW;
6038 env->fpstt = (fp->FSW >> 11) & 7;
6039 env->fpus = fp->FSW & ~0x3800;
6040 fptag = fp->FTW;
6041 for(i = 0;i < 8; i++) {
6042 env->fptags[i] = ((fptag & 3) == 3);
6043 fptag >>= 2;
6044 }
6045 j = env->fpstt;
6046 for(i = 0;i < 8; i++) {
6047 tmp = helper_fldt_raw(&fp->regs[i].au8[0]);
6048 ST(i) = tmp;
6049 }
6050 }
6051}
6052//*****************************************************************************
6053//*****************************************************************************
6054
6055#endif /* VBOX */
6056
6057/* Secure Virtual Machine helpers */
6058
6059#if defined(CONFIG_USER_ONLY)
6060
6061void helper_vmrun(int aflag, int next_eip_addend)
6062{
6063}
6064void helper_vmmcall(void)
6065{
6066}
6067void helper_vmload(int aflag)
6068{
6069}
6070void helper_vmsave(int aflag)
6071{
6072}
6073void helper_stgi(void)
6074{
6075}
6076void helper_clgi(void)
6077{
6078}
6079void helper_skinit(void)
6080{
6081}
6082void helper_invlpga(int aflag)
6083{
6084}
6085void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6086{
6087}
6088void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6089{
6090}
6091
6092void helper_svm_check_io(uint32_t port, uint32_t param,
6093 uint32_t next_eip_addend)
6094{
6095}
6096#else
6097
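/* The VMCB keeps segment attributes in a packed 12-bit form; convert to and from the descriptor flags held in the segment cache. */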
6098static inline void svm_save_seg(target_phys_addr_t addr,
6099 const SegmentCache *sc)
6100{
6101 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6102 sc->selector);
6103 stq_phys(addr + offsetof(struct vmcb_seg, base),
6104 sc->base);
6105 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6106 sc->limit);
6107 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6108 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6109}
6110
6111static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6112{
6113 unsigned int flags;
6114
6115 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6116 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6117 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6118 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6119 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6120}
6121
6122static inline void svm_load_seg_cache(target_phys_addr_t addr,
6123 CPUState *env, int seg_reg)
6124{
6125 SegmentCache sc1, *sc = &sc1;
6126 svm_load_seg(addr, sc);
6127 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6128 sc->base, sc->limit, sc->flags);
6129}
6130
6131void helper_vmrun(int aflag, int next_eip_addend)
6132{
6133 target_ulong addr;
6134 uint32_t event_inj;
6135 uint32_t int_ctl;
6136
6137 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
6138
6139 if (aflag == 2)
6140 addr = EAX;
6141 else
6142 addr = (uint32_t)EAX;
6143
6144 if (loglevel & CPU_LOG_TB_IN_ASM)
6145 fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);
6146
6147 env->vm_vmcb = addr;
6148
6149 /* save the current CPU state in the hsave page */
6150 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6151 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6152
6153 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6154 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6155
6156 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
6157 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
6158 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
6159 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
6160 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
6161 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
6162
6163 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
6164 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
6165
6166 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
6167 &env->segs[R_ES]);
6168 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
6169 &env->segs[R_CS]);
6170 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
6171 &env->segs[R_SS]);
6172 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
6173 &env->segs[R_DS]);
6174
6175 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
6176 EIP + next_eip_addend);
6177 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
6178 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
6179
6180 /* load the interception bitmaps so we do not need to access the
6181 vmcb in svm mode */
6182 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
6183 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
6184 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
6185 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
6186 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
6187 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
6188
6189 /* enable intercepts */
6190 env->hflags |= HF_SVMI_MASK;
6191
6192 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
6193
6194 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
6195 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
6196
6197 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
6198 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
6199
6200 /* clear exit_info_2 so we behave like the real hardware */
6201 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6202
6203 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
6204 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
6205 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
6206 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
6207 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6208 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6209 if (int_ctl & V_INTR_MASKING_MASK) {
6210 env->v_tpr = int_ctl & V_TPR_MASK;
6211 env->hflags2 |= HF2_VINTR_MASK;
6212 if (env->eflags & IF_MASK)
6213 env->hflags2 |= HF2_HIF_MASK;
6214 }
6215
6216 cpu_load_efer(env,
6217 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6218 env->eflags = 0;
6219 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
6220 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6221 CC_OP = CC_OP_EFLAGS;
6222
6223 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
6224 env, R_ES);
6225 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6226 env, R_CS);
6227 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6228 env, R_SS);
6229 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6230 env, R_DS);
6231
6232 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
6233 env->eip = EIP;
6234 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
6235 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
6236 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
6237 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6238 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
6239
6240 /* FIXME: guest state consistency checks */
6241
6242 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6243 case TLB_CONTROL_DO_NOTHING:
6244 break;
6245 case TLB_CONTROL_FLUSH_ALL_ASID:
6246 /* FIXME: this is not 100% correct but should work for now */
6247 tlb_flush(env, 1);
6248 break;
6249 }
6250
6251 env->hflags2 |= HF2_GIF_MASK;
6252
6253 if (int_ctl & V_IRQ_MASK) {
6254 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
6255 }
6256
6257 /* maybe we need to inject an event */
6258 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
6259 if (event_inj & SVM_EVTINJ_VALID) {
6260 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
6261 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
6262 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
6263 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
6264
6265 if (loglevel & CPU_LOG_TB_IN_ASM)
6266 fprintf(logfile, "Injecting(%#hx): ", valid_err);
6267 /* FIXME: need to implement valid_err */
6268 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
6269 case SVM_EVTINJ_TYPE_INTR:
6270 env->exception_index = vector;
6271 env->error_code = event_inj_err;
6272 env->exception_is_int = 0;
6273 env->exception_next_eip = -1;
6274 if (loglevel & CPU_LOG_TB_IN_ASM)
6275 fprintf(logfile, "INTR");
6276            /* XXX: is this always correct? */
6277 do_interrupt(vector, 0, 0, 0, 1);
6278 break;
6279 case SVM_EVTINJ_TYPE_NMI:
6280 env->exception_index = EXCP02_NMI;
6281 env->error_code = event_inj_err;
6282 env->exception_is_int = 0;
6283 env->exception_next_eip = EIP;
6284 if (loglevel & CPU_LOG_TB_IN_ASM)
6285 fprintf(logfile, "NMI");
6286 cpu_loop_exit();
6287 break;
6288 case SVM_EVTINJ_TYPE_EXEPT:
6289 env->exception_index = vector;
6290 env->error_code = event_inj_err;
6291 env->exception_is_int = 0;
6292 env->exception_next_eip = -1;
6293 if (loglevel & CPU_LOG_TB_IN_ASM)
6294 fprintf(logfile, "EXEPT");
6295 cpu_loop_exit();
6296 break;
6297 case SVM_EVTINJ_TYPE_SOFT:
6298 env->exception_index = vector;
6299 env->error_code = event_inj_err;
6300 env->exception_is_int = 1;
6301 env->exception_next_eip = EIP;
6302 if (loglevel & CPU_LOG_TB_IN_ASM)
6303 fprintf(logfile, "SOFT");
6304 cpu_loop_exit();
6305 break;
6306 }
6307 if (loglevel & CPU_LOG_TB_IN_ASM)
6308 fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
6309 }
6310}
6311
6312void helper_vmmcall(void)
6313{
6314 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
6315 raise_exception(EXCP06_ILLOP);
6316}
6317
6318void helper_vmload(int aflag)
6319{
6320 target_ulong addr;
6321 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
6322
6323 if (aflag == 2)
6324 addr = EAX;
6325 else
6326 addr = (uint32_t)EAX;
6327
6328 if (loglevel & CPU_LOG_TB_IN_ASM)
6329 fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6330 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6331 env->segs[R_FS].base);
6332
6333 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
6334 env, R_FS);
6335 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
6336 env, R_GS);
6337 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
6338 &env->tr);
6339 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
6340 &env->ldt);
6341
6342#ifdef TARGET_X86_64
6343 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
6344 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
6345 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
6346 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
6347#endif
6348 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
6349 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
6350 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
6351 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
6352}
6353
6354void helper_vmsave(int aflag)
6355{
6356 target_ulong addr;
6357 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
6358
6359 if (aflag == 2)
6360 addr = EAX;
6361 else
6362 addr = (uint32_t)EAX;
6363
6364 if (loglevel & CPU_LOG_TB_IN_ASM)
6365 fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6366 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6367 env->segs[R_FS].base);
6368
6369 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
6370 &env->segs[R_FS]);
6371 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
6372 &env->segs[R_GS]);
6373 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
6374 &env->tr);
6375 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
6376 &env->ldt);
6377
6378#ifdef TARGET_X86_64
6379 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
6380 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
6381 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
6382 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
6383#endif
6384 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
6385 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
6386 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
6387 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
6388}
6389
6390void helper_stgi(void)
6391{
6392 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
6393 env->hflags2 |= HF2_GIF_MASK;
6394}
6395
6396void helper_clgi(void)
6397{
6398 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
6399 env->hflags2 &= ~HF2_GIF_MASK;
6400}
6401
6402void helper_skinit(void)
6403{
6404 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
6405 /* XXX: not implemented */
6406 raise_exception(EXCP06_ILLOP);
6407}
6408
6409void helper_invlpga(int aflag)
6410{
6411 target_ulong addr;
6412 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6413
6414 if (aflag == 2)
6415 addr = EAX;
6416 else
6417 addr = (uint32_t)EAX;
6418
6419    /* XXX: could use the ASID to see whether the flush is actually
6420       needed */
6421 tlb_flush_page(env, addr);
6422}
6423
6424void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6425{
6426 if (likely(!(env->hflags & HF_SVMI_MASK)))
6427 return;
6428#ifndef VBOX
6429 switch(type) {
6430#ifndef VBOX
6431 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
6432#else
6433 case SVM_EXIT_READ_CR0: case SVM_EXIT_READ_CR0 + 1: case SVM_EXIT_READ_CR0 + 2:
6434 case SVM_EXIT_READ_CR0 + 3: case SVM_EXIT_READ_CR0 + 4: case SVM_EXIT_READ_CR0 + 5:
6435 case SVM_EXIT_READ_CR0 + 6: case SVM_EXIT_READ_CR0 + 7: case SVM_EXIT_READ_CR0 + 8:
6436#endif
6437 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
6438 helper_vmexit(type, param);
6439 }
6440 break;
6441#ifndef VBOX
6442 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
6443#else
6444 case SVM_EXIT_WRITE_CR0: case SVM_EXIT_WRITE_CR0 + 1: case SVM_EXIT_WRITE_CR0 + 2:
6445 case SVM_EXIT_WRITE_CR0 + 3: case SVM_EXIT_WRITE_CR0 + 4: case SVM_EXIT_WRITE_CR0 + 5:
6446 case SVM_EXIT_WRITE_CR0 + 6: case SVM_EXIT_WRITE_CR0 + 7: case SVM_EXIT_WRITE_CR0 + 8:
6447#endif
6448 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
6449 helper_vmexit(type, param);
6450 }
6451 break;
6452 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
6453 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
6454 helper_vmexit(type, param);
6455 }
6456 break;
6457 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
6458 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
6459 helper_vmexit(type, param);
6460 }
6461 break;
6462 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
6463 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
6464 helper_vmexit(type, param);
6465 }
6466 break;
6467 case SVM_EXIT_MSR:
6468 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
6469 /* FIXME: this should be read in at vmrun (faster this way?) */
6470 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
6471 uint32_t t0, t1;
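            /* The MSR permission map holds two intercept bits per MSR (read/write),
               packed into three 2K regions covering MSRs 0x0-0x1FFF,
               0xC0000000-0xC0001FFF and 0xC0010000-0xC0011FFF; t1 is the byte
               offset into the map and t0 the bit offset within that byte. */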
6472 switch((uint32_t)ECX) {
6473 case 0 ... 0x1fff:
6474 t0 = (ECX * 2) % 8;
6475 t1 = ECX / 8;
6476 break;
6477 case 0xc0000000 ... 0xc0001fff:
6478 t0 = (8192 + ECX - 0xc0000000) * 2;
6479 t1 = (t0 / 8);
6480 t0 %= 8;
6481 break;
6482 case 0xc0010000 ... 0xc0011fff:
6483 t0 = (16384 + ECX - 0xc0010000) * 2;
6484 t1 = (t0 / 8);
6485 t0 %= 8;
6486 break;
6487 default:
6488 helper_vmexit(type, param);
6489 t0 = 0;
6490 t1 = 0;
6491 break;
6492 }
6493 if (ldub_phys(addr + t1) & ((1 << param) << t0))
6494 helper_vmexit(type, param);
6495 }
6496 break;
6497 default:
6498 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
6499 helper_vmexit(type, param);
6500 }
6501 break;
6502 }
6503#else
6504 AssertMsgFailed(("We shouldn't be here, HWACCM supported differently!"));
6505#endif
6506}
6507
6508void helper_svm_check_io(uint32_t port, uint32_t param,
6509 uint32_t next_eip_addend)
6510{
6511 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
6512 /* FIXME: this should be read in at vmrun (faster this way?) */
6513 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
6514 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
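        /* The I/O permission map holds one intercept bit per port; bits 4-6 of
           param encode the access size, so the mask covers every port byte this
           access touches. */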
6515 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
6516 /* next EIP */
6517 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
6518 env->eip + next_eip_addend);
6519 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
6520 }
6521 }
6522}
6523
6524/* Note: currently only 32 bits of exit_code are used */
6525void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6526{
6527 uint32_t int_ctl;
6528
6529 if (loglevel & CPU_LOG_TB_IN_ASM)
6530 fprintf(logfile,"vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6531 exit_code, exit_info_1,
6532 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6533 EIP);
6534
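    /* Save the guest's interrupt shadow into the VMCB and clear it from the
       CPU state before switching back to the host. */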
6535 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
6536 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6537 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6538 } else {
6539 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6540 }
6541
6542 /* Save the VM state in the vmcb */
6543 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6544 &env->segs[R_ES]);
6545 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6546 &env->segs[R_CS]);
6547 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6548 &env->segs[R_SS]);
6549 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6550 &env->segs[R_DS]);
6551
6552 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6553 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6554
6555 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6556 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6557
6558 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6559 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6560 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6561 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6562 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6563
6564 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6565 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6566 int_ctl |= env->v_tpr & V_TPR_MASK;
6567 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6568 int_ctl |= V_IRQ_MASK;
6569 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6570
6571 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6572 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6573 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6574 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6575 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6576 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6577 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6578
6579 /* Reload the host state from vm_hsave */
6580 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6581 env->hflags &= ~HF_SVMI_MASK;
6582 env->intercept = 0;
6583 env->intercept_exceptions = 0;
6584 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6585 env->tsc_offset = 0;
6586
6587 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6588 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6589
6590 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6591 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6592
6593 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6594 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6595 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6596    /* we need to set EFER after the CRs so the hidden flags get
6597       set properly */
6598 cpu_load_efer(env,
6599 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6600 env->eflags = 0;
6601 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6602 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6603 CC_OP = CC_OP_EFLAGS;
6604
6605 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6606 env, R_ES);
6607 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6608 env, R_CS);
6609 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6610 env, R_SS);
6611 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6612 env, R_DS);
6613
6614 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6615 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6616 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6617
6618 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6619 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6620
6621 /* other setups */
6622 cpu_x86_set_cpl(env, 0);
6623 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6624 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6625
6626 env->hflags2 &= ~HF2_GIF_MASK;
6627 /* FIXME: Resets the current ASID register to zero (host ASID). */
6628
6629 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6630
6631 /* Clears the TSC_OFFSET inside the processor. */
6632
6633 /* If the host is in PAE mode, the processor reloads the host's PDPEs
6634       from the page table indicated by the host's CR3. If the PDPEs contain
6635 illegal state, the processor causes a shutdown. */
6636
6637 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6638 env->cr[0] |= CR0_PE_MASK;
6639 env->eflags &= ~VM_MASK;
6640
6641 /* Disables all breakpoints in the host DR7 register. */
6642
6643 /* Checks the reloaded host state for consistency. */
6644
6645 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
6646 host's code segment or non-canonical (in the case of long mode), a
6647       #GP fault is delivered inside the host. */
6648
6649 /* remove any pending exception */
6650 env->exception_index = -1;
6651 env->error_code = 0;
6652 env->old_exception = -1;
6653
6654 cpu_loop_exit();
6655}
6656
6657#endif
6658
6659/* MMX/SSE */
6660 /* XXX: optimize by storing fpstt and fptags in the static cpu state */
6661void helper_enter_mmx(void)
6662{
6663 env->fpstt = 0;
6664 *(uint32_t *)(env->fptags) = 0;
6665 *(uint32_t *)(env->fptags + 4) = 0;
6666}
6667
6668void helper_emms(void)
6669{
6670 /* set to empty state */
6671 *(uint32_t *)(env->fptags) = 0x01010101;
6672 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6673}
6674
6675/* XXX: suppress */
6676void helper_movq(uint64_t *d, uint64_t *s)
6677{
6678 *d = *s;
6679}
6680
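/* Instantiate the vector helpers twice: SHIFT 0 gives the 64-bit MMX forms and
   SHIFT 1 the 128-bit SSE forms. helper_template.h below is instantiated once
   per operand size (byte, word, long and, on 64-bit targets, quad). */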
6681#define SHIFT 0
6682#include "ops_sse.h"
6683
6684#define SHIFT 1
6685#include "ops_sse.h"
6686
6687#define SHIFT 0
6688#include "helper_template.h"
6689#undef SHIFT
6690
6691#define SHIFT 1
6692#include "helper_template.h"
6693#undef SHIFT
6694
6695#define SHIFT 2
6696#include "helper_template.h"
6697#undef SHIFT
6698
6699#ifdef TARGET_X86_64
6700
6701#define SHIFT 3
6702#include "helper_template.h"
6703#undef SHIFT
6704
6705#endif
6706
6707/* bit operations */
6708target_ulong helper_bsf(target_ulong t0)
6709{
6710 int count;
6711 target_ulong res;
6712
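    /* Bit scan forward: index of the least-significant set bit. The loop
       relies on a nonzero operand (BSF leaves the destination undefined when
       the source is zero). */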
6713 res = t0;
6714 count = 0;
6715 while ((res & 1) == 0) {
6716 count++;
6717 res >>= 1;
6718 }
6719 return count;
6720}
6721
6722target_ulong helper_bsr(target_ulong t0)
6723{
6724 int count;
6725 target_ulong res, mask;
6726
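    /* Bit scan reverse: index of the most-significant set bit; likewise
       assumes a nonzero operand. */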
6727 res = t0;
6728 count = TARGET_LONG_BITS - 1;
6729 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
6730 while ((res & mask) == 0) {
6731 count--;
6732 res <<= 1;
6733 }
6734 return count;
6735}
6736
6737
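/* Lazy condition codes: with CC_OP_EFLAGS the flags are already materialized
   in CC_SRC, so the compute helpers simply return it (or its carry bit). */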
6738static int compute_all_eflags(void)
6739{
6740 return CC_SRC;
6741}
6742
6743static int compute_c_eflags(void)
6744{
6745 return CC_SRC & CC_C;
6746}
6747
6748#ifndef VBOX
6749CCTable cc_table[CC_OP_NB] = {
6750 [CC_OP_DYNAMIC] = { /* should never happen */ },
6751
6752 [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },
6753
6754 [CC_OP_MULB] = { compute_all_mulb, compute_c_mull },
6755 [CC_OP_MULW] = { compute_all_mulw, compute_c_mull },
6756 [CC_OP_MULL] = { compute_all_mull, compute_c_mull },
6757
6758 [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
6759 [CC_OP_ADDW] = { compute_all_addw, compute_c_addw },
6760 [CC_OP_ADDL] = { compute_all_addl, compute_c_addl },
6761
6762 [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
6763 [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw },
6764 [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl },
6765
6766 [CC_OP_SUBB] = { compute_all_subb, compute_c_subb },
6767 [CC_OP_SUBW] = { compute_all_subw, compute_c_subw },
6768 [CC_OP_SUBL] = { compute_all_subl, compute_c_subl },
6769
6770 [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb },
6771 [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw },
6772 [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl },
6773
6774 [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
6775 [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
6776 [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },
6777
6778 [CC_OP_INCB] = { compute_all_incb, compute_c_incl },
6779 [CC_OP_INCW] = { compute_all_incw, compute_c_incl },
6780 [CC_OP_INCL] = { compute_all_incl, compute_c_incl },
6781
6782 [CC_OP_DECB] = { compute_all_decb, compute_c_incl },
6783 [CC_OP_DECW] = { compute_all_decw, compute_c_incl },
6784 [CC_OP_DECL] = { compute_all_decl, compute_c_incl },
6785
6786 [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
6787 [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
6788 [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },
6789
6790 [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl },
6791 [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl },
6792 [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },
6793
6794#ifdef TARGET_X86_64
6795 [CC_OP_MULQ] = { compute_all_mulq, compute_c_mull },
6796
6797 [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq },
6798
6799 [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq },
6800
6801 [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq },
6802
6803 [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq },
6804
6805 [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq },
6806
6807 [CC_OP_INCQ] = { compute_all_incq, compute_c_incl },
6808
6809 [CC_OP_DECQ] = { compute_all_decq, compute_c_incl },
6810
6811 [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq },
6812
6813 [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarl },
6814#endif
6815};
6816#else /* VBOX */
6817/* Sync carefully with cpu.h */
6818CCTable cc_table[CC_OP_NB] = {
6819 /* CC_OP_DYNAMIC */ { 0, 0 },
6820
6821 /* CC_OP_EFLAGS */ { compute_all_eflags, compute_c_eflags },
6822
6823 /* CC_OP_MULB */ { compute_all_mulb, compute_c_mull },
6824 /* CC_OP_MULW */ { compute_all_mulw, compute_c_mull },
6825 /* CC_OP_MULL */ { compute_all_mull, compute_c_mull },
6826#ifdef TARGET_X86_64
6827 /* CC_OP_MULQ */ { compute_all_mulq, compute_c_mull },
6828#else
6829 /* CC_OP_MULQ */ { 0, 0 },
6830#endif
6831
6832 /* CC_OP_ADDB */ { compute_all_addb, compute_c_addb },
6833 /* CC_OP_ADDW */ { compute_all_addw, compute_c_addw },
6834 /* CC_OP_ADDL */ { compute_all_addl, compute_c_addl },
6835#ifdef TARGET_X86_64
6836 /* CC_OP_ADDQ */ { compute_all_addq, compute_c_addq },
6837#else
6838 /* CC_OP_ADDQ */ { 0, 0 },
6839#endif
6840
6841 /* CC_OP_ADCB */ { compute_all_adcb, compute_c_adcb },
6842 /* CC_OP_ADCW */ { compute_all_adcw, compute_c_adcw },
6843 /* CC_OP_ADCL */ { compute_all_adcl, compute_c_adcl },
6844#ifdef TARGET_X86_64
6845 /* CC_OP_ADCQ */ { compute_all_adcq, compute_c_adcq },
6846#else
6847 /* CC_OP_ADCQ */ { 0, 0 },
6848#endif
6849
6850 /* CC_OP_SUBB */ { compute_all_subb, compute_c_subb },
6851 /* CC_OP_SUBW */ { compute_all_subw, compute_c_subw },
6852 /* CC_OP_SUBL */ { compute_all_subl, compute_c_subl },
6853#ifdef TARGET_X86_64
6854 /* CC_OP_SUBQ */ { compute_all_subq, compute_c_subq },
6855#else
6856 /* CC_OP_SUBQ */ { 0, 0 },
6857#endif
6858
6859 /* CC_OP_SBBB */ { compute_all_sbbb, compute_c_sbbb },
6860 /* CC_OP_SBBW */ { compute_all_sbbw, compute_c_sbbw },
6861 /* CC_OP_SBBL */ { compute_all_sbbl, compute_c_sbbl },
6862#ifdef TARGET_X86_64
6863 /* CC_OP_SBBQ */ { compute_all_sbbq, compute_c_sbbq },
6864#else
6865 /* CC_OP_SBBQ */ { 0, 0 },
6866#endif
6867
6868 /* CC_OP_LOGICB */ { compute_all_logicb, compute_c_logicb },
6869 /* CC_OP_LOGICW */ { compute_all_logicw, compute_c_logicw },
6870 /* CC_OP_LOGICL */ { compute_all_logicl, compute_c_logicl },
6871#ifdef TARGET_X86_64
6872 /* CC_OP_LOGICQ */ { compute_all_logicq, compute_c_logicq },
6873#else
6874 /* CC_OP_LOGICQ */ { 0, 0 },
6875#endif
6876
6877 /* CC_OP_INCB */ { compute_all_incb, compute_c_incl },
6878 /* CC_OP_INCW */ { compute_all_incw, compute_c_incl },
6879 /* CC_OP_INCL */ { compute_all_incl, compute_c_incl },
6880#ifdef TARGET_X86_64
6881 /* CC_OP_INCQ */ { compute_all_incq, compute_c_incl },
6882#else
6883 /* CC_OP_INCQ */ { 0, 0 },
6884#endif
6885
6886 /* CC_OP_DECB */ { compute_all_decb, compute_c_incl },
6887 /* CC_OP_DECW */ { compute_all_decw, compute_c_incl },
6888 /* CC_OP_DECL */ { compute_all_decl, compute_c_incl },
6889#ifdef TARGET_X86_64
6890 /* CC_OP_DECQ */ { compute_all_decq, compute_c_incl },
6891#else
6892 /* CC_OP_DECQ */ { 0, 0 },
6893#endif
6894
6895 /* CC_OP_SHLB */ { compute_all_shlb, compute_c_shlb },
6896 /* CC_OP_SHLW */ { compute_all_shlw, compute_c_shlw },
6897 /* CC_OP_SHLL */ { compute_all_shll, compute_c_shll },
6898#ifdef TARGET_X86_64
6899 /* CC_OP_SHLQ */ { compute_all_shlq, compute_c_shlq },
6900#else
6901 /* CC_OP_SHLQ */ { 0, 0 },
6902#endif
6903
6904 /* CC_OP_SARB */ { compute_all_sarb, compute_c_sarl },
6905 /* CC_OP_SARW */ { compute_all_sarw, compute_c_sarl },
6906 /* CC_OP_SARL */ { compute_all_sarl, compute_c_sarl },
6907#ifdef TARGET_X86_64
6908 /* CC_OP_SARQ */ { compute_all_sarq, compute_c_sarl},
6909#else
6910 /* CC_OP_SARQ */ { 0, 0 },
6911#endif
6912};
6913#endif /* VBOX */