VirtualBox

source: vbox/trunk/src/recompiler/target-i386/op_helper.c@ 36171

Last change on this file since 36171 was 36171, checked in by vboxsync, 14 years ago

rem: Merged in changes from the branches/stable_0_10 (r7249).

  • Property svn:eol-style set to native
File size: 190.8 KB
 
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
19 */
20
21/*
22 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29
30#define CPU_NO_GLOBAL_REGS
31#include "exec.h"
32#include "exec-all.h"
33#include "host-utils.h"
34
35#ifdef VBOX
36# include "qemu-common.h"
37# include <math.h>
38# include "tcg.h"
39#endif /* VBOX */
40//#define DEBUG_PCALL
41
42
43#ifdef DEBUG_PCALL
44# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
45# define LOG_PCALL_STATE(env) \
46 log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
47#else
48# define LOG_PCALL(...) do { } while (0)
49# define LOG_PCALL_STATE(env) do { } while (0)
50#endif
51
52
53#if 0
54#define raise_exception_err(a, b)\
55do {\
56 qemu_log("raise_exception line=%d\n", __LINE__);\
57 (raise_exception_err)(a, b);\
58} while (0)
59#endif
60
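/* parity flag lookup: entry i is CC_P when the low byte i contains an even number of set bits */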
61static const uint8_t parity_table[256] = {
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
67 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
73 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
74 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
79 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
81 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
82 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
83 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
84 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
85 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
86 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
87 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
88 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
89 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
90 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
91 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
92 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
93 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
94};
95
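/* rotate-through-carry helpers: a 16-bit RCL/RCR rotates through 17 bits and an 8-bit one through 9,
   so the (5-bit) rotate count is reduced modulo 17 or modulo 9 via these tables */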
96/* modulo 17 table */
97static const uint8_t rclw_table[32] = {
98 0, 1, 2, 3, 4, 5, 6, 7,
99 8, 9,10,11,12,13,14,15,
100 16, 0, 1, 2, 3, 4, 5, 6,
101 7, 8, 9,10,11,12,13,14,
102};
103
104/* modulo 9 table */
105static const uint8_t rclb_table[32] = {
106 0, 1, 2, 3, 4, 5, 6, 7,
107 8, 0, 1, 2, 3, 4, 5, 6,
108 7, 8, 0, 1, 2, 3, 4, 5,
109 6, 7, 8, 0, 1, 2, 3, 4,
110};
111
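/* constants pushed by the FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E and FLDL2T instructions, in that index order */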
112static const CPU86_LDouble f15rk[7] =
113{
114 0.00000000000000000000L,
115 1.00000000000000000000L,
116 3.14159265358979323851L, /*pi*/
117 0.30102999566398119523L, /*lg2*/
118 0.69314718055994530943L, /*ln2*/
119 1.44269504088896340739L, /*l2e*/
120 3.32192809488736234781L, /*l2t*/
121};
122
123/* broken thread support */
124
125static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
126
127void helper_lock(void)
128{
129 spin_lock(&global_cpu_lock);
130}
131
132void helper_unlock(void)
133{
134 spin_unlock(&global_cpu_lock);
135}
136
137void helper_write_eflags(target_ulong t0, uint32_t update_mask)
138{
139 load_eflags(t0, update_mask);
140}
141
142target_ulong helper_read_eflags(void)
143{
144 uint32_t eflags;
145 eflags = helper_cc_compute_all(CC_OP);
146 eflags |= (DF & DF_MASK);
147 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
148 return eflags;
149}
150
151#ifdef VBOX
152
153void helper_write_eflags_vme(target_ulong t0)
154{
155 unsigned int new_eflags = t0;
156
157 assert(env->eflags & (1<<VM_SHIFT));
158
159 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
160 /* if TF will be set -> #GP */
161 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
162 || (new_eflags & TF_MASK)) {
163 raise_exception(EXCP0D_GPF);
164 } else {
165 load_eflags(new_eflags,
166 (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
167
168 if (new_eflags & IF_MASK) {
169 env->eflags |= VIF_MASK;
170 } else {
171 env->eflags &= ~VIF_MASK;
172 }
173 }
174}
175
176target_ulong helper_read_eflags_vme(void)
177{
178 uint32_t eflags;
179 eflags = helper_cc_compute_all(CC_OP);
180 eflags |= (DF & DF_MASK);
181 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
182 if (env->eflags & VIF_MASK)
183 eflags |= IF_MASK;
184 else
185 eflags &= ~IF_MASK;
186
187 /* According to AMD manual, should be read with IOPL == 3 */
188 eflags |= (3 << IOPL_SHIFT);
189
190 /* We only use helper_read_eflags_vme() in 16-bit mode */
191 return eflags & 0xffff;
192}
193
194void helper_dump_state()
195{
196 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
197 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
198 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
199 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
200 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
201 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
202 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
203}
204
205#endif /* VBOX */
206
207/* return non-zero on error */
208static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
209 int selector)
210{
211 SegmentCache *dt;
212 int index;
213 target_ulong ptr;
214
215#ifdef VBOX
216 /* Trying to load a selector with CPL=1? */
217 if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
218 {
219 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
220 selector = selector & 0xfffc;
221 }
222#endif /* VBOX */
223
224 if (selector & 0x4)
225 dt = &env->ldt;
226 else
227 dt = &env->gdt;
228 index = selector & ~7;
229 if ((index + 7) > dt->limit)
230 return -1;
231 ptr = dt->base + index;
232 *e1_ptr = ldl_kernel(ptr);
233 *e2_ptr = ldl_kernel(ptr + 4);
234 return 0;
235}
236
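/* descriptor decoding helpers: e1 is the low dword of a segment descriptor, e2 the high dword;
   the 20-bit limit is scaled to 4 KiB pages when the granularity (G) bit is set */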
237static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
238{
239 unsigned int limit;
240 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
241 if (e2 & DESC_G_MASK)
242 limit = (limit << 12) | 0xfff;
243 return limit;
244}
245
246static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
247{
248 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
249}
250
251static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
252{
253 sc->base = get_seg_base(e1, e2);
254 sc->limit = get_seg_limit(e1, e2);
255 sc->flags = e2;
256}
257
258/* init the segment cache in vm86 mode. */
259static inline void load_seg_vm(int seg, int selector)
260{
261 selector &= 0xffff;
262#ifdef VBOX
263 /* flags must be 0xf3; expand-up read/write accessed data segment with DPL=3. (VT-x) */
264 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK;
265 flags |= (3 << DESC_DPL_SHIFT);
266
267 cpu_x86_load_seg_cache(env, seg, selector,
268 (selector << 4), 0xffff, flags);
269#else /* VBOX */
270 cpu_x86_load_seg_cache(env, seg, selector,
271 (selector << 4), 0xffff, 0);
272#endif /* VBOX */
273}
274
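/* fetch the stack pointer (SS:ESP) for the given privilege level from the current TSS;
   16-bit and 32-bit TSS layouts differ, hence the shift below */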
275static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
276 uint32_t *esp_ptr, int dpl)
277{
278#ifndef VBOX
279 int type, index, shift;
280#else
281 unsigned int type, index, shift;
282#endif
283
284#if 0
285 {
286 int i;
287 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
288 for(i=0;i<env->tr.limit;i++) {
289 printf("%02x ", env->tr.base[i]);
290 if ((i & 7) == 7) printf("\n");
291 }
292 printf("\n");
293 }
294#endif
295
296 if (!(env->tr.flags & DESC_P_MASK))
297 cpu_abort(env, "invalid tss");
298 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
299 if ((type & 7) != 1)
300 cpu_abort(env, "invalid tss type");
301 shift = type >> 3;
302 index = (dpl * 4 + 2) << shift;
303 if (index + (4 << shift) - 1 > env->tr.limit)
304 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
305 if (shift == 0) {
306 *esp_ptr = lduw_kernel(env->tr.base + index);
307 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
308 } else {
309 *esp_ptr = ldl_kernel(env->tr.base + index);
310 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
311 }
312}
313
314/* XXX: merge with load_seg() */
315static void tss_load_seg(int seg_reg, int selector)
316{
317 uint32_t e1, e2;
318 int rpl, dpl, cpl;
319
320#ifdef VBOX
321 e1 = e2 = 0; /* gcc warning? */
322 cpl = env->hflags & HF_CPL_MASK;
323 /* Trying to load a selector with CPL=1? */
324 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
325 {
326 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
327 selector = selector & 0xfffc;
328 }
329#endif /* VBOX */
330
331 if ((selector & 0xfffc) != 0) {
332 if (load_segment(&e1, &e2, selector) != 0)
333 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
334 if (!(e2 & DESC_S_MASK))
335 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
336 rpl = selector & 3;
337 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
338 cpl = env->hflags & HF_CPL_MASK;
339 if (seg_reg == R_CS) {
340 if (!(e2 & DESC_CS_MASK))
341 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
342 /* XXX: is it correct ? */
343 if (dpl != rpl)
344 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
345 if ((e2 & DESC_C_MASK) && dpl > rpl)
346 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
347 } else if (seg_reg == R_SS) {
348 /* SS must be writable data */
349 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
350 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
351 if (dpl != cpl || dpl != rpl)
352 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
353 } else {
354 /* not readable code */
355 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
356 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
357 /* if data or non-conforming code, check the rights */
358 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
359 if (dpl < cpl || dpl < rpl)
360 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
361 }
362 }
363 if (!(e2 & DESC_P_MASK))
364 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
365 cpu_x86_load_seg_cache(env, seg_reg, selector,
366 get_seg_base(e1, e2),
367 get_seg_limit(e1, e2),
368 e2);
369 } else {
370 if (seg_reg == R_SS || seg_reg == R_CS)
371 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
372#ifdef VBOX
373# if 0
374 /** @todo We currently ignore loading null selectors; need to check what the correct behaviour is. */
375 cpu_x86_load_seg_cache(env, seg_reg, selector,
376 0, 0, 0);
377# endif
378#endif /* VBOX */
379 }
380}
381
382#define SWITCH_TSS_JMP 0
383#define SWITCH_TSS_IRET 1
384#define SWITCH_TSS_CALL 2
385
386/* XXX: restore CPU state in registers (PowerPC case) */
387static void switch_tss(int tss_selector,
388 uint32_t e1, uint32_t e2, int source,
389 uint32_t next_eip)
390{
391 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
392 target_ulong tss_base;
393 uint32_t new_regs[8], new_segs[6];
394 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
395 uint32_t old_eflags, eflags_mask;
396 SegmentCache *dt;
397#ifndef VBOX
398 int index;
399#else
400 unsigned int index;
401#endif
402 target_ulong ptr;
403
404 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
405 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
406
407 /* if task gate, we read the TSS segment and we load it */
408 if (type == 5) {
409 if (!(e2 & DESC_P_MASK))
410 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
411 tss_selector = e1 >> 16;
412 if (tss_selector & 4)
413 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
414 if (load_segment(&e1, &e2, tss_selector) != 0)
415 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
416 if (e2 & DESC_S_MASK)
417 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
418 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
419 if ((type & 7) != 1)
420 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
421 }
422
423 if (!(e2 & DESC_P_MASK))
424 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
425
426 if (type & 8)
427 tss_limit_max = 103;
428 else
429 tss_limit_max = 43;
430 tss_limit = get_seg_limit(e1, e2);
431 tss_base = get_seg_base(e1, e2);
432 if ((tss_selector & 4) != 0 ||
433 tss_limit < tss_limit_max)
434 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
435 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
436 if (old_type & 8)
437 old_tss_limit_max = 103;
438 else
439 old_tss_limit_max = 43;
440
441 /* read all the registers from the new TSS */
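/* 32-bit TSS layout used below: CR3 at 0x1c, EIP at 0x20, EFLAGS at 0x24, general registers from 0x28,
   segment selectors from 0x48, LDT selector at 0x60, T-bit/I/O-map base at 0x64 */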
442 if (type & 8) {
443 /* 32 bit */
444 new_cr3 = ldl_kernel(tss_base + 0x1c);
445 new_eip = ldl_kernel(tss_base + 0x20);
446 new_eflags = ldl_kernel(tss_base + 0x24);
447 for(i = 0; i < 8; i++)
448 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
449 for(i = 0; i < 6; i++)
450 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
451 new_ldt = lduw_kernel(tss_base + 0x60);
452 new_trap = ldl_kernel(tss_base + 0x64);
453 } else {
454 /* 16 bit */
455 new_cr3 = 0;
456 new_eip = lduw_kernel(tss_base + 0x0e);
457 new_eflags = lduw_kernel(tss_base + 0x10);
458 for(i = 0; i < 8; i++)
459 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
460 for(i = 0; i < 4; i++)
461 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
462 new_ldt = lduw_kernel(tss_base + 0x2a);
463 new_segs[R_FS] = 0;
464 new_segs[R_GS] = 0;
465 new_trap = 0;
466 }
467
468 /* NOTE: we must avoid memory exceptions during the task switch,
469 so we perform dummy accesses beforehand */
470 /* XXX: it can still fail in some cases, so a bigger hack is
471 necessary to validate the TLB after having done the accesses */
472
473 v1 = ldub_kernel(env->tr.base);
474 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
475 stb_kernel(env->tr.base, v1);
476 stb_kernel(env->tr.base + old_tss_limit_max, v2);
477
478 /* clear busy bit (it is restartable) */
479 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
480 target_ulong ptr;
481 uint32_t e2;
482 ptr = env->gdt.base + (env->tr.selector & ~7);
483 e2 = ldl_kernel(ptr + 4);
484 e2 &= ~DESC_TSS_BUSY_MASK;
485 stl_kernel(ptr + 4, e2);
486 }
487 old_eflags = compute_eflags();
488 if (source == SWITCH_TSS_IRET)
489 old_eflags &= ~NT_MASK;
490
491 /* save the current state in the old TSS */
492 if (type & 8) {
493 /* 32 bit */
494 stl_kernel(env->tr.base + 0x20, next_eip);
495 stl_kernel(env->tr.base + 0x24, old_eflags);
496 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
497 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
498 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
499 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
500 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
501 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
502 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
503 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
504 for(i = 0; i < 6; i++)
505 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
506#ifdef VBOX
507 /* Must store the ldt as it gets reloaded and might have been changed. */
508 stw_kernel(env->tr.base + 0x60, env->ldt.selector);
509#endif
510#if defined(VBOX) && defined(DEBUG)
511 printf("TSS 32 bits switch\n");
512 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
513#endif
514 } else {
515 /* 16 bit */
516 stw_kernel(env->tr.base + 0x0e, next_eip);
517 stw_kernel(env->tr.base + 0x10, old_eflags);
518 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
519 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
520 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
521 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
522 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
523 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
524 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
525 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
526 for(i = 0; i < 4; i++)
527 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
528#ifdef VBOX
529 /* Must store the ldt as it gets reloaded and might have been changed. */
530 stw_kernel(env->tr.base + 0x2a, env->ldt.selector);
531#endif
532 }
533
534 /* now if an exception occurs, it will occur in the next task
535 context */
536
537 if (source == SWITCH_TSS_CALL) {
538 stw_kernel(tss_base, env->tr.selector);
539 new_eflags |= NT_MASK;
540 }
541
542 /* set busy bit */
543 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
544 target_ulong ptr;
545 uint32_t e2;
546 ptr = env->gdt.base + (tss_selector & ~7);
547 e2 = ldl_kernel(ptr + 4);
548 e2 |= DESC_TSS_BUSY_MASK;
549 stl_kernel(ptr + 4, e2);
550 }
551
552 /* set the new CPU state */
553 /* from this point, any exception which occurs can give problems */
554 env->cr[0] |= CR0_TS_MASK;
555 env->hflags |= HF_TS_MASK;
556 env->tr.selector = tss_selector;
557 env->tr.base = tss_base;
558 env->tr.limit = tss_limit;
559 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
560
561 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
562 cpu_x86_update_cr3(env, new_cr3);
563 }
564
565 /* load all registers without an exception, then reload them with
566 possible exception */
567 env->eip = new_eip;
568 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
569 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
570 if (!(type & 8))
571 eflags_mask &= 0xffff;
572 load_eflags(new_eflags, eflags_mask);
573 /* XXX: what to do in 16 bit case ? */
574 EAX = new_regs[0];
575 ECX = new_regs[1];
576 EDX = new_regs[2];
577 EBX = new_regs[3];
578 ESP = new_regs[4];
579 EBP = new_regs[5];
580 ESI = new_regs[6];
581 EDI = new_regs[7];
582 if (new_eflags & VM_MASK) {
583 for(i = 0; i < 6; i++)
584 load_seg_vm(i, new_segs[i]);
585 /* in vm86, CPL is always 3 */
586 cpu_x86_set_cpl(env, 3);
587 } else {
588 /* CPL is set to the RPL of CS */
589 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
590 /* load just the selectors first, as the rest may trigger exceptions */
591 for(i = 0; i < 6; i++)
592 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
593 }
594
595 env->ldt.selector = new_ldt & ~4;
596 env->ldt.base = 0;
597 env->ldt.limit = 0;
598 env->ldt.flags = 0;
599
600 /* load the LDT */
601 if (new_ldt & 4)
602 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
603
604 if ((new_ldt & 0xfffc) != 0) {
605 dt = &env->gdt;
606 index = new_ldt & ~7;
607 if ((index + 7) > dt->limit)
608 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
609 ptr = dt->base + index;
610 e1 = ldl_kernel(ptr);
611 e2 = ldl_kernel(ptr + 4);
612 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
613 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
614 if (!(e2 & DESC_P_MASK))
615 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
616 load_seg_cache_raw_dt(&env->ldt, e1, e2);
617 }
618
619 /* load the segments */
620 if (!(new_eflags & VM_MASK)) {
621 tss_load_seg(R_CS, new_segs[R_CS]);
622 tss_load_seg(R_SS, new_segs[R_SS]);
623 tss_load_seg(R_ES, new_segs[R_ES]);
624 tss_load_seg(R_DS, new_segs[R_DS]);
625 tss_load_seg(R_FS, new_segs[R_FS]);
626 tss_load_seg(R_GS, new_segs[R_GS]);
627 }
628
629 /* check that EIP is in the CS segment limits */
630 if (new_eip > env->segs[R_CS].limit) {
631 /* XXX: different exception if CALL ? */
632 raise_exception_err(EXCP0D_GPF, 0);
633 }
634
635#ifndef CONFIG_USER_ONLY
636 /* reset local breakpoints */
637 if (env->dr[7] & 0x55) {
638 for (i = 0; i < 4; i++) {
639 if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
640 hw_breakpoint_remove(env, i);
641 }
642 env->dr[7] &= ~0x55;
643 }
644#endif
645}
646
647/* check if Port I/O is allowed in TSS */
648static inline void check_io(int addr, int size)
649{
650#ifndef VBOX
651 int io_offset, val, mask;
652#else
653 int val, mask;
654 unsigned int io_offset;
655#endif /* VBOX */
656
657 /* TSS must be a valid 32 bit one */
658 if (!(env->tr.flags & DESC_P_MASK) ||
659 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
660 env->tr.limit < 103)
661 goto fail;
662 io_offset = lduw_kernel(env->tr.base + 0x66);
663 io_offset += (addr >> 3);
664 /* Note: the check reads two bytes since the permission bits for one access may span a byte boundary */
665 if ((io_offset + 1) > env->tr.limit)
666 goto fail;
667 val = lduw_kernel(env->tr.base + io_offset);
668 val >>= (addr & 7);
669 mask = (1 << size) - 1;
670 /* all bits must be zero to allow the I/O */
671 if ((val & mask) != 0) {
672 fail:
673 raise_exception_err(EXCP0D_GPF, 0);
674 }
675}
676
677#ifdef VBOX
678/* Keep in sync with gen_check_external_event() */
679void helper_check_external_event()
680{
681 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_EXIT
682 | CPU_INTERRUPT_EXTERNAL_TIMER
683 | CPU_INTERRUPT_EXTERNAL_DMA))
684 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
685 && (env->eflags & IF_MASK)
686 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
687 {
688 helper_external_event();
689 }
690
691}
692
693void helper_sync_seg(uint32_t reg)
694{
695 if (env->segs[reg].newselector)
696 sync_seg(env, reg, env->segs[reg].newselector);
697}
698#endif /* VBOX */
699
700void helper_check_iob(uint32_t t0)
701{
702 check_io(t0, 1);
703}
704
705void helper_check_iow(uint32_t t0)
706{
707 check_io(t0, 2);
708}
709
710void helper_check_iol(uint32_t t0)
711{
712 check_io(t0, 4);
713}
714
715void helper_outb(uint32_t port, uint32_t data)
716{
717 cpu_outb(env, port, data & 0xff);
718}
719
720target_ulong helper_inb(uint32_t port)
721{
722 return cpu_inb(env, port);
723}
724
725void helper_outw(uint32_t port, uint32_t data)
726{
727 cpu_outw(env, port, data & 0xffff);
728}
729
730target_ulong helper_inw(uint32_t port)
731{
732 return cpu_inw(env, port);
733}
734
735void helper_outl(uint32_t port, uint32_t data)
736{
737 cpu_outl(env, port, data);
738}
739
740target_ulong helper_inl(uint32_t port)
741{
742 return cpu_inl(env, port);
743}
744
745static inline unsigned int get_sp_mask(unsigned int e2)
746{
747 if (e2 & DESC_B_MASK)
748 return 0xffffffff;
749 else
750 return 0xffff;
751}
752
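/* update the stack pointer according to the stack-segment size: only the low 16 or 32 bits are
   replaced for 16/32-bit stacks, the full value for a 64-bit stack */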
753#ifdef TARGET_X86_64
754#define SET_ESP(val, sp_mask)\
755do {\
756 if ((sp_mask) == 0xffff)\
757 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
758 else if ((sp_mask) == 0xffffffffLL)\
759 ESP = (uint32_t)(val);\
760 else\
761 ESP = (val);\
762} while (0)
763#else
764#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
765#endif
766
767/* in 64-bit machines, this can overflow. So this segment addition macro
768 * can be used to trim the value to 32-bit whenever needed */
769#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
770
771/* XXX: add a is_user flag to have proper security support */
772#define PUSHW(ssp, sp, sp_mask, val)\
773{\
774 sp -= 2;\
775 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
776}
777
778#define PUSHL(ssp, sp, sp_mask, val)\
779{\
780 sp -= 4;\
781 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
782}
783
784#define POPW(ssp, sp, sp_mask, val)\
785{\
786 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
787 sp += 2;\
788}
789
790#define POPL(ssp, sp, sp_mask, val)\
791{\
792 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
793 sp += 4;\
794}
795
796/* protected mode interrupt */
797static void do_interrupt_protected(int intno, int is_int, int error_code,
798 unsigned int next_eip, int is_hw)
799{
800 SegmentCache *dt;
801 target_ulong ptr, ssp;
802 int type, dpl, selector, ss_dpl, cpl;
803 int has_error_code, new_stack, shift;
804 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
805 uint32_t old_eip, sp_mask;
806
807#ifdef VBOX
808 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
809 cpu_loop_exit();
810#endif
811
812 has_error_code = 0;
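/* hardware exceptions #DF(8), #TS(10), #NP(11), #SS(12), #GP(13), #PF(14) and #AC(17) push an error code */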
813 if (!is_int && !is_hw) {
814 switch(intno) {
815 case 8:
816 case 10:
817 case 11:
818 case 12:
819 case 13:
820 case 14:
821 case 17:
822 has_error_code = 1;
823 break;
824 }
825 }
826 if (is_int)
827 old_eip = next_eip;
828 else
829 old_eip = env->eip;
830
831 dt = &env->idt;
832#ifndef VBOX
833 if (intno * 8 + 7 > dt->limit)
834#else
835 if ((unsigned)intno * 8 + 7 > dt->limit)
836#endif
837 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
838 ptr = dt->base + intno * 8;
839 e1 = ldl_kernel(ptr);
840 e2 = ldl_kernel(ptr + 4);
841 /* check gate type */
842 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
843 switch(type) {
844 case 5: /* task gate */
845 /* must do that check here to return the correct error code */
846 if (!(e2 & DESC_P_MASK))
847 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
848 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
849 if (has_error_code) {
850 int type;
851 uint32_t mask;
852 /* push the error code */
853 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
854 shift = type >> 3;
855 if (env->segs[R_SS].flags & DESC_B_MASK)
856 mask = 0xffffffff;
857 else
858 mask = 0xffff;
859 esp = (ESP - (2 << shift)) & mask;
860 ssp = env->segs[R_SS].base + esp;
861 if (shift)
862 stl_kernel(ssp, error_code);
863 else
864 stw_kernel(ssp, error_code);
865 SET_ESP(esp, mask);
866 }
867 return;
868 case 6: /* 286 interrupt gate */
869 case 7: /* 286 trap gate */
870 case 14: /* 386 interrupt gate */
871 case 15: /* 386 trap gate */
872 break;
873 default:
874 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
875 break;
876 }
877 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
878 cpl = env->hflags & HF_CPL_MASK;
879 /* check privilege if software int */
880 if (is_int && dpl < cpl)
881 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
882 /* check valid bit */
883 if (!(e2 & DESC_P_MASK))
884 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
885 selector = e1 >> 16;
886 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
887 if ((selector & 0xfffc) == 0)
888 raise_exception_err(EXCP0D_GPF, 0);
889
890 if (load_segment(&e1, &e2, selector) != 0)
891 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
892 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
893 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
894 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
895 if (dpl > cpl)
896 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
897 if (!(e2 & DESC_P_MASK))
898 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
899 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
900 /* to inner privilege */
901 get_ss_esp_from_tss(&ss, &esp, dpl);
902 if ((ss & 0xfffc) == 0)
903 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
904 if ((ss & 3) != dpl)
905 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
906 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
907 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
908 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
909 if (ss_dpl != dpl)
910 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
911 if (!(ss_e2 & DESC_S_MASK) ||
912 (ss_e2 & DESC_CS_MASK) ||
913 !(ss_e2 & DESC_W_MASK))
914 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
915 if (!(ss_e2 & DESC_P_MASK))
916#ifdef VBOX /* See page 3-477 of 253666.pdf */
917 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
918#else
919 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
920#endif
921 new_stack = 1;
922 sp_mask = get_sp_mask(ss_e2);
923 ssp = get_seg_base(ss_e1, ss_e2);
924#if defined(VBOX) && defined(DEBUG)
925 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
926#endif
927 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
928 /* to same privilege */
929 if (env->eflags & VM_MASK)
930 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
931 new_stack = 0;
932 sp_mask = get_sp_mask(env->segs[R_SS].flags);
933 ssp = env->segs[R_SS].base;
934 esp = ESP;
935 dpl = cpl;
936 } else {
937 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
938 new_stack = 0; /* avoid warning */
939 sp_mask = 0; /* avoid warning */
940 ssp = 0; /* avoid warning */
941 esp = 0; /* avoid warning */
942 }
943
944 shift = type >> 3;
945
946#if 0
947 /* XXX: check that enough room is available */
948 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
949 if (env->eflags & VM_MASK)
950 push_size += 8;
951 push_size <<= shift;
952#endif
953 if (shift == 1) {
954 if (new_stack) {
955 if (env->eflags & VM_MASK) {
956 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
957 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
958 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
959 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
960 }
961 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
962 PUSHL(ssp, esp, sp_mask, ESP);
963 }
964 PUSHL(ssp, esp, sp_mask, compute_eflags());
965 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
966 PUSHL(ssp, esp, sp_mask, old_eip);
967 if (has_error_code) {
968 PUSHL(ssp, esp, sp_mask, error_code);
969 }
970 } else {
971 if (new_stack) {
972 if (env->eflags & VM_MASK) {
973 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
974 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
975 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
976 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
977 }
978 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
979 PUSHW(ssp, esp, sp_mask, ESP);
980 }
981 PUSHW(ssp, esp, sp_mask, compute_eflags());
982 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
983 PUSHW(ssp, esp, sp_mask, old_eip);
984 if (has_error_code) {
985 PUSHW(ssp, esp, sp_mask, error_code);
986 }
987 }
988
989 if (new_stack) {
990 if (env->eflags & VM_MASK) {
991 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
992 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
993 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
994 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
995 }
996 ss = (ss & ~3) | dpl;
997 cpu_x86_load_seg_cache(env, R_SS, ss,
998 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
999 }
1000 SET_ESP(esp, sp_mask);
1001
1002 selector = (selector & ~3) | dpl;
1003 cpu_x86_load_seg_cache(env, R_CS, selector,
1004 get_seg_base(e1, e2),
1005 get_seg_limit(e1, e2),
1006 e2);
1007 cpu_x86_set_cpl(env, dpl);
1008 env->eip = offset;
1009
1010 /* an interrupt gate (as opposed to a trap gate) clears IF */
1011 if ((type & 1) == 0) {
1012 env->eflags &= ~IF_MASK;
1013 }
1014#ifndef VBOX
1015 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1016#else
1017 /*
1018 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1019 * gets confused by seemingly changed EFLAGS. See #3491 and
1020 * public bug #2341.
1021 */
1022 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1023#endif
1024}
1025
1026#ifdef VBOX
1027
1028/* check if VME interrupt redirection is enabled in TSS */
1029DECLINLINE(bool) is_vme_irq_redirected(int intno)
1030{
1031 unsigned int io_offset, intredir_offset;
1032 unsigned char val, mask;
1033
1034 /* TSS must be a valid 32 bit one */
1035 if (!(env->tr.flags & DESC_P_MASK) ||
1036 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
1037 env->tr.limit < 103)
1038 goto fail;
1039 io_offset = lduw_kernel(env->tr.base + 0x66);
1040 /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */
1041 if (io_offset < 0x68 + 0x20)
1042 io_offset = 0x68 + 0x20;
1043 /* the virtual interrupt redirection bitmap is located below the io bitmap */
1044 intredir_offset = io_offset - 0x20;
1045
1046 intredir_offset += (intno >> 3);
1047 if ((intredir_offset) > env->tr.limit)
1048 goto fail;
1049
1050 val = ldub_kernel(env->tr.base + intredir_offset);
1051 mask = 1 << (unsigned char)(intno & 7);
1052
1053 /* bit set means no redirection. */
1054 if ((val & mask) != 0) {
1055 return false;
1056 }
1057 return true;
1058
1059fail:
1060 raise_exception_err(EXCP0D_GPF, 0);
1061 return true;
1062}
1063
1064/* V86 mode software interrupt with CR4.VME=1 */
1065static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
1066{
1067 target_ulong ptr, ssp;
1068 int selector;
1069 uint32_t offset, esp;
1070 uint32_t old_cs, old_eflags;
1071 uint32_t iopl;
1072
1073 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
1074
1075 if (!is_vme_irq_redirected(intno))
1076 {
1077 if (iopl == 3)
1078 {
1079 do_interrupt_protected(intno, 1, error_code, next_eip, 0);
1080 return;
1081 }
1082 else
1083 raise_exception_err(EXCP0D_GPF, 0);
1084 }
1085
1086 /* virtual mode idt is at linear address 0 */
1087 ptr = 0 + intno * 4;
1088 offset = lduw_kernel(ptr);
1089 selector = lduw_kernel(ptr + 2);
1090 esp = ESP;
1091 ssp = env->segs[R_SS].base;
1092 old_cs = env->segs[R_CS].selector;
1093
1094 old_eflags = compute_eflags();
1095 if (iopl < 3)
1096 {
1097 /* copy VIF into IF and set IOPL to 3 */
1098 if (env->eflags & VIF_MASK)
1099 old_eflags |= IF_MASK;
1100 else
1101 old_eflags &= ~IF_MASK;
1102
1103 old_eflags |= (3 << IOPL_SHIFT);
1104 }
1105
1106 /* XXX: use SS segment size ? */
1107 PUSHW(ssp, esp, 0xffff, old_eflags);
1108 PUSHW(ssp, esp, 0xffff, old_cs);
1109 PUSHW(ssp, esp, 0xffff, next_eip);
1110
1111 /* update processor state */
1112 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1113 env->eip = offset;
1114 env->segs[R_CS].selector = selector;
1115 env->segs[R_CS].base = (selector << 4);
1116 env->eflags &= ~(TF_MASK | RF_MASK);
1117
1118 if (iopl < 3)
1119 env->eflags &= ~VIF_MASK;
1120 else
1121 env->eflags &= ~IF_MASK;
1122}
1123
1124#endif /* VBOX */
1125
1126#ifdef TARGET_X86_64
1127
1128#define PUSHQ(sp, val)\
1129{\
1130 sp -= 8;\
1131 stq_kernel(sp, (val));\
1132}
1133
1134#define POPQ(sp, val)\
1135{\
1136 val = ldq_kernel(sp);\
1137 sp += 8;\
1138}
1139
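/* 64-bit TSS layout: RSP0-RSP2 at offsets 4, 12 and 20, IST1-IST7 from offset 0x24;
   hence index = 8 * level + 4, with IST n passed in as level n + 3 */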
1140static inline target_ulong get_rsp_from_tss(int level)
1141{
1142 int index;
1143
1144#if 0
1145 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1146 env->tr.base, env->tr.limit);
1147#endif
1148
1149 if (!(env->tr.flags & DESC_P_MASK))
1150 cpu_abort(env, "invalid tss");
1151 index = 8 * level + 4;
1152 if ((index + 7) > env->tr.limit)
1153 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1154 return ldq_kernel(env->tr.base + index);
1155}
1156
1157/* 64 bit interrupt */
1158static void do_interrupt64(int intno, int is_int, int error_code,
1159 target_ulong next_eip, int is_hw)
1160{
1161 SegmentCache *dt;
1162 target_ulong ptr;
1163 int type, dpl, selector, cpl, ist;
1164 int has_error_code, new_stack;
1165 uint32_t e1, e2, e3, ss;
1166 target_ulong old_eip, esp, offset;
1167
1168#ifdef VBOX
1169 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
1170 cpu_loop_exit();
1171#endif
1172
1173 has_error_code = 0;
1174 if (!is_int && !is_hw) {
1175 switch(intno) {
1176 case 8:
1177 case 10:
1178 case 11:
1179 case 12:
1180 case 13:
1181 case 14:
1182 case 17:
1183 has_error_code = 1;
1184 break;
1185 }
1186 }
1187 if (is_int)
1188 old_eip = next_eip;
1189 else
1190 old_eip = env->eip;
1191
1192 dt = &env->idt;
1193 if (intno * 16 + 15 > dt->limit)
1194 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1195 ptr = dt->base + intno * 16;
1196 e1 = ldl_kernel(ptr);
1197 e2 = ldl_kernel(ptr + 4);
1198 e3 = ldl_kernel(ptr + 8);
1199 /* check gate type */
1200 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1201 switch(type) {
1202 case 14: /* 386 interrupt gate */
1203 case 15: /* 386 trap gate */
1204 break;
1205 default:
1206 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1207 break;
1208 }
1209 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1210 cpl = env->hflags & HF_CPL_MASK;
1211 /* check privilege if software int */
1212 if (is_int && dpl < cpl)
1213 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1214 /* check valid bit */
1215 if (!(e2 & DESC_P_MASK))
1216 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1217 selector = e1 >> 16;
1218 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1219 ist = e2 & 7;
1220 if ((selector & 0xfffc) == 0)
1221 raise_exception_err(EXCP0D_GPF, 0);
1222
1223 if (load_segment(&e1, &e2, selector) != 0)
1224 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1225 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1226 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1227 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1228 if (dpl > cpl)
1229 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1230 if (!(e2 & DESC_P_MASK))
1231 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1232 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1233 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1234 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1235 /* to inner privilege */
1236 if (ist != 0)
1237 esp = get_rsp_from_tss(ist + 3);
1238 else
1239 esp = get_rsp_from_tss(dpl);
1240 esp &= ~0xfLL; /* align stack */
1241 ss = 0;
1242 new_stack = 1;
1243 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1244 /* to same privilege */
1245 if (env->eflags & VM_MASK)
1246 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1247 new_stack = 0;
1248 if (ist != 0)
1249 esp = get_rsp_from_tss(ist + 3);
1250 else
1251 esp = ESP;
1252 esp &= ~0xfLL; /* align stack */
1253 dpl = cpl;
1254 } else {
1255 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1256 new_stack = 0; /* avoid warning */
1257 esp = 0; /* avoid warning */
1258 }
1259
1260 PUSHQ(esp, env->segs[R_SS].selector);
1261 PUSHQ(esp, ESP);
1262 PUSHQ(esp, compute_eflags());
1263 PUSHQ(esp, env->segs[R_CS].selector);
1264 PUSHQ(esp, old_eip);
1265 if (has_error_code) {
1266 PUSHQ(esp, error_code);
1267 }
1268
1269 if (new_stack) {
1270 ss = 0 | dpl;
1271 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1272 }
1273 ESP = esp;
1274
1275 selector = (selector & ~3) | dpl;
1276 cpu_x86_load_seg_cache(env, R_CS, selector,
1277 get_seg_base(e1, e2),
1278 get_seg_limit(e1, e2),
1279 e2);
1280 cpu_x86_set_cpl(env, dpl);
1281 env->eip = offset;
1282
1283 /* an interrupt gate (as opposed to a trap gate) clears IF */
1284 if ((type & 1) == 0) {
1285 env->eflags &= ~IF_MASK;
1286 }
1287#ifndef VBOX
1288 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1289#else /* VBOX */
1290 /*
1291 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1292 * gets confused by seemingly changed EFLAGS. See #3491 and
1293 * public bug #2341.
1294 */
1295 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1296#endif /* VBOX */
1297}
1298#endif
1299
1300#ifdef TARGET_X86_64
1301#if defined(CONFIG_USER_ONLY)
1302void helper_syscall(int next_eip_addend)
1303{
1304 env->exception_index = EXCP_SYSCALL;
1305 env->exception_next_eip = env->eip + next_eip_addend;
1306 cpu_loop_exit();
1307}
1308#else
1309void helper_syscall(int next_eip_addend)
1310{
1311 int selector;
1312
1313 if (!(env->efer & MSR_EFER_SCE)) {
1314 raise_exception_err(EXCP06_ILLOP, 0);
1315 }
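/* SYSCALL loads CS from STAR[47:32] and SS from STAR[47:32] + 8; in long mode RIP comes from
   LSTAR (64-bit caller) or CSTAR (compatibility mode) and RFLAGS is masked with SFMASK */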
1316 selector = (env->star >> 32) & 0xffff;
1317 if (env->hflags & HF_LMA_MASK) {
1318 int code64;
1319
1320 ECX = env->eip + next_eip_addend;
1321 env->regs[11] = compute_eflags();
1322
1323 code64 = env->hflags & HF_CS64_MASK;
1324
1325 cpu_x86_set_cpl(env, 0);
1326 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1327 0, 0xffffffff,
1328 DESC_G_MASK | DESC_P_MASK |
1329 DESC_S_MASK |
1330 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1331 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1332 0, 0xffffffff,
1333 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1334 DESC_S_MASK |
1335 DESC_W_MASK | DESC_A_MASK);
1336 env->eflags &= ~env->fmask;
1337 load_eflags(env->eflags, 0);
1338 if (code64)
1339 env->eip = env->lstar;
1340 else
1341 env->eip = env->cstar;
1342 } else {
1343 ECX = (uint32_t)(env->eip + next_eip_addend);
1344
1345 cpu_x86_set_cpl(env, 0);
1346 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1347 0, 0xffffffff,
1348 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1349 DESC_S_MASK |
1350 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1351 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1352 0, 0xffffffff,
1353 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1354 DESC_S_MASK |
1355 DESC_W_MASK | DESC_A_MASK);
1356 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1357 env->eip = (uint32_t)env->star;
1358 }
1359}
1360#endif
1361#endif
1362
1363#ifdef TARGET_X86_64
1364void helper_sysret(int dflag)
1365{
1366 int cpl, selector;
1367
1368 if (!(env->efer & MSR_EFER_SCE)) {
1369 raise_exception_err(EXCP06_ILLOP, 0);
1370 }
1371 cpl = env->hflags & HF_CPL_MASK;
1372 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1373 raise_exception_err(EXCP0D_GPF, 0);
1374 }
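/* SYSRET takes the base selector from STAR[63:48]: a 64-bit return uses CS = base + 16,
   a 32-bit return uses CS = base, and SS = base + 8 in both cases */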
1375 selector = (env->star >> 48) & 0xffff;
1376 if (env->hflags & HF_LMA_MASK) {
1377 if (dflag == 2) {
1378 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1379 0, 0xffffffff,
1380 DESC_G_MASK | DESC_P_MASK |
1381 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1382 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1383 DESC_L_MASK);
1384 env->eip = ECX;
1385 } else {
1386 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1387 0, 0xffffffff,
1388 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1389 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1390 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1391 env->eip = (uint32_t)ECX;
1392 }
1393 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1394 0, 0xffffffff,
1395 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1396 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1397 DESC_W_MASK | DESC_A_MASK);
1398 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1399 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1400 cpu_x86_set_cpl(env, 3);
1401 } else {
1402 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1403 0, 0xffffffff,
1404 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1405 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1406 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1407 env->eip = (uint32_t)ECX;
1408 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1409 0, 0xffffffff,
1410 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1411 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1412 DESC_W_MASK | DESC_A_MASK);
1413 env->eflags |= IF_MASK;
1414 cpu_x86_set_cpl(env, 3);
1415 }
1416#ifdef USE_KQEMU
1417 if (kqemu_is_ok(env)) {
1418 if (env->hflags & HF_LMA_MASK)
1419 CC_OP = CC_OP_EFLAGS;
1420 env->exception_index = -1;
1421 cpu_loop_exit();
1422 }
1423#endif
1424}
1425#endif
1426
1427#ifdef VBOX
1428/**
1429 * Checks and processes external VMM events.
1430 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1431 */
1432void helper_external_event(void)
1433{
1434# if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1435 uintptr_t uSP;
1436# ifdef RT_ARCH_AMD64
1437 __asm__ __volatile__("movq %%rsp, %0" : "=r" (uSP));
1438# else
1439 __asm__ __volatile__("movl %%esp, %0" : "=r" (uSP));
1440# endif
1441 AssertMsg(!(uSP & 15), ("xSP=%#p\n", uSP));
1442# endif
1443 /* Keep in sync with flags checked by gen_check_external_event() */
1444 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1445 {
1446 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1447 ~CPU_INTERRUPT_EXTERNAL_HARD);
1448 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1449 }
1450 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1451 {
1452 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1453 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1454 cpu_interrupt(env, CPU_INTERRUPT_EXIT);
1455 }
1456 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1457 {
1458 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1459 ~CPU_INTERRUPT_EXTERNAL_DMA);
1460 remR3DmaRun(env);
1461 }
1462 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1463 {
1464 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1465 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1466 remR3TimersRun(env);
1467 }
1468}
1469/* helper for recording call instruction addresses for later scanning */
1470void helper_record_call()
1471{
1472 if ( !(env->state & CPU_RAW_RING0)
1473 && (env->cr[0] & CR0_PG_MASK)
1474 && !(env->eflags & X86_EFL_IF))
1475 remR3RecordCall(env);
1476}
1477#endif /* VBOX */
1478
1479/* real mode interrupt */
1480static void do_interrupt_real(int intno, int is_int, int error_code,
1481 unsigned int next_eip)
1482{
1483 SegmentCache *dt;
1484 target_ulong ptr, ssp;
1485 int selector;
1486 uint32_t offset, esp;
1487 uint32_t old_cs, old_eip;
1488
1489 /* real mode (simpler !) */
1490 dt = &env->idt;
1491#ifndef VBOX
1492 if (intno * 4 + 3 > dt->limit)
1493#else
1494 if ((unsigned)intno * 4 + 3 > dt->limit)
1495#endif
1496 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1497 ptr = dt->base + intno * 4;
1498 offset = lduw_kernel(ptr);
1499 selector = lduw_kernel(ptr + 2);
1500 esp = ESP;
1501 ssp = env->segs[R_SS].base;
1502 if (is_int)
1503 old_eip = next_eip;
1504 else
1505 old_eip = env->eip;
1506 old_cs = env->segs[R_CS].selector;
1507 /* XXX: use SS segment size ? */
1508 PUSHW(ssp, esp, 0xffff, compute_eflags());
1509 PUSHW(ssp, esp, 0xffff, old_cs);
1510 PUSHW(ssp, esp, 0xffff, old_eip);
1511
1512 /* update processor state */
1513 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1514 env->eip = offset;
1515 env->segs[R_CS].selector = selector;
1516 env->segs[R_CS].base = (selector << 4);
1517 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1518}
1519
1520/* fake user mode interrupt */
1521void do_interrupt_user(int intno, int is_int, int error_code,
1522 target_ulong next_eip)
1523{
1524 SegmentCache *dt;
1525 target_ulong ptr;
1526 int dpl, cpl, shift;
1527 uint32_t e2;
1528
1529 dt = &env->idt;
1530 if (env->hflags & HF_LMA_MASK) {
1531 shift = 4;
1532 } else {
1533 shift = 3;
1534 }
1535 ptr = dt->base + (intno << shift);
1536 e2 = ldl_kernel(ptr + 4);
1537
1538 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1539 cpl = env->hflags & HF_CPL_MASK;
1540 /* check privilege if software int */
1541 if (is_int && dpl < cpl)
1542 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1543
1544 /* Since we emulate only user space, we cannot do more than
1545 exit the emulation with the appropriate exception and error
1546 code */
1547 if (is_int)
1548 EIP = next_eip;
1549}
1550
1551/*
1552 * Begin execution of an interrupt. is_int is TRUE if coming from
1553 * the int instruction. next_eip is the EIP value AFTER the interrupt
1554 * instruction. It is only relevant if is_int is TRUE.
1555 */
1556void do_interrupt(int intno, int is_int, int error_code,
1557 target_ulong next_eip, int is_hw)
1558{
1559 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1560 if ((env->cr[0] & CR0_PE_MASK)) {
1561 static int count;
1562 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1563 count, intno, error_code, is_int,
1564 env->hflags & HF_CPL_MASK,
1565 env->segs[R_CS].selector, EIP,
1566 (int)env->segs[R_CS].base + EIP,
1567 env->segs[R_SS].selector, ESP);
1568 if (intno == 0x0e) {
1569 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1570 } else {
1571 qemu_log(" EAX=" TARGET_FMT_lx, EAX);
1572 }
1573 qemu_log("\n");
1574 log_cpu_state(env, X86_DUMP_CCOP);
1575#if 0
1576 {
1577 int i;
1578 uint8_t *ptr;
1579 qemu_log(" code=");
1580 ptr = env->segs[R_CS].base + env->eip;
1581 for(i = 0; i < 16; i++) {
1582 qemu_log(" %02x", ldub(ptr + i));
1583 }
1584 qemu_log("\n");
1585 }
1586#endif
1587 count++;
1588 }
1589 }
1590#ifdef VBOX
1591 if (RT_UNLIKELY(env->state & CPU_EMULATE_SINGLE_STEP)) {
1592 if (is_int) {
1593 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv%s\n",
1594 intno, error_code, (RTGCPTR)env->eip, is_hw ? " hw" : "");
1595 } else {
1596 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv next=%#RGv%s\n",
1597 intno, error_code, (RTGCPTR)env->eip, (RTGCPTR)next_eip, is_hw ? " hw" : "");
1598 }
1599 }
1600#endif
1601 if (env->cr[0] & CR0_PE_MASK) {
1602#ifdef TARGET_X86_64
1603 if (env->hflags & HF_LMA_MASK) {
1604 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1605 } else
1606#endif
1607 {
1608#ifdef VBOX
1609 /* int xx *, v86 code and VME enabled? */
1610 if ( (env->eflags & VM_MASK)
1611 && (env->cr[4] & CR4_VME_MASK)
1612 && is_int
1613 && !is_hw
1614 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1615 )
1616 do_soft_interrupt_vme(intno, error_code, next_eip);
1617 else
1618#endif /* VBOX */
1619 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1620 }
1621 } else {
1622 do_interrupt_real(intno, is_int, error_code, next_eip);
1623 }
1624}
1625
1626/* This should come from sysemu.h - if we could include it here... */
1627void qemu_system_reset_request(void);
1628
1629/*
1630 * Check nested exceptions and change to double or triple fault if
1631 * needed. It should only be called if this is not an interrupt.
1632 * Returns the new exception number.
1633 */
1634static int check_exception(int intno, int *error_code)
1635{
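/* #DE (0) and #TS/#NP/#SS/#GP (10-13) are contributory: two contributory faults, or a
   contributory fault or #PF following #PF, escalate to a double fault (#DF, 8) */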
1636 int first_contributory = env->old_exception == 0 ||
1637 (env->old_exception >= 10 &&
1638 env->old_exception <= 13);
1639 int second_contributory = intno == 0 ||
1640 (intno >= 10 && intno <= 13);
1641
1642 qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
1643 env->old_exception, intno);
1644
1645#if !defined(CONFIG_USER_ONLY)
1646 if (env->old_exception == EXCP08_DBLE) {
1647 if (env->hflags & HF_SVMI_MASK)
1648 helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */
1649
1650 qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
1651
1652# ifndef VBOX
1653 qemu_system_reset_request();
1654# else
1655 remR3RaiseRC(env->pVM, VINF_EM_RESET); /** @todo test + improve triple fault handling. */
1656# endif
1657 return EXCP_HLT;
1658 }
1659#endif
1660
1661 if ((first_contributory && second_contributory)
1662 || (env->old_exception == EXCP0E_PAGE &&
1663 (second_contributory || (intno == EXCP0E_PAGE)))) {
1664 intno = EXCP08_DBLE;
1665 *error_code = 0;
1666 }
1667
1668 if (second_contributory || (intno == EXCP0E_PAGE) ||
1669 (intno == EXCP08_DBLE))
1670 env->old_exception = intno;
1671
1672 return intno;
1673}
1674
1675/*
1676 * Signal an interrupt. It is executed in the main CPU loop.
1677 * is_int is TRUE if coming from the int instruction. next_eip is the
1678 * EIP value AFTER the interrupt instruction. It is only relevant if
1679 * is_int is TRUE.
1680 */
1681static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
1682 int next_eip_addend)
1683{
1684#if defined(VBOX) && defined(DEBUG)
1685 Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, (RTGCPTR)env->eip + next_eip_addend));
1686#endif
1687 if (!is_int) {
1688 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1689 intno = check_exception(intno, &error_code);
1690 } else {
1691 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1692 }
1693
1694 env->exception_index = intno;
1695 env->error_code = error_code;
1696 env->exception_is_int = is_int;
1697 env->exception_next_eip = env->eip + next_eip_addend;
1698 cpu_loop_exit();
1699}
1700
1701/* shortcuts to generate exceptions */
1702
1703void raise_exception_err(int exception_index, int error_code)
1704{
1705 raise_interrupt(exception_index, 0, error_code, 0);
1706}
1707
1708void raise_exception(int exception_index)
1709{
1710 raise_interrupt(exception_index, 0, 0, 0);
1711}
1712
1713/* SMM support */
1714
1715#if defined(CONFIG_USER_ONLY)
1716
1717void do_smm_enter(void)
1718{
1719}
1720
1721void helper_rsm(void)
1722{
1723}
1724
1725#else
1726
1727#ifdef TARGET_X86_64
1728#define SMM_REVISION_ID 0x00020064
1729#else
1730#define SMM_REVISION_ID 0x00020000
1731#endif
1732
1733void do_smm_enter(void)
1734{
1735 target_ulong sm_state;
1736 SegmentCache *dt;
1737 int i, offset;
1738
1739 qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
1740 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1741
1742 env->hflags |= HF_SMM_MASK;
1743 cpu_smm_update(env);
1744
1745 sm_state = env->smbase + 0x8000;
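/* the SMM state save area sits at the top of SMRAM: a field at sm_state + 0x7exx/0x7fxx is
   physical address SMBASE + 0xfexx/0xffxx; the SMI handler itself starts at SMBASE + 0x8000 */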
1746
1747#ifdef TARGET_X86_64
1748 for(i = 0; i < 6; i++) {
1749 dt = &env->segs[i];
1750 offset = 0x7e00 + i * 16;
1751 stw_phys(sm_state + offset, dt->selector);
1752 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1753 stl_phys(sm_state + offset + 4, dt->limit);
1754 stq_phys(sm_state + offset + 8, dt->base);
1755 }
1756
1757 stq_phys(sm_state + 0x7e68, env->gdt.base);
1758 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1759
1760 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1761 stq_phys(sm_state + 0x7e78, env->ldt.base);
1762 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1763 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1764
1765 stq_phys(sm_state + 0x7e88, env->idt.base);
1766 stl_phys(sm_state + 0x7e84, env->idt.limit);
1767
1768 stw_phys(sm_state + 0x7e90, env->tr.selector);
1769 stq_phys(sm_state + 0x7e98, env->tr.base);
1770 stl_phys(sm_state + 0x7e94, env->tr.limit);
1771 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1772
1773 stq_phys(sm_state + 0x7ed0, env->efer);
1774
1775 stq_phys(sm_state + 0x7ff8, EAX);
1776 stq_phys(sm_state + 0x7ff0, ECX);
1777 stq_phys(sm_state + 0x7fe8, EDX);
1778 stq_phys(sm_state + 0x7fe0, EBX);
1779 stq_phys(sm_state + 0x7fd8, ESP);
1780 stq_phys(sm_state + 0x7fd0, EBP);
1781 stq_phys(sm_state + 0x7fc8, ESI);
1782 stq_phys(sm_state + 0x7fc0, EDI);
1783 for(i = 8; i < 16; i++)
1784 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1785 stq_phys(sm_state + 0x7f78, env->eip);
1786 stl_phys(sm_state + 0x7f70, compute_eflags());
1787 stl_phys(sm_state + 0x7f68, env->dr[6]);
1788 stl_phys(sm_state + 0x7f60, env->dr[7]);
1789
1790 stl_phys(sm_state + 0x7f48, env->cr[4]);
1791 stl_phys(sm_state + 0x7f50, env->cr[3]);
1792 stl_phys(sm_state + 0x7f58, env->cr[0]);
1793
1794 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1795 stl_phys(sm_state + 0x7f00, env->smbase);
1796#else
1797 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1798 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1799 stl_phys(sm_state + 0x7ff4, compute_eflags());
1800 stl_phys(sm_state + 0x7ff0, env->eip);
1801 stl_phys(sm_state + 0x7fec, EDI);
1802 stl_phys(sm_state + 0x7fe8, ESI);
1803 stl_phys(sm_state + 0x7fe4, EBP);
1804 stl_phys(sm_state + 0x7fe0, ESP);
1805 stl_phys(sm_state + 0x7fdc, EBX);
1806 stl_phys(sm_state + 0x7fd8, EDX);
1807 stl_phys(sm_state + 0x7fd4, ECX);
1808 stl_phys(sm_state + 0x7fd0, EAX);
1809 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1810 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1811
1812 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1813 stl_phys(sm_state + 0x7f64, env->tr.base);
1814 stl_phys(sm_state + 0x7f60, env->tr.limit);
1815 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1816
1817 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1818 stl_phys(sm_state + 0x7f80, env->ldt.base);
1819 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1820 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1821
1822 stl_phys(sm_state + 0x7f74, env->gdt.base);
1823 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1824
1825 stl_phys(sm_state + 0x7f58, env->idt.base);
1826 stl_phys(sm_state + 0x7f54, env->idt.limit);
1827
1828 for(i = 0; i < 6; i++) {
1829 dt = &env->segs[i];
1830 if (i < 3)
1831 offset = 0x7f84 + i * 12;
1832 else
1833 offset = 0x7f2c + (i - 3) * 12;
1834 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1835 stl_phys(sm_state + offset + 8, dt->base);
1836 stl_phys(sm_state + offset + 4, dt->limit);
1837 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1838 }
1839 stl_phys(sm_state + 0x7f14, env->cr[4]);
1840
1841 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1842 stl_phys(sm_state + 0x7ef8, env->smbase);
1843#endif
1844 /* init SMM cpu state */
1845
1846#ifdef TARGET_X86_64
1847 cpu_load_efer(env, 0);
1848#endif
1849 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1850 env->eip = 0x00008000;
1851 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1852 0xffffffff, 0);
1853 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1854 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1855 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1856 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1857 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1858
1859 cpu_x86_update_cr0(env,
1860 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1861 cpu_x86_update_cr4(env, 0);
1862 env->dr[7] = 0x00000400;
1863 CC_OP = CC_OP_EFLAGS;
1864}
1865
1866void helper_rsm(void)
1867{
1868#ifdef VBOX
1869 cpu_abort(env, "helper_rsm");
1870#else /* !VBOX */
1871 target_ulong sm_state;
1872 int i, offset;
1873 uint32_t val;
1874
1875 sm_state = env->smbase + 0x8000;
1876#ifdef TARGET_X86_64
1877 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1878
1879 for(i = 0; i < 6; i++) {
1880 offset = 0x7e00 + i * 16;
1881 cpu_x86_load_seg_cache(env, i,
1882 lduw_phys(sm_state + offset),
1883 ldq_phys(sm_state + offset + 8),
1884 ldl_phys(sm_state + offset + 4),
1885 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1886 }
1887
1888 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1889 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1890
1891 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1892 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1893 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1894 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1895
1896 env->idt.base = ldq_phys(sm_state + 0x7e88);
1897 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1898
1899 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1900 env->tr.base = ldq_phys(sm_state + 0x7e98);
1901 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1902 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1903
1904 EAX = ldq_phys(sm_state + 0x7ff8);
1905 ECX = ldq_phys(sm_state + 0x7ff0);
1906 EDX = ldq_phys(sm_state + 0x7fe8);
1907 EBX = ldq_phys(sm_state + 0x7fe0);
1908 ESP = ldq_phys(sm_state + 0x7fd8);
1909 EBP = ldq_phys(sm_state + 0x7fd0);
1910 ESI = ldq_phys(sm_state + 0x7fc8);
1911 EDI = ldq_phys(sm_state + 0x7fc0);
1912 for(i = 8; i < 16; i++)
1913 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1914 env->eip = ldq_phys(sm_state + 0x7f78);
1915 load_eflags(ldl_phys(sm_state + 0x7f70),
1916 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1917 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1918 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1919
1920 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1921 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1922 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1923
1924 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1925 if (val & 0x20000) {
1926 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1927 }
1928#else
1929 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1930 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1931 load_eflags(ldl_phys(sm_state + 0x7ff4),
1932 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1933 env->eip = ldl_phys(sm_state + 0x7ff0);
1934 EDI = ldl_phys(sm_state + 0x7fec);
1935 ESI = ldl_phys(sm_state + 0x7fe8);
1936 EBP = ldl_phys(sm_state + 0x7fe4);
1937 ESP = ldl_phys(sm_state + 0x7fe0);
1938 EBX = ldl_phys(sm_state + 0x7fdc);
1939 EDX = ldl_phys(sm_state + 0x7fd8);
1940 ECX = ldl_phys(sm_state + 0x7fd4);
1941 EAX = ldl_phys(sm_state + 0x7fd0);
1942 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1943 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1944
1945 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1946 env->tr.base = ldl_phys(sm_state + 0x7f64);
1947 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1948 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1949
1950 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1951 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1952 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1953 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1954
1955 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1956 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1957
1958 env->idt.base = ldl_phys(sm_state + 0x7f58);
1959 env->idt.limit = ldl_phys(sm_state + 0x7f54);
1960
1961 for(i = 0; i < 6; i++) {
1962 if (i < 3)
1963 offset = 0x7f84 + i * 12;
1964 else
1965 offset = 0x7f2c + (i - 3) * 12;
1966 cpu_x86_load_seg_cache(env, i,
1967 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1968 ldl_phys(sm_state + offset + 8),
1969 ldl_phys(sm_state + offset + 4),
1970 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1971 }
1972 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1973
1974 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1975 if (val & 0x20000) {
1976 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1977 }
1978#endif
1979 CC_OP = CC_OP_EFLAGS;
1980 env->hflags &= ~HF_SMM_MASK;
1981 cpu_smm_update(env);
1982
1983 qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
1984 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1985#endif /* !VBOX */
1986}
1987
1988#endif /* !CONFIG_USER_ONLY */
1989
1990
1991/* division, flags are undefined */
1992
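/* Unsigned 8-bit divide: AX / divisor -> quotient in AL, remainder in AH.
 * A zero divisor or a quotient that does not fit in 8 bits raises #DE
 * (EXCP00_DIVZ), and the wider helpers below follow the same pattern for
 * the 16-bit and 32-bit operand sizes. */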
1993void helper_divb_AL(target_ulong t0)
1994{
1995 unsigned int num, den, q, r;
1996
1997 num = (EAX & 0xffff);
1998 den = (t0 & 0xff);
1999 if (den == 0) {
2000 raise_exception(EXCP00_DIVZ);
2001 }
2002 q = (num / den);
2003 if (q > 0xff)
2004 raise_exception(EXCP00_DIVZ);
2005 q &= 0xff;
2006 r = (num % den) & 0xff;
2007 EAX = (EAX & ~0xffff) | (r << 8) | q;
2008}
2009
2010void helper_idivb_AL(target_ulong t0)
2011{
2012 int num, den, q, r;
2013
2014 num = (int16_t)EAX;
2015 den = (int8_t)t0;
2016 if (den == 0) {
2017 raise_exception(EXCP00_DIVZ);
2018 }
2019 q = (num / den);
2020 if (q != (int8_t)q)
2021 raise_exception(EXCP00_DIVZ);
2022 q &= 0xff;
2023 r = (num % den) & 0xff;
2024 EAX = (EAX & ~0xffff) | (r << 8) | q;
2025}
2026
2027void helper_divw_AX(target_ulong t0)
2028{
2029 unsigned int num, den, q, r;
2030
2031 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2032 den = (t0 & 0xffff);
2033 if (den == 0) {
2034 raise_exception(EXCP00_DIVZ);
2035 }
2036 q = (num / den);
2037 if (q > 0xffff)
2038 raise_exception(EXCP00_DIVZ);
2039 q &= 0xffff;
2040 r = (num % den) & 0xffff;
2041 EAX = (EAX & ~0xffff) | q;
2042 EDX = (EDX & ~0xffff) | r;
2043}
2044
2045void helper_idivw_AX(target_ulong t0)
2046{
2047 int num, den, q, r;
2048
2049 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2050 den = (int16_t)t0;
2051 if (den == 0) {
2052 raise_exception(EXCP00_DIVZ);
2053 }
2054 q = (num / den);
2055 if (q != (int16_t)q)
2056 raise_exception(EXCP00_DIVZ);
2057 q &= 0xffff;
2058 r = (num % den) & 0xffff;
2059 EAX = (EAX & ~0xffff) | q;
2060 EDX = (EDX & ~0xffff) | r;
2061}
2062
2063void helper_divl_EAX(target_ulong t0)
2064{
2065 unsigned int den, r;
2066 uint64_t num, q;
2067
2068 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2069 den = t0;
2070 if (den == 0) {
2071 raise_exception(EXCP00_DIVZ);
2072 }
2073 q = (num / den);
2074 r = (num % den);
2075 if (q > 0xffffffff)
2076 raise_exception(EXCP00_DIVZ);
2077 EAX = (uint32_t)q;
2078 EDX = (uint32_t)r;
2079}
2080
2081void helper_idivl_EAX(target_ulong t0)
2082{
2083 int den, r;
2084 int64_t num, q;
2085
2086 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2087 den = t0;
2088 if (den == 0) {
2089 raise_exception(EXCP00_DIVZ);
2090 }
2091 q = (num / den);
2092 r = (num % den);
2093 if (q != (int32_t)q)
2094 raise_exception(EXCP00_DIVZ);
2095 EAX = (uint32_t)q;
2096 EDX = (uint32_t)r;
2097}
2098
2099/* bcd */
2100
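/* BCD adjustment helpers (AAM, AAD, AAA, AAS, DAA, DAS). They operate on
 * AL/AH inside EAX and hand the resulting flags to the lazy flag machinery
 * through CC_SRC/CC_DST. */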
2101 /* XXX: AAM with a zero immediate should raise #DE; not implemented here */
2102void helper_aam(int base)
2103{
2104 int al, ah;
2105 al = EAX & 0xff;
2106 ah = al / base;
2107 al = al % base;
2108 EAX = (EAX & ~0xffff) | al | (ah << 8);
2109 CC_DST = al;
2110}
2111
2112void helper_aad(int base)
2113{
2114 int al, ah;
2115 al = EAX & 0xff;
2116 ah = (EAX >> 8) & 0xff;
2117 al = ((ah * base) + al) & 0xff;
2118 EAX = (EAX & ~0xffff) | al;
2119 CC_DST = al;
2120}
2121
2122void helper_aaa(void)
2123{
2124 int icarry;
2125 int al, ah, af;
2126 int eflags;
2127
2128 eflags = helper_cc_compute_all(CC_OP);
2129 af = eflags & CC_A;
2130 al = EAX & 0xff;
2131 ah = (EAX >> 8) & 0xff;
2132
2133 icarry = (al > 0xf9);
2134 if (((al & 0x0f) > 9 ) || af) {
2135 al = (al + 6) & 0x0f;
2136 ah = (ah + 1 + icarry) & 0xff;
2137 eflags |= CC_C | CC_A;
2138 } else {
2139 eflags &= ~(CC_C | CC_A);
2140 al &= 0x0f;
2141 }
2142 EAX = (EAX & ~0xffff) | al | (ah << 8);
2143 CC_SRC = eflags;
2144}
2145
2146void helper_aas(void)
2147{
2148 int icarry;
2149 int al, ah, af;
2150 int eflags;
2151
2152 eflags = helper_cc_compute_all(CC_OP);
2153 af = eflags & CC_A;
2154 al = EAX & 0xff;
2155 ah = (EAX >> 8) & 0xff;
2156
2157 icarry = (al < 6);
2158 if (((al & 0x0f) > 9 ) || af) {
2159 al = (al - 6) & 0x0f;
2160 ah = (ah - 1 - icarry) & 0xff;
2161 eflags |= CC_C | CC_A;
2162 } else {
2163 eflags &= ~(CC_C | CC_A);
2164 al &= 0x0f;
2165 }
2166 EAX = (EAX & ~0xffff) | al | (ah << 8);
2167 CC_SRC = eflags;
2168}
2169
2170void helper_daa(void)
2171{
2172 int al, af, cf;
2173 int eflags;
2174
2175 eflags = helper_cc_compute_all(CC_OP);
2176 cf = eflags & CC_C;
2177 af = eflags & CC_A;
2178 al = EAX & 0xff;
2179
2180 eflags = 0;
2181 if (((al & 0x0f) > 9 ) || af) {
2182 al = (al + 6) & 0xff;
2183 eflags |= CC_A;
2184 }
2185 if ((al > 0x9f) || cf) {
2186 al = (al + 0x60) & 0xff;
2187 eflags |= CC_C;
2188 }
2189 EAX = (EAX & ~0xff) | al;
2190 /* well, speed is not an issue here, so we compute the flags by hand */
2191 eflags |= (al == 0) << 6; /* zf */
2192 eflags |= parity_table[al]; /* pf */
2193 eflags |= (al & 0x80); /* sf */
2194 CC_SRC = eflags;
2195}
2196
2197void helper_das(void)
2198{
2199 int al, al1, af, cf;
2200 int eflags;
2201
2202 eflags = helper_cc_compute_all(CC_OP);
2203 cf = eflags & CC_C;
2204 af = eflags & CC_A;
2205 al = EAX & 0xff;
2206
2207 eflags = 0;
2208 al1 = al;
2209 if (((al & 0x0f) > 9 ) || af) {
2210 eflags |= CC_A;
2211 if (al < 6 || cf)
2212 eflags |= CC_C;
2213 al = (al - 6) & 0xff;
2214 }
2215 if ((al1 > 0x99) || cf) {
2216 al = (al - 0x60) & 0xff;
2217 eflags |= CC_C;
2218 }
2219 EAX = (EAX & ~0xff) | al;
2220 /* well, speed is not an issue here, so we compute the flags by hand */
2221 eflags |= (al == 0) << 6; /* zf */
2222 eflags |= parity_table[al]; /* pf */
2223 eflags |= (al & 0x80); /* sf */
2224 CC_SRC = eflags;
2225}
2226
2227void helper_into(int next_eip_addend)
2228{
2229 int eflags;
2230 eflags = helper_cc_compute_all(CC_OP);
2231 if (eflags & CC_O) {
2232 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2233 }
2234}
2235
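/* CMPXCHG8B: compare the 64-bit value at [a0] with EDX:EAX. On a match
 * ECX:EBX is stored and ZF is set; otherwise the memory value is loaded
 * into EDX:EAX and ZF is cleared. The store happens in both cases, which
 * matches the always-write behaviour of the real instruction. */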
2236void helper_cmpxchg8b(target_ulong a0)
2237{
2238 uint64_t d;
2239 int eflags;
2240
2241 eflags = helper_cc_compute_all(CC_OP);
2242 d = ldq(a0);
2243 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2244 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2245 eflags |= CC_Z;
2246 } else {
2247 /* always do the store */
2248 stq(a0, d);
2249 EDX = (uint32_t)(d >> 32);
2250 EAX = (uint32_t)d;
2251 eflags &= ~CC_Z;
2252 }
2253 CC_SRC = eflags;
2254}
2255
2256#ifdef TARGET_X86_64
2257void helper_cmpxchg16b(target_ulong a0)
2258{
2259 uint64_t d0, d1;
2260 int eflags;
2261
2262 if ((a0 & 0xf) != 0)
2263 raise_exception(EXCP0D_GPF);
2264 eflags = helper_cc_compute_all(CC_OP);
2265 d0 = ldq(a0);
2266 d1 = ldq(a0 + 8);
2267 if (d0 == EAX && d1 == EDX) {
2268 stq(a0, EBX);
2269 stq(a0 + 8, ECX);
2270 eflags |= CC_Z;
2271 } else {
2272 /* always do the store */
2273 stq(a0, d0);
2274 stq(a0 + 8, d1);
2275 EDX = d1;
2276 EAX = d0;
2277 eflags &= ~CC_Z;
2278 }
2279 CC_SRC = eflags;
2280}
2281#endif
2282
2283void helper_single_step(void)
2284{
2285#ifndef CONFIG_USER_ONLY
2286 check_hw_breakpoints(env, 1);
2287 env->dr[6] |= DR6_BS;
2288#endif
2289 raise_exception(EXCP01_DB);
2290}
2291
2292void helper_cpuid(void)
2293{
2294 uint32_t eax, ebx, ecx, edx;
2295
2296 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2297
2298 cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
2299 EAX = eax;
2300 EBX = ebx;
2301 ECX = ecx;
2302 EDX = edx;
2303}
2304
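/* Helper for ENTER with a non-zero nesting level: the enclosing frame
 * pointers are copied from the old frame onto the new stack and t1 (the
 * value supplied by the translated ENTER code) is pushed last. The 32-bit
 * and 16-bit paths differ only in the element size. */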
2305void helper_enter_level(int level, int data32, target_ulong t1)
2306{
2307 target_ulong ssp;
2308 uint32_t esp_mask, esp, ebp;
2309
2310 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2311 ssp = env->segs[R_SS].base;
2312 ebp = EBP;
2313 esp = ESP;
2314 if (data32) {
2315 /* 32 bit */
2316 esp -= 4;
2317 while (--level) {
2318 esp -= 4;
2319 ebp -= 4;
2320 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2321 }
2322 esp -= 4;
2323 stl(ssp + (esp & esp_mask), t1);
2324 } else {
2325 /* 16 bit */
2326 esp -= 2;
2327 while (--level) {
2328 esp -= 2;
2329 ebp -= 2;
2330 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2331 }
2332 esp -= 2;
2333 stw(ssp + (esp & esp_mask), t1);
2334 }
2335}
2336
2337#ifdef TARGET_X86_64
2338void helper_enter64_level(int level, int data64, target_ulong t1)
2339{
2340 target_ulong esp, ebp;
2341 ebp = EBP;
2342 esp = ESP;
2343
2344 if (data64) {
2345 /* 64 bit */
2346 esp -= 8;
2347 while (--level) {
2348 esp -= 8;
2349 ebp -= 8;
2350 stq(esp, ldq(ebp));
2351 }
2352 esp -= 8;
2353 stq(esp, t1);
2354 } else {
2355 /* 16 bit */
2356 esp -= 2;
2357 while (--level) {
2358 esp -= 2;
2359 ebp -= 2;
2360 stw(esp, lduw(ebp));
2361 }
2362 esp -= 2;
2363 stw(esp, t1);
2364 }
2365}
2366#endif
2367
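/* LLDT: the selector must reference an LDT system descriptor (type 2) in
 * the GDT; a null selector simply invalidates the current LDT. In long
 * mode the descriptor is 16 bytes wide, hence the larger entry_limit and
 * the extra base bits taken from the third dword. */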
2368void helper_lldt(int selector)
2369{
2370 SegmentCache *dt;
2371 uint32_t e1, e2;
2372#ifndef VBOX
2373 int index, entry_limit;
2374#else
2375 unsigned int index, entry_limit;
2376#endif
2377 target_ulong ptr;
2378
2379#ifdef VBOX
2380 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
2381 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
2382#endif
2383
2384 selector &= 0xffff;
2385 if ((selector & 0xfffc) == 0) {
2386 /* XXX: NULL selector case: invalid LDT */
2387 env->ldt.base = 0;
2388 env->ldt.limit = 0;
2389 } else {
2390 if (selector & 0x4)
2391 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2392 dt = &env->gdt;
2393 index = selector & ~7;
2394#ifdef TARGET_X86_64
2395 if (env->hflags & HF_LMA_MASK)
2396 entry_limit = 15;
2397 else
2398#endif
2399 entry_limit = 7;
2400 if ((index + entry_limit) > dt->limit)
2401 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2402 ptr = dt->base + index;
2403 e1 = ldl_kernel(ptr);
2404 e2 = ldl_kernel(ptr + 4);
2405 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2406 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2407 if (!(e2 & DESC_P_MASK))
2408 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2409#ifdef TARGET_X86_64
2410 if (env->hflags & HF_LMA_MASK) {
2411 uint32_t e3;
2412 e3 = ldl_kernel(ptr + 8);
2413 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2414 env->ldt.base |= (target_ulong)e3 << 32;
2415 } else
2416#endif
2417 {
2418 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2419 }
2420 }
2421 env->ldt.selector = selector;
2422#ifdef VBOX
2423 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2424 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2425#endif
2426}
2427
2428void helper_ltr(int selector)
2429{
2430 SegmentCache *dt;
2431 uint32_t e1, e2;
2432#ifndef VBOX
2433 int index, type, entry_limit;
2434#else
2435 unsigned int index;
2436 int type, entry_limit;
2437#endif
2438 target_ulong ptr;
2439
2440#ifdef VBOX
2441 Log(("helper_ltr: old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2442 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2443 env->tr.flags, (RTSEL)(selector & 0xffff)));
2444#endif
2445 selector &= 0xffff;
2446 if ((selector & 0xfffc) == 0) {
2447 /* NULL selector case: invalid TR */
2448 env->tr.base = 0;
2449 env->tr.limit = 0;
2450 env->tr.flags = 0;
2451 } else {
2452 if (selector & 0x4)
2453 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2454 dt = &env->gdt;
2455 index = selector & ~7;
2456#ifdef TARGET_X86_64
2457 if (env->hflags & HF_LMA_MASK)
2458 entry_limit = 15;
2459 else
2460#endif
2461 entry_limit = 7;
2462 if ((index + entry_limit) > dt->limit)
2463 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2464 ptr = dt->base + index;
2465 e1 = ldl_kernel(ptr);
2466 e2 = ldl_kernel(ptr + 4);
2467 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2468 if ((e2 & DESC_S_MASK) ||
2469 (type != 1 && type != 9))
2470 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2471 if (!(e2 & DESC_P_MASK))
2472 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2473#ifdef TARGET_X86_64
2474 if (env->hflags & HF_LMA_MASK) {
2475 uint32_t e3, e4;
2476 e3 = ldl_kernel(ptr + 8);
2477 e4 = ldl_kernel(ptr + 12);
2478 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2479 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2480 load_seg_cache_raw_dt(&env->tr, e1, e2);
2481 env->tr.base |= (target_ulong)e3 << 32;
2482 } else
2483#endif
2484 {
2485 load_seg_cache_raw_dt(&env->tr, e1, e2);
2486 }
2487 e2 |= DESC_TSS_BUSY_MASK;
2488 stl_kernel(ptr + 4, e2);
2489 }
2490 env->tr.selector = selector;
2491#ifdef VBOX
2492 Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2493 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2494 env->tr.flags, (RTSEL)(selector & 0xffff)));
2495#endif
2496}
2497
2498/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2499void helper_load_seg(int seg_reg, int selector)
2500{
2501 uint32_t e1, e2;
2502 int cpl, dpl, rpl;
2503 SegmentCache *dt;
2504#ifndef VBOX
2505 int index;
2506#else
2507 unsigned int index;
2508#endif
2509 target_ulong ptr;
2510
2511 selector &= 0xffff;
2512 cpl = env->hflags & HF_CPL_MASK;
2513#ifdef VBOX
2514
2515 /* Trying to load a selector with RPL=1 while executing raw ring-0 code? Clear the RPL. */
2516 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2517 {
2518 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
2519 selector = selector & 0xfffc;
2520 }
2521#endif /* VBOX */
2522 if ((selector & 0xfffc) == 0) {
2523 /* null selector case */
2524 if (seg_reg == R_SS
2525#ifdef TARGET_X86_64
2526 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2527#endif
2528 )
2529 raise_exception_err(EXCP0D_GPF, 0);
2530 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2531 } else {
2532
2533 if (selector & 0x4)
2534 dt = &env->ldt;
2535 else
2536 dt = &env->gdt;
2537 index = selector & ~7;
2538 if ((index + 7) > dt->limit)
2539 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2540 ptr = dt->base + index;
2541 e1 = ldl_kernel(ptr);
2542 e2 = ldl_kernel(ptr + 4);
2543
2544 if (!(e2 & DESC_S_MASK))
2545 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2546 rpl = selector & 3;
2547 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2548 if (seg_reg == R_SS) {
2549 /* must be writable segment */
2550 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2551 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2552 if (rpl != cpl || dpl != cpl)
2553 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2554 } else {
2555 /* must be readable segment */
2556 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2557 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2558
2559 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2560 /* if not conforming code, test rights */
2561 if (dpl < cpl || dpl < rpl)
2562 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2563 }
2564 }
2565
2566 if (!(e2 & DESC_P_MASK)) {
2567 if (seg_reg == R_SS)
2568 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2569 else
2570 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2571 }
2572
2573 /* set the access bit if not already set */
2574 if (!(e2 & DESC_A_MASK)) {
2575 e2 |= DESC_A_MASK;
2576 stl_kernel(ptr + 4, e2);
2577 }
2578
2579 cpu_x86_load_seg_cache(env, seg_reg, selector,
2580 get_seg_base(e1, e2),
2581 get_seg_limit(e1, e2),
2582 e2);
2583#if 0
2584 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2585 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2586#endif
2587 }
2588}
2589
2590/* protected mode jump */
2591void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2592 int next_eip_addend)
2593{
2594 int gate_cs, type;
2595 uint32_t e1, e2, cpl, dpl, rpl, limit;
2596 target_ulong next_eip;
2597
2598#ifdef VBOX /** @todo Why do we do this? */
2599 e1 = e2 = 0;
2600#endif
2601 if ((new_cs & 0xfffc) == 0)
2602 raise_exception_err(EXCP0D_GPF, 0);
2603 if (load_segment(&e1, &e2, new_cs) != 0)
2604 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2605 cpl = env->hflags & HF_CPL_MASK;
2606 if (e2 & DESC_S_MASK) {
2607 if (!(e2 & DESC_CS_MASK))
2608 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2609 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2610 if (e2 & DESC_C_MASK) {
2611 /* conforming code segment */
2612 if (dpl > cpl)
2613 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2614 } else {
2615 /* non conforming code segment */
2616 rpl = new_cs & 3;
2617 if (rpl > cpl)
2618 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2619 if (dpl != cpl)
2620 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2621 }
2622 if (!(e2 & DESC_P_MASK))
2623 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2624 limit = get_seg_limit(e1, e2);
2625 if (new_eip > limit &&
2626 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2627 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2628 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2629 get_seg_base(e1, e2), limit, e2);
2630 EIP = new_eip;
2631 } else {
2632 /* jump to call or task gate */
2633 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2634 rpl = new_cs & 3;
2635 cpl = env->hflags & HF_CPL_MASK;
2636 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2637 switch(type) {
2638 case 1: /* 286 TSS */
2639 case 9: /* 386 TSS */
2640 case 5: /* task gate */
2641 if (dpl < cpl || dpl < rpl)
2642 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2643 next_eip = env->eip + next_eip_addend;
2644 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2645 CC_OP = CC_OP_EFLAGS;
2646 break;
2647 case 4: /* 286 call gate */
2648 case 12: /* 386 call gate */
2649 if ((dpl < cpl) || (dpl < rpl))
2650 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2651 if (!(e2 & DESC_P_MASK))
2652 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2653 gate_cs = e1 >> 16;
2654 new_eip = (e1 & 0xffff);
2655 if (type == 12)
2656 new_eip |= (e2 & 0xffff0000);
2657 if (load_segment(&e1, &e2, gate_cs) != 0)
2658 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2659 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2660 /* must be code segment */
2661 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2662 (DESC_S_MASK | DESC_CS_MASK)))
2663 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2664 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2665 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2666 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2667 if (!(e2 & DESC_P_MASK))
2668#ifdef VBOX /* See page 3-514 of 253666.pdf */
2669 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2670#else
2671 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2672#endif
2673 limit = get_seg_limit(e1, e2);
2674 if (new_eip > limit)
2675 raise_exception_err(EXCP0D_GPF, 0);
2676 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2677 get_seg_base(e1, e2), limit, e2);
2678 EIP = new_eip;
2679 break;
2680 default:
2681 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2682 break;
2683 }
2684 }
2685}
2686
2687/* real mode call */
2688void helper_lcall_real(int new_cs, target_ulong new_eip1,
2689 int shift, int next_eip)
2690{
2691 int new_eip;
2692 uint32_t esp, esp_mask;
2693 target_ulong ssp;
2694
2695 new_eip = new_eip1;
2696 esp = ESP;
2697 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2698 ssp = env->segs[R_SS].base;
2699 if (shift) {
2700 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2701 PUSHL(ssp, esp, esp_mask, next_eip);
2702 } else {
2703 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2704 PUSHW(ssp, esp, esp_mask, next_eip);
2705 }
2706
2707 SET_ESP(esp, esp_mask);
2708 env->eip = new_eip;
2709 env->segs[R_CS].selector = new_cs;
2710 env->segs[R_CS].base = (new_cs << 4);
2711}
2712
2713/* protected mode call */
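/* Direct calls to a code segment push CS:EIP on the current stack. A call
 * through a call gate to a more privileged segment switches to the stack
 * taken from the TSS for the target DPL and copies param_count words (or
 * dwords) from the old stack before pushing the return address. Task gates
 * and TSS descriptors are handled via switch_tss(). */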
2714void helper_lcall_protected(int new_cs, target_ulong new_eip,
2715 int shift, int next_eip_addend)
2716{
2717 int new_stack, i;
2718 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2719 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2720 uint32_t val, limit, old_sp_mask;
2721 target_ulong ssp, old_ssp, next_eip;
2722
2723#ifdef VBOX /** @todo Why do we do this? */
2724 e1 = e2 = 0;
2725#endif
2726 next_eip = env->eip + next_eip_addend;
2727 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2728 LOG_PCALL_STATE(env);
2729 if ((new_cs & 0xfffc) == 0)
2730 raise_exception_err(EXCP0D_GPF, 0);
2731 if (load_segment(&e1, &e2, new_cs) != 0)
2732 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2733 cpl = env->hflags & HF_CPL_MASK;
2734 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2735 if (e2 & DESC_S_MASK) {
2736 if (!(e2 & DESC_CS_MASK))
2737 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2738 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2739 if (e2 & DESC_C_MASK) {
2740 /* conforming code segment */
2741 if (dpl > cpl)
2742 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2743 } else {
2744 /* non conforming code segment */
2745 rpl = new_cs & 3;
2746 if (rpl > cpl)
2747 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2748 if (dpl != cpl)
2749 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2750 }
2751 if (!(e2 & DESC_P_MASK))
2752 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2753
2754#ifdef TARGET_X86_64
2755 /* XXX: check 16/32 bit cases in long mode */
2756 if (shift == 2) {
2757 target_ulong rsp;
2758 /* 64 bit case */
2759 rsp = ESP;
2760 PUSHQ(rsp, env->segs[R_CS].selector);
2761 PUSHQ(rsp, next_eip);
2762 /* from this point, not restartable */
2763 ESP = rsp;
2764 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2765 get_seg_base(e1, e2),
2766 get_seg_limit(e1, e2), e2);
2767 EIP = new_eip;
2768 } else
2769#endif
2770 {
2771 sp = ESP;
2772 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2773 ssp = env->segs[R_SS].base;
2774 if (shift) {
2775 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2776 PUSHL(ssp, sp, sp_mask, next_eip);
2777 } else {
2778 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2779 PUSHW(ssp, sp, sp_mask, next_eip);
2780 }
2781
2782 limit = get_seg_limit(e1, e2);
2783 if (new_eip > limit)
2784 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2785 /* from this point, not restartable */
2786 SET_ESP(sp, sp_mask);
2787 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2788 get_seg_base(e1, e2), limit, e2);
2789 EIP = new_eip;
2790 }
2791 } else {
2792 /* check gate type */
2793 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2794 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2795 rpl = new_cs & 3;
2796 switch(type) {
2797 case 1: /* available 286 TSS */
2798 case 9: /* available 386 TSS */
2799 case 5: /* task gate */
2800 if (dpl < cpl || dpl < rpl)
2801 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2802 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2803 CC_OP = CC_OP_EFLAGS;
2804 return;
2805 case 4: /* 286 call gate */
2806 case 12: /* 386 call gate */
2807 break;
2808 default:
2809 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2810 break;
2811 }
2812 shift = type >> 3;
2813
2814 if (dpl < cpl || dpl < rpl)
2815 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2816 /* check valid bit */
2817 if (!(e2 & DESC_P_MASK))
2818 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2819 selector = e1 >> 16;
2820 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2821 param_count = e2 & 0x1f;
2822 if ((selector & 0xfffc) == 0)
2823 raise_exception_err(EXCP0D_GPF, 0);
2824
2825 if (load_segment(&e1, &e2, selector) != 0)
2826 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2827 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2828 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2829 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2830 if (dpl > cpl)
2831 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2832 if (!(e2 & DESC_P_MASK))
2833 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2834
2835 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2836 /* to inner privilege */
2837 get_ss_esp_from_tss(&ss, &sp, dpl);
2838 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2839 ss, sp, param_count, ESP);
2840 if ((ss & 0xfffc) == 0)
2841 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2842 if ((ss & 3) != dpl)
2843 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2844 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2845 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2846 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2847 if (ss_dpl != dpl)
2848 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2849 if (!(ss_e2 & DESC_S_MASK) ||
2850 (ss_e2 & DESC_CS_MASK) ||
2851 !(ss_e2 & DESC_W_MASK))
2852 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2853 if (!(ss_e2 & DESC_P_MASK))
2854#ifdef VBOX /* See page 3-99 of 253666.pdf */
2855 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
2856#else
2857 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2858#endif
2859
2860 // push_size = ((param_count * 2) + 8) << shift;
2861
2862 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2863 old_ssp = env->segs[R_SS].base;
2864
2865 sp_mask = get_sp_mask(ss_e2);
2866 ssp = get_seg_base(ss_e1, ss_e2);
2867 if (shift) {
2868 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2869 PUSHL(ssp, sp, sp_mask, ESP);
2870 for(i = param_count - 1; i >= 0; i--) {
2871 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2872 PUSHL(ssp, sp, sp_mask, val);
2873 }
2874 } else {
2875 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2876 PUSHW(ssp, sp, sp_mask, ESP);
2877 for(i = param_count - 1; i >= 0; i--) {
2878 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2879 PUSHW(ssp, sp, sp_mask, val);
2880 }
2881 }
2882 new_stack = 1;
2883 } else {
2884 /* to same privilege */
2885 sp = ESP;
2886 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2887 ssp = env->segs[R_SS].base;
2888 // push_size = (4 << shift);
2889 new_stack = 0;
2890 }
2891
2892 if (shift) {
2893 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2894 PUSHL(ssp, sp, sp_mask, next_eip);
2895 } else {
2896 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2897 PUSHW(ssp, sp, sp_mask, next_eip);
2898 }
2899
2900 /* from this point, not restartable */
2901
2902 if (new_stack) {
2903 ss = (ss & ~3) | dpl;
2904 cpu_x86_load_seg_cache(env, R_SS, ss,
2905 ssp,
2906 get_seg_limit(ss_e1, ss_e2),
2907 ss_e2);
2908 }
2909
2910 selector = (selector & ~3) | dpl;
2911 cpu_x86_load_seg_cache(env, R_CS, selector,
2912 get_seg_base(e1, e2),
2913 get_seg_limit(e1, e2),
2914 e2);
2915 cpu_x86_set_cpl(env, dpl);
2916 SET_ESP(sp, sp_mask);
2917 EIP = offset;
2918 }
2919#ifdef USE_KQEMU
2920 if (kqemu_is_ok(env)) {
2921 env->exception_index = -1;
2922 cpu_loop_exit();
2923 }
2924#endif
2925}
2926
2927/* real and vm86 mode iret */
2928void helper_iret_real(int shift)
2929{
2930 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2931 target_ulong ssp;
2932 int eflags_mask;
2933#ifdef VBOX
2934 bool fVME = false;
2935
2936 remR3TrapClear(env->pVM);
2937#endif /* VBOX */
2938
2939 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
2940 sp = ESP;
2941 ssp = env->segs[R_SS].base;
2942 if (shift == 1) {
2943 /* 32 bits */
2944 POPL(ssp, sp, sp_mask, new_eip);
2945 POPL(ssp, sp, sp_mask, new_cs);
2946 new_cs &= 0xffff;
2947 POPL(ssp, sp, sp_mask, new_eflags);
2948 } else {
2949 /* 16 bits */
2950 POPW(ssp, sp, sp_mask, new_eip);
2951 POPW(ssp, sp, sp_mask, new_cs);
2952 POPW(ssp, sp, sp_mask, new_eflags);
2953 }
2954#ifdef VBOX
2955 if ( (env->eflags & VM_MASK)
2956 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
2957 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
2958 {
2959 fVME = true;
2960 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
2961 /* if TF will be set -> #GP */
2962 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
2963 || (new_eflags & TF_MASK))
2964 raise_exception(EXCP0D_GPF);
2965 }
2966#endif /* VBOX */
2967 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2968 env->segs[R_CS].selector = new_cs;
2969 env->segs[R_CS].base = (new_cs << 4);
2970 env->eip = new_eip;
2971#ifdef VBOX
2972 if (fVME)
2973 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2974 else
2975#endif
2976 if (env->eflags & VM_MASK)
2977 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2978 else
2979 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2980 if (shift == 0)
2981 eflags_mask &= 0xffff;
2982 load_eflags(new_eflags, eflags_mask);
2983 env->hflags2 &= ~HF2_NMI_MASK;
2984#ifdef VBOX
2985 if (fVME)
2986 {
2987 if (new_eflags & IF_MASK)
2988 env->eflags |= VIF_MASK;
2989 else
2990 env->eflags &= ~VIF_MASK;
2991 }
2992#endif /* VBOX */
2993}
2994
2995static inline void validate_seg(int seg_reg, int cpl)
2996{
2997 int dpl;
2998 uint32_t e2;
2999
3000 /* XXX: on x86_64, we do not want to nullify FS and GS because
3001 they may still contain a valid base. I would be interested to
3002 know how a real x86_64 CPU behaves */
3003 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3004 (env->segs[seg_reg].selector & 0xfffc) == 0)
3005 return;
3006
3007 e2 = env->segs[seg_reg].flags;
3008 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3009 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3010 /* data or non conforming code segment */
3011 if (dpl < cpl) {
3012 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3013 }
3014 }
3015}
3016
3017/* protected mode iret */
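/* Common worker for RETF and IRET in protected mode (is_iret selects the
 * extra EFLAGS pop). A return to an outer privilege level additionally pops
 * SS:ESP, reloads SS and revalidates ES/DS/FS/GS so that no data or
 * non-conforming code segment with a DPL below the new CPL stays loaded. */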
3018static inline void helper_ret_protected(int shift, int is_iret, int addend)
3019{
3020 uint32_t new_cs, new_eflags, new_ss;
3021 uint32_t new_es, new_ds, new_fs, new_gs;
3022 uint32_t e1, e2, ss_e1, ss_e2;
3023 int cpl, dpl, rpl, eflags_mask, iopl;
3024 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3025
3026#ifdef VBOX /** @todo Why do we do this? */
3027 ss_e1 = ss_e2 = e1 = e2 = 0;
3028#endif
3029
3030#ifdef TARGET_X86_64
3031 if (shift == 2)
3032 sp_mask = -1;
3033 else
3034#endif
3035 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3036 sp = ESP;
3037 ssp = env->segs[R_SS].base;
3038 new_eflags = 0; /* avoid warning */
3039#ifdef TARGET_X86_64
3040 if (shift == 2) {
3041 POPQ(sp, new_eip);
3042 POPQ(sp, new_cs);
3043 new_cs &= 0xffff;
3044 if (is_iret) {
3045 POPQ(sp, new_eflags);
3046 }
3047 } else
3048#endif
3049 if (shift == 1) {
3050 /* 32 bits */
3051 POPL(ssp, sp, sp_mask, new_eip);
3052 POPL(ssp, sp, sp_mask, new_cs);
3053 new_cs &= 0xffff;
3054 if (is_iret) {
3055 POPL(ssp, sp, sp_mask, new_eflags);
3056#if defined(VBOX) && defined(DEBUG)
3057 printf("iret: new CS %04X\n", new_cs);
3058 printf("iret: new EIP %08X\n", (uint32_t)new_eip);
3059 printf("iret: new EFLAGS %08X\n", new_eflags);
3060 printf("iret: EAX=%08x\n", (uint32_t)EAX);
3061#endif
3062 if (new_eflags & VM_MASK)
3063 goto return_to_vm86;
3064 }
3065#ifdef VBOX
3066 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
3067 {
3068#ifdef DEBUG
3069 printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc);
3070#endif
3071 new_cs = new_cs & 0xfffc;
3072 }
3073#endif
3074 } else {
3075 /* 16 bits */
3076 POPW(ssp, sp, sp_mask, new_eip);
3077 POPW(ssp, sp, sp_mask, new_cs);
3078 if (is_iret)
3079 POPW(ssp, sp, sp_mask, new_eflags);
3080 }
3081 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
3082 new_cs, new_eip, shift, addend);
3083 LOG_PCALL_STATE(env);
3084 if ((new_cs & 0xfffc) == 0)
3085 {
3086#if defined(VBOX) && defined(DEBUG)
3087 printf("new_cs & 0xfffc) == 0\n");
3088#endif
3089 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3090 }
3091 if (load_segment(&e1, &e2, new_cs) != 0)
3092 {
3093#if defined(VBOX) && defined(DEBUG)
3094 printf("load_segment failed\n");
3095#endif
3096 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3097 }
3098 if (!(e2 & DESC_S_MASK) ||
3099 !(e2 & DESC_CS_MASK))
3100 {
3101#if defined(VBOX) && defined(DEBUG)
3102 printf("e2 mask %08x\n", e2);
3103#endif
3104 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3105 }
3106 cpl = env->hflags & HF_CPL_MASK;
3107 rpl = new_cs & 3;
3108 if (rpl < cpl)
3109 {
3110#if defined(VBOX) && defined(DEBUG)
3111 printf("rpl < cpl (%d vs %d)\n", rpl, cpl);
3112#endif
3113 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3114 }
3115 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3116 if (e2 & DESC_C_MASK) {
3117 if (dpl > rpl)
3118 {
3119#if defined(VBOX) && defined(DEBUG)
3120 printf("dpl > rpl (%d vs %d)\n", dpl, rpl);
3121#endif
3122 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3123 }
3124 } else {
3125 if (dpl != rpl)
3126 {
3127#if defined(VBOX) && defined(DEBUG)
3128 printf("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2);
3129#endif
3130 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3131 }
3132 }
3133 if (!(e2 & DESC_P_MASK))
3134 {
3135#if defined(VBOX) && defined(DEBUG)
3136 printf("DESC_P_MASK e2=%08x\n", e2);
3137#endif
3138 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3139 }
3140
3141 sp += addend;
3142 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
3143 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
3144 /* return to same privilege level */
3145 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3146 get_seg_base(e1, e2),
3147 get_seg_limit(e1, e2),
3148 e2);
3149 } else {
3150 /* return to different privilege level */
3151#ifdef TARGET_X86_64
3152 if (shift == 2) {
3153 POPQ(sp, new_esp);
3154 POPQ(sp, new_ss);
3155 new_ss &= 0xffff;
3156 } else
3157#endif
3158 if (shift == 1) {
3159 /* 32 bits */
3160 POPL(ssp, sp, sp_mask, new_esp);
3161 POPL(ssp, sp, sp_mask, new_ss);
3162 new_ss &= 0xffff;
3163 } else {
3164 /* 16 bits */
3165 POPW(ssp, sp, sp_mask, new_esp);
3166 POPW(ssp, sp, sp_mask, new_ss);
3167 }
3168 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
3169 new_ss, new_esp);
3170 if ((new_ss & 0xfffc) == 0) {
3171#ifdef TARGET_X86_64
3172 /* NULL ss is allowed in long mode if cpl != 3 */
3173 /* XXX: test CS64 ? */
3174 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
3175 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3176 0, 0xffffffff,
3177 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3178 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
3179 DESC_W_MASK | DESC_A_MASK);
3180 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
3181 } else
3182#endif
3183 {
3184 raise_exception_err(EXCP0D_GPF, 0);
3185 }
3186 } else {
3187 if ((new_ss & 3) != rpl)
3188 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3189 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
3190 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3191 if (!(ss_e2 & DESC_S_MASK) ||
3192 (ss_e2 & DESC_CS_MASK) ||
3193 !(ss_e2 & DESC_W_MASK))
3194 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3195 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3196 if (dpl != rpl)
3197 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3198 if (!(ss_e2 & DESC_P_MASK))
3199 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3200 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3201 get_seg_base(ss_e1, ss_e2),
3202 get_seg_limit(ss_e1, ss_e2),
3203 ss_e2);
3204 }
3205
3206 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3207 get_seg_base(e1, e2),
3208 get_seg_limit(e1, e2),
3209 e2);
3210 cpu_x86_set_cpl(env, rpl);
3211 sp = new_esp;
3212#ifdef TARGET_X86_64
3213 if (env->hflags & HF_CS64_MASK)
3214 sp_mask = -1;
3215 else
3216#endif
3217 sp_mask = get_sp_mask(ss_e2);
3218
3219 /* validate data segments */
3220 validate_seg(R_ES, rpl);
3221 validate_seg(R_DS, rpl);
3222 validate_seg(R_FS, rpl);
3223 validate_seg(R_GS, rpl);
3224
3225 sp += addend;
3226 }
3227 SET_ESP(sp, sp_mask);
3228 env->eip = new_eip;
3229 if (is_iret) {
3230 /* NOTE: 'cpl' is the _old_ CPL */
3231 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3232 if (cpl == 0)
3233#ifdef VBOX
3234 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
3235#else
3236 eflags_mask |= IOPL_MASK;
3237#endif
3238 iopl = (env->eflags >> IOPL_SHIFT) & 3;
3239 if (cpl <= iopl)
3240 eflags_mask |= IF_MASK;
3241 if (shift == 0)
3242 eflags_mask &= 0xffff;
3243 load_eflags(new_eflags, eflags_mask);
3244 }
3245 return;
3246
3247 return_to_vm86:
3248 POPL(ssp, sp, sp_mask, new_esp);
3249 POPL(ssp, sp, sp_mask, new_ss);
3250 POPL(ssp, sp, sp_mask, new_es);
3251 POPL(ssp, sp, sp_mask, new_ds);
3252 POPL(ssp, sp, sp_mask, new_fs);
3253 POPL(ssp, sp, sp_mask, new_gs);
3254
3255 /* modify processor state */
3256 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
3257 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
3258 load_seg_vm(R_CS, new_cs & 0xffff);
3259 cpu_x86_set_cpl(env, 3);
3260 load_seg_vm(R_SS, new_ss & 0xffff);
3261 load_seg_vm(R_ES, new_es & 0xffff);
3262 load_seg_vm(R_DS, new_ds & 0xffff);
3263 load_seg_vm(R_FS, new_fs & 0xffff);
3264 load_seg_vm(R_GS, new_gs & 0xffff);
3265
3266 env->eip = new_eip & 0xffff;
3267 ESP = new_esp;
3268}
3269
3270void helper_iret_protected(int shift, int next_eip)
3271{
3272 int tss_selector, type;
3273 uint32_t e1, e2;
3274
3275#ifdef VBOX
3276 e1 = e2 = 0; /** @todo Why do we do this? */
3277 remR3TrapClear(env->pVM);
3278#endif
3279
3280 /* specific case for TSS */
3281 if (env->eflags & NT_MASK) {
3282#ifdef TARGET_X86_64
3283 if (env->hflags & HF_LMA_MASK)
3284 raise_exception_err(EXCP0D_GPF, 0);
3285#endif
3286 tss_selector = lduw_kernel(env->tr.base + 0);
3287 if (tss_selector & 4)
3288 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3289 if (load_segment(&e1, &e2, tss_selector) != 0)
3290 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3291 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3292 /* NOTE: we check both segment and busy TSS */
3293 if (type != 3)
3294 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3295 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3296 } else {
3297 helper_ret_protected(shift, 1, 0);
3298 }
3299 env->hflags2 &= ~HF2_NMI_MASK;
3300#ifdef USE_KQEMU
3301 if (kqemu_is_ok(env)) {
3302 CC_OP = CC_OP_EFLAGS;
3303 env->exception_index = -1;
3304 cpu_loop_exit();
3305 }
3306#endif
3307}
3308
3309void helper_lret_protected(int shift, int addend)
3310{
3311 helper_ret_protected(shift, 0, addend);
3312#ifdef USE_KQEMU
3313 if (kqemu_is_ok(env)) {
3314 env->exception_index = -1;
3315 cpu_loop_exit();
3316 }
3317#endif
3318}
3319
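/* SYSENTER/SYSEXIT load flat CS and SS descriptors whose selectors are
 * derived from IA32_SYSENTER_CS; ESP/EIP come from the SYSENTER_ESP and
 * SYSENTER_EIP MSRs on entry and from ECX/EDX on exit. */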
3320void helper_sysenter(void)
3321{
3322 if (env->sysenter_cs == 0) {
3323 raise_exception_err(EXCP0D_GPF, 0);
3324 }
3325 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3326 cpu_x86_set_cpl(env, 0);
3327
3328#ifdef TARGET_X86_64
3329 if (env->hflags & HF_LMA_MASK) {
3330 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3331 0, 0xffffffff,
3332 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3333 DESC_S_MASK |
3334 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3335 } else
3336#endif
3337 {
3338 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3339 0, 0xffffffff,
3340 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3341 DESC_S_MASK |
3342 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3343 }
3344 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3345 0, 0xffffffff,
3346 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3347 DESC_S_MASK |
3348 DESC_W_MASK | DESC_A_MASK);
3349 ESP = env->sysenter_esp;
3350 EIP = env->sysenter_eip;
3351}
3352
3353void helper_sysexit(int dflag)
3354{
3355 int cpl;
3356
3357 cpl = env->hflags & HF_CPL_MASK;
3358 if (env->sysenter_cs == 0 || cpl != 0) {
3359 raise_exception_err(EXCP0D_GPF, 0);
3360 }
3361 cpu_x86_set_cpl(env, 3);
3362#ifdef TARGET_X86_64
3363 if (dflag == 2) {
3364 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3365 0, 0xffffffff,
3366 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3367 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3368 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3369 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3370 0, 0xffffffff,
3371 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3372 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3373 DESC_W_MASK | DESC_A_MASK);
3374 } else
3375#endif
3376 {
3377 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3378 0, 0xffffffff,
3379 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3380 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3381 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3382 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3383 0, 0xffffffff,
3384 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3385 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3386 DESC_W_MASK | DESC_A_MASK);
3387 }
3388 ESP = ECX;
3389 EIP = EDX;
3390#ifdef USE_KQEMU
3391 if (kqemu_is_ok(env)) {
3392 env->exception_index = -1;
3393 cpu_loop_exit();
3394 }
3395#endif
3396}
3397
3398#if defined(CONFIG_USER_ONLY)
3399target_ulong helper_read_crN(int reg)
3400{
3401 return 0;
3402}
3403
3404void helper_write_crN(int reg, target_ulong t0)
3405{
3406}
3407
3408void helper_movl_drN_T0(int reg, target_ulong t0)
3409{
3410}
3411#else
3412target_ulong helper_read_crN(int reg)
3413{
3414 target_ulong val;
3415
3416 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3417 switch(reg) {
3418 default:
3419 val = env->cr[reg];
3420 break;
3421 case 8:
3422 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3423 val = cpu_get_apic_tpr(env);
3424 } else {
3425 val = env->v_tpr;
3426 }
3427 break;
3428 }
3429 return val;
3430}
3431
3432void helper_write_crN(int reg, target_ulong t0)
3433{
3434 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3435 switch(reg) {
3436 case 0:
3437 cpu_x86_update_cr0(env, t0);
3438 break;
3439 case 3:
3440 cpu_x86_update_cr3(env, t0);
3441 break;
3442 case 4:
3443 cpu_x86_update_cr4(env, t0);
3444 break;
3445 case 8:
3446 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3447 cpu_set_apic_tpr(env, t0);
3448 }
3449 env->v_tpr = t0 & 0x0f;
3450 break;
3451 default:
3452 env->cr[reg] = t0;
3453 break;
3454 }
3455}
3456
3457void helper_movl_drN_T0(int reg, target_ulong t0)
3458{
3459 int i;
3460
3461 if (reg < 4) {
3462 hw_breakpoint_remove(env, reg);
3463 env->dr[reg] = t0;
3464 hw_breakpoint_insert(env, reg);
3465 } else if (reg == 7) {
3466 for (i = 0; i < 4; i++)
3467 hw_breakpoint_remove(env, i);
3468 env->dr[7] = t0;
3469 for (i = 0; i < 4; i++)
3470 hw_breakpoint_insert(env, i);
3471 } else
3472 env->dr[reg] = t0;
3473}
3474#endif
3475
3476void helper_lmsw(target_ulong t0)
3477{
3478 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
3479 if already set to one. */
3480 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3481 helper_write_crN(0, t0);
3482}
3483
3484void helper_clts(void)
3485{
3486 env->cr[0] &= ~CR0_TS_MASK;
3487 env->hflags &= ~HF_TS_MASK;
3488}
3489
3490void helper_invlpg(target_ulong addr)
3491{
3492 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3493 tlb_flush_page(env, addr);
3494}
3495
3496void helper_rdtsc(void)
3497{
3498 uint64_t val;
3499
3500 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3501 raise_exception(EXCP0D_GPF);
3502 }
3503 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3504
3505 val = cpu_get_tsc(env) + env->tsc_offset;
3506 EAX = (uint32_t)(val);
3507 EDX = (uint32_t)(val >> 32);
3508}
3509
3510#ifdef VBOX
3511void helper_rdtscp(void)
3512{
3513 uint64_t val;
3514 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3515 raise_exception(EXCP0D_GPF);
3516 }
3517
3518 val = cpu_get_tsc(env);
3519 EAX = (uint32_t)(val);
3520 EDX = (uint32_t)(val >> 32);
3521 if (cpu_rdmsr(env, MSR_K8_TSC_AUX, &val) == 0)
3522 ECX = (uint32_t)(val);
3523 else
3524 ECX = 0;
3525}
3526#endif /* VBOX */
3527
3528void helper_rdpmc(void)
3529{
3530#ifdef VBOX
3531 /* If X86_CR4_PCE is *not* set, then CPL must be zero. */
3532 if (!(env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3533 raise_exception(EXCP0D_GPF);
3534 }
3535 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
3536 EAX = 0;
3537 EDX = 0;
3538#else /* !VBOX */
3539 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3540 raise_exception(EXCP0D_GPF);
3541 }
3542 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3543
3544 /* currently unimplemented */
3545 raise_exception_err(EXCP06_ILLOP, 0);
3546#endif /* !VBOX */
3547}
3548
3549#if defined(CONFIG_USER_ONLY)
3550void helper_wrmsr(void)
3551{
3552}
3553
3554void helper_rdmsr(void)
3555{
3556}
3557#else
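/* WRMSR/RDMSR: the MSR index is taken from ECX and the 64-bit value is
 * passed in EDX:EAX. Under VBOX writes are additionally forwarded to CPUM
 * via cpu_wrmsr(), and reads of MSRs not handled here fall back to
 * cpu_rdmsr(). */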
3558void helper_wrmsr(void)
3559{
3560 uint64_t val;
3561
3562 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3563
3564 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3565
3566 switch((uint32_t)ECX) {
3567 case MSR_IA32_SYSENTER_CS:
3568 env->sysenter_cs = val & 0xffff;
3569 break;
3570 case MSR_IA32_SYSENTER_ESP:
3571 env->sysenter_esp = val;
3572 break;
3573 case MSR_IA32_SYSENTER_EIP:
3574 env->sysenter_eip = val;
3575 break;
3576 case MSR_IA32_APICBASE:
3577# ifndef VBOX /* The CPUMSetGuestMsr call below does this now. */
3578 cpu_set_apic_base(env, val);
3579# endif
3580 break;
3581 case MSR_EFER:
3582 {
3583 uint64_t update_mask;
3584 update_mask = 0;
3585 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3586 update_mask |= MSR_EFER_SCE;
3587 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3588 update_mask |= MSR_EFER_LME;
3589 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3590 update_mask |= MSR_EFER_FFXSR;
3591 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3592 update_mask |= MSR_EFER_NXE;
3593 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3594 update_mask |= MSR_EFER_SVME;
3595 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3596 update_mask |= MSR_EFER_FFXSR;
3597 cpu_load_efer(env, (env->efer & ~update_mask) |
3598 (val & update_mask));
3599 }
3600 break;
3601 case MSR_STAR:
3602 env->star = val;
3603 break;
3604 case MSR_PAT:
3605 env->pat = val;
3606 break;
3607 case MSR_VM_HSAVE_PA:
3608 env->vm_hsave = val;
3609 break;
3610#ifdef TARGET_X86_64
3611 case MSR_LSTAR:
3612 env->lstar = val;
3613 break;
3614 case MSR_CSTAR:
3615 env->cstar = val;
3616 break;
3617 case MSR_FMASK:
3618 env->fmask = val;
3619 break;
3620 case MSR_FSBASE:
3621 env->segs[R_FS].base = val;
3622 break;
3623 case MSR_GSBASE:
3624 env->segs[R_GS].base = val;
3625 break;
3626 case MSR_KERNELGSBASE:
3627 env->kernelgsbase = val;
3628 break;
3629#endif
3630# ifndef VBOX
3631 case MSR_MTRRphysBase(0):
3632 case MSR_MTRRphysBase(1):
3633 case MSR_MTRRphysBase(2):
3634 case MSR_MTRRphysBase(3):
3635 case MSR_MTRRphysBase(4):
3636 case MSR_MTRRphysBase(5):
3637 case MSR_MTRRphysBase(6):
3638 case MSR_MTRRphysBase(7):
3639 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3640 break;
3641 case MSR_MTRRphysMask(0):
3642 case MSR_MTRRphysMask(1):
3643 case MSR_MTRRphysMask(2):
3644 case MSR_MTRRphysMask(3):
3645 case MSR_MTRRphysMask(4):
3646 case MSR_MTRRphysMask(5):
3647 case MSR_MTRRphysMask(6):
3648 case MSR_MTRRphysMask(7):
3649 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3650 break;
3651 case MSR_MTRRfix64K_00000:
3652 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3653 break;
3654 case MSR_MTRRfix16K_80000:
3655 case MSR_MTRRfix16K_A0000:
3656 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3657 break;
3658 case MSR_MTRRfix4K_C0000:
3659 case MSR_MTRRfix4K_C8000:
3660 case MSR_MTRRfix4K_D0000:
3661 case MSR_MTRRfix4K_D8000:
3662 case MSR_MTRRfix4K_E0000:
3663 case MSR_MTRRfix4K_E8000:
3664 case MSR_MTRRfix4K_F0000:
3665 case MSR_MTRRfix4K_F8000:
3666 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3667 break;
3668 case MSR_MTRRdefType:
3669 env->mtrr_deftype = val;
3670 break;
3671# endif /* !VBOX */
3672 default:
3673# ifndef VBOX
3674 /* XXX: exception ? */
3675# endif
3676 break;
3677 }
3678
3679# ifdef VBOX
3680 /* call CPUM. */
3681 if (cpu_wrmsr(env, (uint32_t)ECX, val) != 0)
3682 {
3683 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3684 }
3685# endif
3686}
3687
3688void helper_rdmsr(void)
3689{
3690 uint64_t val;
3691
3692 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3693
3694 switch((uint32_t)ECX) {
3695 case MSR_IA32_SYSENTER_CS:
3696 val = env->sysenter_cs;
3697 break;
3698 case MSR_IA32_SYSENTER_ESP:
3699 val = env->sysenter_esp;
3700 break;
3701 case MSR_IA32_SYSENTER_EIP:
3702 val = env->sysenter_eip;
3703 break;
3704 case MSR_IA32_APICBASE:
3705 val = cpu_get_apic_base(env);
3706 break;
3707 case MSR_EFER:
3708 val = env->efer;
3709 break;
3710 case MSR_STAR:
3711 val = env->star;
3712 break;
3713 case MSR_PAT:
3714 val = env->pat;
3715 break;
3716 case MSR_VM_HSAVE_PA:
3717 val = env->vm_hsave;
3718 break;
3719# ifndef VBOX /* forward to CPUMQueryGuestMsr. */
3720 case MSR_IA32_PERF_STATUS:
3721 /* tsc_increment_by_tick */
3722 val = 1000ULL;
3723 /* CPU multiplier */
3724 val |= (((uint64_t)4ULL) << 40);
3725 break;
3726# endif /* !VBOX */
3727#ifdef TARGET_X86_64
3728 case MSR_LSTAR:
3729 val = env->lstar;
3730 break;
3731 case MSR_CSTAR:
3732 val = env->cstar;
3733 break;
3734 case MSR_FMASK:
3735 val = env->fmask;
3736 break;
3737 case MSR_FSBASE:
3738 val = env->segs[R_FS].base;
3739 break;
3740 case MSR_GSBASE:
3741 val = env->segs[R_GS].base;
3742 break;
3743 case MSR_KERNELGSBASE:
3744 val = env->kernelgsbase;
3745 break;
3746#endif
3747#ifdef USE_KQEMU
3748 case MSR_QPI_COMMBASE:
3749 if (env->kqemu_enabled) {
3750 val = kqemu_comm_base;
3751 } else {
3752 val = 0;
3753 }
3754 break;
3755#endif
3756# ifndef VBOX
3757 case MSR_MTRRphysBase(0):
3758 case MSR_MTRRphysBase(1):
3759 case MSR_MTRRphysBase(2):
3760 case MSR_MTRRphysBase(3):
3761 case MSR_MTRRphysBase(4):
3762 case MSR_MTRRphysBase(5):
3763 case MSR_MTRRphysBase(6):
3764 case MSR_MTRRphysBase(7):
3765 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
3766 break;
3767 case MSR_MTRRphysMask(0):
3768 case MSR_MTRRphysMask(1):
3769 case MSR_MTRRphysMask(2):
3770 case MSR_MTRRphysMask(3):
3771 case MSR_MTRRphysMask(4):
3772 case MSR_MTRRphysMask(5):
3773 case MSR_MTRRphysMask(6):
3774 case MSR_MTRRphysMask(7):
3775 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
3776 break;
3777 case MSR_MTRRfix64K_00000:
3778 val = env->mtrr_fixed[0];
3779 break;
3780 case MSR_MTRRfix16K_80000:
3781 case MSR_MTRRfix16K_A0000:
3782 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
3783 break;
3784 case MSR_MTRRfix4K_C0000:
3785 case MSR_MTRRfix4K_C8000:
3786 case MSR_MTRRfix4K_D0000:
3787 case MSR_MTRRfix4K_D8000:
3788 case MSR_MTRRfix4K_E0000:
3789 case MSR_MTRRfix4K_E8000:
3790 case MSR_MTRRfix4K_F0000:
3791 case MSR_MTRRfix4K_F8000:
3792 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
3793 break;
3794 case MSR_MTRRdefType:
3795 val = env->mtrr_deftype;
3796 break;
3797 case MSR_MTRRcap:
3798 if (env->cpuid_features & CPUID_MTRR)
3799 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
3800 else
3801 /* XXX: exception ? */
3802 val = 0;
3803 break;
3804# endif /* !VBOX */
3805 default:
3806# ifndef VBOX
3807 /* XXX: exception ? */
3808 val = 0;
3809# else /* VBOX */
3810 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
3811 {
3812 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3813 val = 0;
3814 }
3815# endif /* VBOX */
3816 break;
3817 }
3818 EAX = (uint32_t)(val);
3819 EDX = (uint32_t)(val >> 32);
3820
3821# ifdef VBOX_STRICT
3822 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
3823 val = 0;
3824 AssertMsg(val == RT_MAKE_U64(EAX, EDX), ("idMsr=%#x val=%#llx eax:edx=%#llx\n", (uint32_t)ECX, val, RT_MAKE_U64(EAX, EDX)));
3825# endif
3826}
3827#endif
3828
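/* LSL, LAR, VERR and VERW do not fault on a bad selector: they probe the
 * descriptor, apply the usual DPL/RPL visibility rules and report the
 * outcome through ZF (set in CC_SRC on success, cleared on failure). */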
3829target_ulong helper_lsl(target_ulong selector1)
3830{
3831 unsigned int limit;
3832 uint32_t e1, e2, eflags, selector;
3833 int rpl, dpl, cpl, type;
3834
3835 selector = selector1 & 0xffff;
3836 eflags = helper_cc_compute_all(CC_OP);
3837 if ((selector & 0xfffc) == 0)
3838 goto fail;
3839 if (load_segment(&e1, &e2, selector) != 0)
3840 goto fail;
3841 rpl = selector & 3;
3842 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3843 cpl = env->hflags & HF_CPL_MASK;
3844 if (e2 & DESC_S_MASK) {
3845 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3846 /* conforming */
3847 } else {
3848 if (dpl < cpl || dpl < rpl)
3849 goto fail;
3850 }
3851 } else {
3852 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3853 switch(type) {
3854 case 1:
3855 case 2:
3856 case 3:
3857 case 9:
3858 case 11:
3859 break;
3860 default:
3861 goto fail;
3862 }
3863 if (dpl < cpl || dpl < rpl) {
3864 fail:
3865 CC_SRC = eflags & ~CC_Z;
3866 return 0;
3867 }
3868 }
3869 limit = get_seg_limit(e1, e2);
3870 CC_SRC = eflags | CC_Z;
3871 return limit;
3872}
3873
3874target_ulong helper_lar(target_ulong selector1)
3875{
3876 uint32_t e1, e2, eflags, selector;
3877 int rpl, dpl, cpl, type;
3878
3879 selector = selector1 & 0xffff;
3880 eflags = helper_cc_compute_all(CC_OP);
3881 if ((selector & 0xfffc) == 0)
3882 goto fail;
3883 if (load_segment(&e1, &e2, selector) != 0)
3884 goto fail;
3885 rpl = selector & 3;
3886 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3887 cpl = env->hflags & HF_CPL_MASK;
3888 if (e2 & DESC_S_MASK) {
3889 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3890 /* conforming */
3891 } else {
3892 if (dpl < cpl || dpl < rpl)
3893 goto fail;
3894 }
3895 } else {
3896 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3897 switch(type) {
3898 case 1:
3899 case 2:
3900 case 3:
3901 case 4:
3902 case 5:
3903 case 9:
3904 case 11:
3905 case 12:
3906 break;
3907 default:
3908 goto fail;
3909 }
3910 if (dpl < cpl || dpl < rpl) {
3911 fail:
3912 CC_SRC = eflags & ~CC_Z;
3913 return 0;
3914 }
3915 }
3916 CC_SRC = eflags | CC_Z;
3917 return e2 & 0x00f0ff00;
3918}
3919
3920void helper_verr(target_ulong selector1)
3921{
3922 uint32_t e1, e2, eflags, selector;
3923 int rpl, dpl, cpl;
3924
3925 selector = selector1 & 0xffff;
3926 eflags = helper_cc_compute_all(CC_OP);
3927 if ((selector & 0xfffc) == 0)
3928 goto fail;
3929 if (load_segment(&e1, &e2, selector) != 0)
3930 goto fail;
3931 if (!(e2 & DESC_S_MASK))
3932 goto fail;
3933 rpl = selector & 3;
3934 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3935 cpl = env->hflags & HF_CPL_MASK;
3936 if (e2 & DESC_CS_MASK) {
3937 if (!(e2 & DESC_R_MASK))
3938 goto fail;
3939 if (!(e2 & DESC_C_MASK)) {
3940 if (dpl < cpl || dpl < rpl)
3941 goto fail;
3942 }
3943 } else {
3944 if (dpl < cpl || dpl < rpl) {
3945 fail:
3946 CC_SRC = eflags & ~CC_Z;
3947 return;
3948 }
3949 }
3950 CC_SRC = eflags | CC_Z;
3951}
3952
3953void helper_verw(target_ulong selector1)
3954{
3955 uint32_t e1, e2, eflags, selector;
3956 int rpl, dpl, cpl;
3957
3958 selector = selector1 & 0xffff;
3959 eflags = helper_cc_compute_all(CC_OP);
3960 if ((selector & 0xfffc) == 0)
3961 goto fail;
3962 if (load_segment(&e1, &e2, selector) != 0)
3963 goto fail;
3964 if (!(e2 & DESC_S_MASK))
3965 goto fail;
3966 rpl = selector & 3;
3967 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3968 cpl = env->hflags & HF_CPL_MASK;
3969 if (e2 & DESC_CS_MASK) {
3970 goto fail;
3971 } else {
3972 if (dpl < cpl || dpl < rpl)
3973 goto fail;
3974 if (!(e2 & DESC_W_MASK)) {
3975 fail:
3976 CC_SRC = eflags & ~CC_Z;
3977 return;
3978 }
3979 }
3980 CC_SRC = eflags | CC_Z;
3981}
3982
3983/* x87 FPU helpers */
3984
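/* Sets the requested exception bits in the FPU status word; if any of them is
   unmasked in the control word, the error-summary and busy bits are set as
   well so that a later helper_fwait()/fpu_raise_exception() reports the fault. */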
3985static void fpu_set_exception(int mask)
3986{
3987 env->fpus |= mask;
3988 if (env->fpus & (~env->fpuc & FPUC_EM))
3989 env->fpus |= FPUS_SE | FPUS_B;
3990}
3991
3992static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
3993{
3994 if (b == 0.0)
3995 fpu_set_exception(FPUS_ZE);
3996 return a / b;
3997}
3998
3999static void fpu_raise_exception(void)
4000{
4001 if (env->cr[0] & CR0_NE_MASK) {
4002 raise_exception(EXCP10_COPR);
4003 }
4004#if !defined(CONFIG_USER_ONLY)
4005 else {
4006 cpu_set_ferr(env);
4007 }
4008#endif
4009}
4010
4011void helper_flds_FT0(uint32_t val)
4012{
4013 union {
4014 float32 f;
4015 uint32_t i;
4016 } u;
4017 u.i = val;
4018 FT0 = float32_to_floatx(u.f, &env->fp_status);
4019}
4020
4021void helper_fldl_FT0(uint64_t val)
4022{
4023 union {
4024 float64 f;
4025 uint64_t i;
4026 } u;
4027 u.i = val;
4028 FT0 = float64_to_floatx(u.f, &env->fp_status);
4029}
4030
4031void helper_fildl_FT0(int32_t val)
4032{
4033 FT0 = int32_to_floatx(val, &env->fp_status);
4034}
4035
4036void helper_flds_ST0(uint32_t val)
4037{
4038 int new_fpstt;
4039 union {
4040 float32 f;
4041 uint32_t i;
4042 } u;
4043 new_fpstt = (env->fpstt - 1) & 7;
4044 u.i = val;
4045 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4046 env->fpstt = new_fpstt;
4047 env->fptags[new_fpstt] = 0; /* validate stack entry */
4048}
4049
4050void helper_fldl_ST0(uint64_t val)
4051{
4052 int new_fpstt;
4053 union {
4054 float64 f;
4055 uint64_t i;
4056 } u;
4057 new_fpstt = (env->fpstt - 1) & 7;
4058 u.i = val;
4059 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4060 env->fpstt = new_fpstt;
4061 env->fptags[new_fpstt] = 0; /* validate stack entry */
4062}
4063
4064void helper_fildl_ST0(int32_t val)
4065{
4066 int new_fpstt;
4067 new_fpstt = (env->fpstt - 1) & 7;
4068 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4069 env->fpstt = new_fpstt;
4070 env->fptags[new_fpstt] = 0; /* validate stack entry */
4071}
4072
4073void helper_fildll_ST0(int64_t val)
4074{
4075 int new_fpstt;
4076 new_fpstt = (env->fpstt - 1) & 7;
4077 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4078 env->fpstt = new_fpstt;
4079 env->fptags[new_fpstt] = 0; /* validate stack entry */
4080}
4081
4082#ifndef VBOX
4083uint32_t helper_fsts_ST0(void)
4084#else
4085RTCCUINTREG helper_fsts_ST0(void)
4086#endif
4087{
4088 union {
4089 float32 f;
4090 uint32_t i;
4091 } u;
4092 u.f = floatx_to_float32(ST0, &env->fp_status);
4093 return u.i;
4094}
4095
4096uint64_t helper_fstl_ST0(void)
4097{
4098 union {
4099 float64 f;
4100 uint64_t i;
4101 } u;
4102 u.f = floatx_to_float64(ST0, &env->fp_status);
4103 return u.i;
4104}
4105
4106#ifndef VBOX
4107int32_t helper_fist_ST0(void)
4108#else
4109RTCCINTREG helper_fist_ST0(void)
4110#endif
4111{
4112 int32_t val;
4113 val = floatx_to_int32(ST0, &env->fp_status);
4114 if (val != (int16_t)val)
4115 val = -32768;
4116 return val;
4117}
4118
4119#ifndef VBOX
4120int32_t helper_fistl_ST0(void)
4121#else
4122RTCCINTREG helper_fistl_ST0(void)
4123#endif
4124{
4125 int32_t val;
4126 val = floatx_to_int32(ST0, &env->fp_status);
4127 return val;
4128}
4129
4130int64_t helper_fistll_ST0(void)
4131{
4132 int64_t val;
4133 val = floatx_to_int64(ST0, &env->fp_status);
4134 return val;
4135}
4136
4137#ifndef VBOX
4138int32_t helper_fistt_ST0(void)
4139#else
4140RTCCINTREG helper_fistt_ST0(void)
4141#endif
4142{
4143 int32_t val;
4144 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4145 if (val != (int16_t)val)
4146 val = -32768;
4147 return val;
4148}
4149
4150#ifndef VBOX
4151int32_t helper_fisttl_ST0(void)
4152#else
4153RTCCINTREG helper_fisttl_ST0(void)
4154#endif
4155{
4156 int32_t val;
4157 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4158 return val;
4159}
4160
4161int64_t helper_fisttll_ST0(void)
4162{
4163 int64_t val;
4164 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4165 return val;
4166}
4167
4168void helper_fldt_ST0(target_ulong ptr)
4169{
4170 int new_fpstt;
4171 new_fpstt = (env->fpstt - 1) & 7;
4172 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4173 env->fpstt = new_fpstt;
4174 env->fptags[new_fpstt] = 0; /* validate stack entry */
4175}
4176
4177void helper_fstt_ST0(target_ulong ptr)
4178{
4179 helper_fstt(ST0, ptr);
4180}
4181
4182void helper_fpush(void)
4183{
4184 fpush();
4185}
4186
4187void helper_fpop(void)
4188{
4189 fpop();
4190}
4191
4192void helper_fdecstp(void)
4193{
4194 env->fpstt = (env->fpstt - 1) & 7;
4195 env->fpus &= (~0x4700);
4196}
4197
4198void helper_fincstp(void)
4199{
4200 env->fpstt = (env->fpstt + 1) & 7;
4201 env->fpus &= (~0x4700);
4202}
4203
4204/* FPU move */
4205
4206void helper_ffree_STN(int st_index)
4207{
4208 env->fptags[(env->fpstt + st_index) & 7] = 1;
4209}
4210
4211void helper_fmov_ST0_FT0(void)
4212{
4213 ST0 = FT0;
4214}
4215
4216void helper_fmov_FT0_STN(int st_index)
4217{
4218 FT0 = ST(st_index);
4219}
4220
4221void helper_fmov_ST0_STN(int st_index)
4222{
4223 ST0 = ST(st_index);
4224}
4225
4226void helper_fmov_STN_ST0(int st_index)
4227{
4228 ST(st_index) = ST0;
4229}
4230
4231void helper_fxchg_ST0_STN(int st_index)
4232{
4233 CPU86_LDouble tmp;
4234 tmp = ST(st_index);
4235 ST(st_index) = ST0;
4236 ST0 = tmp;
4237}
4238
4239/* FPU operations */
4240
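/* floatx_compare() returns -1 (less), 0 (equal), 1 (greater) or 2 (unordered);
   the tables below are indexed with (ret + 1) and map the result onto the x87
   condition codes C0/C2/C3 (FCOM/FUCOM) resp. CF/PF/ZF (FCOMI/FUCOMI). */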
4241static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
4242
4243void helper_fcom_ST0_FT0(void)
4244{
4245 int ret;
4246
4247 ret = floatx_compare(ST0, FT0, &env->fp_status);
4248 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4249}
4250
4251void helper_fucom_ST0_FT0(void)
4252{
4253 int ret;
4254
4255 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4256 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4257}
4258
4259static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
4260
4261void helper_fcomi_ST0_FT0(void)
4262{
4263 int eflags;
4264 int ret;
4265
4266 ret = floatx_compare(ST0, FT0, &env->fp_status);
4267 eflags = helper_cc_compute_all(CC_OP);
4268 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4269 CC_SRC = eflags;
4270}
4271
4272void helper_fucomi_ST0_FT0(void)
4273{
4274 int eflags;
4275 int ret;
4276
4277 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4278 eflags = helper_cc_compute_all(CC_OP);
4279 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4280 CC_SRC = eflags;
4281}
4282
4283void helper_fadd_ST0_FT0(void)
4284{
4285 ST0 += FT0;
4286}
4287
4288void helper_fmul_ST0_FT0(void)
4289{
4290 ST0 *= FT0;
4291}
4292
4293void helper_fsub_ST0_FT0(void)
4294{
4295 ST0 -= FT0;
4296}
4297
4298void helper_fsubr_ST0_FT0(void)
4299{
4300 ST0 = FT0 - ST0;
4301}
4302
4303void helper_fdiv_ST0_FT0(void)
4304{
4305 ST0 = helper_fdiv(ST0, FT0);
4306}
4307
4308void helper_fdivr_ST0_FT0(void)
4309{
4310 ST0 = helper_fdiv(FT0, ST0);
4311}
4312
4313/* fp operations between STN and ST0 */
4314
4315void helper_fadd_STN_ST0(int st_index)
4316{
4317 ST(st_index) += ST0;
4318}
4319
4320void helper_fmul_STN_ST0(int st_index)
4321{
4322 ST(st_index) *= ST0;
4323}
4324
4325void helper_fsub_STN_ST0(int st_index)
4326{
4327 ST(st_index) -= ST0;
4328}
4329
4330void helper_fsubr_STN_ST0(int st_index)
4331{
4332 CPU86_LDouble *p;
4333 p = &ST(st_index);
4334 *p = ST0 - *p;
4335}
4336
4337void helper_fdiv_STN_ST0(int st_index)
4338{
4339 CPU86_LDouble *p;
4340 p = &ST(st_index);
4341 *p = helper_fdiv(*p, ST0);
4342}
4343
4344void helper_fdivr_STN_ST0(int st_index)
4345{
4346 CPU86_LDouble *p;
4347 p = &ST(st_index);
4348 *p = helper_fdiv(ST0, *p);
4349}
4350
4351/* misc FPU operations */
4352void helper_fchs_ST0(void)
4353{
4354 ST0 = floatx_chs(ST0);
4355}
4356
4357void helper_fabs_ST0(void)
4358{
4359 ST0 = floatx_abs(ST0);
4360}
4361
4362void helper_fld1_ST0(void)
4363{
4364 ST0 = f15rk[1];
4365}
4366
4367void helper_fldl2t_ST0(void)
4368{
4369 ST0 = f15rk[6];
4370}
4371
4372void helper_fldl2e_ST0(void)
4373{
4374 ST0 = f15rk[5];
4375}
4376
4377void helper_fldpi_ST0(void)
4378{
4379 ST0 = f15rk[2];
4380}
4381
4382void helper_fldlg2_ST0(void)
4383{
4384 ST0 = f15rk[3];
4385}
4386
4387void helper_fldln2_ST0(void)
4388{
4389 ST0 = f15rk[4];
4390}
4391
4392void helper_fldz_ST0(void)
4393{
4394 ST0 = f15rk[0];
4395}
4396
4397void helper_fldz_FT0(void)
4398{
4399 FT0 = f15rk[0];
4400}
4401
4402#ifndef VBOX
4403uint32_t helper_fnstsw(void)
4404#else
4405RTCCUINTREG helper_fnstsw(void)
4406#endif
4407{
4408 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4409}
4410
4411#ifndef VBOX
4412uint32_t helper_fnstcw(void)
4413#else
4414RTCCUINTREG helper_fnstcw(void)
4415#endif
4416{
4417 return env->fpuc;
4418}
4419
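/* Propagates the rounding control (and, for 80-bit builds, the precision
   control) field of the x87 control word into the softfloat status used by
   the helpers above. */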
4420static void update_fp_status(void)
4421{
4422 int rnd_type;
4423
4424 /* set rounding mode */
4425 switch(env->fpuc & RC_MASK) {
4426 default:
4427 case RC_NEAR:
4428 rnd_type = float_round_nearest_even;
4429 break;
4430 case RC_DOWN:
4431 rnd_type = float_round_down;
4432 break;
4433 case RC_UP:
4434 rnd_type = float_round_up;
4435 break;
4436 case RC_CHOP:
4437 rnd_type = float_round_to_zero;
4438 break;
4439 }
4440 set_float_rounding_mode(rnd_type, &env->fp_status);
4441#ifdef FLOATX80
4442 switch((env->fpuc >> 8) & 3) {
4443 case 0:
4444 rnd_type = 32;
4445 break;
4446 case 2:
4447 rnd_type = 64;
4448 break;
4449 case 3:
4450 default:
4451 rnd_type = 80;
4452 break;
4453 }
4454 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4455#endif
4456}
4457
4458void helper_fldcw(uint32_t val)
4459{
4460 env->fpuc = val;
4461 update_fp_status();
4462}
4463
4464void helper_fclex(void)
4465{
4466 env->fpus &= 0x7f00;
4467}
4468
4469void helper_fwait(void)
4470{
4471 if (env->fpus & FPUS_SE)
4472 fpu_raise_exception();
4473}
4474
4475void helper_fninit(void)
4476{
4477 env->fpus = 0;
4478 env->fpstt = 0;
4479 env->fpuc = 0x37f;
4480 env->fptags[0] = 1;
4481 env->fptags[1] = 1;
4482 env->fptags[2] = 1;
4483 env->fptags[3] = 1;
4484 env->fptags[4] = 1;
4485 env->fptags[5] = 1;
4486 env->fptags[6] = 1;
4487 env->fptags[7] = 1;
4488}
4489
4490/* BCD ops */
4491
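/* FBLD/FBSTP operate on the 80-bit packed BCD format: bytes 0..8 hold 18
   decimal digits (two per byte, least significant byte first) and bit 7 of
   byte 9 holds the sign. */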
4492void helper_fbld_ST0(target_ulong ptr)
4493{
4494 CPU86_LDouble tmp;
4495 uint64_t val;
4496 unsigned int v;
4497 int i;
4498
4499 val = 0;
4500 for(i = 8; i >= 0; i--) {
4501 v = ldub(ptr + i);
4502 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4503 }
4504 tmp = val;
4505 if (ldub(ptr + 9) & 0x80)
4506 tmp = -tmp;
4507 fpush();
4508 ST0 = tmp;
4509}
4510
4511void helper_fbst_ST0(target_ulong ptr)
4512{
4513 int v;
4514 target_ulong mem_ref, mem_end;
4515 int64_t val;
4516
4517 val = floatx_to_int64(ST0, &env->fp_status);
4518 mem_ref = ptr;
4519 mem_end = mem_ref + 9;
4520 if (val < 0) {
4521 stb(mem_end, 0x80);
4522 val = -val;
4523 } else {
4524 stb(mem_end, 0x00);
4525 }
4526 while (mem_ref < mem_end) {
4527 if (val == 0)
4528 break;
4529 v = val % 100;
4530 val = val / 100;
4531 v = ((v / 10) << 4) | (v % 10);
4532 stb(mem_ref++, v);
4533 }
4534 while (mem_ref < mem_end) {
4535 stb(mem_ref++, 0);
4536 }
4537}
4538
4539void helper_f2xm1(void)
4540{
4541 ST0 = pow(2.0,ST0) - 1.0;
4542}
4543
4544void helper_fyl2x(void)
4545{
4546 CPU86_LDouble fptemp;
4547
4548 fptemp = ST0;
4549 if (fptemp>0.0){
4550 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4551 ST1 *= fptemp;
4552 fpop();
4553 } else {
4554 env->fpus &= (~0x4700);
4555 env->fpus |= 0x400;
4556 }
4557}
4558
4559void helper_fptan(void)
4560{
4561 CPU86_LDouble fptemp;
4562
4563 fptemp = ST0;
4564 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4565 env->fpus |= 0x400;
4566 } else {
4567 ST0 = tan(fptemp);
4568 fpush();
4569 ST0 = 1.0;
4570 env->fpus &= (~0x400); /* C2 <-- 0 */
4571 /* the above code is for |arg| < 2**52 only */
4572 }
4573}
4574
4575void helper_fpatan(void)
4576{
4577 CPU86_LDouble fptemp, fpsrcop;
4578
4579 fpsrcop = ST1;
4580 fptemp = ST0;
4581 ST1 = atan2(fpsrcop,fptemp);
4582 fpop();
4583}
4584
4585void helper_fxtract(void)
4586{
4587 CPU86_LDoubleU temp;
4588 unsigned int expdif;
4589
4590 temp.d = ST0;
4591 expdif = EXPD(temp) - EXPBIAS;
4592 /*DP exponent bias*/
4593 ST0 = expdif;
4594 fpush();
4595 BIASEXPONENT(temp);
4596 ST0 = temp.d;
4597}
4598
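/* FPREM1/FPREM compute the partial remainder of ST0 by ST1 (round-to-nearest
   resp. truncating quotient). If the exponent difference is small enough the
   reduction finishes in one step and the three low quotient bits are reported
   in C0, C3 and C1; otherwise C2 is set and only a partial reduction is done,
   so the guest is expected to re-execute the instruction. */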
4599void helper_fprem1(void)
4600{
4601 CPU86_LDouble dblq, fpsrcop, fptemp;
4602 CPU86_LDoubleU fpsrcop1, fptemp1;
4603 int expdif;
4604 signed long long int q;
4605
4606#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
4607 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4608#else
4609 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4610#endif
4611 ST0 = 0.0 / 0.0; /* NaN */
4612 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4613 return;
4614 }
4615
4616 fpsrcop = ST0;
4617 fptemp = ST1;
4618 fpsrcop1.d = fpsrcop;
4619 fptemp1.d = fptemp;
4620 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4621
4622 if (expdif < 0) {
4623 /* optimisation? taken from the AMD docs */
4624 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4625 /* ST0 is unchanged */
4626 return;
4627 }
4628
4629 if (expdif < 53) {
4630 dblq = fpsrcop / fptemp;
4631 /* round dblq towards nearest integer */
4632 dblq = rint(dblq);
4633 ST0 = fpsrcop - fptemp * dblq;
4634
4635 /* convert dblq to q by truncating towards zero */
4636 if (dblq < 0.0)
4637 q = (signed long long int)(-dblq);
4638 else
4639 q = (signed long long int)dblq;
4640
4641 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4642 /* (C0,C3,C1) <-- (q2,q1,q0) */
4643 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4644 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4645 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4646 } else {
4647 env->fpus |= 0x400; /* C2 <-- 1 */
4648 fptemp = pow(2.0, expdif - 50);
4649 fpsrcop = (ST0 / ST1) / fptemp;
4650 /* fpsrcop = integer obtained by chopping */
4651 fpsrcop = (fpsrcop < 0.0) ?
4652 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4653 ST0 -= (ST1 * fpsrcop * fptemp);
4654 }
4655}
4656
4657void helper_fprem(void)
4658{
4659 CPU86_LDouble dblq, fpsrcop, fptemp;
4660 CPU86_LDoubleU fpsrcop1, fptemp1;
4661 int expdif;
4662 signed long long int q;
4663
4664#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
4665 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4666#else
4667 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4668#endif
4669 ST0 = 0.0 / 0.0; /* NaN */
4670 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4671 return;
4672 }
4673
4674 fpsrcop = (CPU86_LDouble)ST0;
4675 fptemp = (CPU86_LDouble)ST1;
4676 fpsrcop1.d = fpsrcop;
4677 fptemp1.d = fptemp;
4678 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4679
4680 if (expdif < 0) {
4681 /* optimisation? taken from the AMD docs */
4682 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4683 /* ST0 is unchanged */
4684 return;
4685 }
4686
4687 if ( expdif < 53 ) {
4688 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4689 /* round dblq towards zero */
4690 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4691 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4692
4693 /* convert dblq to q by truncating towards zero */
4694 if (dblq < 0.0)
4695 q = (signed long long int)(-dblq);
4696 else
4697 q = (signed long long int)dblq;
4698
4699 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4700 /* (C0,C3,C1) <-- (q2,q1,q0) */
4701 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4702 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4703 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4704 } else {
4705 int N = 32 + (expdif % 32); /* as per AMD docs */
4706 env->fpus |= 0x400; /* C2 <-- 1 */
4707 fptemp = pow(2.0, (double)(expdif - N));
4708 fpsrcop = (ST0 / ST1) / fptemp;
4709 /* fpsrcop = integer obtained by chopping */
4710 fpsrcop = (fpsrcop < 0.0) ?
4711 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4712 ST0 -= (ST1 * fpsrcop * fptemp);
4713 }
4714}
4715
4716void helper_fyl2xp1(void)
4717{
4718 CPU86_LDouble fptemp;
4719
4720 fptemp = ST0;
4721 if ((fptemp+1.0)>0.0) {
4722 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4723 ST1 *= fptemp;
4724 fpop();
4725 } else {
4726 env->fpus &= (~0x4700);
4727 env->fpus |= 0x400;
4728 }
4729}
4730
4731void helper_fsqrt(void)
4732{
4733 CPU86_LDouble fptemp;
4734
4735 fptemp = ST0;
4736 if (fptemp<0.0) {
4737 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4738 env->fpus |= 0x400;
4739 }
4740 ST0 = sqrt(fptemp);
4741}
4742
4743void helper_fsincos(void)
4744{
4745 CPU86_LDouble fptemp;
4746
4747 fptemp = ST0;
4748 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4749 env->fpus |= 0x400;
4750 } else {
4751 ST0 = sin(fptemp);
4752 fpush();
4753 ST0 = cos(fptemp);
4754 env->fpus &= (~0x400); /* C2 <-- 0 */
4755 /* the above code is for |arg| < 2**63 only */
4756 }
4757}
4758
4759void helper_frndint(void)
4760{
4761 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4762}
4763
4764void helper_fscale(void)
4765{
4766 ST0 = ldexp (ST0, (int)(ST1));
4767}
4768
4769void helper_fsin(void)
4770{
4771 CPU86_LDouble fptemp;
4772
4773 fptemp = ST0;
4774 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4775 env->fpus |= 0x400;
4776 } else {
4777 ST0 = sin(fptemp);
4778 env->fpus &= (~0x400); /* C2 <-- 0 */
4779 /* the above code is for |arg| < 2**53 only */
4780 }
4781}
4782
4783void helper_fcos(void)
4784{
4785 CPU86_LDouble fptemp;
4786
4787 fptemp = ST0;
4788 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4789 env->fpus |= 0x400;
4790 } else {
4791 ST0 = cos(fptemp);
4792 env->fpus &= (~0x400); /* C2 <-- 0 */
4793 /* the above code is for |arg| < 2**63 only */
4794 }
4795}
4796
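/* FXAM classifies ST0: C1 receives the sign, while C3/C2/C0 encode the class
   (NaN, infinity, zero, denormal or normal) as set up below. */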
4797void helper_fxam_ST0(void)
4798{
4799 CPU86_LDoubleU temp;
4800 int expdif;
4801
4802 temp.d = ST0;
4803
4804 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4805 if (SIGND(temp))
4806 env->fpus |= 0x200; /* C1 <-- 1 */
4807
4808 /* XXX: test fptags too */
4809 expdif = EXPD(temp);
4810 if (expdif == MAXEXPD) {
4811#ifdef USE_X86LDOUBLE
4812 if (MANTD(temp) == 0x8000000000000000ULL)
4813#else
4814 if (MANTD(temp) == 0)
4815#endif
4816 env->fpus |= 0x500 /*Infinity*/;
4817 else
4818 env->fpus |= 0x100 /*NaN*/;
4819 } else if (expdif == 0) {
4820 if (MANTD(temp) == 0)
4821 env->fpus |= 0x4000 /*Zero*/;
4822 else
4823 env->fpus |= 0x4400 /*Denormal*/;
4824 } else {
4825 env->fpus |= 0x400;
4826 }
4827}
4828
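/* FSTENV/FLDENV store resp. load the 14/28 byte FPU environment image. The
   tag word uses two bits per register: 00 = valid, 01 = zero, 10 = special
   (NaN, infinity, denormal), 11 = empty; it is recomputed here from fptags[]
   and the register contents. */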
4829void helper_fstenv(target_ulong ptr, int data32)
4830{
4831 int fpus, fptag, exp, i;
4832 uint64_t mant;
4833 CPU86_LDoubleU tmp;
4834
4835 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4836 fptag = 0;
4837 for (i=7; i>=0; i--) {
4838 fptag <<= 2;
4839 if (env->fptags[i]) {
4840 fptag |= 3;
4841 } else {
4842 tmp.d = env->fpregs[i].d;
4843 exp = EXPD(tmp);
4844 mant = MANTD(tmp);
4845 if (exp == 0 && mant == 0) {
4846 /* zero */
4847 fptag |= 1;
4848 } else if (exp == 0 || exp == MAXEXPD
4849#ifdef USE_X86LDOUBLE
4850 || (mant & (1LL << 63)) == 0
4851#endif
4852 ) {
4853 /* NaNs, infinity, denormal */
4854 fptag |= 2;
4855 }
4856 }
4857 }
4858 if (data32) {
4859 /* 32 bit */
4860 stl(ptr, env->fpuc);
4861 stl(ptr + 4, fpus);
4862 stl(ptr + 8, fptag);
4863 stl(ptr + 12, 0); /* fpip */
4864 stl(ptr + 16, 0); /* fpcs */
4865 stl(ptr + 20, 0); /* fpoo */
4866 stl(ptr + 24, 0); /* fpos */
4867 } else {
4868 /* 16 bit */
4869 stw(ptr, env->fpuc);
4870 stw(ptr + 2, fpus);
4871 stw(ptr + 4, fptag);
4872 stw(ptr + 6, 0);
4873 stw(ptr + 8, 0);
4874 stw(ptr + 10, 0);
4875 stw(ptr + 12, 0);
4876 }
4877}
4878
4879void helper_fldenv(target_ulong ptr, int data32)
4880{
4881 int i, fpus, fptag;
4882
4883 if (data32) {
4884 env->fpuc = lduw(ptr);
4885 fpus = lduw(ptr + 4);
4886 fptag = lduw(ptr + 8);
4887 }
4888 else {
4889 env->fpuc = lduw(ptr);
4890 fpus = lduw(ptr + 2);
4891 fptag = lduw(ptr + 4);
4892 }
4893 env->fpstt = (fpus >> 11) & 7;
4894 env->fpus = fpus & ~0x3800;
4895 for(i = 0;i < 8; i++) {
4896 env->fptags[i] = ((fptag & 3) == 3);
4897 fptag >>= 2;
4898 }
4899}
4900
4901void helper_fsave(target_ulong ptr, int data32)
4902{
4903 CPU86_LDouble tmp;
4904 int i;
4905
4906 helper_fstenv(ptr, data32);
4907
4908 ptr += (14 << data32);
4909 for(i = 0;i < 8; i++) {
4910 tmp = ST(i);
4911 helper_fstt(tmp, ptr);
4912 ptr += 10;
4913 }
4914
4915 /* fninit */
4916 env->fpus = 0;
4917 env->fpstt = 0;
4918 env->fpuc = 0x37f;
4919 env->fptags[0] = 1;
4920 env->fptags[1] = 1;
4921 env->fptags[2] = 1;
4922 env->fptags[3] = 1;
4923 env->fptags[4] = 1;
4924 env->fptags[5] = 1;
4925 env->fptags[6] = 1;
4926 env->fptags[7] = 1;
4927}
4928
4929void helper_frstor(target_ulong ptr, int data32)
4930{
4931 CPU86_LDouble tmp;
4932 int i;
4933
4934 helper_fldenv(ptr, data32);
4935 ptr += (14 << data32);
4936
4937 for(i = 0;i < 8; i++) {
4938 tmp = helper_fldt(ptr);
4939 ST(i) = tmp;
4940 ptr += 10;
4941 }
4942}
4943
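/* FXSAVE/FXRSTOR use the 512-byte extended save area. The tag word is stored
   in abridged form (one bit per register, 1 = non-empty, hence the ^ 0xff),
   and the XMM registers are only handled when CR4.OSFXSR is set; with
   EFER.FFXSR at CPL 0 in long mode the XMM part is skipped ("fast" variant). */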
4944void helper_fxsave(target_ulong ptr, int data64)
4945{
4946 int fpus, fptag, i, nb_xmm_regs;
4947 CPU86_LDouble tmp;
4948 target_ulong addr;
4949
4950 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4951 fptag = 0;
4952 for(i = 0; i < 8; i++) {
4953 fptag |= (env->fptags[i] << i);
4954 }
4955 stw(ptr, env->fpuc);
4956 stw(ptr + 2, fpus);
4957 stw(ptr + 4, fptag ^ 0xff);
4958#ifdef TARGET_X86_64
4959 if (data64) {
4960 stq(ptr + 0x08, 0); /* rip */
4961 stq(ptr + 0x10, 0); /* rdp */
4962 } else
4963#endif
4964 {
4965 stl(ptr + 0x08, 0); /* eip */
4966 stl(ptr + 0x0c, 0); /* sel */
4967 stl(ptr + 0x10, 0); /* dp */
4968 stl(ptr + 0x14, 0); /* sel */
4969 }
4970
4971 addr = ptr + 0x20;
4972 for(i = 0;i < 8; i++) {
4973 tmp = ST(i);
4974 helper_fstt(tmp, addr);
4975 addr += 16;
4976 }
4977
4978 if (env->cr[4] & CR4_OSFXSR_MASK) {
4979 /* XXX: finish it */
4980 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
4981 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
4982 if (env->hflags & HF_CS64_MASK)
4983 nb_xmm_regs = 16;
4984 else
4985 nb_xmm_regs = 8;
4986 addr = ptr + 0xa0;
4987 /* Fast FXSAVE leaves out the XMM registers */
4988 if (!(env->efer & MSR_EFER_FFXSR)
4989 || (env->hflags & HF_CPL_MASK)
4990 || !(env->hflags & HF_LMA_MASK)) {
4991 for(i = 0; i < nb_xmm_regs; i++) {
4992 stq(addr, env->xmm_regs[i].XMM_Q(0));
4993 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
4994 addr += 16;
4995 }
4996 }
4997 }
4998}
4999
5000void helper_fxrstor(target_ulong ptr, int data64)
5001{
5002 int i, fpus, fptag, nb_xmm_regs;
5003 CPU86_LDouble tmp;
5004 target_ulong addr;
5005
5006 env->fpuc = lduw(ptr);
5007 fpus = lduw(ptr + 2);
5008 fptag = lduw(ptr + 4);
5009 env->fpstt = (fpus >> 11) & 7;
5010 env->fpus = fpus & ~0x3800;
5011 fptag ^= 0xff;
5012 for(i = 0;i < 8; i++) {
5013 env->fptags[i] = ((fptag >> i) & 1);
5014 }
5015
5016 addr = ptr + 0x20;
5017 for(i = 0;i < 8; i++) {
5018 tmp = helper_fldt(addr);
5019 ST(i) = tmp;
5020 addr += 16;
5021 }
5022
5023 if (env->cr[4] & CR4_OSFXSR_MASK) {
5024 /* XXX: finish it */
5025 env->mxcsr = ldl(ptr + 0x18);
5026 //ldl(ptr + 0x1c);
5027 if (env->hflags & HF_CS64_MASK)
5028 nb_xmm_regs = 16;
5029 else
5030 nb_xmm_regs = 8;
5031 addr = ptr + 0xa0;
5032 /* Fast FXRSTOR leaves out the XMM registers */
5033 if (!(env->efer & MSR_EFER_FFXSR)
5034 || (env->hflags & HF_CPL_MASK)
5035 || !(env->hflags & HF_LMA_MASK)) {
5036 for(i = 0; i < nb_xmm_regs; i++) {
5037#if !defined(VBOX) || __GNUC__ < 4
5038 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5039 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5040#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5041# if 1
5042 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5043 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5044 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5045 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5046# else
5047 /* this works fine on Mac OS X, gcc 4.0.1 */
5048 uint64_t u64 = ldq(addr);
5049 env->xmm_regs[i].XMM_Q(0) = u64;
5050 u64 = ldq(addr + 8);
5051 env->xmm_regs[i].XMM_Q(1) = u64;
5052# endif
5053#endif
5054 addr += 16;
5055 }
5056 }
5057 }
5058}
5059
5060#ifndef USE_X86LDOUBLE
5061
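/* Conversion between the host double used as CPU86_LDouble in this build and
   the 80-bit extended image kept in guest memory: the exponent is re-biased
   (1023 <-> 16383) and the integer bit, implicit in double, is made explicit. */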
5062void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5063{
5064 CPU86_LDoubleU temp;
5065 int e;
5066
5067 temp.d = f;
5068 /* mantissa */
5069 *pmant = (MANTD(temp) << 11) | (1LL << 63);
5070 /* exponent + sign */
5071 e = EXPD(temp) - EXPBIAS + 16383;
5072 e |= SIGND(temp) >> 16;
5073 *pexp = e;
5074}
5075
5076CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5077{
5078 CPU86_LDoubleU temp;
5079 int e;
5080 uint64_t ll;
5081
5082 /* XXX: handle overflow ? */
5083 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
5084 e |= (upper >> 4) & 0x800; /* sign */
5085 ll = (mant >> 11) & ((1LL << 52) - 1);
5086#ifdef __arm__
5087 temp.l.upper = (e << 20) | (ll >> 32);
5088 temp.l.lower = ll;
5089#else
5090 temp.ll = ll | ((uint64_t)e << 52);
5091#endif
5092 return temp.d;
5093}
5094
5095#else
5096
5097void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5098{
5099 CPU86_LDoubleU temp;
5100
5101 temp.d = f;
5102 *pmant = temp.l.lower;
5103 *pexp = temp.l.upper;
5104}
5105
5106CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5107{
5108 CPU86_LDoubleU temp;
5109
5110 temp.l.upper = upper;
5111 temp.l.lower = mant;
5112 return temp.d;
5113}
5114#endif
5115
5116#ifdef TARGET_X86_64
5117
5118//#define DEBUG_MULDIV
5119
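/* 128-bit arithmetic helpers for the 64-bit MUL/IMUL/DIV/IDIV helpers below;
   div64() performs a simple bit-by-bit restoring division of the 128-bit
   dividend in *phigh:*plow and returns non-zero on quotient overflow. */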
5120static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
5121{
5122 *plow += a;
5123 /* carry test */
5124 if (*plow < a)
5125 (*phigh)++;
5126 *phigh += b;
5127}
5128
5129static void neg128(uint64_t *plow, uint64_t *phigh)
5130{
5131 *plow = ~ *plow;
5132 *phigh = ~ *phigh;
5133 add128(plow, phigh, 1, 0);
5134}
5135
5136/* return TRUE if overflow */
5137static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5138{
5139 uint64_t q, r, a1, a0;
5140 int i, qb, ab;
5141
5142 a0 = *plow;
5143 a1 = *phigh;
5144 if (a1 == 0) {
5145 q = a0 / b;
5146 r = a0 % b;
5147 *plow = q;
5148 *phigh = r;
5149 } else {
5150 if (a1 >= b)
5151 return 1;
5152 /* XXX: use a better algorithm */
5153 for(i = 0; i < 64; i++) {
5154 ab = a1 >> 63;
5155 a1 = (a1 << 1) | (a0 >> 63);
5156 if (ab || a1 >= b) {
5157 a1 -= b;
5158 qb = 1;
5159 } else {
5160 qb = 0;
5161 }
5162 a0 = (a0 << 1) | qb;
5163 }
5164#if defined(DEBUG_MULDIV)
5165 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5166 *phigh, *plow, b, a0, a1);
5167#endif
5168 *plow = a0;
5169 *phigh = a1;
5170 }
5171 return 0;
5172}
5173
5174/* return TRUE if overflow */
5175static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5176{
5177 int sa, sb;
5178 sa = ((int64_t)*phigh < 0);
5179 if (sa)
5180 neg128(plow, phigh);
5181 sb = (b < 0);
5182 if (sb)
5183 b = -b;
5184 if (div64(plow, phigh, b) != 0)
5185 return 1;
5186 if (sa ^ sb) {
5187 if (*plow > (1ULL << 63))
5188 return 1;
5189 *plow = - *plow;
5190 } else {
5191 if (*plow >= (1ULL << 63))
5192 return 1;
5193 }
5194 if (sa)
5195 *phigh = - *phigh;
5196 return 0;
5197}
5198
5199void helper_mulq_EAX_T0(target_ulong t0)
5200{
5201 uint64_t r0, r1;
5202
5203 mulu64(&r0, &r1, EAX, t0);
5204 EAX = r0;
5205 EDX = r1;
5206 CC_DST = r0;
5207 CC_SRC = r1;
5208}
5209
5210void helper_imulq_EAX_T0(target_ulong t0)
5211{
5212 uint64_t r0, r1;
5213
5214 muls64(&r0, &r1, EAX, t0);
5215 EAX = r0;
5216 EDX = r1;
5217 CC_DST = r0;
5218 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5219}
5220
5221target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
5222{
5223 uint64_t r0, r1;
5224
5225 muls64(&r0, &r1, t0, t1);
5226 CC_DST = r0;
5227 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5228 return r0;
5229}
5230
5231void helper_divq_EAX(target_ulong t0)
5232{
5233 uint64_t r0, r1;
5234 if (t0 == 0) {
5235 raise_exception(EXCP00_DIVZ);
5236 }
5237 r0 = EAX;
5238 r1 = EDX;
5239 if (div64(&r0, &r1, t0))
5240 raise_exception(EXCP00_DIVZ);
5241 EAX = r0;
5242 EDX = r1;
5243}
5244
5245void helper_idivq_EAX(target_ulong t0)
5246{
5247 uint64_t r0, r1;
5248 if (t0 == 0) {
5249 raise_exception(EXCP00_DIVZ);
5250 }
5251 r0 = EAX;
5252 r1 = EDX;
5253 if (idiv64(&r0, &r1, t0))
5254 raise_exception(EXCP00_DIVZ);
5255 EAX = r0;
5256 EDX = r1;
5257}
5258#endif
5259
5260static void do_hlt(void)
5261{
5262 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
5263 env->halted = 1;
5264 env->exception_index = EXCP_HLT;
5265 cpu_loop_exit();
5266}
5267
5268void helper_hlt(int next_eip_addend)
5269{
5270 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
5271 EIP += next_eip_addend;
5272
5273 do_hlt();
5274}
5275
5276void helper_monitor(target_ulong ptr)
5277{
5278#ifdef VBOX
5279 if ((uint32_t)ECX > 1)
5280 raise_exception(EXCP0D_GPF);
5281#else /* !VBOX */
5282 if ((uint32_t)ECX != 0)
5283 raise_exception(EXCP0D_GPF);
5284#endif /* !VBOX */
5285 /* XXX: store address ? */
5286 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
5287}
5288
5289void helper_mwait(int next_eip_addend)
5290{
5291 if ((uint32_t)ECX != 0)
5292 raise_exception(EXCP0D_GPF);
5293#ifdef VBOX
5294 helper_hlt(next_eip_addend);
5295#else /* !VBOX */
5296 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5297 EIP += next_eip_addend;
5298
5299 /* XXX: not complete but not completely erroneous */
5300 if (env->cpu_index != 0 || env->next_cpu != NULL) {
5301 /* more than one CPU: do not sleep because another CPU may
5302 wake this one */
5303 } else {
5304 do_hlt();
5305 }
5306#endif /* !VBOX */
5307}
5308
5309void helper_debug(void)
5310{
5311 env->exception_index = EXCP_DEBUG;
5312 cpu_loop_exit();
5313}
5314
5315void helper_raise_interrupt(int intno, int next_eip_addend)
5316{
5317 raise_interrupt(intno, 1, 0, next_eip_addend);
5318}
5319
5320void helper_raise_exception(int exception_index)
5321{
5322 raise_exception(exception_index);
5323}
5324
5325void helper_cli(void)
5326{
5327 env->eflags &= ~IF_MASK;
5328}
5329
5330void helper_sti(void)
5331{
5332 env->eflags |= IF_MASK;
5333}
5334
5335#ifdef VBOX
5336void helper_cli_vme(void)
5337{
5338 env->eflags &= ~VIF_MASK;
5339}
5340
5341void helper_sti_vme(void)
5342{
5343 /* First check, then change eflags according to the AMD manual */
5344 if (env->eflags & VIP_MASK) {
5345 raise_exception(EXCP0D_GPF);
5346 }
5347 env->eflags |= VIF_MASK;
5348}
5349#endif /* VBOX */
5350
5351#if 0
5352/* vm86plus instructions */
5353void helper_cli_vm(void)
5354{
5355 env->eflags &= ~VIF_MASK;
5356}
5357
5358void helper_sti_vm(void)
5359{
5360 env->eflags |= VIF_MASK;
5361 if (env->eflags & VIP_MASK) {
5362 raise_exception(EXCP0D_GPF);
5363 }
5364}
5365#endif
5366
5367void helper_set_inhibit_irq(void)
5368{
5369 env->hflags |= HF_INHIBIT_IRQ_MASK;
5370}
5371
5372void helper_reset_inhibit_irq(void)
5373{
5374 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5375}
5376
5377void helper_boundw(target_ulong a0, int v)
5378{
5379 int low, high;
5380 low = ldsw(a0);
5381 high = ldsw(a0 + 2);
5382 v = (int16_t)v;
5383 if (v < low || v > high) {
5384 raise_exception(EXCP05_BOUND);
5385 }
5386}
5387
5388void helper_boundl(target_ulong a0, int v)
5389{
5390 int low, high;
5391 low = ldl(a0);
5392 high = ldl(a0 + 4);
5393 if (v < low || v > high) {
5394 raise_exception(EXCP05_BOUND);
5395 }
5396}
5397
5398static float approx_rsqrt(float a)
5399{
5400 return 1.0 / sqrt(a);
5401}
5402
5403static float approx_rcp(float a)
5404{
5405 return 1.0 / a;
5406}
5407
5408#if !defined(CONFIG_USER_ONLY)
5409
5410#define MMUSUFFIX _mmu
5411
5412#define SHIFT 0
5413#include "softmmu_template.h"
5414
5415#define SHIFT 1
5416#include "softmmu_template.h"
5417
5418#define SHIFT 2
5419#include "softmmu_template.h"
5420
5421#define SHIFT 3
5422#include "softmmu_template.h"
5423
5424#endif
5425
5426#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
5427 /* This code assumes a real physical address always fits into a host CPU register,
5428 which is wrong in general, but true for our current use cases. */
5429RTCCUINTREG REGPARM __ldb_vbox_phys(RTCCUINTREG addr)
5430{
5431 return remR3PhysReadS8(addr);
5432}
5433RTCCUINTREG REGPARM __ldub_vbox_phys(RTCCUINTREG addr)
5434{
5435 return remR3PhysReadU8(addr);
5436}
5437void REGPARM __stb_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5438{
5439 remR3PhysWriteU8(addr, val);
5440}
5441RTCCUINTREG REGPARM __ldw_vbox_phys(RTCCUINTREG addr)
5442{
5443 return remR3PhysReadS16(addr);
5444}
5445RTCCUINTREG REGPARM __lduw_vbox_phys(RTCCUINTREG addr)
5446{
5447 return remR3PhysReadU16(addr);
5448}
5449void REGPARM __stw_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5450{
5451 remR3PhysWriteU16(addr, val);
5452}
5453RTCCUINTREG REGPARM __ldl_vbox_phys(RTCCUINTREG addr)
5454{
5455 return remR3PhysReadS32(addr);
5456}
5457RTCCUINTREG REGPARM __ldul_vbox_phys(RTCCUINTREG addr)
5458{
5459 return remR3PhysReadU32(addr);
5460}
5461void REGPARM __stl_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5462{
5463 remR3PhysWriteU32(addr, val);
5464}
5465uint64_t REGPARM __ldq_vbox_phys(RTCCUINTREG addr)
5466{
5467 return remR3PhysReadU64(addr);
5468}
5469void REGPARM __stq_vbox_phys(RTCCUINTREG addr, uint64_t val)
5470{
5471 remR3PhysWriteU64(addr, val);
5472}
5473#endif /* VBOX */
5474
5475#if !defined(CONFIG_USER_ONLY)
5476/* try to fill the TLB and return an exception if error. If retaddr is
5477 NULL, it means that the function was called in C code (i.e. not
5478 from generated code or from helper.c) */
5479/* XXX: fix it to restore all registers */
5480void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5481{
5482 TranslationBlock *tb;
5483 int ret;
5484 unsigned long pc;
5485 CPUX86State *saved_env;
5486
5487 /* XXX: hack to restore env in all cases, even if not called from
5488 generated code */
5489 saved_env = env;
5490 env = cpu_single_env;
5491
5492 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
5493 if (ret) {
5494 if (retaddr) {
5495 /* now we have a real cpu fault */
5496 pc = (unsigned long)retaddr;
5497 tb = tb_find_pc(pc);
5498 if (tb) {
5499 /* the PC is inside the translated code. It means that we have
5500 a virtual CPU fault */
5501 cpu_restore_state(tb, env, pc, NULL);
5502 }
5503 }
5504 raise_exception_err(env->exception_index, env->error_code);
5505 }
5506 env = saved_env;
5507}
5508#endif
5509
5510#ifdef VBOX
5511
5512/**
5513 * Correctly computes the eflags.
5514 * @returns eflags.
5515 * @param env1 CPU environment.
5516 */
5517uint32_t raw_compute_eflags(CPUX86State *env1)
5518{
5519 CPUX86State *savedenv = env;
5520 uint32_t efl;
5521 env = env1;
5522 efl = compute_eflags();
5523 env = savedenv;
5524 return efl;
5525}
5526
5527/**
5528 * Reads byte from virtual address in guest memory area.
5529 * XXX: is it working for any addresses? swapped out pages?
5530 * @returns read data byte.
5531 * @param env1 CPU environment.
5532 * @param pvAddr GC Virtual address.
5533 */
5534uint8_t read_byte(CPUX86State *env1, target_ulong addr)
5535{
5536 CPUX86State *savedenv = env;
5537 uint8_t u8;
5538 env = env1;
5539 u8 = ldub_kernel(addr);
5540 env = savedenv;
5541 return u8;
5542}
5543
5544/**
5545 * Reads word from virtual address in guest memory area.
5546 * XXX: is it working for any addresses? swapped out pages?
5547 * @returns read data word.
5548 * @param env1 CPU environment.
5549 * @param pvAddr GC Virtual address.
5550 */
5551uint16_t read_word(CPUX86State *env1, target_ulong addr)
5552{
5553 CPUX86State *savedenv = env;
5554 uint16_t u16;
5555 env = env1;
5556 u16 = lduw_kernel(addr);
5557 env = savedenv;
5558 return u16;
5559}
5560
5561/**
5562 * Reads dword from virtual address in guest memory area.
5563 * XXX: is it working for any addresses? swapped out pages?
5564 * @returns read data dword.
5565 * @param env1 CPU environment.
5566 * @param pvAddr GC Virtual address.
5567 */
5568uint32_t read_dword(CPUX86State *env1, target_ulong addr)
5569{
5570 CPUX86State *savedenv = env;
5571 uint32_t u32;
5572 env = env1;
5573 u32 = ldl_kernel(addr);
5574 env = savedenv;
5575 return u32;
5576}
5577
5578/**
5579 * Writes byte to virtual address in guest memory area.
5580 * XXX: is it working for any addresses? swapped out pages?
5582 * @param env1 CPU environment.
5583 * @param pvAddr GC Virtual address.
5584 * @param val byte value
5585 */
5586void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
5587{
5588 CPUX86State *savedenv = env;
5589 env = env1;
5590 stb(addr, val);
5591 env = savedenv;
5592}
5593
5594void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5595{
5596 CPUX86State *savedenv = env;
5597 env = env1;
5598 stw(addr, val);
5599 env = savedenv;
5600}
5601
5602void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5603{
5604 CPUX86State *savedenv = env;
5605 env = env1;
5606 stl(addr, val);
5607 env = savedenv;
5608}
5609
5610/**
5611 * Correctly loads selector into segment register with updating internal
5612 * qemu data/caches.
5613 * @param env1 CPU environment.
5614 * @param seg_reg Segment register.
5615 * @param selector Selector to load.
5616 */
5617void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5618{
5619 CPUX86State *savedenv = env;
5620#ifdef FORCE_SEGMENT_SYNC
5621 jmp_buf old_buf;
5622#endif
5623
5624 env = env1;
5625
5626 if ( env->eflags & X86_EFL_VM
5627 || !(env->cr[0] & X86_CR0_PE))
5628 {
5629 load_seg_vm(seg_reg, selector);
5630
5631 env = savedenv;
5632
5633 /* Successful sync. */
5634 env1->segs[seg_reg].newselector = 0;
5635 }
5636 else
5637 {
5638 /* For some reason this works even without saving/restoring the jump buffer, and since this code
5639 is time critical - let's not do that. */
5640#ifdef FORCE_SEGMENT_SYNC
5641 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5642#endif
5643 if (setjmp(env1->jmp_env) == 0)
5644 {
5645 if (seg_reg == R_CS)
5646 {
5647 uint32_t e1, e2;
5648 e1 = e2 = 0;
5649 load_segment(&e1, &e2, selector);
5650 cpu_x86_load_seg_cache(env, R_CS, selector,
5651 get_seg_base(e1, e2),
5652 get_seg_limit(e1, e2),
5653 e2);
5654 }
5655 else
5656 helper_load_seg(seg_reg, selector);
5657 /* We used to use tss_load_seg(seg_reg, selector); which, for some reason, ignored
5658 loading of 0 selectors, which in turn led to subtle problems like #3588 */
5659
5660 env = savedenv;
5661
5662 /* Successful sync. */
5663 env1->segs[seg_reg].newselector = 0;
5664 }
5665 else
5666 {
5667 env = savedenv;
5668
5669 /* Postpone sync until the guest uses the selector. */
5670 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5671 env1->segs[seg_reg].newselector = selector;
5672 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5673 env1->exception_index = -1;
5674 env1->error_code = 0;
5675 env1->old_exception = -1;
5676 }
5677#ifdef FORCE_SEGMENT_SYNC
5678 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5679#endif
5680 }
5681
5682}
5683
5684DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5685{
5686 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
5687}
5688
5689
5690int emulate_single_instr(CPUX86State *env1)
5691{
5692 TranslationBlock *tb;
5693 TranslationBlock *current;
5694 int flags;
5695 uint8_t *tc_ptr;
5696 target_ulong old_eip;
5697
5698 /* ensures env is loaded! */
5699 CPUX86State *savedenv = env;
5700 env = env1;
5701
5702 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
5703
5704 current = env->current_tb;
5705 env->current_tb = NULL;
5706 flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
5707
5708 /*
5709 * Translate only one instruction.
5710 */
5711 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
5712 tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
5713 env->segs[R_CS].base, flags, 0);
5714
5715 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
5716
5717
5718 /* tb_link_phys: */
5719 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
5720 tb->jmp_next[0] = NULL;
5721 tb->jmp_next[1] = NULL;
5722 Assert(tb->jmp_next[0] == NULL);
5723 Assert(tb->jmp_next[1] == NULL);
5724 if (tb->tb_next_offset[0] != 0xffff)
5725 tb_reset_jump(tb, 0);
5726 if (tb->tb_next_offset[1] != 0xffff)
5727 tb_reset_jump(tb, 1);
5728
5729 /*
5730 * Execute it using emulation
5731 */
5732 old_eip = env->eip;
5733 env->current_tb = tb;
5734
5735 /*
5736 * eip remains the same for repeated instructions; no idea why qemu doesn't do a jump inside the generated code.
5737 * Perhaps not a very safe hack.
5738 */
5739 while(old_eip == env->eip)
5740 {
5741 tc_ptr = tb->tc_ptr;
5742
5743#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
5744 int fake_ret;
5745 tcg_qemu_tb_exec(tc_ptr, fake_ret);
5746#else
5747 tcg_qemu_tb_exec(tc_ptr);
5748#endif
5749 /*
5750 * Exit once we detect an external interrupt and interrupts are enabled
5751 */
5752 if( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT|CPU_INTERRUPT_EXTERNAL_TIMER)) ||
5753 ( (env->eflags & IF_MASK) &&
5754 !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
5755 (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) ) )
5756 {
5757 break;
5758 }
5759 }
5760 env->current_tb = current;
5761
5762 tb_phys_invalidate(tb, -1);
5763 tb_free(tb);
5764/*
5765 Assert(tb->tb_next_offset[0] == 0xffff);
5766 Assert(tb->tb_next_offset[1] == 0xffff);
5767 Assert(tb->tb_next[0] == 0xffff);
5768 Assert(tb->tb_next[1] == 0xffff);
5769 Assert(tb->jmp_next[0] == NULL);
5770 Assert(tb->jmp_next[1] == NULL);
5771 Assert(tb->jmp_first == NULL); */
5772
5773 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
5774
5775 /*
5776 * Execute the next instruction when we encounter instruction fusing.
5777 */
5778 if (env->hflags & HF_INHIBIT_IRQ_MASK)
5779 {
5780 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
5781 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5782 emulate_single_instr(env);
5783 }
5784
5785 env = savedenv;
5786 return 0;
5787}
5788
5789/**
5790 * Correctly loads a new ldtr selector.
5791 *
5792 * @param env1 CPU environment.
5793 * @param selector Selector to load.
5794 */
5795void sync_ldtr(CPUX86State *env1, int selector)
5796{
5797 CPUX86State *saved_env = env;
5798 if (setjmp(env1->jmp_env) == 0)
5799 {
5800 env = env1;
5801 helper_lldt(selector);
5802 env = saved_env;
5803 }
5804 else
5805 {
5806 env = saved_env;
5807#ifdef VBOX_STRICT
5808 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
5809#endif
5810 }
5811}
5812
5813int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
5814 uint32_t *esp_ptr, int dpl)
5815{
5816 int type, index, shift;
5817
5818 CPUX86State *savedenv = env;
5819 env = env1;
5820
5821 if (!(env->tr.flags & DESC_P_MASK))
5822 cpu_abort(env, "invalid tss");
5823 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
5824 if ((type & 7) != 1)
5825 cpu_abort(env, "invalid tss type %d", type);
5826 shift = type >> 3;
5827 index = (dpl * 4 + 2) << shift;
5828 if (index + (4 << shift) - 1 > env->tr.limit)
5829 {
5830 env = savedenv;
5831 return 0;
5832 }
5833 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
5834
5835 if (shift == 0) {
5836 *esp_ptr = lduw_kernel(env->tr.base + index);
5837 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
5838 } else {
5839 *esp_ptr = ldl_kernel(env->tr.base + index);
5840 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
5841 }
5842
5843 env = savedenv;
5844 return 1;
5845}
5846
5847//*****************************************************************************
5848// Needs to be at the bottom of the file (overriding macros)
5849
5850static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
5851{
5852 return *(CPU86_LDouble *)ptr;
5853}
5854
5855static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5856{
5857 *(CPU86_LDouble *)ptr = f;
5858}
5859
5860#undef stw
5861#undef stl
5862#undef stq
5863#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
5864#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
5865#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
5866
5867//*****************************************************************************
5868void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
5869{
5870 int fpus, fptag, i, nb_xmm_regs;
5871 CPU86_LDouble tmp;
5872 uint8_t *addr;
5873 int data64 = !!(env->hflags & HF_LMA_MASK);
5874
5875 if (env->cpuid_features & CPUID_FXSR)
5876 {
5877 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5878 fptag = 0;
5879 for(i = 0; i < 8; i++) {
5880 fptag |= (env->fptags[i] << i);
5881 }
5882 stw(ptr, env->fpuc);
5883 stw(ptr + 2, fpus);
5884 stw(ptr + 4, fptag ^ 0xff);
5885
5886 addr = ptr + 0x20;
5887 for(i = 0;i < 8; i++) {
5888 tmp = ST(i);
5889 helper_fstt_raw(tmp, addr);
5890 addr += 16;
5891 }
5892
5893 if (env->cr[4] & CR4_OSFXSR_MASK) {
5894 /* XXX: finish it */
5895 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5896 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5897 nb_xmm_regs = 8 << data64;
5898 addr = ptr + 0xa0;
5899 for(i = 0; i < nb_xmm_regs; i++) {
5900#if __GNUC__ < 4
5901 stq(addr, env->xmm_regs[i].XMM_Q(0));
5902 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5903#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5904 stl(addr, env->xmm_regs[i].XMM_L(0));
5905 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
5906 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
5907 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
5908#endif
5909 addr += 16;
5910 }
5911 }
5912 }
5913 else
5914 {
5915 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
5916 int fptag;
5917
5918 fp->FCW = env->fpuc;
5919 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5920 fptag = 0;
5921 for (i=7; i>=0; i--) {
5922 fptag <<= 2;
5923 if (env->fptags[i]) {
5924 fptag |= 3;
5925 } else {
5926 /* the FPU automatically computes it */
5927 }
5928 }
5929 fp->FTW = fptag;
5930
5931 for(i = 0;i < 8; i++) {
5932 tmp = ST(i);
5933 helper_fstt_raw(tmp, &fp->regs[i].au8[0]);
5934 }
5935 }
5936}
5937
5938//*****************************************************************************
5939#undef lduw
5940#undef ldl
5941#undef ldq
5942#define lduw(a) *(uint16_t *)(a)
5943#define ldl(a) *(uint32_t *)(a)
5944#define ldq(a) *(uint64_t *)(a)
5945//*****************************************************************************
5946void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
5947{
5948 int i, fpus, fptag, nb_xmm_regs;
5949 CPU86_LDouble tmp;
5950 uint8_t *addr;
5951 int data64 = !!(env->hflags & HF_LMA_MASK); /* don't use HF_CS64_MASK here as cs hasn't been synced when this function is called. */
5952
5953 if (env->cpuid_features & CPUID_FXSR)
5954 {
5955 env->fpuc = lduw(ptr);
5956 fpus = lduw(ptr + 2);
5957 fptag = lduw(ptr + 4);
5958 env->fpstt = (fpus >> 11) & 7;
5959 env->fpus = fpus & ~0x3800;
5960 fptag ^= 0xff;
5961 for(i = 0;i < 8; i++) {
5962 env->fptags[i] = ((fptag >> i) & 1);
5963 }
5964
5965 addr = ptr + 0x20;
5966 for(i = 0;i < 8; i++) {
5967 tmp = helper_fldt_raw(addr);
5968 ST(i) = tmp;
5969 addr += 16;
5970 }
5971
5972 if (env->cr[4] & CR4_OSFXSR_MASK) {
5973 /* XXX: finish it, endianness */
5974 env->mxcsr = ldl(ptr + 0x18);
5975 //ldl(ptr + 0x1c);
5976 nb_xmm_regs = 8 << data64;
5977 addr = ptr + 0xa0;
5978 for(i = 0; i < nb_xmm_regs; i++) {
5979#if HC_ARCH_BITS == 32
5980 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
5981 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5982 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5983 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5984 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5985#else
5986 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5987 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5988#endif
5989 addr += 16;
5990 }
5991 }
5992 }
5993 else
5994 {
5995 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
5996 int fptag, j;
5997
5998 env->fpuc = fp->FCW;
5999 env->fpstt = (fp->FSW >> 11) & 7;
6000 env->fpus = fp->FSW & ~0x3800;
6001 fptag = fp->FTW;
6002 for(i = 0;i < 8; i++) {
6003 env->fptags[i] = ((fptag & 3) == 3);
6004 fptag >>= 2;
6005 }
6006 j = env->fpstt;
6007 for(i = 0;i < 8; i++) {
6008 tmp = helper_fldt_raw(&fp->regs[i].au8[0]);
6009 ST(i) = tmp;
6010 }
6011 }
6012}
6013//*****************************************************************************
6014//*****************************************************************************
6015
6016#endif /* VBOX */
6017
6018/* Secure Virtual Machine helpers */
6019
6020#if defined(CONFIG_USER_ONLY)
6021
6022void helper_vmrun(int aflag, int next_eip_addend)
6023{
6024}
6025void helper_vmmcall(void)
6026{
6027}
6028void helper_vmload(int aflag)
6029{
6030}
6031void helper_vmsave(int aflag)
6032{
6033}
6034void helper_stgi(void)
6035{
6036}
6037void helper_clgi(void)
6038{
6039}
6040void helper_skinit(void)
6041{
6042}
6043void helper_invlpga(int aflag)
6044{
6045}
6046void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6047{
6048}
6049void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6050{
6051}
6052
6053void helper_svm_check_io(uint32_t port, uint32_t param,
6054 uint32_t next_eip_addend)
6055{
6056}
6057#else
6058
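/* The VMCB keeps segment attributes in a packed 12-bit form (descriptor bits
   8..15 in bits 0..7 and bits 20..23 in bits 8..11); these helpers convert
   between that layout and the flat 'flags' format of SegmentCache. */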
6059static inline void svm_save_seg(target_phys_addr_t addr,
6060 const SegmentCache *sc)
6061{
6062 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6063 sc->selector);
6064 stq_phys(addr + offsetof(struct vmcb_seg, base),
6065 sc->base);
6066 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6067 sc->limit);
6068 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6069 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6070}
6071
6072static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6073{
6074 unsigned int flags;
6075
6076 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6077 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6078 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6079 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6080 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6081}
6082
6083static inline void svm_load_seg_cache(target_phys_addr_t addr,
6084 CPUState *env, int seg_reg)
6085{
6086 SegmentCache sc1, *sc = &sc1;
6087 svm_load_seg(addr, sc);
6088 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6089 sc->base, sc->limit, sc->flags);
6090}
6091
6092void helper_vmrun(int aflag, int next_eip_addend)
6093{
6094 target_ulong addr;
6095 uint32_t event_inj;
6096 uint32_t int_ctl;
6097
6098 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
6099
6100 if (aflag == 2)
6101 addr = EAX;
6102 else
6103 addr = (uint32_t)EAX;
6104
6105 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
6106
6107 env->vm_vmcb = addr;
6108
6109 /* save the current CPU state in the hsave page */
6110 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6111 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6112
6113 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6114 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6115
6116 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
6117 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
6118 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
6119 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
6120 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
6121 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
6122
6123 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
6124 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
6125
6126 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
6127 &env->segs[R_ES]);
6128 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
6129 &env->segs[R_CS]);
6130 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
6131 &env->segs[R_SS]);
6132 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
6133 &env->segs[R_DS]);
6134
6135 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
6136 EIP + next_eip_addend);
6137 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
6138 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
6139
6140 /* load the interception bitmaps so we do not need to access the
6141 vmcb in svm mode */
6142 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
6143 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
6144 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
6145 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
6146 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
6147 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
6148
6149 /* enable intercepts */
6150 env->hflags |= HF_SVMI_MASK;
6151
6152 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
6153
6154 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
6155 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
6156
6157 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
6158 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
6159
6160 /* clear exit_info_2 so we behave like the real hardware */
6161 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6162
6163 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
6164 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
6165 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
6166 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
6167 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6168 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6169 if (int_ctl & V_INTR_MASKING_MASK) {
6170 env->v_tpr = int_ctl & V_TPR_MASK;
6171 env->hflags2 |= HF2_VINTR_MASK;
6172 if (env->eflags & IF_MASK)
6173 env->hflags2 |= HF2_HIF_MASK;
6174 }
6175
6176 cpu_load_efer(env,
6177 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6178 env->eflags = 0;
6179 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
6180 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6181 CC_OP = CC_OP_EFLAGS;
6182
6183 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
6184 env, R_ES);
6185 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6186 env, R_CS);
6187 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6188 env, R_SS);
6189 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6190 env, R_DS);
6191
6192 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
6193 env->eip = EIP;
6194 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
6195 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
6196 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
6197 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6198 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
6199
6200 /* FIXME: guest state consistency checks */
6201
6202 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6203 case TLB_CONTROL_DO_NOTHING:
6204 break;
6205 case TLB_CONTROL_FLUSH_ALL_ASID:
6206 /* FIXME: this is not 100% correct but should work for now */
6207 tlb_flush(env, 1);
6208 break;
6209 }
6210
6211 env->hflags2 |= HF2_GIF_MASK;
6212
6213 if (int_ctl & V_IRQ_MASK) {
6214 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
6215 }
6216
6217 /* maybe we need to inject an event */
6218 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
6219 if (event_inj & SVM_EVTINJ_VALID) {
6220 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
6221 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
6222 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
6223 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
6224
6225 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
6226 /* FIXME: need to implement valid_err */
6227 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
6228 case SVM_EVTINJ_TYPE_INTR:
6229 env->exception_index = vector;
6230 env->error_code = event_inj_err;
6231 env->exception_is_int = 0;
6232 env->exception_next_eip = -1;
6233 qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
6234 /* XXX: is it always correct? */
6235 do_interrupt(vector, 0, 0, 0, 1);
6236 break;
6237 case SVM_EVTINJ_TYPE_NMI:
6238 env->exception_index = EXCP02_NMI;
6239 env->error_code = event_inj_err;
6240 env->exception_is_int = 0;
6241 env->exception_next_eip = EIP;
6242 qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
6243 cpu_loop_exit();
6244 break;
6245 case SVM_EVTINJ_TYPE_EXEPT:
6246 env->exception_index = vector;
6247 env->error_code = event_inj_err;
6248 env->exception_is_int = 0;
6249 env->exception_next_eip = -1;
6250 qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
6251 cpu_loop_exit();
6252 break;
6253 case SVM_EVTINJ_TYPE_SOFT:
6254 env->exception_index = vector;
6255 env->error_code = event_inj_err;
6256 env->exception_is_int = 1;
6257 env->exception_next_eip = EIP;
6258 qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
6259 cpu_loop_exit();
6260 break;
6261 }
6262 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
6263 }
6264}
6265
6266void helper_vmmcall(void)
6267{
6268 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
6269 raise_exception(EXCP06_ILLOP);
6270}
6271
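/* VMLOAD/VMSAVE transfer the guest state that VMRUN/#VMEXIT do not handle:
   FS, GS, TR and LDTR including their hidden parts, KernelGSBase,
   STAR/LSTAR/CSTAR/SFMASK and the SYSENTER MSRs. */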
6272void helper_vmload(int aflag)
6273{
6274 target_ulong addr;
6275 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
6276
6277 if (aflag == 2)
6278 addr = EAX;
6279 else
6280 addr = (uint32_t)EAX;
6281
6282 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6283 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6284 env->segs[R_FS].base);
6285
6286 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
6287 env, R_FS);
6288 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
6289 env, R_GS);
6290 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
6291 &env->tr);
6292 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
6293 &env->ldt);
6294
6295#ifdef TARGET_X86_64
6296 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
6297 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
6298 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
6299 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
6300#endif
6301 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
6302 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
6303 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
6304 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
6305}
6306
6307void helper_vmsave(int aflag)
6308{
6309 target_ulong addr;
6310 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
6311
6312 if (aflag == 2)
6313 addr = EAX;
6314 else
6315 addr = (uint32_t)EAX;
6316
6317 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6318 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6319 env->segs[R_FS].base);
6320
6321 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
6322 &env->segs[R_FS]);
6323 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
6324 &env->segs[R_GS]);
6325 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
6326 &env->tr);
6327 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
6328 &env->ldt);
6329
6330#ifdef TARGET_X86_64
6331 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
6332 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
6333 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
6334 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
6335#endif
6336 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
6337 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
6338 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
6339 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
6340}
6341
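/* STGI/CLGI set and clear the Global Interrupt Flag; while GIF is clear all
   interrupt sources (including NMI and SMI) are held pending. */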
6342void helper_stgi(void)
6343{
6344 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
6345 env->hflags2 |= HF2_GIF_MASK;
6346}
6347
6348void helper_clgi(void)
6349{
6350 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
6351 env->hflags2 &= ~HF2_GIF_MASK;
6352}
6353
6354void helper_skinit(void)
6355{
6356 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
6357 /* XXX: not implemented */
6358 raise_exception(EXCP06_ILLOP);
6359}
6360
6361void helper_invlpga(int aflag)
6362{
6363 target_ulong addr;
6364 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6365
6366 if (aflag == 2)
6367 addr = EAX;
6368 else
6369 addr = (uint32_t)EAX;
6370
6371 /* XXX: could use the ASID to see whether the flush is actually
6372 needed */
6373 tlb_flush_page(env, addr);
6374}
6375
6376void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6377{
6378 if (likely(!(env->hflags & HF_SVMI_MASK)))
6379 return;
6380#ifndef VBOX
6381 switch(type) {
6382 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
6383 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
6384 helper_vmexit(type, param);
6385 }
6386 break;
6387 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
6388 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
6389 helper_vmexit(type, param);
6390 }
6391 break;
6392 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
6393 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
6394 helper_vmexit(type, param);
6395 }
6396 break;
6397 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
6398 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
6399 helper_vmexit(type, param);
6400 }
6401 break;
6402 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
6403 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
6404 helper_vmexit(type, param);
6405 }
6406 break;
6407 case SVM_EXIT_MSR:
6408 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
6409 /* FIXME: this should be read in at vmrun (faster this way?) */
6410 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
6411 uint32_t t0, t1;
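/* The MSR permission bitmap uses 2 bits per MSR (read bit, then write bit):
   MSRs 0x0-0x1fff map to its first 2K bytes, 0xc0000000-0xc0001fff to the
   second and 0xc0010000-0xc0011fff to the third. t1 becomes the byte
   offset into the map, t0 the bit offset within that byte. */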
6412 switch((uint32_t)ECX) {
6413 case 0 ... 0x1fff:
6414 t0 = (ECX * 2) % 8;
6415 t1 = (ECX * 2) / 8;
6416 break;
6417 case 0xc0000000 ... 0xc0001fff:
6418 t0 = (8192 + ECX - 0xc0000000) * 2;
6419 t1 = (t0 / 8);
6420 t0 %= 8;
6421 break;
6422 case 0xc0010000 ... 0xc0011fff:
6423 t0 = (16384 + ECX - 0xc0010000) * 2;
6424 t1 = (t0 / 8);
6425 t0 %= 8;
6426 break;
6427 default:
6428 helper_vmexit(type, param);
6429 t0 = 0;
6430 t1 = 0;
6431 break;
6432 }
6433 if (ldub_phys(addr + t1) & ((1 << param) << t0))
6434 helper_vmexit(type, param);
6435 }
6436 break;
6437 default:
6438 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
6439 helper_vmexit(type, param);
6440 }
6441 break;
6442 }
6443#else /* VBOX */
6444 AssertMsgFailed(("We shouldn't be here, HWACCM supported differently!"));
6445#endif /* VBOX */
6446}
6447
6448void helper_svm_check_io(uint32_t port, uint32_t param,
6449 uint32_t next_eip_addend)
6450{
6451 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
6452 /* FIXME: this should be read in at vmrun (faster this way?) */
6453 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
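/* The I/O permission map has one bit per port. Bits 4-6 of param give the
   access size in bytes, so build a mask of that many consecutive bits and
   test them all starting at the port's bit position. */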
6454 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
6455 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
6456 /* next EIP */
6457 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
6458 env->eip + next_eip_addend);
6459 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
6460 }
6461 }
6462}
6463
6464/* Note: currently only 32 bits of exit_code are used */
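/* #VMEXIT: write the current guest state plus exit_code/exit_info_1 back
   into the VMCB, reload the host state VMRUN stashed in vm_hsave, clear GIF
   and leave the guest via cpu_loop_exit(). */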
6465void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6466{
6467 uint32_t int_ctl;
6468
6469 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6470 exit_code, exit_info_1,
6471 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6472 EIP);
6473
6474 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
6475 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6476 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6477 } else {
6478 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6479 }
6480
6481 /* Save the VM state in the vmcb */
6482 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6483 &env->segs[R_ES]);
6484 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6485 &env->segs[R_CS]);
6486 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6487 &env->segs[R_SS]);
6488 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6489 &env->segs[R_DS]);
6490
6491 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6492 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6493
6494 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6495 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6496
6497 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6498 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6499 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6500 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6501 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6502
6503 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6504 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6505 int_ctl |= env->v_tpr & V_TPR_MASK;
6506 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6507 int_ctl |= V_IRQ_MASK;
6508 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6509
6510 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6511 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6512 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6513 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6514 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6515 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6516 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6517
6518 /* Reload the host state from vm_hsave */
6519 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6520 env->hflags &= ~HF_SVMI_MASK;
6521 env->intercept = 0;
6522 env->intercept_exceptions = 0;
6523 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6524 env->tsc_offset = 0;
6525
6526 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6527 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6528
6529 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6530 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6531
6532 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6533 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6534 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6535 /* we need to set EFER after the CRs so the hidden flags get
6536 set properly */
6537 cpu_load_efer(env,
6538 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6539 env->eflags = 0;
6540 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6541 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6542 CC_OP = CC_OP_EFLAGS;
6543
6544 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6545 env, R_ES);
6546 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6547 env, R_CS);
6548 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6549 env, R_SS);
6550 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6551 env, R_DS);
6552
6553 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6554 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6555 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6556
6557 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6558 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6559
6560 /* other setups */
6561 cpu_x86_set_cpl(env, 0);
6562 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6563 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6564
6565 env->hflags2 &= ~HF2_GIF_MASK;
6566 /* FIXME: Resets the current ASID register to zero (host ASID). */
6567
6568 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6569
6570 /* Clears the TSC_OFFSET inside the processor. */
6571
6572 /* If the host is in PAE mode, the processor reloads the host's PDPEs
6573 from the page table indicated by the host's CR3. If the PDPEs contain
6574 illegal state, the processor causes a shutdown. */
6575
6576 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6577 env->cr[0] |= CR0_PE_MASK;
6578 env->eflags &= ~VM_MASK;
6579
6580 /* Disables all breakpoints in the host DR7 register. */
6581
6582 /* Checks the reloaded host state for consistency. */
6583
6584 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
6585 host's code segment or non-canonical (in the case of long mode), a
6586 #GP fault is delivered inside the host. */
6587
6588 /* remove any pending exception */
6589 env->exception_index = -1;
6590 env->error_code = 0;
6591 env->old_exception = -1;
6592
6593 cpu_loop_exit();
6594}
6595
6596#endif
6597
6598/* MMX/SSE */
6599/* XXX: optimize by storing fpstt and fptags in the static cpu state */
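/* fptags[i] != 0 marks FP/MMX register i as empty. Entering MMX mode resets
   the stack top and marks all eight registers valid; EMMS marks them all
   empty again. */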
6600void helper_enter_mmx(void)
6601{
6602 env->fpstt = 0;
6603 *(uint32_t *)(env->fptags) = 0;
6604 *(uint32_t *)(env->fptags + 4) = 0;
6605}
6606
6607void helper_emms(void)
6608{
6609 /* set to empty state */
6610 *(uint32_t *)(env->fptags) = 0x01010101;
6611 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6612}
6613
6614/* XXX: suppress */
6615void helper_movq(void *d, void *s)
6616{
6617 *(uint64_t *)d = *(uint64_t *)s;
6618}
6619
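/* ops_sse.h is included twice: SHIFT 0 instantiates the 64-bit MMX helpers,
   SHIFT 1 the 128-bit SSE ones. helper_template.h is likewise instantiated
   once per operand width (byte/word/long, plus quad on TARGET_X86_64). */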
6620#define SHIFT 0
6621#include "ops_sse.h"
6622
6623#define SHIFT 1
6624#include "ops_sse.h"
6625
6626#define SHIFT 0
6627#include "helper_template.h"
6628#undef SHIFT
6629
6630#define SHIFT 1
6631#include "helper_template.h"
6632#undef SHIFT
6633
6634#define SHIFT 2
6635#include "helper_template.h"
6636#undef SHIFT
6637
6638#ifdef TARGET_X86_64
6639
6640#define SHIFT 3
6641#include "helper_template.h"
6642#undef SHIFT
6643
6644#endif
6645
6646/* bit operations */
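/* The translator is expected to branch around these helpers when the source
   operand is zero (BSF/BSR then only report ZF), so the loops below always
   terminate. */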
6647target_ulong helper_bsf(target_ulong t0)
6648{
6649 int count;
6650 target_ulong res;
6651
6652 res = t0;
6653 count = 0;
6654 while ((res & 1) == 0) {
6655 count++;
6656 res >>= 1;
6657 }
6658 return count;
6659}
6660
6661target_ulong helper_bsr(target_ulong t0)
6662{
6663 int count;
6664 target_ulong res, mask;
6665
6666 res = t0;
6667 count = TARGET_LONG_BITS - 1;
6668 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
6669 while ((res & mask) == 0) {
6670 count--;
6671 res <<= 1;
6672 }
6673 return count;
6674}
6675
6676
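/* Lazy condition codes: CC_OP records the last flag-setting operation and
   CC_SRC/CC_DST hold its operands/result. helper_cc_compute_all()
   reconstructs the full arithmetic flags on demand, helper_cc_compute_c()
   only CF, which is why several operand sizes below can share one carry
   routine (e.g. INC/DEC keep the unchanged CF in CC_SRC). */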
6677static int compute_all_eflags(void)
6678{
6679 return CC_SRC;
6680}
6681
6682static int compute_c_eflags(void)
6683{
6684 return CC_SRC & CC_C;
6685}
6686
6687uint32_t helper_cc_compute_all(int op)
6688{
6689 switch (op) {
6690 default: /* should never happen */ return 0;
6691
6692 case CC_OP_EFLAGS: return compute_all_eflags();
6693
6694 case CC_OP_MULB: return compute_all_mulb();
6695 case CC_OP_MULW: return compute_all_mulw();
6696 case CC_OP_MULL: return compute_all_mull();
6697
6698 case CC_OP_ADDB: return compute_all_addb();
6699 case CC_OP_ADDW: return compute_all_addw();
6700 case CC_OP_ADDL: return compute_all_addl();
6701
6702 case CC_OP_ADCB: return compute_all_adcb();
6703 case CC_OP_ADCW: return compute_all_adcw();
6704 case CC_OP_ADCL: return compute_all_adcl();
6705
6706 case CC_OP_SUBB: return compute_all_subb();
6707 case CC_OP_SUBW: return compute_all_subw();
6708 case CC_OP_SUBL: return compute_all_subl();
6709
6710 case CC_OP_SBBB: return compute_all_sbbb();
6711 case CC_OP_SBBW: return compute_all_sbbw();
6712 case CC_OP_SBBL: return compute_all_sbbl();
6713
6714 case CC_OP_LOGICB: return compute_all_logicb();
6715 case CC_OP_LOGICW: return compute_all_logicw();
6716 case CC_OP_LOGICL: return compute_all_logicl();
6717
6718 case CC_OP_INCB: return compute_all_incb();
6719 case CC_OP_INCW: return compute_all_incw();
6720 case CC_OP_INCL: return compute_all_incl();
6721
6722 case CC_OP_DECB: return compute_all_decb();
6723 case CC_OP_DECW: return compute_all_decw();
6724 case CC_OP_DECL: return compute_all_decl();
6725
6726 case CC_OP_SHLB: return compute_all_shlb();
6727 case CC_OP_SHLW: return compute_all_shlw();
6728 case CC_OP_SHLL: return compute_all_shll();
6729
6730 case CC_OP_SARB: return compute_all_sarb();
6731 case CC_OP_SARW: return compute_all_sarw();
6732 case CC_OP_SARL: return compute_all_sarl();
6733
6734#ifdef TARGET_X86_64
6735 case CC_OP_MULQ: return compute_all_mulq();
6736
6737 case CC_OP_ADDQ: return compute_all_addq();
6738
6739 case CC_OP_ADCQ: return compute_all_adcq();
6740
6741 case CC_OP_SUBQ: return compute_all_subq();
6742
6743 case CC_OP_SBBQ: return compute_all_sbbq();
6744
6745 case CC_OP_LOGICQ: return compute_all_logicq();
6746
6747 case CC_OP_INCQ: return compute_all_incq();
6748
6749 case CC_OP_DECQ: return compute_all_decq();
6750
6751 case CC_OP_SHLQ: return compute_all_shlq();
6752
6753 case CC_OP_SARQ: return compute_all_sarq();
6754#endif
6755 }
6756}
6757
6758uint32_t helper_cc_compute_c(int op)
6759{
6760 switch (op) {
6761 default: /* should never happen */ return 0;
6762
6763 case CC_OP_EFLAGS: return compute_c_eflags();
6764
6765 case CC_OP_MULB: return compute_c_mull();
6766 case CC_OP_MULW: return compute_c_mull();
6767 case CC_OP_MULL: return compute_c_mull();
6768
6769 case CC_OP_ADDB: return compute_c_addb();
6770 case CC_OP_ADDW: return compute_c_addw();
6771 case CC_OP_ADDL: return compute_c_addl();
6772
6773 case CC_OP_ADCB: return compute_c_adcb();
6774 case CC_OP_ADCW: return compute_c_adcw();
6775 case CC_OP_ADCL: return compute_c_adcl();
6776
6777 case CC_OP_SUBB: return compute_c_subb();
6778 case CC_OP_SUBW: return compute_c_subw();
6779 case CC_OP_SUBL: return compute_c_subl();
6780
6781 case CC_OP_SBBB: return compute_c_sbbb();
6782 case CC_OP_SBBW: return compute_c_sbbw();
6783 case CC_OP_SBBL: return compute_c_sbbl();
6784
6785 case CC_OP_LOGICB: return compute_c_logicb();
6786 case CC_OP_LOGICW: return compute_c_logicw();
6787 case CC_OP_LOGICL: return compute_c_logicl();
6788
6789 case CC_OP_INCB: return compute_c_incl();
6790 case CC_OP_INCW: return compute_c_incl();
6791 case CC_OP_INCL: return compute_c_incl();
6792
6793 case CC_OP_DECB: return compute_c_incl();
6794 case CC_OP_DECW: return compute_c_incl();
6795 case CC_OP_DECL: return compute_c_incl();
6796
6797 case CC_OP_SHLB: return compute_c_shlb();
6798 case CC_OP_SHLW: return compute_c_shlw();
6799 case CC_OP_SHLL: return compute_c_shll();
6800
6801 case CC_OP_SARB: return compute_c_sarl();
6802 case CC_OP_SARW: return compute_c_sarl();
6803 case CC_OP_SARL: return compute_c_sarl();
6804
6805#ifdef TARGET_X86_64
6806 case CC_OP_MULQ: return compute_c_mull();
6807
6808 case CC_OP_ADDQ: return compute_c_addq();
6809
6810 case CC_OP_ADCQ: return compute_c_adcq();
6811
6812 case CC_OP_SUBQ: return compute_c_subq();
6813
6814 case CC_OP_SBBQ: return compute_c_sbbq();
6815
6816 case CC_OP_LOGICQ: return compute_c_logicq();
6817
6818 case CC_OP_INCQ: return compute_c_incl();
6819
6820 case CC_OP_DECQ: return compute_c_incl();
6821
6822 case CC_OP_SHLQ: return compute_c_shlq();
6823
6824 case CC_OP_SARQ: return compute_c_sarl();
6825#endif
6826 }
6827}