VirtualBox

source: vbox/trunk/src/recompiler/target-i386/op_helper.c@37675

Last change on this file since 37675 was 37675, checked in by vboxsync, 14 years ago

rem: Synced with v0.12.5.

  • Property svn:eol-style set to native
File size: 193.3 KB
 
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20/*
21 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
22 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
23 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
24 * a choice of LGPL license versions is made available with the language indicating
25 * that LGPLv2 or any later version may be used, or where a choice of which version
26 * of the LGPL is applied is otherwise unspecified.
27 */
28
29#define CPU_NO_GLOBAL_REGS
30#include "exec.h"
31#include "exec-all.h"
32#include "host-utils.h"
33
34#ifdef VBOX
35# include "qemu-common.h"
36# include <math.h>
37# include "tcg.h"
38#endif /* VBOX */
39//#define DEBUG_PCALL
40
41
42#ifdef DEBUG_PCALL
43# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
44# define LOG_PCALL_STATE(env) \
45 log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
46#else
47# define LOG_PCALL(...) do { } while (0)
48# define LOG_PCALL_STATE(env) do { } while (0)
49#endif
50
51
52#if 0
53#define raise_exception_err(a, b)\
54do {\
55 qemu_log("raise_exception line=%d\n", __LINE__);\
56 (raise_exception_err)(a, b);\
57} while (0)
58#endif
59
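/* PF lookup table: parity_table[b] is CC_P when the byte value 'b' contains an
   even number of set bits, and 0 otherwise. */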
60static const uint8_t parity_table[256] = {
61 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
62 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
63 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
64 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
65 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
66 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
67 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
68 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
73 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
74 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
77 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
78 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
79 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
80 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
81 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
82 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
83 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
84 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
85 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
86 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
87 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
88 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
89 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
90 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
91 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
92 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
93};
94
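/* RCL rotates through CF, so a 16-bit rotate cycles through 17 bit positions and
   an 8-bit rotate through 9; the tables below reduce the 5-bit masked shift count
   modulo 17 and modulo 9 respectively. */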
95/* modulo 17 table */
96static const uint8_t rclw_table[32] = {
97 0, 1, 2, 3, 4, 5, 6, 7,
98 8, 9,10,11,12,13,14,15,
99 16, 0, 1, 2, 3, 4, 5, 6,
100 7, 8, 9,10,11,12,13,14,
101};
102
103/* modulo 9 table */
104static const uint8_t rclb_table[32] = {
105 0, 1, 2, 3, 4, 5, 6, 7,
106 8, 0, 1, 2, 3, 4, 5, 6,
107 7, 8, 0, 1, 2, 3, 4, 5,
108 6, 7, 8, 0, 1, 2, 3, 4,
109};
110
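/* Constants used by the x87 load-constant helpers
   (FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E, FLDL2T). */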
111static const CPU86_LDouble f15rk[7] =
112{
113 0.00000000000000000000L,
114 1.00000000000000000000L,
115 3.14159265358979323851L, /*pi*/
116 0.30102999566398119523L, /*lg2*/
117 0.69314718055994530943L, /*ln2*/
118 1.44269504088896340739L, /*l2e*/
119 3.32192809488736234781L, /*l2t*/
120};
121
122/* broken thread support */
123
124static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
125
126void helper_lock(void)
127{
128 spin_lock(&global_cpu_lock);
129}
130
131void helper_unlock(void)
132{
133 spin_unlock(&global_cpu_lock);
134}
135
136void helper_write_eflags(target_ulong t0, uint32_t update_mask)
137{
138 load_eflags(t0, update_mask);
139}
140
141target_ulong helper_read_eflags(void)
142{
143 uint32_t eflags;
144 eflags = helper_cc_compute_all(CC_OP);
145 eflags |= (DF & DF_MASK);
146 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
147 return eflags;
148}
149
150#ifdef VBOX
151
152void helper_write_eflags_vme(target_ulong t0)
153{
154 unsigned int new_eflags = t0;
155
156 assert(env->eflags & (1<<VM_SHIFT));
157
158 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
159 /* if TF will be set -> #GP */
160 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
161 || (new_eflags & TF_MASK)) {
162 raise_exception(EXCP0D_GPF);
163 } else {
164 load_eflags(new_eflags,
165 (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
166
167 if (new_eflags & IF_MASK) {
168 env->eflags |= VIF_MASK;
169 } else {
170 env->eflags &= ~VIF_MASK;
171 }
172 }
173}
174
175target_ulong helper_read_eflags_vme(void)
176{
177 uint32_t eflags;
178 eflags = helper_cc_compute_all(CC_OP);
179 eflags |= (DF & DF_MASK);
180 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
181 if (env->eflags & VIF_MASK)
182 eflags |= IF_MASK;
183 else
184 eflags &= ~IF_MASK;
185
186 /* According to AMD manual, should be read with IOPL == 3 */
187 eflags |= (3 << IOPL_SHIFT);
188
189 /* We only use helper_read_eflags_vme() in 16-bit mode */
190 return eflags & 0xffff;
191}
192
193void helper_dump_state()
194{
195 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
196 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
197 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
198 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
199 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
200 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
201 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
202}
203
204#endif /* VBOX */
205
206/* return non zero if error */
207static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
208 int selector)
209{
210 SegmentCache *dt;
211 int index;
212 target_ulong ptr;
213
214#ifdef VBOX
215 /* Trying to load a selector with CPL=1? */
216 if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
217 {
218 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
219 selector = selector & 0xfffc;
220 }
221#endif /* VBOX */
222
223 if (selector & 0x4)
224 dt = &env->ldt;
225 else
226 dt = &env->gdt;
227 index = selector & ~7;
228 if ((index + 7) > dt->limit)
229 return -1;
230 ptr = dt->base + index;
231 *e1_ptr = ldl_kernel(ptr);
232 *e2_ptr = ldl_kernel(ptr + 4);
233 return 0;
234}
235
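/* get_seg_limit()/get_seg_base() decode a segment descriptor from its low (e1) and
   high (e2) dwords; the 20-bit limit is expanded to byte granularity when the G bit
   is set. */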
236static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
237{
238 unsigned int limit;
239 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
240 if (e2 & DESC_G_MASK)
241 limit = (limit << 12) | 0xfff;
242 return limit;
243}
244
245static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
246{
247 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
248}
249
250static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
251{
252 sc->base = get_seg_base(e1, e2);
253 sc->limit = get_seg_limit(e1, e2);
254 sc->flags = e2;
255}
256
257/* init the segment cache in vm86 mode. */
258static inline void load_seg_vm(int seg, int selector)
259{
260 selector &= 0xffff;
261#ifdef VBOX
262 /* flags must be 0xf3; expand-up read/write accessed data segment with DPL=3. (VT-x) */
263 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK;
264 flags |= (3 << DESC_DPL_SHIFT);
265
266 cpu_x86_load_seg_cache(env, seg, selector,
267 (selector << 4), 0xffff, flags);
268#else /* VBOX */
269 cpu_x86_load_seg_cache(env, seg, selector,
270 (selector << 4), 0xffff, 0);
271#endif /* VBOX */
272}
273
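/* Fetch the inner-level stack pointer (SS:ESP) for privilege level 'dpl' from the
   current TSS, supporting both the 16-bit and the 32-bit TSS layouts. */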
274static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
275 uint32_t *esp_ptr, int dpl)
276{
277#ifndef VBOX
278 int type, index, shift;
279#else
280 unsigned int type, index, shift;
281#endif
282
283#if 0
284 {
285 int i;
286 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
287 for(i=0;i<env->tr.limit;i++) {
288 printf("%02x ", env->tr.base[i]);
289 if ((i & 7) == 7) printf("\n");
290 }
291 printf("\n");
292 }
293#endif
294
295 if (!(env->tr.flags & DESC_P_MASK))
296 cpu_abort(env, "invalid tss");
297 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
298 if ((type & 7) != 1)
299 cpu_abort(env, "invalid tss type");
300 shift = type >> 3;
301 index = (dpl * 4 + 2) << shift;
302 if (index + (4 << shift) - 1 > env->tr.limit)
303 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
304 if (shift == 0) {
305 *esp_ptr = lduw_kernel(env->tr.base + index);
306 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
307 } else {
308 *esp_ptr = ldl_kernel(env->tr.base + index);
309 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
310 }
311}
312
313/* XXX: merge with load_seg() */
314static void tss_load_seg(int seg_reg, int selector)
315{
316 uint32_t e1, e2;
317 int rpl, dpl, cpl;
318
319#ifdef VBOX
320 e1 = e2 = 0; /* gcc warning? */
321 cpl = env->hflags & HF_CPL_MASK;
322 /* Trying to load a selector with CPL=1? */
323 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
324 {
325 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
326 selector = selector & 0xfffc;
327 }
328#endif /* VBOX */
329
330 if ((selector & 0xfffc) != 0) {
331 if (load_segment(&e1, &e2, selector) != 0)
332 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
333 if (!(e2 & DESC_S_MASK))
334 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
335 rpl = selector & 3;
336 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
337 cpl = env->hflags & HF_CPL_MASK;
338 if (seg_reg == R_CS) {
339 if (!(e2 & DESC_CS_MASK))
340 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
341 /* XXX: is it correct ? */
342 if (dpl != rpl)
343 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
344 if ((e2 & DESC_C_MASK) && dpl > rpl)
345 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
346 } else if (seg_reg == R_SS) {
347 /* SS must be writable data */
348 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
349 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
350 if (dpl != cpl || dpl != rpl)
351 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
352 } else {
353 /* not readable code */
354 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
355 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
356 /* if data or non-conforming code, check the rights */
357 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
358 if (dpl < cpl || dpl < rpl)
359 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
360 }
361 }
362 if (!(e2 & DESC_P_MASK))
363 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
364 cpu_x86_load_seg_cache(env, seg_reg, selector,
365 get_seg_base(e1, e2),
366 get_seg_limit(e1, e2),
367 e2);
368 } else {
369 if (seg_reg == R_SS || seg_reg == R_CS)
370 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
371#ifdef VBOX
372# if 0
373 /** @todo: now we ignore loading 0 selectors, need to check what is correct once */
374 cpu_x86_load_seg_cache(env, seg_reg, selector,
375 0, 0, 0);
376# endif
377#endif /* VBOX */
378 }
379}
380
381#define SWITCH_TSS_JMP 0
382#define SWITCH_TSS_IRET 1
383#define SWITCH_TSS_CALL 2
384
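/* Hardware task switch: save the current register state into the outgoing TSS, then
   load CR3, EIP, EFLAGS, the general registers, LDT and segment registers from the
   TSS described by tss_selector/e1/e2. 'source' (SWITCH_TSS_JMP/IRET/CALL) determines
   how the TSS busy bit and the NT flag are handled. */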
385/* XXX: restore CPU state in registers (PowerPC case) */
386static void switch_tss(int tss_selector,
387 uint32_t e1, uint32_t e2, int source,
388 uint32_t next_eip)
389{
390 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
391 target_ulong tss_base;
392 uint32_t new_regs[8], new_segs[6];
393 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
394 uint32_t old_eflags, eflags_mask;
395 SegmentCache *dt;
396#ifndef VBOX
397 int index;
398#else
399 unsigned int index;
400#endif
401 target_ulong ptr;
402
403 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
404 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
405
406 /* if task gate, we read the TSS segment and we load it */
407 if (type == 5) {
408 if (!(e2 & DESC_P_MASK))
409 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
410 tss_selector = e1 >> 16;
411 if (tss_selector & 4)
412 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
413 if (load_segment(&e1, &e2, tss_selector) != 0)
414 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
415 if (e2 & DESC_S_MASK)
416 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
417 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
418 if ((type & 7) != 1)
419 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
420 }
421
422 if (!(e2 & DESC_P_MASK))
423 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
424
425 if (type & 8)
426 tss_limit_max = 103;
427 else
428 tss_limit_max = 43;
429 tss_limit = get_seg_limit(e1, e2);
430 tss_base = get_seg_base(e1, e2);
431 if ((tss_selector & 4) != 0 ||
432 tss_limit < tss_limit_max)
433 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
434 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
435 if (old_type & 8)
436 old_tss_limit_max = 103;
437 else
438 old_tss_limit_max = 43;
439
440 /* read all the registers from the new TSS */
441 if (type & 8) {
442 /* 32 bit */
443 new_cr3 = ldl_kernel(tss_base + 0x1c);
444 new_eip = ldl_kernel(tss_base + 0x20);
445 new_eflags = ldl_kernel(tss_base + 0x24);
446 for(i = 0; i < 8; i++)
447 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
448 for(i = 0; i < 6; i++)
449 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
450 new_ldt = lduw_kernel(tss_base + 0x60);
451 new_trap = ldl_kernel(tss_base + 0x64);
452 } else {
453 /* 16 bit */
454 new_cr3 = 0;
455 new_eip = lduw_kernel(tss_base + 0x0e);
456 new_eflags = lduw_kernel(tss_base + 0x10);
457 for(i = 0; i < 8; i++)
458 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
459 for(i = 0; i < 4; i++)
460 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
461 new_ldt = lduw_kernel(tss_base + 0x2a);
462 new_segs[R_FS] = 0;
463 new_segs[R_GS] = 0;
464 new_trap = 0;
465 }
466
467 /* NOTE: we must avoid memory exceptions during the task switch,
468 so we make dummy accesses before */
469 /* XXX: it can still fail in some cases, so a bigger hack is
470 necessary to validate the TLB after having done the accesses */
471
472 v1 = ldub_kernel(env->tr.base);
473 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
474 stb_kernel(env->tr.base, v1);
475 stb_kernel(env->tr.base + old_tss_limit_max, v2);
476
477 /* clear busy bit (it is restartable) */
478 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
479 target_ulong ptr;
480 uint32_t e2;
481 ptr = env->gdt.base + (env->tr.selector & ~7);
482 e2 = ldl_kernel(ptr + 4);
483 e2 &= ~DESC_TSS_BUSY_MASK;
484 stl_kernel(ptr + 4, e2);
485 }
486 old_eflags = compute_eflags();
487 if (source == SWITCH_TSS_IRET)
488 old_eflags &= ~NT_MASK;
489
490 /* save the current state in the old TSS */
491 if (type & 8) {
492 /* 32 bit */
493 stl_kernel(env->tr.base + 0x20, next_eip);
494 stl_kernel(env->tr.base + 0x24, old_eflags);
495 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
496 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
497 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
498 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
499 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
500 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
501 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
502 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
503 for(i = 0; i < 6; i++)
504 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
505#ifdef VBOX
506 /* Must store the ldt as it gets reloaded and might have been changed. */
507 stw_kernel(env->tr.base + 0x60, env->ldt.selector);
508#endif
509#if defined(VBOX) && defined(DEBUG)
510 printf("TSS 32 bits switch\n");
511 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
512#endif
513 } else {
514 /* 16 bit */
515 stw_kernel(env->tr.base + 0x0e, next_eip);
516 stw_kernel(env->tr.base + 0x10, old_eflags);
517 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
518 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
519 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
520 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
521 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
522 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
523 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
524 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
525 for(i = 0; i < 4; i++)
526 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
527#ifdef VBOX
528 /* Must store the ldt as it gets reloaded and might have been changed. */
529 stw_kernel(env->tr.base + 0x2a, env->ldt.selector);
530#endif
531 }
532
533 /* now if an exception occurs, it will occur in the next task
534 context */
535
536 if (source == SWITCH_TSS_CALL) {
537 stw_kernel(tss_base, env->tr.selector);
538 new_eflags |= NT_MASK;
539 }
540
541 /* set busy bit */
542 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
543 target_ulong ptr;
544 uint32_t e2;
545 ptr = env->gdt.base + (tss_selector & ~7);
546 e2 = ldl_kernel(ptr + 4);
547 e2 |= DESC_TSS_BUSY_MASK;
548 stl_kernel(ptr + 4, e2);
549 }
550
551 /* set the new CPU state */
552 /* from this point, any exception which occurs can give problems */
553 env->cr[0] |= CR0_TS_MASK;
554 env->hflags |= HF_TS_MASK;
555 env->tr.selector = tss_selector;
556 env->tr.base = tss_base;
557 env->tr.limit = tss_limit;
558 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
559
560 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
561 cpu_x86_update_cr3(env, new_cr3);
562 }
563
564 /* load all registers without an exception, then reload them with
565 possible exception */
566 env->eip = new_eip;
567 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
568 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
569 if (!(type & 8))
570 eflags_mask &= 0xffff;
571 load_eflags(new_eflags, eflags_mask);
572 /* XXX: what to do in 16 bit case ? */
573 EAX = new_regs[0];
574 ECX = new_regs[1];
575 EDX = new_regs[2];
576 EBX = new_regs[3];
577 ESP = new_regs[4];
578 EBP = new_regs[5];
579 ESI = new_regs[6];
580 EDI = new_regs[7];
581 if (new_eflags & VM_MASK) {
582 for(i = 0; i < 6; i++)
583 load_seg_vm(i, new_segs[i]);
584 /* in vm86, CPL is always 3 */
585 cpu_x86_set_cpl(env, 3);
586 } else {
587 /* CPL is set to the RPL of CS */
588 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
589 /* first just selectors as the rest may trigger exceptions */
590 for(i = 0; i < 6; i++)
591 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
592 }
593
594 env->ldt.selector = new_ldt & ~4;
595 env->ldt.base = 0;
596 env->ldt.limit = 0;
597 env->ldt.flags = 0;
598
599 /* load the LDT */
600 if (new_ldt & 4)
601 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
602
603 if ((new_ldt & 0xfffc) != 0) {
604 dt = &env->gdt;
605 index = new_ldt & ~7;
606 if ((index + 7) > dt->limit)
607 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
608 ptr = dt->base + index;
609 e1 = ldl_kernel(ptr);
610 e2 = ldl_kernel(ptr + 4);
611 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
612 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
613 if (!(e2 & DESC_P_MASK))
614 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
615 load_seg_cache_raw_dt(&env->ldt, e1, e2);
616 }
617
618 /* load the segments */
619 if (!(new_eflags & VM_MASK)) {
620 tss_load_seg(R_CS, new_segs[R_CS]);
621 tss_load_seg(R_SS, new_segs[R_SS]);
622 tss_load_seg(R_ES, new_segs[R_ES]);
623 tss_load_seg(R_DS, new_segs[R_DS]);
624 tss_load_seg(R_FS, new_segs[R_FS]);
625 tss_load_seg(R_GS, new_segs[R_GS]);
626 }
627
628 /* check that EIP is in the CS segment limits */
629 if (new_eip > env->segs[R_CS].limit) {
630 /* XXX: different exception if CALL ? */
631 raise_exception_err(EXCP0D_GPF, 0);
632 }
633
634#ifndef CONFIG_USER_ONLY
635 /* reset local breakpoints */
636 if (env->dr[7] & 0x55) {
637 for (i = 0; i < 4; i++) {
638 if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
639 hw_breakpoint_remove(env, i);
640 }
641 env->dr[7] &= ~0x55;
642 }
643#endif
644}
645
646/* check if Port I/O is allowed in TSS */
647static inline void check_io(int addr, int size)
648{
649#ifndef VBOX
650 int io_offset, val, mask;
651#else
652 int val, mask;
653 unsigned int io_offset;
654#endif /* VBOX */
655
656 /* TSS must be a valid 32 bit one */
657 if (!(env->tr.flags & DESC_P_MASK) ||
658 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
659 env->tr.limit < 103)
660 goto fail;
661 io_offset = lduw_kernel(env->tr.base + 0x66);
662 io_offset += (addr >> 3);
663 /* Note: the check needs two bytes */
664 if ((io_offset + 1) > env->tr.limit)
665 goto fail;
666 val = lduw_kernel(env->tr.base + io_offset);
667 val >>= (addr & 7);
668 mask = (1 << size) - 1;
669 /* all bits must be zero to allow the I/O */
670 if ((val & mask) != 0) {
671 fail:
672 raise_exception_err(EXCP0D_GPF, 0);
673 }
674}
675
676#ifdef VBOX
677
678/* Keep in sync with gen_check_external_event() */
679void helper_check_external_event()
680{
681 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_EXIT
682 | CPU_INTERRUPT_EXTERNAL_TIMER
683 | CPU_INTERRUPT_EXTERNAL_DMA))
684 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
685 && (env->eflags & IF_MASK)
686 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
687 {
688 helper_external_event();
689 }
690
691}
692
693void helper_sync_seg(uint32_t reg)
694{
695 if (env->segs[reg].newselector)
696 sync_seg(env, reg, env->segs[reg].newselector);
697}
698
699#endif /* VBOX */
700
701void helper_check_iob(uint32_t t0)
702{
703 check_io(t0, 1);
704}
705
706void helper_check_iow(uint32_t t0)
707{
708 check_io(t0, 2);
709}
710
711void helper_check_iol(uint32_t t0)
712{
713 check_io(t0, 4);
714}
715
716void helper_outb(uint32_t port, uint32_t data)
717{
718#ifndef VBOX
719 cpu_outb(port, data & 0xff);
720#else
721 cpu_outb(env, port, data & 0xff);
722#endif
723}
724
725target_ulong helper_inb(uint32_t port)
726{
727#ifndef VBOX
728 return cpu_inb(port);
729#else
730 return cpu_inb(env, port);
731#endif
732}
733
734void helper_outw(uint32_t port, uint32_t data)
735{
736#ifndef VBOX
737 cpu_outw(port, data & 0xffff);
738#else
739 cpu_outw(env, port, data & 0xffff);
740#endif
741}
742
743target_ulong helper_inw(uint32_t port)
744{
745#ifndef VBOX
746 return cpu_inw(port);
747#else
748 return cpu_inw(env, port);
749#endif
750}
751
752void helper_outl(uint32_t port, uint32_t data)
753{
754#ifndef VBOX
755 cpu_outl(port, data);
756#else
757 cpu_outl(env, port, data);
758#endif
759}
760
761target_ulong helper_inl(uint32_t port)
762{
763#ifndef VBOX
764 return cpu_inl(port);
765#else
766 return cpu_inl(env, port);
767#endif
768}
769
770static inline unsigned int get_sp_mask(unsigned int e2)
771{
772 if (e2 & DESC_B_MASK)
773 return 0xffffffff;
774 else
775 return 0xffff;
776}
777
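/* Exception vectors that push an error code: #DF(8), #TS(10), #NP(11), #SS(12),
   #GP(13), #PF(14) and #AC(17). */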
778static int exeption_has_error_code(int intno)
779{
780 switch(intno) {
781 case 8:
782 case 10:
783 case 11:
784 case 12:
785 case 13:
786 case 14:
787 case 17:
788 return 1;
789 }
790 return 0;
791}
792
793#ifdef TARGET_X86_64
794#define SET_ESP(val, sp_mask)\
795do {\
796 if ((sp_mask) == 0xffff)\
797 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
798 else if ((sp_mask) == 0xffffffffLL)\
799 ESP = (uint32_t)(val);\
800 else\
801 ESP = (val);\
802} while (0)
803#else
804#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
805#endif
806
807/* in 64-bit machines, this can overflow. So this segment addition macro
808 * can be used to trim the value to 32-bit whenever needed */
809#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
810
811/* XXX: add a is_user flag to have proper security support */
812#define PUSHW(ssp, sp, sp_mask, val)\
813{\
814 sp -= 2;\
815 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
816}
817
818#define PUSHL(ssp, sp, sp_mask, val)\
819{\
820 sp -= 4;\
821 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
822}
823
824#define POPW(ssp, sp, sp_mask, val)\
825{\
826 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
827 sp += 2;\
828}
829
830#define POPL(ssp, sp, sp_mask, val)\
831{\
832 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
833 sp += 4;\
834}
835
836/* protected mode interrupt */
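/* The IDT gate is looked up and privilege-checked; task gates invoke switch_tss(),
   while interrupt/trap gates optionally switch to an inner-level stack taken from the
   TSS and push (SS, ESP,) EFLAGS, CS, EIP plus an error code if the exception defines
   one, before jumping to the handler. */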
837static void do_interrupt_protected(int intno, int is_int, int error_code,
838 unsigned int next_eip, int is_hw)
839{
840 SegmentCache *dt;
841 target_ulong ptr, ssp;
842 int type, dpl, selector, ss_dpl, cpl;
843 int has_error_code, new_stack, shift;
844 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
845 uint32_t old_eip, sp_mask;
846
847#ifdef VBOX
848 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
849 cpu_loop_exit();
850#endif
851
852 has_error_code = 0;
853 if (!is_int && !is_hw)
854 has_error_code = exeption_has_error_code(intno);
855 if (is_int)
856 old_eip = next_eip;
857 else
858 old_eip = env->eip;
859
860 dt = &env->idt;
861#ifndef VBOX
862 if (intno * 8 + 7 > dt->limit)
863#else
864 if ((unsigned)intno * 8 + 7 > dt->limit)
865#endif
866 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
867 ptr = dt->base + intno * 8;
868 e1 = ldl_kernel(ptr);
869 e2 = ldl_kernel(ptr + 4);
870 /* check gate type */
871 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
872 switch(type) {
873 case 5: /* task gate */
874 /* must do that check here to return the correct error code */
875 if (!(e2 & DESC_P_MASK))
876 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
877 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
878 if (has_error_code) {
879 int type;
880 uint32_t mask;
881 /* push the error code */
882 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
883 shift = type >> 3;
884 if (env->segs[R_SS].flags & DESC_B_MASK)
885 mask = 0xffffffff;
886 else
887 mask = 0xffff;
888 esp = (ESP - (2 << shift)) & mask;
889 ssp = env->segs[R_SS].base + esp;
890 if (shift)
891 stl_kernel(ssp, error_code);
892 else
893 stw_kernel(ssp, error_code);
894 SET_ESP(esp, mask);
895 }
896 return;
897 case 6: /* 286 interrupt gate */
898 case 7: /* 286 trap gate */
899 case 14: /* 386 interrupt gate */
900 case 15: /* 386 trap gate */
901 break;
902 default:
903 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
904 break;
905 }
906 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
907 cpl = env->hflags & HF_CPL_MASK;
908 /* check privilege if software int */
909 if (is_int && dpl < cpl)
910 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
911 /* check valid bit */
912 if (!(e2 & DESC_P_MASK))
913 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
914 selector = e1 >> 16;
915 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
916 if ((selector & 0xfffc) == 0)
917 raise_exception_err(EXCP0D_GPF, 0);
918
919 if (load_segment(&e1, &e2, selector) != 0)
920 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
921 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
922 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
923 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
924 if (dpl > cpl)
925 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
926 if (!(e2 & DESC_P_MASK))
927 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
928 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
929 /* to inner privilege */
930 get_ss_esp_from_tss(&ss, &esp, dpl);
931 if ((ss & 0xfffc) == 0)
932 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
933 if ((ss & 3) != dpl)
934 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
935 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
936 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
937 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
938 if (ss_dpl != dpl)
939 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
940 if (!(ss_e2 & DESC_S_MASK) ||
941 (ss_e2 & DESC_CS_MASK) ||
942 !(ss_e2 & DESC_W_MASK))
943 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
944 if (!(ss_e2 & DESC_P_MASK))
945#ifdef VBOX /* See page 3-477 of 253666.pdf */
946 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
947#else
948 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
949#endif
950 new_stack = 1;
951 sp_mask = get_sp_mask(ss_e2);
952 ssp = get_seg_base(ss_e1, ss_e2);
953#if defined(VBOX) && defined(DEBUG)
954 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
955#endif
956 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
957 /* to same privilege */
958 if (env->eflags & VM_MASK)
959 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
960 new_stack = 0;
961 sp_mask = get_sp_mask(env->segs[R_SS].flags);
962 ssp = env->segs[R_SS].base;
963 esp = ESP;
964 dpl = cpl;
965 } else {
966 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
967 new_stack = 0; /* avoid warning */
968 sp_mask = 0; /* avoid warning */
969 ssp = 0; /* avoid warning */
970 esp = 0; /* avoid warning */
971 }
972
973 shift = type >> 3;
974
975#if 0
976 /* XXX: check that enough room is available */
977 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
978 if (env->eflags & VM_MASK)
979 push_size += 8;
980 push_size <<= shift;
981#endif
982 if (shift == 1) {
983 if (new_stack) {
984 if (env->eflags & VM_MASK) {
985 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
986 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
987 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
988 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
989 }
990 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
991 PUSHL(ssp, esp, sp_mask, ESP);
992 }
993 PUSHL(ssp, esp, sp_mask, compute_eflags());
994 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
995 PUSHL(ssp, esp, sp_mask, old_eip);
996 if (has_error_code) {
997 PUSHL(ssp, esp, sp_mask, error_code);
998 }
999 } else {
1000 if (new_stack) {
1001 if (env->eflags & VM_MASK) {
1002 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
1003 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
1004 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
1005 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
1006 }
1007 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
1008 PUSHW(ssp, esp, sp_mask, ESP);
1009 }
1010 PUSHW(ssp, esp, sp_mask, compute_eflags());
1011 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
1012 PUSHW(ssp, esp, sp_mask, old_eip);
1013 if (has_error_code) {
1014 PUSHW(ssp, esp, sp_mask, error_code);
1015 }
1016 }
1017
1018 if (new_stack) {
1019 if (env->eflags & VM_MASK) {
1020 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
1021 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
1022 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
1023 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
1024 }
1025 ss = (ss & ~3) | dpl;
1026 cpu_x86_load_seg_cache(env, R_SS, ss,
1027 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
1028 }
1029 SET_ESP(esp, sp_mask);
1030
1031 selector = (selector & ~3) | dpl;
1032 cpu_x86_load_seg_cache(env, R_CS, selector,
1033 get_seg_base(e1, e2),
1034 get_seg_limit(e1, e2),
1035 e2);
1036 cpu_x86_set_cpl(env, dpl);
1037 env->eip = offset;
1038
1039 /* interrupt gate clear IF mask */
1040 if ((type & 1) == 0) {
1041 env->eflags &= ~IF_MASK;
1042 }
1043#ifndef VBOX
1044 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1045#else
1046 /*
1047 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1048 * gets confused by seemingly changed EFLAGS. See #3491 and
1049 * public bug #2341.
1050 */
1051 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1052#endif
1053}
1054
1055#ifdef VBOX
1056
1057/* check if VME interrupt redirection is enabled in TSS */
1058DECLINLINE(bool) is_vme_irq_redirected(int intno)
1059{
1060 unsigned int io_offset, intredir_offset;
1061 unsigned char val, mask;
1062
1063 /* TSS must be a valid 32 bit one */
1064 if (!(env->tr.flags & DESC_P_MASK) ||
1065 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
1066 env->tr.limit < 103)
1067 goto fail;
1068 io_offset = lduw_kernel(env->tr.base + 0x66);
1069 /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */
1070 if (io_offset < 0x68 + 0x20)
1071 io_offset = 0x68 + 0x20;
1072 /* the virtual interrupt redirection bitmap is located below the io bitmap */
1073 intredir_offset = io_offset - 0x20;
1074
1075 intredir_offset += (intno >> 3);
1076 if ((intredir_offset) > env->tr.limit)
1077 goto fail;
1078
1079 val = ldub_kernel(env->tr.base + intredir_offset);
1080 mask = 1 << (unsigned char)(intno & 7);
1081
1082 /* bit set means no redirection. */
1083 if ((val & mask) != 0) {
1084 return false;
1085 }
1086 return true;
1087
1088fail:
1089 raise_exception_err(EXCP0D_GPF, 0);
1090 return true;
1091}
1092
1093/* V86 mode software interrupt with CR4.VME=1 */
1094static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
1095{
1096 target_ulong ptr, ssp;
1097 int selector;
1098 uint32_t offset, esp;
1099 uint32_t old_cs, old_eflags;
1100 uint32_t iopl;
1101
1102 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
1103
1104 if (!is_vme_irq_redirected(intno))
1105 {
1106 if (iopl == 3)
1107 {
1108 do_interrupt_protected(intno, 1, error_code, next_eip, 0);
1109 return;
1110 }
1111 else
1112 raise_exception_err(EXCP0D_GPF, 0);
1113 }
1114
1115 /* virtual mode idt is at linear address 0 */
1116 ptr = 0 + intno * 4;
1117 offset = lduw_kernel(ptr);
1118 selector = lduw_kernel(ptr + 2);
1119 esp = ESP;
1120 ssp = env->segs[R_SS].base;
1121 old_cs = env->segs[R_CS].selector;
1122
1123 old_eflags = compute_eflags();
1124 if (iopl < 3)
1125 {
1126 /* copy VIF into IF and set IOPL to 3 */
1127 if (env->eflags & VIF_MASK)
1128 old_eflags |= IF_MASK;
1129 else
1130 old_eflags &= ~IF_MASK;
1131
1132 old_eflags |= (3 << IOPL_SHIFT);
1133 }
1134
1135 /* XXX: use SS segment size ? */
1136 PUSHW(ssp, esp, 0xffff, old_eflags);
1137 PUSHW(ssp, esp, 0xffff, old_cs);
1138 PUSHW(ssp, esp, 0xffff, next_eip);
1139
1140 /* update processor state */
1141 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1142 env->eip = offset;
1143 env->segs[R_CS].selector = selector;
1144 env->segs[R_CS].base = (selector << 4);
1145 env->eflags &= ~(TF_MASK | RF_MASK);
1146
1147 if (iopl < 3)
1148 env->eflags &= ~VIF_MASK;
1149 else
1150 env->eflags &= ~IF_MASK;
1151}
1152
1153#endif /* VBOX */
1154
1155#ifdef TARGET_X86_64
1156
1157#define PUSHQ(sp, val)\
1158{\
1159 sp -= 8;\
1160 stq_kernel(sp, (val));\
1161}
1162
1163#define POPQ(sp, val)\
1164{\
1165 val = ldq_kernel(sp);\
1166 sp += 8;\
1167}
1168
1169static inline target_ulong get_rsp_from_tss(int level)
1170{
1171 int index;
1172
1173#if 0
1174 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1175 env->tr.base, env->tr.limit);
1176#endif
1177
1178 if (!(env->tr.flags & DESC_P_MASK))
1179 cpu_abort(env, "invalid tss");
1180 index = 8 * level + 4;
1181 if ((index + 7) > env->tr.limit)
1182 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1183 return ldq_kernel(env->tr.base + index);
1184}
1185
1186/* 64 bit interrupt */
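/* Long-mode interrupt/exception entry: the 16-byte IDT gate supplies a 64-bit offset;
   a non-zero IST field selects one of the interrupt stacks from the 64-bit TSS, the
   stack is aligned to 16 bytes, and SS, RSP, RFLAGS, CS, RIP (and an error code when
   applicable) are pushed. */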
1187static void do_interrupt64(int intno, int is_int, int error_code,
1188 target_ulong next_eip, int is_hw)
1189{
1190 SegmentCache *dt;
1191 target_ulong ptr;
1192 int type, dpl, selector, cpl, ist;
1193 int has_error_code, new_stack;
1194 uint32_t e1, e2, e3, ss;
1195 target_ulong old_eip, esp, offset;
1196
1197#ifdef VBOX
1198 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
1199 cpu_loop_exit();
1200#endif
1201
1202 has_error_code = 0;
1203 if (!is_int && !is_hw)
1204 has_error_code = exeption_has_error_code(intno);
1205 if (is_int)
1206 old_eip = next_eip;
1207 else
1208 old_eip = env->eip;
1209
1210 dt = &env->idt;
1211 if (intno * 16 + 15 > dt->limit)
1212 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1213 ptr = dt->base + intno * 16;
1214 e1 = ldl_kernel(ptr);
1215 e2 = ldl_kernel(ptr + 4);
1216 e3 = ldl_kernel(ptr + 8);
1217 /* check gate type */
1218 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1219 switch(type) {
1220 case 14: /* 386 interrupt gate */
1221 case 15: /* 386 trap gate */
1222 break;
1223 default:
1224 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1225 break;
1226 }
1227 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1228 cpl = env->hflags & HF_CPL_MASK;
1229 /* check privilege if software int */
1230 if (is_int && dpl < cpl)
1231 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1232 /* check valid bit */
1233 if (!(e2 & DESC_P_MASK))
1234 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1235 selector = e1 >> 16;
1236 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1237 ist = e2 & 7;
1238 if ((selector & 0xfffc) == 0)
1239 raise_exception_err(EXCP0D_GPF, 0);
1240
1241 if (load_segment(&e1, &e2, selector) != 0)
1242 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1243 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1244 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1245 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1246 if (dpl > cpl)
1247 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1248 if (!(e2 & DESC_P_MASK))
1249 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1250 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1251 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1252 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1253 /* to inner privilege */
1254 if (ist != 0)
1255 esp = get_rsp_from_tss(ist + 3);
1256 else
1257 esp = get_rsp_from_tss(dpl);
1258 esp &= ~0xfLL; /* align stack */
1259 ss = 0;
1260 new_stack = 1;
1261 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1262 /* to same privilege */
1263 if (env->eflags & VM_MASK)
1264 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1265 new_stack = 0;
1266 if (ist != 0)
1267 esp = get_rsp_from_tss(ist + 3);
1268 else
1269 esp = ESP;
1270 esp &= ~0xfLL; /* align stack */
1271 dpl = cpl;
1272 } else {
1273 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1274 new_stack = 0; /* avoid warning */
1275 esp = 0; /* avoid warning */
1276 }
1277
1278 PUSHQ(esp, env->segs[R_SS].selector);
1279 PUSHQ(esp, ESP);
1280 PUSHQ(esp, compute_eflags());
1281 PUSHQ(esp, env->segs[R_CS].selector);
1282 PUSHQ(esp, old_eip);
1283 if (has_error_code) {
1284 PUSHQ(esp, error_code);
1285 }
1286
1287 if (new_stack) {
1288 ss = 0 | dpl;
1289 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1290 }
1291 ESP = esp;
1292
1293 selector = (selector & ~3) | dpl;
1294 cpu_x86_load_seg_cache(env, R_CS, selector,
1295 get_seg_base(e1, e2),
1296 get_seg_limit(e1, e2),
1297 e2);
1298 cpu_x86_set_cpl(env, dpl);
1299 env->eip = offset;
1300
1301 /* interrupt gate clear IF mask */
1302 if ((type & 1) == 0) {
1303 env->eflags &= ~IF_MASK;
1304 }
1305#ifndef VBOX
1306 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1307#else /* VBOX */
1308 /*
1309 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1310 * gets confused by seemingly changed EFLAGS. See #3491 and
1311 * public bug #2341.
1312 */
1313 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1314#endif /* VBOX */
1315}
1316#endif
1317
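/* SYSCALL entry: the new CS selector comes from STAR[47:32] (SS = CS + 8). In long
   mode the return RIP is saved in RCX and RFLAGS in R11, RFLAGS is masked with SFMASK,
   and execution continues at LSTAR (64-bit code) or CSTAR (compatibility mode); in
   legacy mode the return EIP goes to ECX and EIP is taken from STAR[31:0]. */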
1318#ifdef TARGET_X86_64
1319#if defined(CONFIG_USER_ONLY)
1320void helper_syscall(int next_eip_addend)
1321{
1322 env->exception_index = EXCP_SYSCALL;
1323 env->exception_next_eip = env->eip + next_eip_addend;
1324 cpu_loop_exit();
1325}
1326#else
1327void helper_syscall(int next_eip_addend)
1328{
1329 int selector;
1330
1331 if (!(env->efer & MSR_EFER_SCE)) {
1332 raise_exception_err(EXCP06_ILLOP, 0);
1333 }
1334 selector = (env->star >> 32) & 0xffff;
1335 if (env->hflags & HF_LMA_MASK) {
1336 int code64;
1337
1338 ECX = env->eip + next_eip_addend;
1339 env->regs[11] = compute_eflags();
1340
1341 code64 = env->hflags & HF_CS64_MASK;
1342
1343 cpu_x86_set_cpl(env, 0);
1344 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1345 0, 0xffffffff,
1346 DESC_G_MASK | DESC_P_MASK |
1347 DESC_S_MASK |
1348 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1349 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1350 0, 0xffffffff,
1351 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1352 DESC_S_MASK |
1353 DESC_W_MASK | DESC_A_MASK);
1354 env->eflags &= ~env->fmask;
1355 load_eflags(env->eflags, 0);
1356 if (code64)
1357 env->eip = env->lstar;
1358 else
1359 env->eip = env->cstar;
1360 } else {
1361 ECX = (uint32_t)(env->eip + next_eip_addend);
1362
1363 cpu_x86_set_cpl(env, 0);
1364 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1365 0, 0xffffffff,
1366 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1367 DESC_S_MASK |
1368 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1369 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1370 0, 0xffffffff,
1371 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1372 DESC_S_MASK |
1373 DESC_W_MASK | DESC_A_MASK);
1374 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1375 env->eip = (uint32_t)env->star;
1376 }
1377}
1378#endif
1379#endif
1380
1381#ifdef TARGET_X86_64
1382void helper_sysret(int dflag)
1383{
1384 int cpl, selector;
1385
1386 if (!(env->efer & MSR_EFER_SCE)) {
1387 raise_exception_err(EXCP06_ILLOP, 0);
1388 }
1389 cpl = env->hflags & HF_CPL_MASK;
1390 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1391 raise_exception_err(EXCP0D_GPF, 0);
1392 }
1393 selector = (env->star >> 48) & 0xffff;
1394 if (env->hflags & HF_LMA_MASK) {
1395 if (dflag == 2) {
1396 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1397 0, 0xffffffff,
1398 DESC_G_MASK | DESC_P_MASK |
1399 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1400 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1401 DESC_L_MASK);
1402 env->eip = ECX;
1403 } else {
1404 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1405 0, 0xffffffff,
1406 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1407 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1408 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1409 env->eip = (uint32_t)ECX;
1410 }
1411 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1412 0, 0xffffffff,
1413 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1414 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1415 DESC_W_MASK | DESC_A_MASK);
1416 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1417 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1418 cpu_x86_set_cpl(env, 3);
1419 } else {
1420 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1421 0, 0xffffffff,
1422 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1423 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1424 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1425 env->eip = (uint32_t)ECX;
1426 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1427 0, 0xffffffff,
1428 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1429 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1430 DESC_W_MASK | DESC_A_MASK);
1431 env->eflags |= IF_MASK;
1432 cpu_x86_set_cpl(env, 3);
1433 }
1434}
1435#endif
1436
1437#ifdef VBOX
1438
1439/**
1440 * Checks and processes external VMM events.
1441 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1442 */
1443void helper_external_event(void)
1444{
1445# if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1446 uintptr_t uSP;
1447# ifdef RT_ARCH_AMD64
1448 __asm__ __volatile__("movq %%rsp, %0" : "=r" (uSP));
1449# else
1450 __asm__ __volatile__("movl %%esp, %0" : "=r" (uSP));
1451# endif
1452 AssertMsg(!(uSP & 15), ("xSP=%#p\n", uSP));
1453# endif
1454 /* Keep in sync with flags checked by gen_check_external_event() */
1455 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1456 {
1457 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1458 ~CPU_INTERRUPT_EXTERNAL_HARD);
1459 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1460 }
1461 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1462 {
1463 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1464 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1465 cpu_exit(env);
1466 }
1467 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1468 {
1469 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1470 ~CPU_INTERRUPT_EXTERNAL_DMA);
1471 remR3DmaRun(env);
1472 }
1473 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1474 {
1475 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1476 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1477 remR3TimersRun(env);
1478 }
1479}
1480
1481/* helper for recording call instruction addresses for later scanning */
1482void helper_record_call()
1483{
1484 if ( !(env->state & CPU_RAW_RING0)
1485 && (env->cr[0] & CR0_PG_MASK)
1486 && !(env->eflags & X86_EFL_IF))
1487 remR3RecordCall(env);
1488}
1489
1490#endif /* VBOX */
1491
1492/* real mode interrupt */
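/* Reads the 4-byte vector (offset, segment) from the real-mode IVT at intno * 4 and
   pushes FLAGS, CS and IP on the stack before jumping to the handler. */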
1493static void do_interrupt_real(int intno, int is_int, int error_code,
1494 unsigned int next_eip)
1495{
1496 SegmentCache *dt;
1497 target_ulong ptr, ssp;
1498 int selector;
1499 uint32_t offset, esp;
1500 uint32_t old_cs, old_eip;
1501
1502 /* real mode (simpler !) */
1503 dt = &env->idt;
1504#ifndef VBOX
1505 if (intno * 4 + 3 > dt->limit)
1506#else
1507 if ((unsigned)intno * 4 + 3 > dt->limit)
1508#endif
1509 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1510 ptr = dt->base + intno * 4;
1511 offset = lduw_kernel(ptr);
1512 selector = lduw_kernel(ptr + 2);
1513 esp = ESP;
1514 ssp = env->segs[R_SS].base;
1515 if (is_int)
1516 old_eip = next_eip;
1517 else
1518 old_eip = env->eip;
1519 old_cs = env->segs[R_CS].selector;
1520 /* XXX: use SS segment size ? */
1521 PUSHW(ssp, esp, 0xffff, compute_eflags());
1522 PUSHW(ssp, esp, 0xffff, old_cs);
1523 PUSHW(ssp, esp, 0xffff, old_eip);
1524
1525 /* update processor state */
1526 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1527 env->eip = offset;
1528 env->segs[R_CS].selector = selector;
1529 env->segs[R_CS].base = (selector << 4);
1530 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1531}
1532
1533/* fake user mode interrupt */
1534void do_interrupt_user(int intno, int is_int, int error_code,
1535 target_ulong next_eip)
1536{
1537 SegmentCache *dt;
1538 target_ulong ptr;
1539 int dpl, cpl, shift;
1540 uint32_t e2;
1541
1542 dt = &env->idt;
1543 if (env->hflags & HF_LMA_MASK) {
1544 shift = 4;
1545 } else {
1546 shift = 3;
1547 }
1548 ptr = dt->base + (intno << shift);
1549 e2 = ldl_kernel(ptr + 4);
1550
1551 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1552 cpl = env->hflags & HF_CPL_MASK;
1553 /* check privilege if software int */
1554 if (is_int && dpl < cpl)
1555 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1556
1557 /* Since we emulate only user space, we cannot do more than
1558 exiting the emulation with the suitable exception and error
1559 code */
1560 if (is_int)
1561 EIP = next_eip;
1562}
1563
1564#if !defined(CONFIG_USER_ONLY)
1565static void handle_even_inj(int intno, int is_int, int error_code,
1566 int is_hw, int rm)
1567{
1568 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1569 if (!(event_inj & SVM_EVTINJ_VALID)) {
1570 int type;
1571 if (is_int)
1572 type = SVM_EVTINJ_TYPE_SOFT;
1573 else
1574 type = SVM_EVTINJ_TYPE_EXEPT;
1575 event_inj = intno | type | SVM_EVTINJ_VALID;
1576 if (!rm && exeption_has_error_code(intno)) {
1577 event_inj |= SVM_EVTINJ_VALID_ERR;
1578 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
1579 }
1580 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
1581 }
1582}
1583#endif
1584
1585/*
1586 * Begin execution of an interrupt. is_int is TRUE if coming from
1587 * the int instruction. next_eip is the EIP value AFTER the interrupt
1588 * instruction. It is only relevant if is_int is TRUE.
1589 */
1590void do_interrupt(int intno, int is_int, int error_code,
1591 target_ulong next_eip, int is_hw)
1592{
1593 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1594 if ((env->cr[0] & CR0_PE_MASK)) {
1595 static int count;
1596 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1597 count, intno, error_code, is_int,
1598 env->hflags & HF_CPL_MASK,
1599 env->segs[R_CS].selector, EIP,
1600 (int)env->segs[R_CS].base + EIP,
1601 env->segs[R_SS].selector, ESP);
1602 if (intno == 0x0e) {
1603 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1604 } else {
1605 qemu_log(" EAX=" TARGET_FMT_lx, EAX);
1606 }
1607 qemu_log("\n");
1608 log_cpu_state(env, X86_DUMP_CCOP);
1609#if 0
1610 {
1611 int i;
1612 uint8_t *ptr;
1613 qemu_log(" code=");
1614 ptr = env->segs[R_CS].base + env->eip;
1615 for(i = 0; i < 16; i++) {
1616 qemu_log(" %02x", ldub(ptr + i));
1617 }
1618 qemu_log("\n");
1619 }
1620#endif
1621 count++;
1622 }
1623 }
1624#ifdef VBOX
1625 if (RT_UNLIKELY(env->state & CPU_EMULATE_SINGLE_STEP)) {
1626 if (is_int) {
1627 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv%s\n",
1628 intno, error_code, (RTGCPTR)env->eip, is_hw ? " hw" : "");
1629 } else {
1630 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv next=%#RGv%s\n",
1631 intno, error_code, (RTGCPTR)env->eip, (RTGCPTR)next_eip, is_hw ? " hw" : "");
1632 }
1633 }
1634#endif
1635 if (env->cr[0] & CR0_PE_MASK) {
1636#if !defined(CONFIG_USER_ONLY)
1637 if (env->hflags & HF_SVMI_MASK)
1638 handle_even_inj(intno, is_int, error_code, is_hw, 0);
1639#endif
1640#ifdef TARGET_X86_64
1641 if (env->hflags & HF_LMA_MASK) {
1642 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1643 } else
1644#endif
1645 {
1646#ifdef VBOX
1647 /* int xx *, v86 code and VME enabled? */
1648 if ( (env->eflags & VM_MASK)
1649 && (env->cr[4] & CR4_VME_MASK)
1650 && is_int
1651 && !is_hw
1652 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1653 )
1654 do_soft_interrupt_vme(intno, error_code, next_eip);
1655 else
1656#endif /* VBOX */
1657 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1658 }
1659 } else {
1660#if !defined(CONFIG_USER_ONLY)
1661 if (env->hflags & HF_SVMI_MASK)
1662 handle_even_inj(intno, is_int, error_code, is_hw, 1);
1663#endif
1664 do_interrupt_real(intno, is_int, error_code, next_eip);
1665 }
1666
1667#if !defined(CONFIG_USER_ONLY)
1668 if (env->hflags & HF_SVMI_MASK) {
1669 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1670 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
1671 }
1672#endif
1673}
1674
1675/* This should come from sysemu.h - if we could include it here... */
1676void qemu_system_reset_request(void);
1677
1678/*
1679 * Check nested exceptions and change to double or triple fault if
1680 * needed. It should only be called if this is not an interrupt.
1681 * Returns the new exception number.
1682 */
1683static int check_exception(int intno, int *error_code)
1684{
1685 int first_contributory = env->old_exception == 0 ||
1686 (env->old_exception >= 10 &&
1687 env->old_exception <= 13);
1688 int second_contributory = intno == 0 ||
1689 (intno >= 10 && intno <= 13);
1690
1691 qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
1692 env->old_exception, intno);
1693
1694#if !defined(CONFIG_USER_ONLY)
1695 if (env->old_exception == EXCP08_DBLE) {
1696 if (env->hflags & HF_SVMI_MASK)
1697 helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */
1698
1699 qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
1700
1701# ifndef VBOX
1702 qemu_system_reset_request();
1703# else
1704 remR3RaiseRC(env->pVM, VINF_EM_RESET); /** @todo test + improve triple fault handling. */
1705# endif
1706 return EXCP_HLT;
1707 }
1708#endif
1709
1710 if ((first_contributory && second_contributory)
1711 || (env->old_exception == EXCP0E_PAGE &&
1712 (second_contributory || (intno == EXCP0E_PAGE)))) {
1713 intno = EXCP08_DBLE;
1714 *error_code = 0;
1715 }
1716
1717 if (second_contributory || (intno == EXCP0E_PAGE) ||
1718 (intno == EXCP08_DBLE))
1719 env->old_exception = intno;
1720
1721 return intno;
1722}
1723
1724/*
1725 * Signal an interrupt. It is executed in the main CPU loop.
1726 * is_int is TRUE if coming from the int instruction. next_eip is the
1727 * EIP value AFTER the interrupt instruction. It is only relevant if
1728 * is_int is TRUE.
1729 */
1730static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
1731 int next_eip_addend)
1732{
1733#if defined(VBOX) && defined(DEBUG)
1734 Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, (RTGCPTR)env->eip + next_eip_addend));
1735#endif
1736 if (!is_int) {
1737 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1738 intno = check_exception(intno, &error_code);
1739 } else {
1740 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1741 }
1742
1743 env->exception_index = intno;
1744 env->error_code = error_code;
1745 env->exception_is_int = is_int;
1746 env->exception_next_eip = env->eip + next_eip_addend;
1747 cpu_loop_exit();
1748}
1749
1750/* shortcuts to generate exceptions */
1751
1752void raise_exception_err(int exception_index, int error_code)
1753{
1754 raise_interrupt(exception_index, 0, error_code, 0);
1755}
1756
1757void raise_exception(int exception_index)
1758{
1759 raise_interrupt(exception_index, 0, 0, 0);
1760}
1761
1762/* SMM support */
1763
1764#if defined(CONFIG_USER_ONLY)
1765
1766void do_smm_enter(void)
1767{
1768}
1769
1770void helper_rsm(void)
1771{
1772}
1773
1774#else
1775
1776#ifdef TARGET_X86_64
1777#define SMM_REVISION_ID 0x00020064
1778#else
1779#define SMM_REVISION_ID 0x00020000
1780#endif
1781
1782void do_smm_enter(void)
1783{
1784 target_ulong sm_state;
1785 SegmentCache *dt;
1786 int i, offset;
1787
1788 qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
1789 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1790
1791 env->hflags |= HF_SMM_MASK;
1792 cpu_smm_update(env);
1793
1794 sm_state = env->smbase + 0x8000;
1795
1796#ifdef TARGET_X86_64
1797 for(i = 0; i < 6; i++) {
1798 dt = &env->segs[i];
1799 offset = 0x7e00 + i * 16;
1800 stw_phys(sm_state + offset, dt->selector);
1801 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1802 stl_phys(sm_state + offset + 4, dt->limit);
1803 stq_phys(sm_state + offset + 8, dt->base);
1804 }
1805
1806 stq_phys(sm_state + 0x7e68, env->gdt.base);
1807 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1808
1809 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1810 stq_phys(sm_state + 0x7e78, env->ldt.base);
1811 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1812 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1813
1814 stq_phys(sm_state + 0x7e88, env->idt.base);
1815 stl_phys(sm_state + 0x7e84, env->idt.limit);
1816
1817 stw_phys(sm_state + 0x7e90, env->tr.selector);
1818 stq_phys(sm_state + 0x7e98, env->tr.base);
1819 stl_phys(sm_state + 0x7e94, env->tr.limit);
1820 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1821
1822 stq_phys(sm_state + 0x7ed0, env->efer);
1823
1824 stq_phys(sm_state + 0x7ff8, EAX);
1825 stq_phys(sm_state + 0x7ff0, ECX);
1826 stq_phys(sm_state + 0x7fe8, EDX);
1827 stq_phys(sm_state + 0x7fe0, EBX);
1828 stq_phys(sm_state + 0x7fd8, ESP);
1829 stq_phys(sm_state + 0x7fd0, EBP);
1830 stq_phys(sm_state + 0x7fc8, ESI);
1831 stq_phys(sm_state + 0x7fc0, EDI);
1832 for(i = 8; i < 16; i++)
1833 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1834 stq_phys(sm_state + 0x7f78, env->eip);
1835 stl_phys(sm_state + 0x7f70, compute_eflags());
1836 stl_phys(sm_state + 0x7f68, env->dr[6]);
1837 stl_phys(sm_state + 0x7f60, env->dr[7]);
1838
1839 stl_phys(sm_state + 0x7f48, env->cr[4]);
1840 stl_phys(sm_state + 0x7f50, env->cr[3]);
1841 stl_phys(sm_state + 0x7f58, env->cr[0]);
1842
1843 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1844 stl_phys(sm_state + 0x7f00, env->smbase);
1845#else
1846 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1847 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1848 stl_phys(sm_state + 0x7ff4, compute_eflags());
1849 stl_phys(sm_state + 0x7ff0, env->eip);
1850 stl_phys(sm_state + 0x7fec, EDI);
1851 stl_phys(sm_state + 0x7fe8, ESI);
1852 stl_phys(sm_state + 0x7fe4, EBP);
1853 stl_phys(sm_state + 0x7fe0, ESP);
1854 stl_phys(sm_state + 0x7fdc, EBX);
1855 stl_phys(sm_state + 0x7fd8, EDX);
1856 stl_phys(sm_state + 0x7fd4, ECX);
1857 stl_phys(sm_state + 0x7fd0, EAX);
1858 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1859 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1860
1861 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1862 stl_phys(sm_state + 0x7f64, env->tr.base);
1863 stl_phys(sm_state + 0x7f60, env->tr.limit);
1864 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1865
1866 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1867 stl_phys(sm_state + 0x7f80, env->ldt.base);
1868 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1869 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1870
1871 stl_phys(sm_state + 0x7f74, env->gdt.base);
1872 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1873
1874 stl_phys(sm_state + 0x7f58, env->idt.base);
1875 stl_phys(sm_state + 0x7f54, env->idt.limit);
1876
1877 for(i = 0; i < 6; i++) {
1878 dt = &env->segs[i];
1879 if (i < 3)
1880 offset = 0x7f84 + i * 12;
1881 else
1882 offset = 0x7f2c + (i - 3) * 12;
1883 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1884 stl_phys(sm_state + offset + 8, dt->base);
1885 stl_phys(sm_state + offset + 4, dt->limit);
1886 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1887 }
1888 stl_phys(sm_state + 0x7f14, env->cr[4]);
1889
1890 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1891 stl_phys(sm_state + 0x7ef8, env->smbase);
1892#endif
1893 /* init SMM cpu state */
1894
1895#ifdef TARGET_X86_64
1896 cpu_load_efer(env, 0);
1897#endif
1898 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1899 env->eip = 0x00008000;
1900 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1901 0xffffffff, 0);
1902 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1903 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1904 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1905 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1906 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1907
1908 cpu_x86_update_cr0(env,
1909 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1910 cpu_x86_update_cr4(env, 0);
1911 env->dr[7] = 0x00000400;
1912 CC_OP = CC_OP_EFLAGS;
1913}
1914
1915void helper_rsm(void)
1916{
1917#ifdef VBOX
1918 cpu_abort(env, "helper_rsm");
1919#else /* !VBOX */
1920 target_ulong sm_state;
1921 int i, offset;
1922 uint32_t val;
1923
1924 sm_state = env->smbase + 0x8000;
1925#ifdef TARGET_X86_64
1926 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1927
1928 for(i = 0; i < 6; i++) {
1929 offset = 0x7e00 + i * 16;
1930 cpu_x86_load_seg_cache(env, i,
1931 lduw_phys(sm_state + offset),
1932 ldq_phys(sm_state + offset + 8),
1933 ldl_phys(sm_state + offset + 4),
1934 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1935 }
1936
1937 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1938 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1939
1940 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1941 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1942 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1943 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1944
1945 env->idt.base = ldq_phys(sm_state + 0x7e88);
1946 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1947
1948 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1949 env->tr.base = ldq_phys(sm_state + 0x7e98);
1950 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1951 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1952
1953 EAX = ldq_phys(sm_state + 0x7ff8);
1954 ECX = ldq_phys(sm_state + 0x7ff0);
1955 EDX = ldq_phys(sm_state + 0x7fe8);
1956 EBX = ldq_phys(sm_state + 0x7fe0);
1957 ESP = ldq_phys(sm_state + 0x7fd8);
1958 EBP = ldq_phys(sm_state + 0x7fd0);
1959 ESI = ldq_phys(sm_state + 0x7fc8);
1960 EDI = ldq_phys(sm_state + 0x7fc0);
1961 for(i = 8; i < 16; i++)
1962 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1963 env->eip = ldq_phys(sm_state + 0x7f78);
1964 load_eflags(ldl_phys(sm_state + 0x7f70),
1965 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1966 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1967 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1968
1969 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1970 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1971 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1972
1973 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1974 if (val & 0x20000) {
1975 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1976 }
1977#else
1978 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1979 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1980 load_eflags(ldl_phys(sm_state + 0x7ff4),
1981 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1982 env->eip = ldl_phys(sm_state + 0x7ff0);
1983 EDI = ldl_phys(sm_state + 0x7fec);
1984 ESI = ldl_phys(sm_state + 0x7fe8);
1985 EBP = ldl_phys(sm_state + 0x7fe4);
1986 ESP = ldl_phys(sm_state + 0x7fe0);
1987 EBX = ldl_phys(sm_state + 0x7fdc);
1988 EDX = ldl_phys(sm_state + 0x7fd8);
1989 ECX = ldl_phys(sm_state + 0x7fd4);
1990 EAX = ldl_phys(sm_state + 0x7fd0);
1991 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1992 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1993
1994 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1995 env->tr.base = ldl_phys(sm_state + 0x7f64);
1996 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1997 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1998
1999 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
2000 env->ldt.base = ldl_phys(sm_state + 0x7f80);
2001 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
2002 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
2003
2004 env->gdt.base = ldl_phys(sm_state + 0x7f74);
2005 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
2006
2007 env->idt.base = ldl_phys(sm_state + 0x7f58);
2008 env->idt.limit = ldl_phys(sm_state + 0x7f54);
2009
2010 for(i = 0; i < 6; i++) {
2011 if (i < 3)
2012 offset = 0x7f84 + i * 12;
2013 else
2014 offset = 0x7f2c + (i - 3) * 12;
2015 cpu_x86_load_seg_cache(env, i,
2016 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
2017 ldl_phys(sm_state + offset + 8),
2018 ldl_phys(sm_state + offset + 4),
2019 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
2020 }
2021 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
2022
2023 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
2024 if (val & 0x20000) {
2025 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
2026 }
2027#endif
2028 CC_OP = CC_OP_EFLAGS;
2029 env->hflags &= ~HF_SMM_MASK;
2030 cpu_smm_update(env);
2031
2032 qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
2033 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
2034#endif /* !VBOX */
2035}
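/*
 * Summary of the SMM save-state image used by do_smm_enter()/helper_rsm()
 * above: the image lives in SMRAM at env->smbase + 0x8000. The 64-bit layout
 * stores the six segment registers at 0x7e00 + i*16 (selector, attributes,
 * limit, base), the GDT/LDT/IDT/TR descriptors around 0x7e64..0x7e98, EFER at
 * 0x7ed0, the general purpose registers downwards from 0x7ff8, then EIP,
 * EFLAGS, DR6/DR7 and CR4/CR3/CR0, with the SMM revision id at 0x7efc and
 * SMBASE at 0x7f00. The legacy 32-bit layout starts with CR0 at 0x7ffc and
 * keeps SMBASE at 0x7ef8. On entry execution resumes at CS = SMBASE >> 4,
 * EIP = 0x8000 with flat 4GB segments and protection/paging cleared; RSM
 * restores the image and only relocates SMBASE when bit 17 of the revision
 * id is set. The VirtualBox build aborts in helper_rsm() instead of
 * emulating the return from SMM.
 */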
2036
2037#endif /* !CONFIG_USER_ONLY */
2038
2039
2040/* division, flags are undefined */
2041
2042void helper_divb_AL(target_ulong t0)
2043{
2044 unsigned int num, den, q, r;
2045
2046 num = (EAX & 0xffff);
2047 den = (t0 & 0xff);
2048 if (den == 0) {
2049 raise_exception(EXCP00_DIVZ);
2050 }
2051 q = (num / den);
2052 if (q > 0xff)
2053 raise_exception(EXCP00_DIVZ);
2054 q &= 0xff;
2055 r = (num % den) & 0xff;
2056 EAX = (EAX & ~0xffff) | (r << 8) | q;
2057}
2058
2059void helper_idivb_AL(target_ulong t0)
2060{
2061 int num, den, q, r;
2062
2063 num = (int16_t)EAX;
2064 den = (int8_t)t0;
2065 if (den == 0) {
2066 raise_exception(EXCP00_DIVZ);
2067 }
2068 q = (num / den);
2069 if (q != (int8_t)q)
2070 raise_exception(EXCP00_DIVZ);
2071 q &= 0xff;
2072 r = (num % den) & 0xff;
2073 EAX = (EAX & ~0xffff) | (r << 8) | q;
2074}
2075
2076void helper_divw_AX(target_ulong t0)
2077{
2078 unsigned int num, den, q, r;
2079
2080 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2081 den = (t0 & 0xffff);
2082 if (den == 0) {
2083 raise_exception(EXCP00_DIVZ);
2084 }
2085 q = (num / den);
2086 if (q > 0xffff)
2087 raise_exception(EXCP00_DIVZ);
2088 q &= 0xffff;
2089 r = (num % den) & 0xffff;
2090 EAX = (EAX & ~0xffff) | q;
2091 EDX = (EDX & ~0xffff) | r;
2092}
2093
2094void helper_idivw_AX(target_ulong t0)
2095{
2096 int num, den, q, r;
2097
2098 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2099 den = (int16_t)t0;
2100 if (den == 0) {
2101 raise_exception(EXCP00_DIVZ);
2102 }
2103 q = (num / den);
2104 if (q != (int16_t)q)
2105 raise_exception(EXCP00_DIVZ);
2106 q &= 0xffff;
2107 r = (num % den) & 0xffff;
2108 EAX = (EAX & ~0xffff) | q;
2109 EDX = (EDX & ~0xffff) | r;
2110}
2111
2112void helper_divl_EAX(target_ulong t0)
2113{
2114 unsigned int den, r;
2115 uint64_t num, q;
2116
2117 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2118 den = t0;
2119 if (den == 0) {
2120 raise_exception(EXCP00_DIVZ);
2121 }
2122 q = (num / den);
2123 r = (num % den);
2124 if (q > 0xffffffff)
2125 raise_exception(EXCP00_DIVZ);
2126 EAX = (uint32_t)q;
2127 EDX = (uint32_t)r;
2128}
2129
2130void helper_idivl_EAX(target_ulong t0)
2131{
2132 int den, r;
2133 int64_t num, q;
2134
2135 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2136 den = t0;
2137 if (den == 0) {
2138 raise_exception(EXCP00_DIVZ);
2139 }
2140 q = (num / den);
2141 r = (num % den);
2142 if (q != (int32_t)q)
2143 raise_exception(EXCP00_DIVZ);
2144 EAX = (uint32_t)q;
2145 EDX = (uint32_t)r;
2146}
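/*
 * All of the DIV/IDIV helpers above follow the same pattern: the implicit
 * dividend is rebuilt from AX, DX:AX or EDX:EAX, a zero divisor raises
 * EXCP00_DIVZ, and a quotient that does not fit in the destination width
 * raises the same exception, matching the hardware #DE behaviour. Flags are
 * left undefined, as noted above. Illustrative only, not part of the
 * translated call path:
 */
#if 0 /* illustrative sketch: guest-visible effect of the 8-bit form */
    EAX = (EAX & ~0xffff) | 0x0123;   /* dividend 0x0123 in AX            */
    helper_divb_AL(0x10);             /* 0x123 / 0x10: AL = 0x12, AH = 0x03,
                                         so AX ends up as 0x0312          */
#endif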
2147
2148/* bcd */
2149
2150/* XXX: AAM with a zero immediate divisor should raise #DE; not handled here */
2151void helper_aam(int base)
2152{
2153 int al, ah;
2154 al = EAX & 0xff;
2155 ah = al / base;
2156 al = al % base;
2157 EAX = (EAX & ~0xffff) | al | (ah << 8);
2158 CC_DST = al;
2159}
2160
2161void helper_aad(int base)
2162{
2163 int al, ah;
2164 al = EAX & 0xff;
2165 ah = (EAX >> 8) & 0xff;
2166 al = ((ah * base) + al) & 0xff;
2167 EAX = (EAX & ~0xffff) | al;
2168 CC_DST = al;
2169}
2170
2171void helper_aaa(void)
2172{
2173 int icarry;
2174 int al, ah, af;
2175 int eflags;
2176
2177 eflags = helper_cc_compute_all(CC_OP);
2178 af = eflags & CC_A;
2179 al = EAX & 0xff;
2180 ah = (EAX >> 8) & 0xff;
2181
2182 icarry = (al > 0xf9);
2183 if (((al & 0x0f) > 9 ) || af) {
2184 al = (al + 6) & 0x0f;
2185 ah = (ah + 1 + icarry) & 0xff;
2186 eflags |= CC_C | CC_A;
2187 } else {
2188 eflags &= ~(CC_C | CC_A);
2189 al &= 0x0f;
2190 }
2191 EAX = (EAX & ~0xffff) | al | (ah << 8);
2192 CC_SRC = eflags;
2193}
2194
2195void helper_aas(void)
2196{
2197 int icarry;
2198 int al, ah, af;
2199 int eflags;
2200
2201 eflags = helper_cc_compute_all(CC_OP);
2202 af = eflags & CC_A;
2203 al = EAX & 0xff;
2204 ah = (EAX >> 8) & 0xff;
2205
2206 icarry = (al < 6);
2207 if (((al & 0x0f) > 9 ) || af) {
2208 al = (al - 6) & 0x0f;
2209 ah = (ah - 1 - icarry) & 0xff;
2210 eflags |= CC_C | CC_A;
2211 } else {
2212 eflags &= ~(CC_C | CC_A);
2213 al &= 0x0f;
2214 }
2215 EAX = (EAX & ~0xffff) | al | (ah << 8);
2216 CC_SRC = eflags;
2217}
2218
2219void helper_daa(void)
2220{
2221 int al, af, cf;
2222 int eflags;
2223
2224 eflags = helper_cc_compute_all(CC_OP);
2225 cf = eflags & CC_C;
2226 af = eflags & CC_A;
2227 al = EAX & 0xff;
2228
2229 eflags = 0;
2230 if (((al & 0x0f) > 9 ) || af) {
2231 al = (al + 6) & 0xff;
2232 eflags |= CC_A;
2233 }
2234 if ((al > 0x9f) || cf) {
2235 al = (al + 0x60) & 0xff;
2236 eflags |= CC_C;
2237 }
2238 EAX = (EAX & ~0xff) | al;
2239 /* well, speed is not an issue here, so we compute the flags by hand */
2240 eflags |= (al == 0) << 6; /* zf */
2241 eflags |= parity_table[al]; /* pf */
2242 eflags |= (al & 0x80); /* sf */
2243 CC_SRC = eflags;
2244}
2245
2246void helper_das(void)
2247{
2248 int al, al1, af, cf;
2249 int eflags;
2250
2251 eflags = helper_cc_compute_all(CC_OP);
2252 cf = eflags & CC_C;
2253 af = eflags & CC_A;
2254 al = EAX & 0xff;
2255
2256 eflags = 0;
2257 al1 = al;
2258 if (((al & 0x0f) > 9 ) || af) {
2259 eflags |= CC_A;
2260 if (al < 6 || cf)
2261 eflags |= CC_C;
2262 al = (al - 6) & 0xff;
2263 }
2264 if ((al1 > 0x99) || cf) {
2265 al = (al - 0x60) & 0xff;
2266 eflags |= CC_C;
2267 }
2268 EAX = (EAX & ~0xff) | al;
2269 /* well, speed is not an issue here, so we compute the flags by hand */
2270 eflags |= (al == 0) << 6; /* zf */
2271 eflags |= parity_table[al]; /* pf */
2272 eflags |= (al & 0x80); /* sf */
2273 CC_SRC = eflags;
2274}
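/*
 * The ASCII/BCD adjustment helpers above (AAM, AAD, AAA, AAS, DAA, DAS)
 * operate only on AL/AH and feed the lazy flag machinery: AAM and AAD leave
 * the result in CC_DST for later ZF/SF/PF evaluation, while AAA/AAS/DAA/DAS
 * compute the EFLAGS bits by hand into CC_SRC. AAD also clears AH, and
 * AAA/AAS use the auxiliary-carry flag of the previous operation to decide
 * whether an adjustment and carry are needed.
 */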
2275
2276void helper_into(int next_eip_addend)
2277{
2278 int eflags;
2279 eflags = helper_cc_compute_all(CC_OP);
2280 if (eflags & CC_O) {
2281 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2282 }
2283}
2284
2285void helper_cmpxchg8b(target_ulong a0)
2286{
2287 uint64_t d;
2288 int eflags;
2289
2290 eflags = helper_cc_compute_all(CC_OP);
2291 d = ldq(a0);
2292 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2293 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2294 eflags |= CC_Z;
2295 } else {
2296 /* always do the store */
2297 stq(a0, d);
2298 EDX = (uint32_t)(d >> 32);
2299 EAX = (uint32_t)d;
2300 eflags &= ~CC_Z;
2301 }
2302 CC_SRC = eflags;
2303}
2304
2305#ifdef TARGET_X86_64
2306void helper_cmpxchg16b(target_ulong a0)
2307{
2308 uint64_t d0, d1;
2309 int eflags;
2310
2311 if ((a0 & 0xf) != 0)
2312 raise_exception(EXCP0D_GPF);
2313 eflags = helper_cc_compute_all(CC_OP);
2314 d0 = ldq(a0);
2315 d1 = ldq(a0 + 8);
2316 if (d0 == EAX && d1 == EDX) {
2317 stq(a0, EBX);
2318 stq(a0 + 8, ECX);
2319 eflags |= CC_Z;
2320 } else {
2321 /* always do the store */
2322 stq(a0, d0);
2323 stq(a0 + 8, d1);
2324 EDX = d1;
2325 EAX = d0;
2326 eflags &= ~CC_Z;
2327 }
2328 CC_SRC = eflags;
2329}
2330#endif
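/*
 * CMPXCHG8B compares EDX:EAX with the 64-bit operand at a0; on a match it
 * stores ECX:EBX and sets ZF, otherwise it loads the memory value into
 * EDX:EAX and clears ZF. The store in the mismatch path looks redundant but
 * mirrors the hardware behaviour of always performing the write cycle (and
 * thus taking any write fault) regardless of the comparison result.
 * CMPXCHG16B additionally requires a 16-byte aligned operand and raises #GP
 * otherwise.
 */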
2331
2332void helper_single_step(void)
2333{
2334#ifndef CONFIG_USER_ONLY
2335 check_hw_breakpoints(env, 1);
2336 env->dr[6] |= DR6_BS;
2337#endif
2338 raise_exception(EXCP01_DB);
2339}
2340
2341void helper_cpuid(void)
2342{
2343 uint32_t eax, ebx, ecx, edx;
2344
2345 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2346
2347 cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
2348 EAX = eax;
2349 EBX = ebx;
2350 ECX = ecx;
2351 EDX = edx;
2352}
2353
2354void helper_enter_level(int level, int data32, target_ulong t1)
2355{
2356 target_ulong ssp;
2357 uint32_t esp_mask, esp, ebp;
2358
2359 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2360 ssp = env->segs[R_SS].base;
2361 ebp = EBP;
2362 esp = ESP;
2363 if (data32) {
2364 /* 32 bit */
2365 esp -= 4;
2366 while (--level) {
2367 esp -= 4;
2368 ebp -= 4;
2369 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2370 }
2371 esp -= 4;
2372 stl(ssp + (esp & esp_mask), t1);
2373 } else {
2374 /* 16 bit */
2375 esp -= 2;
2376 while (--level) {
2377 esp -= 2;
2378 ebp -= 2;
2379 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2380 }
2381 esp -= 2;
2382 stw(ssp + (esp & esp_mask), t1);
2383 }
2384}
2385
2386#ifdef TARGET_X86_64
2387void helper_enter64_level(int level, int data64, target_ulong t1)
2388{
2389 target_ulong esp, ebp;
2390 ebp = EBP;
2391 esp = ESP;
2392
2393 if (data64) {
2394 /* 64 bit */
2395 esp -= 8;
2396 while (--level) {
2397 esp -= 8;
2398 ebp -= 8;
2399 stq(esp, ldq(ebp));
2400 }
2401 esp -= 8;
2402 stq(esp, t1);
2403 } else {
2404 /* 16 bit */
2405 esp -= 2;
2406 while (--level) {
2407 esp -= 2;
2408 ebp -= 2;
2409 stw(esp, lduw(ebp));
2410 }
2411 esp -= 2;
2412 stw(esp, t1);
2413 }
2414}
2415#endif
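/*
 * The ENTER helpers above implement the nesting-level part of the
 * instruction: level-1 saved frame pointers are copied from the previous
 * frame (addressed via EBP) onto the new stack, followed by the new frame
 * pointer t1 supplied by the translated code; the operand size selects 16-,
 * 32- or 64-bit slots. They are only meaningful for level >= 1 (note the
 * pre-decrement while loop), the level-0 fast path being handled before
 * these helpers are reached.
 */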
2416
2417void helper_lldt(int selector)
2418{
2419 SegmentCache *dt;
2420 uint32_t e1, e2;
2421#ifndef VBOX
2422 int index, entry_limit;
2423#else
2424 unsigned int index, entry_limit;
2425#endif
2426 target_ulong ptr;
2427
2428#ifdef VBOX
2429 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
2430 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
2431#endif
2432
2433 selector &= 0xffff;
2434 if ((selector & 0xfffc) == 0) {
2435 /* XXX: NULL selector case: invalid LDT */
2436 env->ldt.base = 0;
2437 env->ldt.limit = 0;
2438 } else {
2439 if (selector & 0x4)
2440 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2441 dt = &env->gdt;
2442 index = selector & ~7;
2443#ifdef TARGET_X86_64
2444 if (env->hflags & HF_LMA_MASK)
2445 entry_limit = 15;
2446 else
2447#endif
2448 entry_limit = 7;
2449 if ((index + entry_limit) > dt->limit)
2450 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2451 ptr = dt->base + index;
2452 e1 = ldl_kernel(ptr);
2453 e2 = ldl_kernel(ptr + 4);
2454 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2455 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2456 if (!(e2 & DESC_P_MASK))
2457 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2458#ifdef TARGET_X86_64
2459 if (env->hflags & HF_LMA_MASK) {
2460 uint32_t e3;
2461 e3 = ldl_kernel(ptr + 8);
2462 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2463 env->ldt.base |= (target_ulong)e3 << 32;
2464 } else
2465#endif
2466 {
2467 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2468 }
2469 }
2470 env->ldt.selector = selector;
2471#ifdef VBOX
2472 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2473 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2474#endif
2475}
2476
2477void helper_ltr(int selector)
2478{
2479 SegmentCache *dt;
2480 uint32_t e1, e2;
2481#ifndef VBOX
2482 int index, type, entry_limit;
2483#else
2484 unsigned int index;
2485 int type, entry_limit;
2486#endif
2487 target_ulong ptr;
2488
2489#ifdef VBOX
2490 Log(("helper_ltr: old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2491 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2492 env->tr.flags, (RTSEL)(selector & 0xffff)));
2493#endif
2494 selector &= 0xffff;
2495 if ((selector & 0xfffc) == 0) {
2496 /* NULL selector case: invalid TR */
2497 env->tr.base = 0;
2498 env->tr.limit = 0;
2499 env->tr.flags = 0;
2500 } else {
2501 if (selector & 0x4)
2502 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2503 dt = &env->gdt;
2504 index = selector & ~7;
2505#ifdef TARGET_X86_64
2506 if (env->hflags & HF_LMA_MASK)
2507 entry_limit = 15;
2508 else
2509#endif
2510 entry_limit = 7;
2511 if ((index + entry_limit) > dt->limit)
2512 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2513 ptr = dt->base + index;
2514 e1 = ldl_kernel(ptr);
2515 e2 = ldl_kernel(ptr + 4);
2516 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2517 if ((e2 & DESC_S_MASK) ||
2518 (type != 1 && type != 9))
2519 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2520 if (!(e2 & DESC_P_MASK))
2521 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2522#ifdef TARGET_X86_64
2523 if (env->hflags & HF_LMA_MASK) {
2524 uint32_t e3, e4;
2525 e3 = ldl_kernel(ptr + 8);
2526 e4 = ldl_kernel(ptr + 12);
2527 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2528 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2529 load_seg_cache_raw_dt(&env->tr, e1, e2);
2530 env->tr.base |= (target_ulong)e3 << 32;
2531 } else
2532#endif
2533 {
2534 load_seg_cache_raw_dt(&env->tr, e1, e2);
2535 }
2536 e2 |= DESC_TSS_BUSY_MASK;
2537 stl_kernel(ptr + 4, e2);
2538 }
2539 env->tr.selector = selector;
2540#ifdef VBOX
2541 Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2542 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2543 env->tr.flags, (RTSEL)(selector & 0xffff)));
2544#endif
2545}
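/*
 * LLDT and LTR above both read a system descriptor from the GDT (a selector
 * with the TI bit set is rejected with #GP). In long mode the descriptor is
 * 16 bytes, hence entry_limit = 15 and the extra dword at ptr + 8 supplying
 * base bits 63:32; LTR additionally rejects a non-zero type field in the
 * fourth dword. A null selector simply unloads the register. LTR finally
 * marks the descriptor busy by setting DESC_TSS_BUSY_MASK and writing the
 * second dword back, which is why its type check only accepts the
 * "available" TSS types 1 and 9.
 */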
2546
2547/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2548void helper_load_seg(int seg_reg, int selector)
2549{
2550 uint32_t e1, e2;
2551 int cpl, dpl, rpl;
2552 SegmentCache *dt;
2553#ifndef VBOX
2554 int index;
2555#else
2556 unsigned int index;
2557#endif
2558 target_ulong ptr;
2559
2560 selector &= 0xffff;
2561 cpl = env->hflags & HF_CPL_MASK;
2562#ifdef VBOX
2563
2564 /* Trying to load a selector with RPL=1 while running raw ring-0 guest code? */
2565 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2566 {
2567 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
2568 selector = selector & 0xfffc;
2569 }
2570#endif /* VBOX */
2571 if ((selector & 0xfffc) == 0) {
2572 /* null selector case */
2573 if (seg_reg == R_SS
2574#ifdef TARGET_X86_64
2575 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2576#endif
2577 )
2578 raise_exception_err(EXCP0D_GPF, 0);
2579 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2580 } else {
2581
2582 if (selector & 0x4)
2583 dt = &env->ldt;
2584 else
2585 dt = &env->gdt;
2586 index = selector & ~7;
2587 if ((index + 7) > dt->limit)
2588 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2589 ptr = dt->base + index;
2590 e1 = ldl_kernel(ptr);
2591 e2 = ldl_kernel(ptr + 4);
2592
2593 if (!(e2 & DESC_S_MASK))
2594 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2595 rpl = selector & 3;
2596 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2597 if (seg_reg == R_SS) {
2598 /* must be writable segment */
2599 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2600 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2601 if (rpl != cpl || dpl != cpl)
2602 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2603 } else {
2604 /* must be readable segment */
2605 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2606 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2607
2608 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2609 /* if not conforming code, test rights */
2610 if (dpl < cpl || dpl < rpl)
2611 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2612 }
2613 }
2614
2615 if (!(e2 & DESC_P_MASK)) {
2616 if (seg_reg == R_SS)
2617 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2618 else
2619 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2620 }
2621
2622 /* set the access bit if not already set */
2623 if (!(e2 & DESC_A_MASK)) {
2624 e2 |= DESC_A_MASK;
2625 stl_kernel(ptr + 4, e2);
2626 }
2627
2628 cpu_x86_load_seg_cache(env, seg_reg, selector,
2629 get_seg_base(e1, e2),
2630 get_seg_limit(e1, e2),
2631 e2);
2632#if 0
2633 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2634 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2635#endif
2636 }
2637}
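/*
 * helper_load_seg() performs the protected-mode checks for loading a data or
 * stack segment register: a null selector is only legal outside SS (and, in
 * 64-bit code, even for SS when CPL != 3), SS must name a writable data
 * segment with RPL == DPL == CPL, the other registers need a readable
 * segment whose DPL is not below CPL or RPL unless it is conforming code,
 * and the accessed bit is set on first use. The VBox-only block at the top
 * strips RPL 1 selectors down to RPL 0 in raw ring-0 mode, presumably to
 * undo the RPL adjustment that raw-mode execution applies to ring-0 guest
 * code. Illustrative only, with a hypothetical selector value:
 */
#if 0 /* illustrative sketch */
    helper_load_seg(R_DS, 0x0023);   /* load DS with selector 0x20, RPL 3 */
#endif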
2638
2639/* protected mode jump */
2640void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2641 int next_eip_addend)
2642{
2643 int gate_cs, type;
2644 uint32_t e1, e2, cpl, dpl, rpl, limit;
2645 target_ulong next_eip;
2646
2647#ifdef VBOX /** @todo Why do we do this? */
2648 e1 = e2 = 0;
2649#endif
2650 if ((new_cs & 0xfffc) == 0)
2651 raise_exception_err(EXCP0D_GPF, 0);
2652 if (load_segment(&e1, &e2, new_cs) != 0)
2653 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2654 cpl = env->hflags & HF_CPL_MASK;
2655 if (e2 & DESC_S_MASK) {
2656 if (!(e2 & DESC_CS_MASK))
2657 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2658 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2659 if (e2 & DESC_C_MASK) {
2660 /* conforming code segment */
2661 if (dpl > cpl)
2662 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2663 } else {
2664 /* non conforming code segment */
2665 rpl = new_cs & 3;
2666 if (rpl > cpl)
2667 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2668 if (dpl != cpl)
2669 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2670 }
2671 if (!(e2 & DESC_P_MASK))
2672 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2673 limit = get_seg_limit(e1, e2);
2674 if (new_eip > limit &&
2675 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2676 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2677 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2678 get_seg_base(e1, e2), limit, e2);
2679 EIP = new_eip;
2680 } else {
2681 /* jump to call or task gate */
2682 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2683 rpl = new_cs & 3;
2684 cpl = env->hflags & HF_CPL_MASK;
2685 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2686 switch(type) {
2687 case 1: /* 286 TSS */
2688 case 9: /* 386 TSS */
2689 case 5: /* task gate */
2690 if (dpl < cpl || dpl < rpl)
2691 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2692 next_eip = env->eip + next_eip_addend;
2693 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2694 CC_OP = CC_OP_EFLAGS;
2695 break;
2696 case 4: /* 286 call gate */
2697 case 12: /* 386 call gate */
2698 if ((dpl < cpl) || (dpl < rpl))
2699 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2700 if (!(e2 & DESC_P_MASK))
2701 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2702 gate_cs = e1 >> 16;
2703 new_eip = (e1 & 0xffff);
2704 if (type == 12)
2705 new_eip |= (e2 & 0xffff0000);
2706 if (load_segment(&e1, &e2, gate_cs) != 0)
2707 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2708 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2709 /* must be code segment */
2710 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2711 (DESC_S_MASK | DESC_CS_MASK)))
2712 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2713 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2714 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2715 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2716 if (!(e2 & DESC_P_MASK))
2717#ifdef VBOX /* See page 3-514 of 253666.pdf */
2718 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2719#else
2720 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2721#endif
2722 limit = get_seg_limit(e1, e2);
2723 if (new_eip > limit)
2724 raise_exception_err(EXCP0D_GPF, 0);
2725 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2726 get_seg_base(e1, e2), limit, e2);
2727 EIP = new_eip;
2728 break;
2729 default:
2730 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2731 break;
2732 }
2733 }
2734}
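/*
 * Far JMP in protected mode has two cases, both handled above: a direct jump
 * to a code segment (conforming: DPL <= CPL; non-conforming: RPL <= CPL and
 * DPL == CPL, with CPL left unchanged), and a jump through a gate. TSS and
 * task-gate targets are routed to switch_tss(), while call gates re-fetch
 * the target CS:EIP from the gate descriptor and re-validate the code
 * segment. The VBox build raises #NP instead of #GP when the call-gate
 * target segment is not present, following the SDM page referenced in the
 * #ifdef.
 */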
2735
2736/* real mode call */
2737void helper_lcall_real(int new_cs, target_ulong new_eip1,
2738 int shift, int next_eip)
2739{
2740 int new_eip;
2741 uint32_t esp, esp_mask;
2742 target_ulong ssp;
2743
2744 new_eip = new_eip1;
2745 esp = ESP;
2746 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2747 ssp = env->segs[R_SS].base;
2748 if (shift) {
2749 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2750 PUSHL(ssp, esp, esp_mask, next_eip);
2751 } else {
2752 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2753 PUSHW(ssp, esp, esp_mask, next_eip);
2754 }
2755
2756 SET_ESP(esp, esp_mask);
2757 env->eip = new_eip;
2758 env->segs[R_CS].selector = new_cs;
2759 env->segs[R_CS].base = (new_cs << 4);
2760}
2761
2762/* protected mode call */
2763void helper_lcall_protected(int new_cs, target_ulong new_eip,
2764 int shift, int next_eip_addend)
2765{
2766 int new_stack, i;
2767 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2768 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2769 uint32_t val, limit, old_sp_mask;
2770 target_ulong ssp, old_ssp, next_eip;
2771
2772#ifdef VBOX /** @todo Why do we do this? */
2773 e1 = e2 = 0;
2774#endif
2775 next_eip = env->eip + next_eip_addend;
2776 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2777 LOG_PCALL_STATE(env);
2778 if ((new_cs & 0xfffc) == 0)
2779 raise_exception_err(EXCP0D_GPF, 0);
2780 if (load_segment(&e1, &e2, new_cs) != 0)
2781 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2782 cpl = env->hflags & HF_CPL_MASK;
2783 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2784 if (e2 & DESC_S_MASK) {
2785 if (!(e2 & DESC_CS_MASK))
2786 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2787 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2788 if (e2 & DESC_C_MASK) {
2789 /* conforming code segment */
2790 if (dpl > cpl)
2791 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2792 } else {
2793 /* non conforming code segment */
2794 rpl = new_cs & 3;
2795 if (rpl > cpl)
2796 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2797 if (dpl != cpl)
2798 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2799 }
2800 if (!(e2 & DESC_P_MASK))
2801 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2802
2803#ifdef TARGET_X86_64
2804 /* XXX: check 16/32 bit cases in long mode */
2805 if (shift == 2) {
2806 target_ulong rsp;
2807 /* 64 bit case */
2808 rsp = ESP;
2809 PUSHQ(rsp, env->segs[R_CS].selector);
2810 PUSHQ(rsp, next_eip);
2811 /* from this point, not restartable */
2812 ESP = rsp;
2813 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2814 get_seg_base(e1, e2),
2815 get_seg_limit(e1, e2), e2);
2816 EIP = new_eip;
2817 } else
2818#endif
2819 {
2820 sp = ESP;
2821 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2822 ssp = env->segs[R_SS].base;
2823 if (shift) {
2824 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2825 PUSHL(ssp, sp, sp_mask, next_eip);
2826 } else {
2827 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2828 PUSHW(ssp, sp, sp_mask, next_eip);
2829 }
2830
2831 limit = get_seg_limit(e1, e2);
2832 if (new_eip > limit)
2833 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2834 /* from this point, not restartable */
2835 SET_ESP(sp, sp_mask);
2836 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2837 get_seg_base(e1, e2), limit, e2);
2838 EIP = new_eip;
2839 }
2840 } else {
2841 /* check gate type */
2842 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2843 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2844 rpl = new_cs & 3;
2845 switch(type) {
2846 case 1: /* available 286 TSS */
2847 case 9: /* available 386 TSS */
2848 case 5: /* task gate */
2849 if (dpl < cpl || dpl < rpl)
2850 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2851 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2852 CC_OP = CC_OP_EFLAGS;
2853 return;
2854 case 4: /* 286 call gate */
2855 case 12: /* 386 call gate */
2856 break;
2857 default:
2858 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2859 break;
2860 }
2861 shift = type >> 3;
2862
2863 if (dpl < cpl || dpl < rpl)
2864 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2865 /* check valid bit */
2866 if (!(e2 & DESC_P_MASK))
2867 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2868 selector = e1 >> 16;
2869 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2870 param_count = e2 & 0x1f;
2871 if ((selector & 0xfffc) == 0)
2872 raise_exception_err(EXCP0D_GPF, 0);
2873
2874 if (load_segment(&e1, &e2, selector) != 0)
2875 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2876 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2877 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2878 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2879 if (dpl > cpl)
2880 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2881 if (!(e2 & DESC_P_MASK))
2882 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2883
2884 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2885 /* to inner privilege */
2886 get_ss_esp_from_tss(&ss, &sp, dpl);
2887 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2888 ss, sp, param_count, ESP);
2889 if ((ss & 0xfffc) == 0)
2890 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2891 if ((ss & 3) != dpl)
2892 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2893 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2894 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2895 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2896 if (ss_dpl != dpl)
2897 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2898 if (!(ss_e2 & DESC_S_MASK) ||
2899 (ss_e2 & DESC_CS_MASK) ||
2900 !(ss_e2 & DESC_W_MASK))
2901 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2902 if (!(ss_e2 & DESC_P_MASK))
2903#ifdef VBOX /* See page 3-99 of 253666.pdf */
2904 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
2905#else
2906 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2907#endif
2908
2909 // push_size = ((param_count * 2) + 8) << shift;
2910
2911 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2912 old_ssp = env->segs[R_SS].base;
2913
2914 sp_mask = get_sp_mask(ss_e2);
2915 ssp = get_seg_base(ss_e1, ss_e2);
2916 if (shift) {
2917 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2918 PUSHL(ssp, sp, sp_mask, ESP);
2919 for(i = param_count - 1; i >= 0; i--) {
2920 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2921 PUSHL(ssp, sp, sp_mask, val);
2922 }
2923 } else {
2924 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2925 PUSHW(ssp, sp, sp_mask, ESP);
2926 for(i = param_count - 1; i >= 0; i--) {
2927 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2928 PUSHW(ssp, sp, sp_mask, val);
2929 }
2930 }
2931 new_stack = 1;
2932 } else {
2933 /* to same privilege */
2934 sp = ESP;
2935 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2936 ssp = env->segs[R_SS].base;
2937 // push_size = (4 << shift);
2938 new_stack = 0;
2939 }
2940
2941 if (shift) {
2942 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2943 PUSHL(ssp, sp, sp_mask, next_eip);
2944 } else {
2945 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2946 PUSHW(ssp, sp, sp_mask, next_eip);
2947 }
2948
2949 /* from this point, not restartable */
2950
2951 if (new_stack) {
2952 ss = (ss & ~3) | dpl;
2953 cpu_x86_load_seg_cache(env, R_SS, ss,
2954 ssp,
2955 get_seg_limit(ss_e1, ss_e2),
2956 ss_e2);
2957 }
2958
2959 selector = (selector & ~3) | dpl;
2960 cpu_x86_load_seg_cache(env, R_CS, selector,
2961 get_seg_base(e1, e2),
2962 get_seg_limit(e1, e2),
2963 e2);
2964 cpu_x86_set_cpl(env, dpl);
2965 SET_ESP(sp, sp_mask);
2966 EIP = offset;
2967 }
2968}
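/*
 * Far CALL through a call gate may switch stacks: when the target code
 * segment is non-conforming and its DPL is below the current CPL, the new
 * SS:ESP is fetched from the TSS for that privilege level, the low 5 bits of
 * the gate descriptor (param_count) give the number of parameter words or
 * dwords copied from the caller's stack, and the old SS:ESP is pushed on the
 * new stack before the return CS:EIP. Only then is CPL lowered to the target
 * segment's DPL and ESP/EIP committed, which is why the code is marked "not
 * restartable" once the pushes are done.
 */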
2969
2970/* real and vm86 mode iret */
2971void helper_iret_real(int shift)
2972{
2973 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2974 target_ulong ssp;
2975 int eflags_mask;
2976#ifdef VBOX
2977 bool fVME = false;
2978
2979 remR3TrapClear(env->pVM);
2980#endif /* VBOX */
2981
2982 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
2983 sp = ESP;
2984 ssp = env->segs[R_SS].base;
2985 if (shift == 1) {
2986 /* 32 bits */
2987 POPL(ssp, sp, sp_mask, new_eip);
2988 POPL(ssp, sp, sp_mask, new_cs);
2989 new_cs &= 0xffff;
2990 POPL(ssp, sp, sp_mask, new_eflags);
2991 } else {
2992 /* 16 bits */
2993 POPW(ssp, sp, sp_mask, new_eip);
2994 POPW(ssp, sp, sp_mask, new_cs);
2995 POPW(ssp, sp, sp_mask, new_eflags);
2996 }
2997#ifdef VBOX
2998 if ( (env->eflags & VM_MASK)
2999 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
3000 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
3001 {
3002 fVME = true;
3003 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
3004 /* if TF will be set -> #GP */
3005 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
3006 || (new_eflags & TF_MASK))
3007 raise_exception(EXCP0D_GPF);
3008 }
3009#endif /* VBOX */
3010 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
3011 env->segs[R_CS].selector = new_cs;
3012 env->segs[R_CS].base = (new_cs << 4);
3013 env->eip = new_eip;
3014#ifdef VBOX
3015 if (fVME)
3016 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3017 else
3018#endif
3019 if (env->eflags & VM_MASK)
3020 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
3021 else
3022 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
3023 if (shift == 0)
3024 eflags_mask &= 0xffff;
3025 load_eflags(new_eflags, eflags_mask);
3026 env->hflags2 &= ~HF2_NMI_MASK;
3027#ifdef VBOX
3028 if (fVME)
3029 {
3030 if (new_eflags & IF_MASK)
3031 env->eflags |= VIF_MASK;
3032 else
3033 env->eflags &= ~VIF_MASK;
3034 }
3035#endif /* VBOX */
3036}
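/*
 * The VBox-only VME handling above implements virtual-8086 interrupt
 * emulation for IRET when IOPL < 3 and CR4.VME is set: the IF image popped
 * from the stack may not enable interrupts while a virtual interrupt is
 * pending (VIP), and TF may not be set, otherwise #GP is raised. When the
 * IRET is permitted, the popped IF value is reflected into VIF instead of
 * the real IF bit (IF_MASK is deliberately excluded from eflags_mask in the
 * fVME case).
 */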
3037
3038static inline void validate_seg(int seg_reg, int cpl)
3039{
3040 int dpl;
3041 uint32_t e2;
3042
3043 /* XXX: on x86_64, we do not want to nullify FS and GS because
3044 they may still contain a valid base. I would be interested to
3045 know how a real x86_64 CPU behaves */
3046 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3047 (env->segs[seg_reg].selector & 0xfffc) == 0)
3048 return;
3049
3050 e2 = env->segs[seg_reg].flags;
3051 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3052 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3053 /* data or non conforming code segment */
3054 if (dpl < cpl) {
3055 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3056 }
3057 }
3058}
3059
3060/* protected mode iret */
3061static inline void helper_ret_protected(int shift, int is_iret, int addend)
3062{
3063 uint32_t new_cs, new_eflags, new_ss;
3064 uint32_t new_es, new_ds, new_fs, new_gs;
3065 uint32_t e1, e2, ss_e1, ss_e2;
3066 int cpl, dpl, rpl, eflags_mask, iopl;
3067 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3068
3069#ifdef VBOX /** @todo Why do we do this? */
3070 ss_e1 = ss_e2 = e1 = e2 = 0;
3071#endif
3072
3073#ifdef TARGET_X86_64
3074 if (shift == 2)
3075 sp_mask = -1;
3076 else
3077#endif
3078 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3079 sp = ESP;
3080 ssp = env->segs[R_SS].base;
3081 new_eflags = 0; /* avoid warning */
3082#ifdef TARGET_X86_64
3083 if (shift == 2) {
3084 POPQ(sp, new_eip);
3085 POPQ(sp, new_cs);
3086 new_cs &= 0xffff;
3087 if (is_iret) {
3088 POPQ(sp, new_eflags);
3089 }
3090 } else
3091#endif
3092 if (shift == 1) {
3093 /* 32 bits */
3094 POPL(ssp, sp, sp_mask, new_eip);
3095 POPL(ssp, sp, sp_mask, new_cs);
3096 new_cs &= 0xffff;
3097 if (is_iret) {
3098 POPL(ssp, sp, sp_mask, new_eflags);
3099#if defined(VBOX) && defined(DEBUG)
3100 printf("iret: new CS %04X\n", new_cs);
3101 printf("iret: new EIP %08X\n", (uint32_t)new_eip);
3102 printf("iret: new EFLAGS %08X\n", new_eflags);
3103 printf("iret: EAX=%08x\n", (uint32_t)EAX);
3104#endif
3105 if (new_eflags & VM_MASK)
3106 goto return_to_vm86;
3107 }
3108#ifdef VBOX
3109 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
3110 {
3111# ifdef DEBUG
3112 printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc);
3113# endif
3114 new_cs = new_cs & 0xfffc;
3115 }
3116#endif
3117 } else {
3118 /* 16 bits */
3119 POPW(ssp, sp, sp_mask, new_eip);
3120 POPW(ssp, sp, sp_mask, new_cs);
3121 if (is_iret)
3122 POPW(ssp, sp, sp_mask, new_eflags);
3123 }
3124 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
3125 new_cs, new_eip, shift, addend);
3126 LOG_PCALL_STATE(env);
3127 if ((new_cs & 0xfffc) == 0)
3128 {
3129#if defined(VBOX) && defined(DEBUG)
3130 printf("(new_cs & 0xfffc) == 0\n");
3131#endif
3132 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3133 }
3134 if (load_segment(&e1, &e2, new_cs) != 0)
3135 {
3136#if defined(VBOX) && defined(DEBUG)
3137 printf("load_segment failed\n");
3138#endif
3139 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3140 }
3141 if (!(e2 & DESC_S_MASK) ||
3142 !(e2 & DESC_CS_MASK))
3143 {
3144#if defined(VBOX) && defined(DEBUG)
3145 printf("e2 mask %08x\n", e2);
3146#endif
3147 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3148 }
3149 cpl = env->hflags & HF_CPL_MASK;
3150 rpl = new_cs & 3;
3151 if (rpl < cpl)
3152 {
3153#if defined(VBOX) && defined(DEBUG)
3154 printf("rpl < cpl (%d vs %d)\n", rpl, cpl);
3155#endif
3156 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3157 }
3158 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3159 if (e2 & DESC_C_MASK) {
3160 if (dpl > rpl)
3161 {
3162#if defined(VBOX) && defined(DEBUG)
3163 printf("dpl > rpl (%d vs %d)\n", dpl, rpl);
3164#endif
3165 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3166 }
3167 } else {
3168 if (dpl != rpl)
3169 {
3170#if defined(VBOX) && defined(DEBUG)
3171 printf("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2);
3172#endif
3173 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3174 }
3175 }
3176 if (!(e2 & DESC_P_MASK))
3177 {
3178#if defined(VBOX) && defined(DEBUG)
3179 printf("DESC_P_MASK e2=%08x\n", e2);
3180#endif
3181 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3182 }
3183
3184 sp += addend;
3185 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
3186 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
3187 /* return to same privilege level */
3188 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3189 get_seg_base(e1, e2),
3190 get_seg_limit(e1, e2),
3191 e2);
3192 } else {
3193 /* return to different privilege level */
3194#ifdef TARGET_X86_64
3195 if (shift == 2) {
3196 POPQ(sp, new_esp);
3197 POPQ(sp, new_ss);
3198 new_ss &= 0xffff;
3199 } else
3200#endif
3201 if (shift == 1) {
3202 /* 32 bits */
3203 POPL(ssp, sp, sp_mask, new_esp);
3204 POPL(ssp, sp, sp_mask, new_ss);
3205 new_ss &= 0xffff;
3206 } else {
3207 /* 16 bits */
3208 POPW(ssp, sp, sp_mask, new_esp);
3209 POPW(ssp, sp, sp_mask, new_ss);
3210 }
3211 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
3212 new_ss, new_esp);
3213 if ((new_ss & 0xfffc) == 0) {
3214#ifdef TARGET_X86_64
3215 /* NULL ss is allowed in long mode if cpl != 3 */
3216 /* XXX: test CS64 ? */
3217 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
3218 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3219 0, 0xffffffff,
3220 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3221 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
3222 DESC_W_MASK | DESC_A_MASK);
3223 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
3224 } else
3225#endif
3226 {
3227 raise_exception_err(EXCP0D_GPF, 0);
3228 }
3229 } else {
3230 if ((new_ss & 3) != rpl)
3231 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3232 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
3233 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3234 if (!(ss_e2 & DESC_S_MASK) ||
3235 (ss_e2 & DESC_CS_MASK) ||
3236 !(ss_e2 & DESC_W_MASK))
3237 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3238 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3239 if (dpl != rpl)
3240 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3241 if (!(ss_e2 & DESC_P_MASK))
3242 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3243 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3244 get_seg_base(ss_e1, ss_e2),
3245 get_seg_limit(ss_e1, ss_e2),
3246 ss_e2);
3247 }
3248
3249 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3250 get_seg_base(e1, e2),
3251 get_seg_limit(e1, e2),
3252 e2);
3253 cpu_x86_set_cpl(env, rpl);
3254 sp = new_esp;
3255#ifdef TARGET_X86_64
3256 if (env->hflags & HF_CS64_MASK)
3257 sp_mask = -1;
3258 else
3259#endif
3260 sp_mask = get_sp_mask(ss_e2);
3261
3262 /* validate data segments */
3263 validate_seg(R_ES, rpl);
3264 validate_seg(R_DS, rpl);
3265 validate_seg(R_FS, rpl);
3266 validate_seg(R_GS, rpl);
3267
3268 sp += addend;
3269 }
3270 SET_ESP(sp, sp_mask);
3271 env->eip = new_eip;
3272 if (is_iret) {
3273 /* NOTE: 'cpl' is the _old_ CPL */
3274 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3275 if (cpl == 0)
3276#ifdef VBOX
3277 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
3278#else
3279 eflags_mask |= IOPL_MASK;
3280#endif
3281 iopl = (env->eflags >> IOPL_SHIFT) & 3;
3282 if (cpl <= iopl)
3283 eflags_mask |= IF_MASK;
3284 if (shift == 0)
3285 eflags_mask &= 0xffff;
3286 load_eflags(new_eflags, eflags_mask);
3287 }
3288 return;
3289
3290 return_to_vm86:
3291 POPL(ssp, sp, sp_mask, new_esp);
3292 POPL(ssp, sp, sp_mask, new_ss);
3293 POPL(ssp, sp, sp_mask, new_es);
3294 POPL(ssp, sp, sp_mask, new_ds);
3295 POPL(ssp, sp, sp_mask, new_fs);
3296 POPL(ssp, sp, sp_mask, new_gs);
3297
3298 /* modify processor state */
3299 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
3300 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
3301 load_seg_vm(R_CS, new_cs & 0xffff);
3302 cpu_x86_set_cpl(env, 3);
3303 load_seg_vm(R_SS, new_ss & 0xffff);
3304 load_seg_vm(R_ES, new_es & 0xffff);
3305 load_seg_vm(R_DS, new_ds & 0xffff);
3306 load_seg_vm(R_FS, new_fs & 0xffff);
3307 load_seg_vm(R_GS, new_gs & 0xffff);
3308
3309 env->eip = new_eip & 0xffff;
3310 ESP = new_esp;
3311}
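/*
 * helper_ret_protected() is shared by LRET and IRET: it pops CS:EIP (plus
 * EFLAGS for IRET). When the new RPL differs from the current CPL, or for a
 * 64-bit IRET (which always pops SS:RSP), it also pops and reloads SS:ESP
 * and then nulls any of ES/DS/FS/GS whose DPL is below the new CPL via
 * validate_seg(). For IRET the EFLAGS update is masked by privilege: IOPL is
 * only writable at CPL 0 and IF only when CPL <= IOPL. A popped EFLAGS image
 * with VM set takes the return_to_vm86 path, which restores the full
 * virtual-8086 register set and forces CPL 3.
 */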
3312
3313void helper_iret_protected(int shift, int next_eip)
3314{
3315 int tss_selector, type;
3316 uint32_t e1, e2;
3317
3318#ifdef VBOX
3319 e1 = e2 = 0; /** @todo Why do we do this? */
3320 remR3TrapClear(env->pVM);
3321#endif
3322
3323 /* specific case for TSS */
3324 if (env->eflags & NT_MASK) {
3325#ifdef TARGET_X86_64
3326 if (env->hflags & HF_LMA_MASK)
3327 raise_exception_err(EXCP0D_GPF, 0);
3328#endif
3329 tss_selector = lduw_kernel(env->tr.base + 0);
3330 if (tss_selector & 4)
3331 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3332 if (load_segment(&e1, &e2, tss_selector) != 0)
3333 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3334 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3335 /* NOTE: we check both segment and busy TSS */
3336 if (type != 3)
3337 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3338 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3339 } else {
3340 helper_ret_protected(shift, 1, 0);
3341 }
3342 env->hflags2 &= ~HF2_NMI_MASK;
3343}
3344
3345void helper_lret_protected(int shift, int addend)
3346{
3347 helper_ret_protected(shift, 0, addend);
3348}
3349
3350void helper_sysenter(void)
3351{
3352 if (env->sysenter_cs == 0) {
3353 raise_exception_err(EXCP0D_GPF, 0);
3354 }
3355 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3356 cpu_x86_set_cpl(env, 0);
3357
3358#ifdef TARGET_X86_64
3359 if (env->hflags & HF_LMA_MASK) {
3360 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3361 0, 0xffffffff,
3362 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3363 DESC_S_MASK |
3364 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3365 } else
3366#endif
3367 {
3368 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3369 0, 0xffffffff,
3370 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3371 DESC_S_MASK |
3372 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3373 }
3374 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3375 0, 0xffffffff,
3376 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3377 DESC_S_MASK |
3378 DESC_W_MASK | DESC_A_MASK);
3379 ESP = env->sysenter_esp;
3380 EIP = env->sysenter_eip;
3381}
3382
3383void helper_sysexit(int dflag)
3384{
3385 int cpl;
3386
3387 cpl = env->hflags & HF_CPL_MASK;
3388 if (env->sysenter_cs == 0 || cpl != 0) {
3389 raise_exception_err(EXCP0D_GPF, 0);
3390 }
3391 cpu_x86_set_cpl(env, 3);
3392#ifdef TARGET_X86_64
3393 if (dflag == 2) {
3394 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3395 0, 0xffffffff,
3396 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3397 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3398 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3399 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3400 0, 0xffffffff,
3401 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3402 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3403 DESC_W_MASK | DESC_A_MASK);
3404 } else
3405#endif
3406 {
3407 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3408 0, 0xffffffff,
3409 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3410 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3411 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3412 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3413 0, 0xffffffff,
3414 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3415 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3416 DESC_W_MASK | DESC_A_MASK);
3417 }
3418 ESP = ECX;
3419 EIP = EDX;
3420}
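/*
 * SYSENTER/SYSEXIT derive all segment selectors from MSR_IA32_SYSENTER_CS as
 * implemented above: on entry CS = cs, SS = cs + 8 with flat 4GB segments,
 * CPL 0, ESP/EIP taken from the SYSENTER_ESP/EIP MSRs, and VM/IF/RF cleared.
 * On exit CPL becomes 3 and the 32-bit return uses CS = cs + 16, SS = cs + 24
 * (the 64-bit variant uses cs + 32 / cs + 40 with a long-mode code segment),
 * while the return ESP and EIP are taken from ECX and EDX. A zero
 * SYSENTER_CS, or SYSEXIT from CPL != 0, raises #GP(0).
 */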
3421
3422#if defined(CONFIG_USER_ONLY)
3423target_ulong helper_read_crN(int reg)
3424{
3425 return 0;
3426}
3427
3428void helper_write_crN(int reg, target_ulong t0)
3429{
3430}
3431
3432void helper_movl_drN_T0(int reg, target_ulong t0)
3433{
3434}
3435#else
3436target_ulong helper_read_crN(int reg)
3437{
3438 target_ulong val;
3439
3440 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3441 switch(reg) {
3442 default:
3443 val = env->cr[reg];
3444 break;
3445 case 8:
3446 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3447 val = cpu_get_apic_tpr(env);
3448 } else {
3449 val = env->v_tpr;
3450 }
3451 break;
3452 }
3453 return val;
3454}
3455
3456void helper_write_crN(int reg, target_ulong t0)
3457{
3458 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3459 switch(reg) {
3460 case 0:
3461 cpu_x86_update_cr0(env, t0);
3462 break;
3463 case 3:
3464 cpu_x86_update_cr3(env, t0);
3465 break;
3466 case 4:
3467 cpu_x86_update_cr4(env, t0);
3468 break;
3469 case 8:
3470 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3471 cpu_set_apic_tpr(env, t0);
3472 }
3473 env->v_tpr = t0 & 0x0f;
3474 break;
3475 default:
3476 env->cr[reg] = t0;
3477 break;
3478 }
3479}
3480
3481void helper_movl_drN_T0(int reg, target_ulong t0)
3482{
3483 int i;
3484
3485 if (reg < 4) {
3486 hw_breakpoint_remove(env, reg);
3487 env->dr[reg] = t0;
3488 hw_breakpoint_insert(env, reg);
3489 } else if (reg == 7) {
3490 for (i = 0; i < 4; i++)
3491 hw_breakpoint_remove(env, i);
3492 env->dr[7] = t0;
3493 for (i = 0; i < 4; i++)
3494 hw_breakpoint_insert(env, i);
3495 } else
3496 env->dr[reg] = t0;
3497}
3498#endif
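/*
 * Control and debug register moves funnel through the helpers above (except
 * in user-only builds, where they are no-ops). CR0/CR3/CR4 writes go through
 * cpu_x86_update_crN() so that hflags and the TLB stay consistent, and CR8
 * is mapped to the local APIC TPR unless virtual interrupt masking
 * (HF2_VINTR_MASK) is active, in which case the v_tpr shadow is used.
 * Writes to DR0-DR3 and DR7 remove and re-insert the corresponding hardware
 * breakpoints so that the breakpoint lists match the new register values.
 */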
3499
3500void helper_lmsw(target_ulong t0)
3501{
3502 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
3503 if already set to one. */
3504 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3505 helper_write_crN(0, t0);
3506}
3507
3508void helper_clts(void)
3509{
3510 env->cr[0] &= ~CR0_TS_MASK;
3511 env->hflags &= ~HF_TS_MASK;
3512}
3513
3514void helper_invlpg(target_ulong addr)
3515{
3516 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3517 tlb_flush_page(env, addr);
3518}
3519
3520void helper_rdtsc(void)
3521{
3522 uint64_t val;
3523
3524 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3525 raise_exception(EXCP0D_GPF);
3526 }
3527 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3528
3529 val = cpu_get_tsc(env) + env->tsc_offset;
3530 EAX = (uint32_t)(val);
3531 EDX = (uint32_t)(val >> 32);
3532}
3533
3534void helper_rdtscp(void)
3535{
3536#ifndef VBOX
3537 helper_rdtsc();
3538 ECX = (uint32_t)(env->tsc_aux);
3539#else
3540 uint64_t val;
3541 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3542 raise_exception(EXCP0D_GPF);
3543 }
3544
3545 val = cpu_get_tsc(env);
3546 EAX = (uint32_t)(val);
3547 EDX = (uint32_t)(val >> 32);
3548 if (cpu_rdmsr(env, MSR_K8_TSC_AUX, &val) == 0)
3549 ECX = (uint32_t)(val);
3550 else
3551 ECX = 0;
3552#endif /* VBOX */
3553}
3554
3555void helper_rdpmc(void)
3556{
3557#ifdef VBOX
3558 /* If X86_CR4_PCE is *not* set, then CPL must be zero. */
3559 if (!(env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3560 raise_exception(EXCP0D_GPF);
3561 }
3562 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
3563 EAX = 0;
3564 EDX = 0;
3565#else /* !VBOX */
3566 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3567 raise_exception(EXCP0D_GPF);
3568 }
3569 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3570
3571 /* currently unimplemented */
3572 raise_exception_err(EXCP06_ILLOP, 0);
3573#endif /* !VBOX */
3574}
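/*
 * RDTSC faults with #GP when CR4.TSD is set and CPL != 0, otherwise it
 * returns cpu_get_tsc() plus env->tsc_offset in EDX:EAX. The VBox variant of
 * RDTSCP repeats that check and fetches MSR_K8_TSC_AUX through cpu_rdmsr()
 * for ECX instead of using env->tsc_aux. RDPMC under VBox only enforces the
 * CR4.PCE/CPL check and then returns 0 in EDX:EAX, since the performance
 * counters themselves are not emulated here; note that the non-VBox branch
 * tests CR4.PCE with the opposite sense of the VBox one.
 */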
3575
3576#if defined(CONFIG_USER_ONLY)
3577void helper_wrmsr(void)
3578{
3579}
3580
3581void helper_rdmsr(void)
3582{
3583}
3584#else
3585void helper_wrmsr(void)
3586{
3587 uint64_t val;
3588
3589 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3590
3591 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3592
3593 switch((uint32_t)ECX) {
3594 case MSR_IA32_SYSENTER_CS:
3595 env->sysenter_cs = val & 0xffff;
3596 break;
3597 case MSR_IA32_SYSENTER_ESP:
3598 env->sysenter_esp = val;
3599 break;
3600 case MSR_IA32_SYSENTER_EIP:
3601 env->sysenter_eip = val;
3602 break;
3603 case MSR_IA32_APICBASE:
3604# ifndef VBOX /* The CPUMSetGuestMsr call below does this now. */
3605 cpu_set_apic_base(env, val);
3606# endif
3607 break;
3608 case MSR_EFER:
3609 {
3610 uint64_t update_mask;
3611 update_mask = 0;
3612 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3613 update_mask |= MSR_EFER_SCE;
3614 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3615 update_mask |= MSR_EFER_LME;
3616 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3617 update_mask |= MSR_EFER_FFXSR;
3618 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3619 update_mask |= MSR_EFER_NXE;
3620 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3621 update_mask |= MSR_EFER_SVME;
3622 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3623 update_mask |= MSR_EFER_FFXSR;
3624 cpu_load_efer(env, (env->efer & ~update_mask) |
3625 (val & update_mask));
3626 }
3627 break;
3628 case MSR_STAR:
3629 env->star = val;
3630 break;
3631 case MSR_PAT:
3632 env->pat = val;
3633 break;
3634 case MSR_VM_HSAVE_PA:
3635 env->vm_hsave = val;
3636 break;
3637#ifdef TARGET_X86_64
3638 case MSR_LSTAR:
3639 env->lstar = val;
3640 break;
3641 case MSR_CSTAR:
3642 env->cstar = val;
3643 break;
3644 case MSR_FMASK:
3645 env->fmask = val;
3646 break;
3647 case MSR_FSBASE:
3648 env->segs[R_FS].base = val;
3649 break;
3650 case MSR_GSBASE:
3651 env->segs[R_GS].base = val;
3652 break;
3653 case MSR_KERNELGSBASE:
3654 env->kernelgsbase = val;
3655 break;
3656#endif
3657# ifndef VBOX
3658 case MSR_MTRRphysBase(0):
3659 case MSR_MTRRphysBase(1):
3660 case MSR_MTRRphysBase(2):
3661 case MSR_MTRRphysBase(3):
3662 case MSR_MTRRphysBase(4):
3663 case MSR_MTRRphysBase(5):
3664 case MSR_MTRRphysBase(6):
3665 case MSR_MTRRphysBase(7):
3666 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3667 break;
3668 case MSR_MTRRphysMask(0):
3669 case MSR_MTRRphysMask(1):
3670 case MSR_MTRRphysMask(2):
3671 case MSR_MTRRphysMask(3):
3672 case MSR_MTRRphysMask(4):
3673 case MSR_MTRRphysMask(5):
3674 case MSR_MTRRphysMask(6):
3675 case MSR_MTRRphysMask(7):
3676 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3677 break;
3678 case MSR_MTRRfix64K_00000:
3679 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3680 break;
3681 case MSR_MTRRfix16K_80000:
3682 case MSR_MTRRfix16K_A0000:
3683 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3684 break;
3685 case MSR_MTRRfix4K_C0000:
3686 case MSR_MTRRfix4K_C8000:
3687 case MSR_MTRRfix4K_D0000:
3688 case MSR_MTRRfix4K_D8000:
3689 case MSR_MTRRfix4K_E0000:
3690 case MSR_MTRRfix4K_E8000:
3691 case MSR_MTRRfix4K_F0000:
3692 case MSR_MTRRfix4K_F8000:
3693 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3694 break;
3695 case MSR_MTRRdefType:
3696 env->mtrr_deftype = val;
3697 break;
3698 case MSR_MCG_STATUS:
3699 env->mcg_status = val;
3700 break;
3701 case MSR_MCG_CTL:
3702 if ((env->mcg_cap & MCG_CTL_P)
3703 && (val == 0 || val == ~(uint64_t)0))
3704 env->mcg_ctl = val;
3705 break;
3706 case MSR_TSC_AUX:
3707 env->tsc_aux = val;
3708 break;
3709# endif /* !VBOX */
3710 default:
3711# ifndef VBOX
3712 if ((uint32_t)ECX >= MSR_MC0_CTL
3713 && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3714 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3715 if ((offset & 0x3) != 0
3716 || (val == 0 || val == ~(uint64_t)0))
3717 env->mce_banks[offset] = val;
3718 break;
3719 }
3720 /* XXX: exception ? */
3721# endif
3722 break;
3723 }
3724
3725# ifdef VBOX
3726 /* call CPUM. */
3727 if (cpu_wrmsr(env, (uint32_t)ECX, val) != 0)
3728 {
3729 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3730 }
3731# endif
3732}
3733
3734void helper_rdmsr(void)
3735{
3736 uint64_t val;
3737
3738 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3739
3740 switch((uint32_t)ECX) {
3741 case MSR_IA32_SYSENTER_CS:
3742 val = env->sysenter_cs;
3743 break;
3744 case MSR_IA32_SYSENTER_ESP:
3745 val = env->sysenter_esp;
3746 break;
3747 case MSR_IA32_SYSENTER_EIP:
3748 val = env->sysenter_eip;
3749 break;
3750 case MSR_IA32_APICBASE:
3751 val = cpu_get_apic_base(env);
3752 break;
3753 case MSR_EFER:
3754 val = env->efer;
3755 break;
3756 case MSR_STAR:
3757 val = env->star;
3758 break;
3759 case MSR_PAT:
3760 val = env->pat;
3761 break;
3762 case MSR_VM_HSAVE_PA:
3763 val = env->vm_hsave;
3764 break;
3765# ifndef VBOX /* forward to CPUMQueryGuestMsr. */
3766 case MSR_IA32_PERF_STATUS:
3767 /* tsc_increment_by_tick */
3768 val = 1000ULL;
3769 /* CPU multiplier */
3770 val |= (((uint64_t)4ULL) << 40);
3771 break;
3772# endif /* !VBOX */
3773#ifdef TARGET_X86_64
3774 case MSR_LSTAR:
3775 val = env->lstar;
3776 break;
3777 case MSR_CSTAR:
3778 val = env->cstar;
3779 break;
3780 case MSR_FMASK:
3781 val = env->fmask;
3782 break;
3783 case MSR_FSBASE:
3784 val = env->segs[R_FS].base;
3785 break;
3786 case MSR_GSBASE:
3787 val = env->segs[R_GS].base;
3788 break;
3789 case MSR_KERNELGSBASE:
3790 val = env->kernelgsbase;
3791 break;
3792# ifndef VBOX
3793 case MSR_TSC_AUX:
3794 val = env->tsc_aux;
3795 break;
3796# endif /*!VBOX*/
3797#endif
3798# ifndef VBOX
3799 case MSR_MTRRphysBase(0):
3800 case MSR_MTRRphysBase(1):
3801 case MSR_MTRRphysBase(2):
3802 case MSR_MTRRphysBase(3):
3803 case MSR_MTRRphysBase(4):
3804 case MSR_MTRRphysBase(5):
3805 case MSR_MTRRphysBase(6):
3806 case MSR_MTRRphysBase(7):
3807 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
3808 break;
3809 case MSR_MTRRphysMask(0):
3810 case MSR_MTRRphysMask(1):
3811 case MSR_MTRRphysMask(2):
3812 case MSR_MTRRphysMask(3):
3813 case MSR_MTRRphysMask(4):
3814 case MSR_MTRRphysMask(5):
3815 case MSR_MTRRphysMask(6):
3816 case MSR_MTRRphysMask(7):
3817 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
3818 break;
3819 case MSR_MTRRfix64K_00000:
3820 val = env->mtrr_fixed[0];
3821 break;
3822 case MSR_MTRRfix16K_80000:
3823 case MSR_MTRRfix16K_A0000:
3824 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
3825 break;
3826 case MSR_MTRRfix4K_C0000:
3827 case MSR_MTRRfix4K_C8000:
3828 case MSR_MTRRfix4K_D0000:
3829 case MSR_MTRRfix4K_D8000:
3830 case MSR_MTRRfix4K_E0000:
3831 case MSR_MTRRfix4K_E8000:
3832 case MSR_MTRRfix4K_F0000:
3833 case MSR_MTRRfix4K_F8000:
3834 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
3835 break;
3836 case MSR_MTRRdefType:
3837 val = env->mtrr_deftype;
3838 break;
3839 case MSR_MTRRcap:
3840 if (env->cpuid_features & CPUID_MTRR)
3841 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
3842 else
3843 /* XXX: exception ? */
3844 val = 0;
3845 break;
3846 case MSR_MCG_CAP:
3847 val = env->mcg_cap;
3848 break;
3849 case MSR_MCG_CTL:
3850 if (env->mcg_cap & MCG_CTL_P)
3851 val = env->mcg_ctl;
3852 else
3853 val = 0;
3854 break;
3855 case MSR_MCG_STATUS:
3856 val = env->mcg_status;
3857 break;
3858# endif /* !VBOX */
3859 default:
3860# ifndef VBOX
3861 if ((uint32_t)ECX >= MSR_MC0_CTL
3862 && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3863 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3864 val = env->mce_banks[offset];
3865 break;
3866 }
3867 /* XXX: exception ? */
3868 val = 0;
3869# else /* VBOX */
3870 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
3871 {
3872 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3873 val = 0;
3874 }
3875# endif /* VBOX */
3876 break;
3877 }
3878 EAX = (uint32_t)(val);
3879 EDX = (uint32_t)(val >> 32);
3880
3881# ifdef VBOX_STRICT
3882 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
3883 val = 0;
3884 AssertMsg(val == RT_MAKE_U64(EAX, EDX), ("idMsr=%#x val=%#llx eax:edx=%#llx\n", (uint32_t)ECX, val, RT_MAKE_U64(EAX, EDX)));
3885# endif
3886}
3887#endif
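/*
 * WRMSR/RDMSR use ECX as the MSR index and EDX:EAX as the 64-bit value, as
 * the splitting and reassembly of 'val' in the helpers above shows. EFER
 * writes are masked so that only bits backed by the reported CPUID features
 * (SCE, LME, NXE, SVME, FFXSR) can change. In the VBox build the value is
 * additionally forwarded to CPUM via cpu_wrmsr(), unknown reads are
 * satisfied by cpu_rdmsr(), and a strict build cross-checks the emulated
 * result against CPUM. Illustrative only, using an arbitrary value:
 */
#if 0 /* illustrative sketch: setting the SYSENTER entry point MSR */
    ECX = MSR_IA32_SYSENTER_EIP;
    EAX = 0xc0001000;        /* low 32 bits of the value  */
    EDX = 0x00000000;        /* high 32 bits of the value */
    helper_wrmsr();
#endif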
3888
3889target_ulong helper_lsl(target_ulong selector1)
3890{
3891 unsigned int limit;
3892 uint32_t e1, e2, eflags, selector;
3893 int rpl, dpl, cpl, type;
3894
3895 selector = selector1 & 0xffff;
3896 eflags = helper_cc_compute_all(CC_OP);
3897 if ((selector & 0xfffc) == 0)
3898 goto fail;
3899 if (load_segment(&e1, &e2, selector) != 0)
3900 goto fail;
3901 rpl = selector & 3;
3902 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3903 cpl = env->hflags & HF_CPL_MASK;
3904 if (e2 & DESC_S_MASK) {
3905 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3906 /* conforming */
3907 } else {
3908 if (dpl < cpl || dpl < rpl)
3909 goto fail;
3910 }
3911 } else {
3912 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3913 switch(type) {
3914 case 1:
3915 case 2:
3916 case 3:
3917 case 9:
3918 case 11:
3919 break;
3920 default:
3921 goto fail;
3922 }
3923 if (dpl < cpl || dpl < rpl) {
3924 fail:
3925 CC_SRC = eflags & ~CC_Z;
3926 return 0;
3927 }
3928 }
3929 limit = get_seg_limit(e1, e2);
3930 CC_SRC = eflags | CC_Z;
3931 return limit;
3932}
3933
3934target_ulong helper_lar(target_ulong selector1)
3935{
3936 uint32_t e1, e2, eflags, selector;
3937 int rpl, dpl, cpl, type;
3938
3939 selector = selector1 & 0xffff;
3940 eflags = helper_cc_compute_all(CC_OP);
3941 if ((selector & 0xfffc) == 0)
3942 goto fail;
3943 if (load_segment(&e1, &e2, selector) != 0)
3944 goto fail;
3945 rpl = selector & 3;
3946 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3947 cpl = env->hflags & HF_CPL_MASK;
3948 if (e2 & DESC_S_MASK) {
3949 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3950 /* conforming */
3951 } else {
3952 if (dpl < cpl || dpl < rpl)
3953 goto fail;
3954 }
3955 } else {
3956 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3957 switch(type) {
3958 case 1:
3959 case 2:
3960 case 3:
3961 case 4:
3962 case 5:
3963 case 9:
3964 case 11:
3965 case 12:
3966 break;
3967 default:
3968 goto fail;
3969 }
3970 if (dpl < cpl || dpl < rpl) {
3971 fail:
3972 CC_SRC = eflags & ~CC_Z;
3973 return 0;
3974 }
3975 }
3976 CC_SRC = eflags | CC_Z;
3977 return e2 & 0x00f0ff00;
3978}
3979
3980void helper_verr(target_ulong selector1)
3981{
3982 uint32_t e1, e2, eflags, selector;
3983 int rpl, dpl, cpl;
3984
3985 selector = selector1 & 0xffff;
3986 eflags = helper_cc_compute_all(CC_OP);
3987 if ((selector & 0xfffc) == 0)
3988 goto fail;
3989 if (load_segment(&e1, &e2, selector) != 0)
3990 goto fail;
3991 if (!(e2 & DESC_S_MASK))
3992 goto fail;
3993 rpl = selector & 3;
3994 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3995 cpl = env->hflags & HF_CPL_MASK;
3996 if (e2 & DESC_CS_MASK) {
3997 if (!(e2 & DESC_R_MASK))
3998 goto fail;
3999 if (!(e2 & DESC_C_MASK)) {
4000 if (dpl < cpl || dpl < rpl)
4001 goto fail;
4002 }
4003 } else {
4004 if (dpl < cpl || dpl < rpl) {
4005 fail:
4006 CC_SRC = eflags & ~CC_Z;
4007 return;
4008 }
4009 }
4010 CC_SRC = eflags | CC_Z;
4011}
4012
4013void helper_verw(target_ulong selector1)
4014{
4015 uint32_t e1, e2, eflags, selector;
4016 int rpl, dpl, cpl;
4017
4018 selector = selector1 & 0xffff;
4019 eflags = helper_cc_compute_all(CC_OP);
4020 if ((selector & 0xfffc) == 0)
4021 goto fail;
4022 if (load_segment(&e1, &e2, selector) != 0)
4023 goto fail;
4024 if (!(e2 & DESC_S_MASK))
4025 goto fail;
4026 rpl = selector & 3;
4027 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4028 cpl = env->hflags & HF_CPL_MASK;
4029 if (e2 & DESC_CS_MASK) {
4030 goto fail;
4031 } else {
4032 if (dpl < cpl || dpl < rpl)
4033 goto fail;
4034 if (!(e2 & DESC_W_MASK)) {
4035 fail:
4036 CC_SRC = eflags & ~CC_Z;
4037 return;
4038 }
4039 }
4040 CC_SRC = eflags | CC_Z;
4041}
4042
4043/* x87 FPU helpers */
4044
4045static void fpu_set_exception(int mask)
4046{
4047 env->fpus |= mask;
4048 if (env->fpus & (~env->fpuc & FPUC_EM))
4049 env->fpus |= FPUS_SE | FPUS_B;
4050}
4051
4052static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4053{
4054 if (b == 0.0)
4055 fpu_set_exception(FPUS_ZE);
4056 return a / b;
4057}
4058
4059static void fpu_raise_exception(void)
4060{
4061 if (env->cr[0] & CR0_NE_MASK) {
4062 raise_exception(EXCP10_COPR);
4063 }
4064#if !defined(CONFIG_USER_ONLY)
4065 else {
4066 cpu_set_ferr(env);
4067 }
4068#endif
4069}
4070
4071void helper_flds_FT0(uint32_t val)
4072{
4073 union {
4074 float32 f;
4075 uint32_t i;
4076 } u;
4077 u.i = val;
4078 FT0 = float32_to_floatx(u.f, &env->fp_status);
4079}
4080
4081void helper_fldl_FT0(uint64_t val)
4082{
4083 union {
4084 float64 f;
4085 uint64_t i;
4086 } u;
4087 u.i = val;
4088 FT0 = float64_to_floatx(u.f, &env->fp_status);
4089}
4090
4091void helper_fildl_FT0(int32_t val)
4092{
4093 FT0 = int32_to_floatx(val, &env->fp_status);
4094}
4095
4096void helper_flds_ST0(uint32_t val)
4097{
4098 int new_fpstt;
4099 union {
4100 float32 f;
4101 uint32_t i;
4102 } u;
4103 new_fpstt = (env->fpstt - 1) & 7;
4104 u.i = val;
4105 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4106 env->fpstt = new_fpstt;
4107 env->fptags[new_fpstt] = 0; /* validate stack entry */
4108}
4109
4110void helper_fldl_ST0(uint64_t val)
4111{
4112 int new_fpstt;
4113 union {
4114 float64 f;
4115 uint64_t i;
4116 } u;
4117 new_fpstt = (env->fpstt - 1) & 7;
4118 u.i = val;
4119 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4120 env->fpstt = new_fpstt;
4121 env->fptags[new_fpstt] = 0; /* validate stack entry */
4122}
4123
4124void helper_fildl_ST0(int32_t val)
4125{
4126 int new_fpstt;
4127 new_fpstt = (env->fpstt - 1) & 7;
4128 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4129 env->fpstt = new_fpstt;
4130 env->fptags[new_fpstt] = 0; /* validate stack entry */
4131}
4132
4133void helper_fildll_ST0(int64_t val)
4134{
4135 int new_fpstt;
4136 new_fpstt = (env->fpstt - 1) & 7;
4137 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4138 env->fpstt = new_fpstt;
4139 env->fptags[new_fpstt] = 0; /* validate stack entry */
4140}
4141
4142#ifndef VBOX
4143uint32_t helper_fsts_ST0(void)
4144#else
4145RTCCUINTREG helper_fsts_ST0(void)
4146#endif
4147{
4148 union {
4149 float32 f;
4150 uint32_t i;
4151 } u;
4152 u.f = floatx_to_float32(ST0, &env->fp_status);
4153 return u.i;
4154}
4155
4156uint64_t helper_fstl_ST0(void)
4157{
4158 union {
4159 float64 f;
4160 uint64_t i;
4161 } u;
4162 u.f = floatx_to_float64(ST0, &env->fp_status);
4163 return u.i;
4164}
4165
4166#ifndef VBOX
4167int32_t helper_fist_ST0(void)
4168#else
4169RTCCINTREG helper_fist_ST0(void)
4170#endif
4171{
4172 int32_t val;
4173 val = floatx_to_int32(ST0, &env->fp_status);
4174 if (val != (int16_t)val)
4175 val = -32768;
4176 return val;
4177}
4178
4179#ifndef VBOX
4180int32_t helper_fistl_ST0(void)
4181#else
4182RTCCINTREG helper_fistl_ST0(void)
4183#endif
4184{
4185 int32_t val;
4186 val = floatx_to_int32(ST0, &env->fp_status);
4187 return val;
4188}
4189
4190int64_t helper_fistll_ST0(void)
4191{
4192 int64_t val;
4193 val = floatx_to_int64(ST0, &env->fp_status);
4194 return val;
4195}
4196
4197#ifndef VBOX
4198int32_t helper_fistt_ST0(void)
4199#else
4200RTCCINTREG helper_fistt_ST0(void)
4201#endif
4202{
4203 int32_t val;
4204 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4205 if (val != (int16_t)val)
4206 val = -32768;
4207 return val;
4208}
4209
4210#ifndef VBOX
4211int32_t helper_fisttl_ST0(void)
4212#else
4213RTCCINTREG helper_fisttl_ST0(void)
4214#endif
4215{
4216 int32_t val;
4217 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4218 return val;
4219}
4220
4221int64_t helper_fisttll_ST0(void)
4222{
4223 int64_t val;
4224 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4225 return val;
4226}
4227
4228void helper_fldt_ST0(target_ulong ptr)
4229{
4230 int new_fpstt;
4231 new_fpstt = (env->fpstt - 1) & 7;
4232 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4233 env->fpstt = new_fpstt;
4234 env->fptags[new_fpstt] = 0; /* validate stack entry */
4235}
4236
4237void helper_fstt_ST0(target_ulong ptr)
4238{
4239 helper_fstt(ST0, ptr);
4240}
4241
4242void helper_fpush(void)
4243{
4244 fpush();
4245}
4246
4247void helper_fpop(void)
4248{
4249 fpop();
4250}
4251
4252void helper_fdecstp(void)
4253{
4254 env->fpstt = (env->fpstt - 1) & 7;
4255 env->fpus &= (~0x4700);
4256}
4257
4258void helper_fincstp(void)
4259{
4260 env->fpstt = (env->fpstt + 1) & 7;
4261 env->fpus &= (~0x4700);
4262}
4263
4264/* FPU move */
4265
4266void helper_ffree_STN(int st_index)
4267{
4268 env->fptags[(env->fpstt + st_index) & 7] = 1;
4269}
4270
4271void helper_fmov_ST0_FT0(void)
4272{
4273 ST0 = FT0;
4274}
4275
4276void helper_fmov_FT0_STN(int st_index)
4277{
4278 FT0 = ST(st_index);
4279}
4280
4281void helper_fmov_ST0_STN(int st_index)
4282{
4283 ST0 = ST(st_index);
4284}
4285
4286void helper_fmov_STN_ST0(int st_index)
4287{
4288 ST(st_index) = ST0;
4289}
4290
4291void helper_fxchg_ST0_STN(int st_index)
4292{
4293 CPU86_LDouble tmp;
4294 tmp = ST(st_index);
4295 ST(st_index) = ST0;
4296 ST0 = tmp;
4297}
4298
4299/* FPU operations */
4300
4301static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
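/* Maps the softfloat compare result (-1 less, 0 equal, 1 greater, 2 unordered),
   indexed as ret + 1, onto the x87 condition bits of the status word:
   C0 = 0x0100, C2 = 0x0400, C3 = 0x4000.  Thus "less" sets C0, "equal" sets C3,
   "greater" clears all three, and "unordered" sets C3|C2|C0 = 0x4500. */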
4302
4303void helper_fcom_ST0_FT0(void)
4304{
4305 int ret;
4306
4307 ret = floatx_compare(ST0, FT0, &env->fp_status);
4308 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4309}
4310
4311void helper_fucom_ST0_FT0(void)
4312{
4313 int ret;
4314
4315 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4316 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret+ 1];
4317}
4318
4319static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
4320
4321void helper_fcomi_ST0_FT0(void)
4322{
4323 int eflags;
4324 int ret;
4325
4326 ret = floatx_compare(ST0, FT0, &env->fp_status);
4327 eflags = helper_cc_compute_all(CC_OP);
4328 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4329 CC_SRC = eflags;
4330}
4331
4332void helper_fucomi_ST0_FT0(void)
4333{
4334 int eflags;
4335 int ret;
4336
4337 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4338 eflags = helper_cc_compute_all(CC_OP);
4339 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4340 CC_SRC = eflags;
4341}
4342
4343void helper_fadd_ST0_FT0(void)
4344{
4345 ST0 += FT0;
4346}
4347
4348void helper_fmul_ST0_FT0(void)
4349{
4350 ST0 *= FT0;
4351}
4352
4353void helper_fsub_ST0_FT0(void)
4354{
4355 ST0 -= FT0;
4356}
4357
4358void helper_fsubr_ST0_FT0(void)
4359{
4360 ST0 = FT0 - ST0;
4361}
4362
4363void helper_fdiv_ST0_FT0(void)
4364{
4365 ST0 = helper_fdiv(ST0, FT0);
4366}
4367
4368void helper_fdivr_ST0_FT0(void)
4369{
4370 ST0 = helper_fdiv(FT0, ST0);
4371}
4372
4373/* fp operations between STN and ST0 */
4374
4375void helper_fadd_STN_ST0(int st_index)
4376{
4377 ST(st_index) += ST0;
4378}
4379
4380void helper_fmul_STN_ST0(int st_index)
4381{
4382 ST(st_index) *= ST0;
4383}
4384
4385void helper_fsub_STN_ST0(int st_index)
4386{
4387 ST(st_index) -= ST0;
4388}
4389
4390void helper_fsubr_STN_ST0(int st_index)
4391{
4392 CPU86_LDouble *p;
4393 p = &ST(st_index);
4394 *p = ST0 - *p;
4395}
4396
4397void helper_fdiv_STN_ST0(int st_index)
4398{
4399 CPU86_LDouble *p;
4400 p = &ST(st_index);
4401 *p = helper_fdiv(*p, ST0);
4402}
4403
4404void helper_fdivr_STN_ST0(int st_index)
4405{
4406 CPU86_LDouble *p;
4407 p = &ST(st_index);
4408 *p = helper_fdiv(ST0, *p);
4409}
4410
4411/* misc FPU operations */
4412void helper_fchs_ST0(void)
4413{
4414 ST0 = floatx_chs(ST0);
4415}
4416
4417void helper_fabs_ST0(void)
4418{
4419 ST0 = floatx_abs(ST0);
4420}
4421
4422void helper_fld1_ST0(void)
4423{
4424 ST0 = f15rk[1];
4425}
4426
4427void helper_fldl2t_ST0(void)
4428{
4429 ST0 = f15rk[6];
4430}
4431
4432void helper_fldl2e_ST0(void)
4433{
4434 ST0 = f15rk[5];
4435}
4436
4437void helper_fldpi_ST0(void)
4438{
4439 ST0 = f15rk[2];
4440}
4441
4442void helper_fldlg2_ST0(void)
4443{
4444 ST0 = f15rk[3];
4445}
4446
4447void helper_fldln2_ST0(void)
4448{
4449 ST0 = f15rk[4];
4450}
4451
4452void helper_fldz_ST0(void)
4453{
4454 ST0 = f15rk[0];
4455}
4456
4457void helper_fldz_FT0(void)
4458{
4459 FT0 = f15rk[0];
4460}
4461
4462#ifndef VBOX
4463uint32_t helper_fnstsw(void)
4464#else
4465RTCCUINTREG helper_fnstsw(void)
4466#endif
4467{
4468 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4469}
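/* The status word returned above keeps the exception/condition bits from fpus
   and inserts the current top-of-stack pointer into bits 11-13.  For example,
   with fpstt == 5 and no exception flags set, FNSTSW yields 5 << 11 = 0x2800. */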
4470
4471#ifndef VBOX
4472uint32_t helper_fnstcw(void)
4473#else
4474RTCCUINTREG helper_fnstcw(void)
4475#endif
4476{
4477 return env->fpuc;
4478}
4479
4480static void update_fp_status(void)
4481{
4482 int rnd_type;
4483
4484 /* set rounding mode */
4485 switch(env->fpuc & RC_MASK) {
4486 default:
4487 case RC_NEAR:
4488 rnd_type = float_round_nearest_even;
4489 break;
4490 case RC_DOWN:
4491 rnd_type = float_round_down;
4492 break;
4493 case RC_UP:
4494 rnd_type = float_round_up;
4495 break;
4496 case RC_CHOP:
4497 rnd_type = float_round_to_zero;
4498 break;
4499 }
4500 set_float_rounding_mode(rnd_type, &env->fp_status);
4501#ifdef FLOATX80
4502 switch((env->fpuc >> 8) & 3) {
4503 case 0:
4504 rnd_type = 32;
4505 break;
4506 case 2:
4507 rnd_type = 64;
4508 break;
4509 case 3:
4510 default:
4511 rnd_type = 80;
4512 break;
4513 }
4514 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4515#endif
4516}
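/* The control word fields decoded above are the standard x87 ones: rounding
   control in bits 10-11 (00 nearest, 01 down, 10 up, 11 toward zero) and
   precision control in bits 8-9 (00 single, 10 double, 11 extended).  The
   FNINIT default of 0x037f therefore selects round-to-nearest, 80-bit
   precision and all exceptions masked. */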
4517
4518void helper_fldcw(uint32_t val)
4519{
4520 env->fpuc = val;
4521 update_fp_status();
4522}
4523
4524void helper_fclex(void)
4525{
4526 env->fpus &= 0x7f00;
4527}
4528
4529void helper_fwait(void)
4530{
4531 if (env->fpus & FPUS_SE)
4532 fpu_raise_exception();
4533}
4534
4535void helper_fninit(void)
4536{
4537 env->fpus = 0;
4538 env->fpstt = 0;
4539 env->fpuc = 0x37f;
4540 env->fptags[0] = 1;
4541 env->fptags[1] = 1;
4542 env->fptags[2] = 1;
4543 env->fptags[3] = 1;
4544 env->fptags[4] = 1;
4545 env->fptags[5] = 1;
4546 env->fptags[6] = 1;
4547 env->fptags[7] = 1;
4548}
4549
4550/* BCD ops */
4551
4552void helper_fbld_ST0(target_ulong ptr)
4553{
4554 CPU86_LDouble tmp;
4555 uint64_t val;
4556 unsigned int v;
4557 int i;
4558
4559 val = 0;
4560 for(i = 8; i >= 0; i--) {
4561 v = ldub(ptr + i);
4562 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4563 }
4564 tmp = val;
4565 if (ldub(ptr + 9) & 0x80)
4566 tmp = -tmp;
4567 fpush();
4568 ST0 = tmp;
4569}
4570
4571void helper_fbst_ST0(target_ulong ptr)
4572{
4573 int v;
4574 target_ulong mem_ref, mem_end;
4575 int64_t val;
4576
4577 val = floatx_to_int64(ST0, &env->fp_status);
4578 mem_ref = ptr;
4579 mem_end = mem_ref + 9;
4580 if (val < 0) {
4581 stb(mem_end, 0x80);
4582 val = -val;
4583 } else {
4584 stb(mem_end, 0x00);
4585 }
4586 while (mem_ref < mem_end) {
4587 if (val == 0)
4588 break;
4589 v = val % 100;
4590 val = val / 100;
4591 v = ((v / 10) << 4) | (v % 10);
4592 stb(mem_ref++, v);
4593 }
4594 while (mem_ref < mem_end) {
4595 stb(mem_ref++, 0);
4596 }
4597}
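/* FBLD/FBSTP use the 80-bit packed BCD format: bytes 0-8 hold 18 decimal
   digits, two per byte with the least significant pair first, and bit 7 of
   byte 9 is the sign.  Storing +1234, for instance, writes 0x34 at offset 0,
   0x12 at offset 1, zeroes up to offset 8 and 0x00 as the sign byte. */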
4598
4599void helper_f2xm1(void)
4600{
4601 ST0 = pow(2.0,ST0) - 1.0;
4602}
4603
4604void helper_fyl2x(void)
4605{
4606 CPU86_LDouble fptemp;
4607
4608 fptemp = ST0;
4609 if (fptemp>0.0){
4610 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4611 ST1 *= fptemp;
4612 fpop();
4613 } else {
4614 env->fpus &= (~0x4700);
4615 env->fpus |= 0x400;
4616 }
4617}
4618
4619void helper_fptan(void)
4620{
4621 CPU86_LDouble fptemp;
4622
4623 fptemp = ST0;
4624 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4625 env->fpus |= 0x400;
4626 } else {
4627 ST0 = tan(fptemp);
4628 fpush();
4629 ST0 = 1.0;
4630 env->fpus &= (~0x400); /* C2 <-- 0 */
4631 /* the above code is for |arg| < 2**52 only */
4632 }
4633}
4634
4635void helper_fpatan(void)
4636{
4637 CPU86_LDouble fptemp, fpsrcop;
4638
4639 fpsrcop = ST1;
4640 fptemp = ST0;
4641 ST1 = atan2(fpsrcop,fptemp);
4642 fpop();
4643}
4644
4645void helper_fxtract(void)
4646{
4647 CPU86_LDoubleU temp;
4648 unsigned int expdif;
4649
4650 temp.d = ST0;
4651 expdif = EXPD(temp) - EXPBIAS;
4652 /*DP exponent bias*/
4653 ST0 = expdif;
4654 fpush();
4655 BIASEXPONENT(temp);
4656 ST0 = temp.d;
4657}
4658
4659void helper_fprem1(void)
4660{
4661 CPU86_LDouble dblq, fpsrcop, fptemp;
4662 CPU86_LDoubleU fpsrcop1, fptemp1;
4663 int expdif;
4664 signed long long int q;
4665
4666#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
4667 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4668#else
4669 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4670#endif
4671 ST0 = 0.0 / 0.0; /* NaN */
4672 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4673 return;
4674 }
4675
4676 fpsrcop = ST0;
4677 fptemp = ST1;
4678 fpsrcop1.d = fpsrcop;
4679 fptemp1.d = fptemp;
4680 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4681
4682 if (expdif < 0) {
4683 /* optimisation? taken from the AMD docs */
4684 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4685 /* ST0 is unchanged */
4686 return;
4687 }
4688
4689 if (expdif < 53) {
4690 dblq = fpsrcop / fptemp;
4691 /* round dblq towards nearest integer */
4692 dblq = rint(dblq);
4693 ST0 = fpsrcop - fptemp * dblq;
4694
4695 /* convert dblq to q by truncating towards zero */
4696 if (dblq < 0.0)
4697 q = (signed long long int)(-dblq);
4698 else
4699 q = (signed long long int)dblq;
4700
4701 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4702 /* (C0,C3,C1) <-- (q2,q1,q0) */
4703 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4704 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4705 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4706 } else {
4707 env->fpus |= 0x400; /* C2 <-- 1 */
4708 fptemp = pow(2.0, expdif - 50);
4709 fpsrcop = (ST0 / ST1) / fptemp;
4710 /* fpsrcop = integer obtained by chopping */
4711 fpsrcop = (fpsrcop < 0.0) ?
4712 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4713 ST0 -= (ST1 * fpsrcop * fptemp);
4714 }
4715}
4716
4717void helper_fprem(void)
4718{
4719 CPU86_LDouble dblq, fpsrcop, fptemp;
4720 CPU86_LDoubleU fpsrcop1, fptemp1;
4721 int expdif;
4722 signed long long int q;
4723
4724#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
4725 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4726#else
4727 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4728#endif
4729 ST0 = 0.0 / 0.0; /* NaN */
4730 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4731 return;
4732 }
4733
4734 fpsrcop = (CPU86_LDouble)ST0;
4735 fptemp = (CPU86_LDouble)ST1;
4736 fpsrcop1.d = fpsrcop;
4737 fptemp1.d = fptemp;
4738 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4739
4740 if (expdif < 0) {
4741 /* optimisation? taken from the AMD docs */
4742 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4743 /* ST0 is unchanged */
4744 return;
4745 }
4746
4747 if ( expdif < 53 ) {
4748 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4749 /* round dblq towards zero */
4750 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4751 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4752
4753 /* convert dblq to q by truncating towards zero */
4754 if (dblq < 0.0)
4755 q = (signed long long int)(-dblq);
4756 else
4757 q = (signed long long int)dblq;
4758
4759 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4760 /* (C0,C3,C1) <-- (q2,q1,q0) */
4761 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4762 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4763 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4764 } else {
4765 int N = 32 + (expdif % 32); /* as per AMD docs */
4766 env->fpus |= 0x400; /* C2 <-- 1 */
4767 fptemp = pow(2.0, (double)(expdif - N));
4768 fpsrcop = (ST0 / ST1) / fptemp;
4769 /* fpsrcop = integer obtained by chopping */
4770 fpsrcop = (fpsrcop < 0.0) ?
4771 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4772 ST0 -= (ST1 * fpsrcop * fptemp);
4773 }
4774}
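/* Both helper_fprem and helper_fprem1 follow the x87 partial remainder
   protocol: when the exponent difference is too large to finish in one step
   (>= 53 here, since the arithmetic is done in double precision), only a
   partial remainder is produced and C2 is left set, so the guest is expected
   to re-execute FPREM/FPREM1 in a loop until C2 reads back as zero. */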
4775
4776void helper_fyl2xp1(void)
4777{
4778 CPU86_LDouble fptemp;
4779
4780 fptemp = ST0;
4781 if ((fptemp+1.0)>0.0) {
4782 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4783 ST1 *= fptemp;
4784 fpop();
4785 } else {
4786 env->fpus &= (~0x4700);
4787 env->fpus |= 0x400;
4788 }
4789}
4790
4791void helper_fsqrt(void)
4792{
4793 CPU86_LDouble fptemp;
4794
4795 fptemp = ST0;
4796 if (fptemp<0.0) {
4797 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4798 env->fpus |= 0x400;
4799 }
4800 ST0 = sqrt(fptemp);
4801}
4802
4803void helper_fsincos(void)
4804{
4805 CPU86_LDouble fptemp;
4806
4807 fptemp = ST0;
4808 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4809 env->fpus |= 0x400;
4810 } else {
4811 ST0 = sin(fptemp);
4812 fpush();
4813 ST0 = cos(fptemp);
4814 env->fpus &= (~0x400); /* C2 <-- 0 */
4815 /* the above code is for |arg| < 2**63 only */
4816 }
4817}
4818
4819void helper_frndint(void)
4820{
4821 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4822}
4823
4824void helper_fscale(void)
4825{
4826 ST0 = ldexp (ST0, (int)(ST1));
4827}
4828
4829void helper_fsin(void)
4830{
4831 CPU86_LDouble fptemp;
4832
4833 fptemp = ST0;
4834 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4835 env->fpus |= 0x400;
4836 } else {
4837 ST0 = sin(fptemp);
4838 env->fpus &= (~0x400); /* C2 <-- 0 */
4839 /* the above code is for |arg| < 2**53 only */
4840 }
4841}
4842
4843void helper_fcos(void)
4844{
4845 CPU86_LDouble fptemp;
4846
4847 fptemp = ST0;
4848 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4849 env->fpus |= 0x400;
4850 } else {
4851 ST0 = cos(fptemp);
4852 env->fpus &= (~0x400); /* C2 <-- 0 */
4853 /* the above code is for |arg| < 2**63 only */
4854 }
4855}
4856
4857void helper_fxam_ST0(void)
4858{
4859 CPU86_LDoubleU temp;
4860 int expdif;
4861
4862 temp.d = ST0;
4863
4864 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4865 if (SIGND(temp))
4866 env->fpus |= 0x200; /* C1 <-- 1 */
4867
4868 /* XXX: test fptags too */
4869 expdif = EXPD(temp);
4870 if (expdif == MAXEXPD) {
4871#ifdef USE_X86LDOUBLE
4872 if (MANTD(temp) == 0x8000000000000000ULL)
4873#else
4874 if (MANTD(temp) == 0)
4875#endif
4876 env->fpus |= 0x500 /*Infinity*/;
4877 else
4878 env->fpus |= 0x100 /*NaN*/;
4879 } else if (expdif == 0) {
4880 if (MANTD(temp) == 0)
4881 env->fpus |= 0x4000 /*Zero*/;
4882 else
4883 env->fpus |= 0x4400 /*Denormal*/;
4884 } else {
4885 env->fpus |= 0x400;
4886 }
4887}
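/* The classifications above encode the usual FXAM results in C3/C2/C0:
   0x0100 (C0) NaN, 0x0400 (C2) normal finite, 0x0500 (C2|C0) infinity,
   0x4000 (C3) zero, 0x4400 (C3|C2) denormal, with C1 (0x0200) holding the
   sign.  Empty registers are not reported because fptags is not consulted
   (see the XXX above). */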
4888
4889void helper_fstenv(target_ulong ptr, int data32)
4890{
4891 int fpus, fptag, exp, i;
4892 uint64_t mant;
4893 CPU86_LDoubleU tmp;
4894
4895 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4896 fptag = 0;
4897 for (i=7; i>=0; i--) {
4898 fptag <<= 2;
4899 if (env->fptags[i]) {
4900 fptag |= 3;
4901 } else {
4902 tmp.d = env->fpregs[i].d;
4903 exp = EXPD(tmp);
4904 mant = MANTD(tmp);
4905 if (exp == 0 && mant == 0) {
4906 /* zero */
4907 fptag |= 1;
4908 } else if (exp == 0 || exp == MAXEXPD
4909#ifdef USE_X86LDOUBLE
4910 || (mant & (1LL << 63)) == 0
4911#endif
4912 ) {
4913 /* NaNs, infinity, denormal */
4914 fptag |= 2;
4915 }
4916 }
4917 }
4918 if (data32) {
4919 /* 32 bit */
4920 stl(ptr, env->fpuc);
4921 stl(ptr + 4, fpus);
4922 stl(ptr + 8, fptag);
4923 stl(ptr + 12, 0); /* fpip */
4924 stl(ptr + 16, 0); /* fpcs */
4925 stl(ptr + 20, 0); /* fpoo */
4926 stl(ptr + 24, 0); /* fpos */
4927 } else {
4928 /* 16 bit */
4929 stw(ptr, env->fpuc);
4930 stw(ptr + 2, fpus);
4931 stw(ptr + 4, fptag);
4932 stw(ptr + 6, 0);
4933 stw(ptr + 8, 0);
4934 stw(ptr + 10, 0);
4935 stw(ptr + 12, 0);
4936 }
4937}
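/* The tag word built above uses the standard 2-bit encoding per register:
   00 valid, 01 zero, 10 special (NaN, infinity or denormal), 11 empty. */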
4938
4939void helper_fldenv(target_ulong ptr, int data32)
4940{
4941 int i, fpus, fptag;
4942
4943 if (data32) {
4944 env->fpuc = lduw(ptr);
4945 fpus = lduw(ptr + 4);
4946 fptag = lduw(ptr + 8);
4947 }
4948 else {
4949 env->fpuc = lduw(ptr);
4950 fpus = lduw(ptr + 2);
4951 fptag = lduw(ptr + 4);
4952 }
4953 env->fpstt = (fpus >> 11) & 7;
4954 env->fpus = fpus & ~0x3800;
4955 for(i = 0;i < 8; i++) {
4956 env->fptags[i] = ((fptag & 3) == 3);
4957 fptag >>= 2;
4958 }
4959}
4960
4961void helper_fsave(target_ulong ptr, int data32)
4962{
4963 CPU86_LDouble tmp;
4964 int i;
4965
4966 helper_fstenv(ptr, data32);
4967
4968 ptr += (14 << data32);
4969 for(i = 0;i < 8; i++) {
4970 tmp = ST(i);
4971 helper_fstt(tmp, ptr);
4972 ptr += 10;
4973 }
4974
4975 /* fninit */
4976 env->fpus = 0;
4977 env->fpstt = 0;
4978 env->fpuc = 0x37f;
4979 env->fptags[0] = 1;
4980 env->fptags[1] = 1;
4981 env->fptags[2] = 1;
4982 env->fptags[3] = 1;
4983 env->fptags[4] = 1;
4984 env->fptags[5] = 1;
4985 env->fptags[6] = 1;
4986 env->fptags[7] = 1;
4987}
4988
4989void helper_frstor(target_ulong ptr, int data32)
4990{
4991 CPU86_LDouble tmp;
4992 int i;
4993
4994 helper_fldenv(ptr, data32);
4995 ptr += (14 << data32);
4996
4997 for(i = 0;i < 8; i++) {
4998 tmp = helper_fldt(ptr);
4999 ST(i) = tmp;
5000 ptr += 10;
5001 }
5002}
5003
5004void helper_fxsave(target_ulong ptr, int data64)
5005{
5006 int fpus, fptag, i, nb_xmm_regs;
5007 CPU86_LDouble tmp;
5008 target_ulong addr;
5009
5010 /* The operand must be 16 byte aligned */
5011 if (ptr & 0xf) {
5012 raise_exception(EXCP0D_GPF);
5013 }
5014
5015 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5016 fptag = 0;
5017 for(i = 0; i < 8; i++) {
5018 fptag |= (env->fptags[i] << i);
5019 }
5020 stw(ptr, env->fpuc);
5021 stw(ptr + 2, fpus);
5022 stw(ptr + 4, fptag ^ 0xff);
5023#ifdef TARGET_X86_64
5024 if (data64) {
5025 stq(ptr + 0x08, 0); /* rip */
5026 stq(ptr + 0x10, 0); /* rdp */
5027 } else
5028#endif
5029 {
5030 stl(ptr + 0x08, 0); /* eip */
5031 stl(ptr + 0x0c, 0); /* sel */
5032 stl(ptr + 0x10, 0); /* dp */
5033 stl(ptr + 0x14, 0); /* sel */
5034 }
5035
5036 addr = ptr + 0x20;
5037 for(i = 0;i < 8; i++) {
5038 tmp = ST(i);
5039 helper_fstt(tmp, addr);
5040 addr += 16;
5041 }
5042
5043 if (env->cr[4] & CR4_OSFXSR_MASK) {
5044 /* XXX: finish it */
5045 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5046 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5047 if (env->hflags & HF_CS64_MASK)
5048 nb_xmm_regs = 16;
5049 else
5050 nb_xmm_regs = 8;
5051 addr = ptr + 0xa0;
5052 /* Fast FXSAVE leaves out the XMM registers */
5053 if (!(env->efer & MSR_EFER_FFXSR)
5054 || (env->hflags & HF_CPL_MASK)
5055 || !(env->hflags & HF_LMA_MASK)) {
5056 for(i = 0; i < nb_xmm_regs; i++) {
5057 stq(addr, env->xmm_regs[i].XMM_Q(0));
5058 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5059 addr += 16;
5060 }
5061 }
5062 }
5063}
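/* Layout of the FXSAVE area as written here and read back by helper_fxrstor:
   0x00 FCW, 0x02 FSW, 0x04 abridged tag (one bit per register, set for
   non-empty registers, hence the ^ 0xff on the empty flags), 0x08
   instruction/data pointers (zeroed), 0x18 MXCSR, 0x1c MXCSR_MASK, 0x20
   ST0-ST7 in 16-byte slots, 0xa0 the XMM registers in 16-byte slots. */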
5064
5065void helper_fxrstor(target_ulong ptr, int data64)
5066{
5067 int i, fpus, fptag, nb_xmm_regs;
5068 CPU86_LDouble tmp;
5069 target_ulong addr;
5070
5071 /* The operand must be 16 byte aligned */
5072 if (ptr & 0xf) {
5073 raise_exception(EXCP0D_GPF);
5074 }
5075
5076 env->fpuc = lduw(ptr);
5077 fpus = lduw(ptr + 2);
5078 fptag = lduw(ptr + 4);
5079 env->fpstt = (fpus >> 11) & 7;
5080 env->fpus = fpus & ~0x3800;
5081 fptag ^= 0xff;
5082 for(i = 0;i < 8; i++) {
5083 env->fptags[i] = ((fptag >> i) & 1);
5084 }
5085
5086 addr = ptr + 0x20;
5087 for(i = 0;i < 8; i++) {
5088 tmp = helper_fldt(addr);
5089 ST(i) = tmp;
5090 addr += 16;
5091 }
5092
5093 if (env->cr[4] & CR4_OSFXSR_MASK) {
5094 /* XXX: finish it */
5095 env->mxcsr = ldl(ptr + 0x18);
5096 //ldl(ptr + 0x1c);
5097 if (env->hflags & HF_CS64_MASK)
5098 nb_xmm_regs = 16;
5099 else
5100 nb_xmm_regs = 8;
5101 addr = ptr + 0xa0;
5102 /* Fast FXRESTORE leaves out the XMM registers */
5103 if (!(env->efer & MSR_EFER_FFXSR)
5104 || (env->hflags & HF_CPL_MASK)
5105 || !(env->hflags & HF_LMA_MASK)) {
5106 for(i = 0; i < nb_xmm_regs; i++) {
5107#if !defined(VBOX) || __GNUC__ < 4
5108 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5109 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5110#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5111# if 1
5112 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5113 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5114 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5115 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5116# else
5117 /* this works fine on Mac OS X, gcc 4.0.1 */
5118 uint64_t u64 = ldq(addr);
5119 env->xmm_regs[i].XMM_Q(0) = u64;
5120 u64 = ldq(addr + 8);
5121 env->xmm_regs[i].XMM_Q(1) = u64;
5122# endif
5123#endif
5124 addr += 16;
5125 }
5126 }
5127 }
5128}
5129
5130#ifndef USE_X86LDOUBLE
5131
5132void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5133{
5134 CPU86_LDoubleU temp;
5135 int e;
5136
5137 temp.d = f;
5138 /* mantissa */
5139 *pmant = (MANTD(temp) << 11) | (1LL << 63);
5140 /* exponent + sign */
5141 e = EXPD(temp) - EXPBIAS + 16383;
5142 e |= SIGND(temp) >> 16;
5143 *pexp = e;
5144}
5145
5146CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5147{
5148 CPU86_LDoubleU temp;
5149 int e;
5150 uint64_t ll;
5151
5152 /* XXX: handle overflow ? */
5153 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
5154 e |= (upper >> 4) & 0x800; /* sign */
5155 ll = (mant >> 11) & ((1LL << 52) - 1);
5156#ifdef __arm__
5157 temp.l.upper = (e << 20) | (ll >> 32);
5158 temp.l.lower = ll;
5159#else
5160 temp.ll = ll | ((uint64_t)e << 52);
5161#endif
5162 return temp.d;
5163}
5164
5165#else
5166
5167void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5168{
5169 CPU86_LDoubleU temp;
5170
5171 temp.d = f;
5172 *pmant = temp.l.lower;
5173 *pexp = temp.l.upper;
5174}
5175
5176CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5177{
5178 CPU86_LDoubleU temp;
5179
5180 temp.l.upper = upper;
5181 temp.l.lower = mant;
5182 return temp.d;
5183}
5184#endif
5185
5186#ifdef TARGET_X86_64
5187
5188//#define DEBUG_MULDIV
5189
5190static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
5191{
5192 *plow += a;
5193 /* carry test */
5194 if (*plow < a)
5195 (*phigh)++;
5196 *phigh += b;
5197}
5198
5199static void neg128(uint64_t *plow, uint64_t *phigh)
5200{
5201 *plow = ~ *plow;
5202 *phigh = ~ *phigh;
5203 add128(plow, phigh, 1, 0);
5204}
5205
5206/* return TRUE if overflow */
5207static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5208{
5209 uint64_t q, r, a1, a0;
5210 int i, qb, ab;
5211
5212 a0 = *plow;
5213 a1 = *phigh;
5214 if (a1 == 0) {
5215 q = a0 / b;
5216 r = a0 % b;
5217 *plow = q;
5218 *phigh = r;
5219 } else {
5220 if (a1 >= b)
5221 return 1;
5222 /* XXX: use a better algorithm */
5223 for(i = 0; i < 64; i++) {
5224 ab = a1 >> 63;
5225 a1 = (a1 << 1) | (a0 >> 63);
5226 if (ab || a1 >= b) {
5227 a1 -= b;
5228 qb = 1;
5229 } else {
5230 qb = 0;
5231 }
5232 a0 = (a0 << 1) | qb;
5233 }
5234#if defined(DEBUG_MULDIV)
5235 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5236 *phigh, *plow, b, a0, a1);
5237#endif
5238 *plow = a0;
5239 *phigh = a1;
5240 }
5241 return 0;
5242}
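/* div64 is a bit-serial restoring division of a 128-bit dividend by a 64-bit
   divisor: the fast path handles a zero high half, otherwise 64 iterations
   shift the dividend left and subtract b from the high half whenever it fits,
   leaving the quotient in *plow and the remainder in *phigh.  E.g. dividing
   2^64 (high = 1, low = 0) by 3 yields quotient 0x5555555555555555 and
   remainder 1.  Overflow is reported when the quotient cannot fit in 64 bits
   (a1 >= b up front). */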
5243
5244/* return TRUE if overflow */
5245static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5246{
5247 int sa, sb;
5248 sa = ((int64_t)*phigh < 0);
5249 if (sa)
5250 neg128(plow, phigh);
5251 sb = (b < 0);
5252 if (sb)
5253 b = -b;
5254 if (div64(plow, phigh, b) != 0)
5255 return 1;
5256 if (sa ^ sb) {
5257 if (*plow > (1ULL << 63))
5258 return 1;
5259 *plow = - *plow;
5260 } else {
5261 if (*plow >= (1ULL << 63))
5262 return 1;
5263 }
5264 if (sa)
5265 *phigh = - *phigh;
5266 return 0;
5267}
5268
5269void helper_mulq_EAX_T0(target_ulong t0)
5270{
5271 uint64_t r0, r1;
5272
5273 mulu64(&r0, &r1, EAX, t0);
5274 EAX = r0;
5275 EDX = r1;
5276 CC_DST = r0;
5277 CC_SRC = r1;
5278}
5279
5280void helper_imulq_EAX_T0(target_ulong t0)
5281{
5282 uint64_t r0, r1;
5283
5284 muls64(&r0, &r1, EAX, t0);
5285 EAX = r0;
5286 EDX = r1;
5287 CC_DST = r0;
5288 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5289}
5290
5291target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
5292{
5293 uint64_t r0, r1;
5294
5295 muls64(&r0, &r1, t0, t1);
5296 CC_DST = r0;
5297 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5298 return r0;
5299}
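/* For the signed multiplies above, CC_SRC records whether the 128-bit product
   overflows 64 bits, i.e. whether the high half differs from the sign
   extension of the low half; the lazy flag code turns a non-zero CC_SRC into
   CF and OF for IMUL. */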
5300
5301void helper_divq_EAX(target_ulong t0)
5302{
5303 uint64_t r0, r1;
5304 if (t0 == 0) {
5305 raise_exception(EXCP00_DIVZ);
5306 }
5307 r0 = EAX;
5308 r1 = EDX;
5309 if (div64(&r0, &r1, t0))
5310 raise_exception(EXCP00_DIVZ);
5311 EAX = r0;
5312 EDX = r1;
5313}
5314
5315void helper_idivq_EAX(target_ulong t0)
5316{
5317 uint64_t r0, r1;
5318 if (t0 == 0) {
5319 raise_exception(EXCP00_DIVZ);
5320 }
5321 r0 = EAX;
5322 r1 = EDX;
5323 if (idiv64(&r0, &r1, t0))
5324 raise_exception(EXCP00_DIVZ);
5325 EAX = r0;
5326 EDX = r1;
5327}
5328#endif
5329
5330static void do_hlt(void)
5331{
5332 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
5333 env->halted = 1;
5334 env->exception_index = EXCP_HLT;
5335 cpu_loop_exit();
5336}
5337
5338void helper_hlt(int next_eip_addend)
5339{
5340 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
5341 EIP += next_eip_addend;
5342
5343 do_hlt();
5344}
5345
5346void helper_monitor(target_ulong ptr)
5347{
5348#ifdef VBOX
5349 if ((uint32_t)ECX > 1)
5350 raise_exception(EXCP0D_GPF);
5351#else /* !VBOX */
5352 if ((uint32_t)ECX != 0)
5353 raise_exception(EXCP0D_GPF);
5354#endif /* !VBOX */
5355 /* XXX: store address ? */
5356 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
5357}
5358
5359void helper_mwait(int next_eip_addend)
5360{
5361 if ((uint32_t)ECX != 0)
5362 raise_exception(EXCP0D_GPF);
5363#ifdef VBOX
5364 helper_hlt(next_eip_addend);
5365#else /* !VBOX */
5366 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5367 EIP += next_eip_addend;
5368
5369 /* XXX: not complete but not completely erroneous */
5370 if (env->cpu_index != 0 || env->next_cpu != NULL) {
5371 /* more than one CPU: do not sleep because another CPU may
5372 wake this one */
5373 } else {
5374 do_hlt();
5375 }
5376#endif /* !VBOX */
5377}
5378
5379void helper_debug(void)
5380{
5381 env->exception_index = EXCP_DEBUG;
5382 cpu_loop_exit();
5383}
5384
5385void helper_reset_rf(void)
5386{
5387 env->eflags &= ~RF_MASK;
5388}
5389
5390void helper_raise_interrupt(int intno, int next_eip_addend)
5391{
5392 raise_interrupt(intno, 1, 0, next_eip_addend);
5393}
5394
5395void helper_raise_exception(int exception_index)
5396{
5397 raise_exception(exception_index);
5398}
5399
5400void helper_cli(void)
5401{
5402 env->eflags &= ~IF_MASK;
5403}
5404
5405void helper_sti(void)
5406{
5407 env->eflags |= IF_MASK;
5408}
5409
5410#ifdef VBOX
5411void helper_cli_vme(void)
5412{
5413 env->eflags &= ~VIF_MASK;
5414}
5415
5416void helper_sti_vme(void)
5417{
5418 /* First check, then change eflags according to the AMD manual */
5419 if (env->eflags & VIP_MASK) {
5420 raise_exception(EXCP0D_GPF);
5421 }
5422 env->eflags |= VIF_MASK;
5423}
5424#endif /* VBOX */
5425
5426#if 0
5427/* vm86plus instructions */
5428void helper_cli_vm(void)
5429{
5430 env->eflags &= ~VIF_MASK;
5431}
5432
5433void helper_sti_vm(void)
5434{
5435 env->eflags |= VIF_MASK;
5436 if (env->eflags & VIP_MASK) {
5437 raise_exception(EXCP0D_GPF);
5438 }
5439}
5440#endif
5441
5442void helper_set_inhibit_irq(void)
5443{
5444 env->hflags |= HF_INHIBIT_IRQ_MASK;
5445}
5446
5447void helper_reset_inhibit_irq(void)
5448{
5449 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5450}
5451
5452void helper_boundw(target_ulong a0, int v)
5453{
5454 int low, high;
5455 low = ldsw(a0);
5456 high = ldsw(a0 + 2);
5457 v = (int16_t)v;
5458 if (v < low || v > high) {
5459 raise_exception(EXCP05_BOUND);
5460 }
5461}
5462
5463void helper_boundl(target_ulong a0, int v)
5464{
5465 int low, high;
5466 low = ldl(a0);
5467 high = ldl(a0 + 4);
5468 if (v < low || v > high) {
5469 raise_exception(EXCP05_BOUND);
5470 }
5471}
5472
5473static float approx_rsqrt(float a)
5474{
5475 return 1.0 / sqrt(a);
5476}
5477
5478static float approx_rcp(float a)
5479{
5480 return 1.0 / a;
5481}
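/* Note: real hardware computes the RCPSS/RSQRTSS family as ~12-bit
   approximations (relative error at most 1.5 * 2^-12); these helpers simply
   return the fully precise reciprocal and reciprocal square root instead. */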
5482
5483#if !defined(CONFIG_USER_ONLY)
5484
5485#define MMUSUFFIX _mmu
5486
5487#define SHIFT 0
5488#include "softmmu_template.h"
5489
5490#define SHIFT 1
5491#include "softmmu_template.h"
5492
5493#define SHIFT 2
5494#include "softmmu_template.h"
5495
5496#define SHIFT 3
5497#include "softmmu_template.h"
5498
5499#endif
5500
5501#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
5502/* This code assumes that real physical addresses always fit into a host CPU register,
5503 which is wrong in general but true for our current use cases. */
5504RTCCUINTREG REGPARM __ldb_vbox_phys(RTCCUINTREG addr)
5505{
5506 return remR3PhysReadS8(addr);
5507}
5508RTCCUINTREG REGPARM __ldub_vbox_phys(RTCCUINTREG addr)
5509{
5510 return remR3PhysReadU8(addr);
5511}
5512void REGPARM __stb_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5513{
5514 remR3PhysWriteU8(addr, val);
5515}
5516RTCCUINTREG REGPARM __ldw_vbox_phys(RTCCUINTREG addr)
5517{
5518 return remR3PhysReadS16(addr);
5519}
5520RTCCUINTREG REGPARM __lduw_vbox_phys(RTCCUINTREG addr)
5521{
5522 return remR3PhysReadU16(addr);
5523}
5524void REGPARM __stw_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5525{
5526 remR3PhysWriteU16(addr, val);
5527}
5528RTCCUINTREG REGPARM __ldl_vbox_phys(RTCCUINTREG addr)
5529{
5530 return remR3PhysReadS32(addr);
5531}
5532RTCCUINTREG REGPARM __ldul_vbox_phys(RTCCUINTREG addr)
5533{
5534 return remR3PhysReadU32(addr);
5535}
5536void REGPARM __stl_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5537{
5538 remR3PhysWriteU32(addr, val);
5539}
5540uint64_t REGPARM __ldq_vbox_phys(RTCCUINTREG addr)
5541{
5542 return remR3PhysReadU64(addr);
5543}
5544void REGPARM __stq_vbox_phys(RTCCUINTREG addr, uint64_t val)
5545{
5546 remR3PhysWriteU64(addr, val);
5547}
5548#endif /* VBOX */
5549
5550#if !defined(CONFIG_USER_ONLY)
5551/* try to fill the TLB and return an exception if error. If retaddr is
5552 NULL, it means that the function was called in C code (i.e. not
5553 from generated code or from helper.c) */
5554/* XXX: fix it to restore all registers */
5555void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5556{
5557 TranslationBlock *tb;
5558 int ret;
5559 unsigned long pc;
5560 CPUX86State *saved_env;
5561
5562 /* XXX: hack to restore env in all cases, even if not called from
5563 generated code */
5564 saved_env = env;
5565 env = cpu_single_env;
5566
5567 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
5568 if (ret) {
5569 if (retaddr) {
5570 /* now we have a real cpu fault */
5571 pc = (unsigned long)retaddr;
5572 tb = tb_find_pc(pc);
5573 if (tb) {
5574 /* the PC is inside the translated code. It means that we have
5575 a virtual CPU fault */
5576 cpu_restore_state(tb, env, pc, NULL);
5577 }
5578 }
5579 raise_exception_err(env->exception_index, env->error_code);
5580 }
5581 env = saved_env;
5582}
5583#endif
5584
5585#ifdef VBOX
5586
5587/**
5588 * Correctly computes the eflags.
5589 * @returns eflags.
5590 * @param env1 CPU environment.
5591 */
5592uint32_t raw_compute_eflags(CPUX86State *env1)
5593{
5594 CPUX86State *savedenv = env;
5595 uint32_t efl;
5596 env = env1;
5597 efl = compute_eflags();
5598 env = savedenv;
5599 return efl;
5600}
5601
5602/**
5603 * Reads byte from virtual address in guest memory area.
5604 * XXX: is it working for any addresses? swapped out pages?
5605 * @returns read data byte.
5606 * @param env1 CPU environment.
5607 * @param addr GC Virtual address.
5608 */
5609uint8_t read_byte(CPUX86State *env1, target_ulong addr)
5610{
5611 CPUX86State *savedenv = env;
5612 uint8_t u8;
5613 env = env1;
5614 u8 = ldub_kernel(addr);
5615 env = savedenv;
5616 return u8;
5617}
5618
5619/**
5620 * Reads a 16-bit word from a virtual address in the guest memory area.
5621 * XXX: is it working for any addresses? swapped out pages?
5622 * @returns read data word.
5623 * @param env1 CPU environment.
5624 * @param addr GC Virtual address.
5625 */
5626uint16_t read_word(CPUX86State *env1, target_ulong addr)
5627{
5628 CPUX86State *savedenv = env;
5629 uint16_t u16;
5630 env = env1;
5631 u16 = lduw_kernel(addr);
5632 env = savedenv;
5633 return u16;
5634}
5635
5636/**
5637 * Reads a 32-bit dword from a virtual address in the guest memory area.
5638 * XXX: is it working for any addresses? swapped out pages?
5639 * @returns read data dword.
5640 * @param env1 CPU environment.
5641 * @param addr GC Virtual address.
5642 */
5643uint32_t read_dword(CPUX86State *env1, target_ulong addr)
5644{
5645 CPUX86State *savedenv = env;
5646 uint32_t u32;
5647 env = env1;
5648 u32 = ldl_kernel(addr);
5649 env = savedenv;
5650 return u32;
5651}
5652
5653/**
5654 * Writes a byte to a virtual address in the guest memory area.
5655 * XXX: is it working for any addresses? swapped out pages?
5657 * @param env1 CPU environment.
5658 * @param addr GC Virtual address.
5659 * @param val byte value
5660 */
5661void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
5662{
5663 CPUX86State *savedenv = env;
5664 env = env1;
5665 stb(addr, val);
5666 env = savedenv;
5667}
5668
5669void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5670{
5671 CPUX86State *savedenv = env;
5672 env = env1;
5673 stw(addr, val);
5674 env = savedenv;
5675}
5676
5677void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5678{
5679 CPUX86State *savedenv = env;
5680 env = env1;
5681 stl(addr, val);
5682 env = savedenv;
5683}
5684
5685/**
5686 * Correctly loads a selector into a segment register, updating the internal
5687 * qemu data/caches.
5688 * @param env1 CPU environment.
5689 * @param seg_reg Segment register.
5690 * @param selector Selector to load.
5691 */
5692void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5693{
5694 CPUX86State *savedenv = env;
5695#ifdef FORCE_SEGMENT_SYNC
5696 jmp_buf old_buf;
5697#endif
5698
5699 env = env1;
5700
5701 if ( env->eflags & X86_EFL_VM
5702 || !(env->cr[0] & X86_CR0_PE))
5703 {
5704 load_seg_vm(seg_reg, selector);
5705
5706 env = savedenv;
5707
5708 /* Successful sync. */
5709 env1->segs[seg_reg].newselector = 0;
5710 }
5711 else
5712 {
5713 /* For some reason it works even without saving/restoring the jump buffer, and since this
5714 code is time critical - let's not do that */
5715#ifdef FORCE_SEGMENT_SYNC
5716 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5717#endif
5718 if (setjmp(env1->jmp_env) == 0)
5719 {
5720 if (seg_reg == R_CS)
5721 {
5722 uint32_t e1, e2;
5723 e1 = e2 = 0;
5724 load_segment(&e1, &e2, selector);
5725 cpu_x86_load_seg_cache(env, R_CS, selector,
5726 get_seg_base(e1, e2),
5727 get_seg_limit(e1, e2),
5728 e2);
5729 }
5730 else
5731 helper_load_seg(seg_reg, selector);
5732 /* We used to use tss_load_seg(seg_reg, selector); which, for some reason, ignored
5733 loading 0 selectors, which in turn led to subtle problems like #3588 */
5734
5735 env = savedenv;
5736
5737 /* Successful sync. */
5738 env1->segs[seg_reg].newselector = 0;
5739 }
5740 else
5741 {
5742 env = savedenv;
5743
5744 /* Postpone sync until the guest uses the selector. */
5745 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5746 env1->segs[seg_reg].newselector = selector;
5747 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5748 env1->exception_index = -1;
5749 env1->error_code = 0;
5750 env1->old_exception = -1;
5751 }
5752#ifdef FORCE_SEGMENT_SYNC
5753 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5754#endif
5755 }
5756
5757}
5758
5759DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5760{
5761 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
5762}
5763
5764
5765int emulate_single_instr(CPUX86State *env1)
5766{
5767 TranslationBlock *tb;
5768 TranslationBlock *current;
5769 int flags;
5770 uint8_t *tc_ptr;
5771 target_ulong old_eip;
5772
5773 /* ensures env is loaded! */
5774 CPUX86State *savedenv = env;
5775 env = env1;
5776
5777 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
5778
5779 current = env->current_tb;
5780 env->current_tb = NULL;
5781 flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
5782
5783 /*
5784 * Translate only one instruction.
5785 */
5786 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
5787 tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
5788 env->segs[R_CS].base, flags, 0);
5789
5790 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
5791
5792
5793 /* tb_link_phys: */
5794 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
5795 tb->jmp_next[0] = NULL;
5796 tb->jmp_next[1] = NULL;
5797 Assert(tb->jmp_next[0] == NULL);
5798 Assert(tb->jmp_next[1] == NULL);
5799 if (tb->tb_next_offset[0] != 0xffff)
5800 tb_reset_jump(tb, 0);
5801 if (tb->tb_next_offset[1] != 0xffff)
5802 tb_reset_jump(tb, 1);
5803
5804 /*
5805 * Execute it using emulation
5806 */
5807 old_eip = env->eip;
5808 env->current_tb = tb;
5809
5810 /*
5811 * eip remains the same for repeated instructions; no idea why qemu doesn't do a jump inside the generated code.
5812 * This is perhaps not a very safe hack.
5813 */
5814 while (old_eip == env->eip)
5815 {
5816 tc_ptr = tb->tc_ptr;
5817
5818#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
5819 int fake_ret;
5820 tcg_qemu_tb_exec(tc_ptr, fake_ret);
5821#else
5822 tcg_qemu_tb_exec(tc_ptr);
5823#endif
5824
5825 /*
5826 * Exit once we detect an external interrupt and interrupts are enabled
5827 */
5828 if ( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT|CPU_INTERRUPT_EXTERNAL_TIMER))
5829 || ( (env->eflags & IF_MASK)
5830 && !(env->hflags & HF_INHIBIT_IRQ_MASK)
5831 && (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) )
5832 )
5833 {
5834 break;
5835 }
5836 }
5837 env->current_tb = current;
5838
5839 tb_phys_invalidate(tb, -1);
5840 tb_free(tb);
5841/*
5842 Assert(tb->tb_next_offset[0] == 0xffff);
5843 Assert(tb->tb_next_offset[1] == 0xffff);
5844 Assert(tb->tb_next[0] == 0xffff);
5845 Assert(tb->tb_next[1] == 0xffff);
5846 Assert(tb->jmp_next[0] == NULL);
5847 Assert(tb->jmp_next[1] == NULL);
5848 Assert(tb->jmp_first == NULL); */
5849
5850 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
5851
5852 /*
5853 * Execute the next instruction when we encounter instruction fusing.
5854 */
5855 if (env->hflags & HF_INHIBIT_IRQ_MASK)
5856 {
5857 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
5858 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5859 emulate_single_instr(env);
5860 }
5861
5862 env = savedenv;
5863 return 0;
5864}
5865
5866/**
5867 * Correctly loads a new ldtr selector.
5868 *
5869 * @param env1 CPU environment.
5870 * @param selector Selector to load.
5871 */
5872void sync_ldtr(CPUX86State *env1, int selector)
5873{
5874 CPUX86State *saved_env = env;
5875 if (setjmp(env1->jmp_env) == 0)
5876 {
5877 env = env1;
5878 helper_lldt(selector);
5879 env = saved_env;
5880 }
5881 else
5882 {
5883 env = saved_env;
5884#ifdef VBOX_STRICT
5885 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
5886#endif
5887 }
5888}
5889
5890int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
5891 uint32_t *esp_ptr, int dpl)
5892{
5893 int type, index, shift;
5894
5895 CPUX86State *savedenv = env;
5896 env = env1;
5897
5898 if (!(env->tr.flags & DESC_P_MASK))
5899 cpu_abort(env, "invalid tss");
5900 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
5901 if ((type & 7) != 1)
5902 cpu_abort(env, "invalid tss type %d", type);
5903 shift = type >> 3;
5904 index = (dpl * 4 + 2) << shift;
5905 if (index + (4 << shift) - 1 > env->tr.limit)
5906 {
5907 env = savedenv;
5908 return 0;
5909 }
5910 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
5911
5912 if (shift == 0) {
5913 *esp_ptr = lduw_kernel(env->tr.base + index);
5914 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
5915 } else {
5916 *esp_ptr = ldl_kernel(env->tr.base + index);
5917 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
5918 }
5919
5920 env = savedenv;
5921 return 1;
5922}
5923
5924//*****************************************************************************
5925// Needs to be at the bottom of the file (overriding macros)
5926
5927static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
5928{
5929 return *(CPU86_LDouble *)ptr;
5930}
5931
5932static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5933{
5934 *(CPU86_LDouble *)ptr = f;
5935}
5936
5937#undef stw
5938#undef stl
5939#undef stq
5940#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
5941#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
5942#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
5943
5944//*****************************************************************************
5945void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
5946{
5947 int fpus, fptag, i, nb_xmm_regs;
5948 CPU86_LDouble tmp;
5949 uint8_t *addr;
5950 int data64 = !!(env->hflags & HF_LMA_MASK);
5951
5952 if (env->cpuid_features & CPUID_FXSR)
5953 {
5954 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5955 fptag = 0;
5956 for(i = 0; i < 8; i++) {
5957 fptag |= (env->fptags[i] << i);
5958 }
5959 stw(ptr, env->fpuc);
5960 stw(ptr + 2, fpus);
5961 stw(ptr + 4, fptag ^ 0xff);
5962
5963 addr = ptr + 0x20;
5964 for(i = 0;i < 8; i++) {
5965 tmp = ST(i);
5966 helper_fstt_raw(tmp, addr);
5967 addr += 16;
5968 }
5969
5970 if (env->cr[4] & CR4_OSFXSR_MASK) {
5971 /* XXX: finish it */
5972 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5973 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5974 nb_xmm_regs = 8 << data64;
5975 addr = ptr + 0xa0;
5976 for(i = 0; i < nb_xmm_regs; i++) {
5977#if __GNUC__ < 4
5978 stq(addr, env->xmm_regs[i].XMM_Q(0));
5979 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5980#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5981 stl(addr, env->xmm_regs[i].XMM_L(0));
5982 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
5983 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
5984 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
5985#endif
5986 addr += 16;
5987 }
5988 }
5989 }
5990 else
5991 {
5992 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
5993 int fptag;
5994
5995 fp->FCW = env->fpuc;
5996 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5997 fptag = 0;
5998 for (i=7; i>=0; i--) {
5999 fptag <<= 2;
6000 if (env->fptags[i]) {
6001 fptag |= 3;
6002 } else {
6003 /* the FPU automatically computes it */
6004 }
6005 }
6006 fp->FTW = fptag;
6007
6008 for(i = 0;i < 8; i++) {
6009 tmp = ST(i);
6010 helper_fstt_raw(tmp, &fp->regs[i].au8[0]);
6011 }
6012 }
6013}
6014
6015//*****************************************************************************
6016#undef lduw
6017#undef ldl
6018#undef ldq
6019#define lduw(a) *(uint16_t *)(a)
6020#define ldl(a) *(uint32_t *)(a)
6021#define ldq(a) *(uint64_t *)(a)
6022//*****************************************************************************
6023void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6024{
6025 int i, fpus, fptag, nb_xmm_regs;
6026 CPU86_LDouble tmp;
6027 uint8_t *addr;
6028 int data64 = !!(env->hflags & HF_LMA_MASK); /* don't use HF_CS64_MASK here as cs hasn't been synced when this function is called. */
6029
6030 if (env->cpuid_features & CPUID_FXSR)
6031 {
6032 env->fpuc = lduw(ptr);
6033 fpus = lduw(ptr + 2);
6034 fptag = lduw(ptr + 4);
6035 env->fpstt = (fpus >> 11) & 7;
6036 env->fpus = fpus & ~0x3800;
6037 fptag ^= 0xff;
6038 for(i = 0;i < 8; i++) {
6039 env->fptags[i] = ((fptag >> i) & 1);
6040 }
6041
6042 addr = ptr + 0x20;
6043 for(i = 0;i < 8; i++) {
6044 tmp = helper_fldt_raw(addr);
6045 ST(i) = tmp;
6046 addr += 16;
6047 }
6048
6049 if (env->cr[4] & CR4_OSFXSR_MASK) {
6050 /* XXX: finish it, endianness */
6051 env->mxcsr = ldl(ptr + 0x18);
6052 //ldl(ptr + 0x1c);
6053 nb_xmm_regs = 8 << data64;
6054 addr = ptr + 0xa0;
6055 for(i = 0; i < nb_xmm_regs; i++) {
6056#if HC_ARCH_BITS == 32
6057 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
6058 env->xmm_regs[i].XMM_L(0) = ldl(addr);
6059 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
6060 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
6061 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
6062#else
6063 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
6064 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
6065#endif
6066 addr += 16;
6067 }
6068 }
6069 }
6070 else
6071 {
6072 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6073 int fptag, j;
6074
6075 env->fpuc = fp->FCW;
6076 env->fpstt = (fp->FSW >> 11) & 7;
6077 env->fpus = fp->FSW & ~0x3800;
6078 fptag = fp->FTW;
6079 for(i = 0;i < 8; i++) {
6080 env->fptags[i] = ((fptag & 3) == 3);
6081 fptag >>= 2;
6082 }
6083 j = env->fpstt;
6084 for(i = 0;i < 8; i++) {
6085 tmp = helper_fldt_raw(&fp->regs[i].au8[0]);
6086 ST(i) = tmp;
6087 }
6088 }
6089}
6090//*****************************************************************************
6091//*****************************************************************************
6092
6093#endif /* VBOX */
6094
6095/* Secure Virtual Machine helpers */
6096
6097#if defined(CONFIG_USER_ONLY)
6098
6099void helper_vmrun(int aflag, int next_eip_addend)
6100{
6101}
6102void helper_vmmcall(void)
6103{
6104}
6105void helper_vmload(int aflag)
6106{
6107}
6108void helper_vmsave(int aflag)
6109{
6110}
6111void helper_stgi(void)
6112{
6113}
6114void helper_clgi(void)
6115{
6116}
6117void helper_skinit(void)
6118{
6119}
6120void helper_invlpga(int aflag)
6121{
6122}
6123void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6124{
6125}
6126void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6127{
6128}
6129
6130void helper_svm_check_io(uint32_t port, uint32_t param,
6131 uint32_t next_eip_addend)
6132{
6133}
6134#else
6135
6136static inline void svm_save_seg(target_phys_addr_t addr,
6137 const SegmentCache *sc)
6138{
6139 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6140 sc->selector);
6141 stq_phys(addr + offsetof(struct vmcb_seg, base),
6142 sc->base);
6143 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6144 sc->limit);
6145 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6146 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6147}
6148
6149static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6150{
6151 unsigned int flags;
6152
6153 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6154 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6155 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6156 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6157 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6158}
6159
6160static inline void svm_load_seg_cache(target_phys_addr_t addr,
6161 CPUState *env, int seg_reg)
6162{
6163 SegmentCache sc1, *sc = &sc1;
6164 svm_load_seg(addr, sc);
6165 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6166 sc->base, sc->limit, sc->flags);
6167}
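/* The VMCB keeps segment attributes in a packed 12-bit form: bits 0-7 are
   descriptor bits 40-47 (type, S, DPL, P) and bits 8-11 are descriptor bits
   52-55 (AVL, L, D/B, G).  svm_save_seg packs the cached flags into that
   form and svm_load_seg expands them back. */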
6168
6169void helper_vmrun(int aflag, int next_eip_addend)
6170{
6171 target_ulong addr;
6172 uint32_t event_inj;
6173 uint32_t int_ctl;
6174
6175 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
6176
6177 if (aflag == 2)
6178 addr = EAX;
6179 else
6180 addr = (uint32_t)EAX;
6181
6182 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
6183
6184 env->vm_vmcb = addr;
6185
6186 /* save the current CPU state in the hsave page */
6187 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6188 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6189
6190 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6191 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6192
6193 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
6194 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
6195 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
6196 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
6197 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
6198 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
6199
6200 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
6201 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
6202
6203 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
6204 &env->segs[R_ES]);
6205 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
6206 &env->segs[R_CS]);
6207 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
6208 &env->segs[R_SS]);
6209 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
6210 &env->segs[R_DS]);
6211
6212 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
6213 EIP + next_eip_addend);
6214 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
6215 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
6216
6217 /* load the interception bitmaps so we do not need to access the
6218 vmcb in svm mode */
6219 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
6220 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
6221 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
6222 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
6223 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
6224 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
6225
6226 /* enable intercepts */
6227 env->hflags |= HF_SVMI_MASK;
6228
6229 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
6230
6231 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
6232 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
6233
6234 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
6235 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
6236
6237 /* clear exit_info_2 so we behave like the real hardware */
6238 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6239
6240 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
6241 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
6242 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
6243 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
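    /* Virtual interrupt state: when the VMCB asks for V_INTR_MASKING the
       guest runs with its own copy of the TPR (v_tpr) and the state of the
       host's IF flag is latched in HF2_HIF_MASK while the guest executes. */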
6244 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6245 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6246 if (int_ctl & V_INTR_MASKING_MASK) {
6247 env->v_tpr = int_ctl & V_TPR_MASK;
6248 env->hflags2 |= HF2_VINTR_MASK;
6249 if (env->eflags & IF_MASK)
6250 env->hflags2 |= HF2_HIF_MASK;
6251 }
6252
6253 cpu_load_efer(env,
6254 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6255 env->eflags = 0;
6256 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
6257 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6258 CC_OP = CC_OP_EFLAGS;
6259
6260 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
6261 env, R_ES);
6262 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6263 env, R_CS);
6264 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6265 env, R_SS);
6266 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6267 env, R_DS);
6268
6269 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
6270 env->eip = EIP;
6271 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
6272 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
6273 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
6274 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6275 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
6276
6277 /* FIXME: guest state consistency checks */
6278
6279 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6280 case TLB_CONTROL_DO_NOTHING:
6281 break;
6282 case TLB_CONTROL_FLUSH_ALL_ASID:
6283 /* FIXME: this is not 100% correct but should work for now */
6284 tlb_flush(env, 1);
6285 break;
6286 }
6287
6288 env->hflags2 |= HF2_GIF_MASK;
6289
6290 if (int_ctl & V_IRQ_MASK) {
6291 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
6292 }
6293
6294 /* maybe we need to inject an event */
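    /* event_inj follows the VMCB EVENTINJ layout: the low byte holds the
       vector, SVM_EVTINJ_TYPE_MASK selects interrupt/NMI/exception/software
       interrupt, SVM_EVTINJ_VALID_ERR tells whether event_inj_err carries an
       error code, and nothing is injected unless SVM_EVTINJ_VALID is set. */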
6295 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
6296 if (event_inj & SVM_EVTINJ_VALID) {
6297 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
6298 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
6299 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
6300
6301 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
6302 /* FIXME: need to implement valid_err */
6303 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
6304 case SVM_EVTINJ_TYPE_INTR:
6305 env->exception_index = vector;
6306 env->error_code = event_inj_err;
6307 env->exception_is_int = 0;
6308 env->exception_next_eip = -1;
6309 qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
6310 /* XXX: is it always correct ? */
6311 do_interrupt(vector, 0, 0, 0, 1);
6312 break;
6313 case SVM_EVTINJ_TYPE_NMI:
6314 env->exception_index = EXCP02_NMI;
6315 env->error_code = event_inj_err;
6316 env->exception_is_int = 0;
6317 env->exception_next_eip = EIP;
6318 qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
6319 cpu_loop_exit();
6320 break;
6321 case SVM_EVTINJ_TYPE_EXEPT:
6322 env->exception_index = vector;
6323 env->error_code = event_inj_err;
6324 env->exception_is_int = 0;
6325 env->exception_next_eip = -1;
6326 qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
6327 cpu_loop_exit();
6328 break;
6329 case SVM_EVTINJ_TYPE_SOFT:
6330 env->exception_index = vector;
6331 env->error_code = event_inj_err;
6332 env->exception_is_int = 1;
6333 env->exception_next_eip = EIP;
6334 qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
6335 cpu_loop_exit();
6336 break;
6337 }
6338 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
6339 }
6340}
6341
6342void helper_vmmcall(void)
6343{
6344 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
6345 raise_exception(EXCP06_ILLOP);
6346}
6347
6348void helper_vmload(int aflag)
6349{
6350 target_ulong addr;
6351 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
6352
6353 if (aflag == 2)
6354 addr = EAX;
6355 else
6356 addr = (uint32_t)EAX;
6357
6358 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6359 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6360 env->segs[R_FS].base);
6361
6362 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
6363 env, R_FS);
6364 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
6365 env, R_GS);
6366 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
6367 &env->tr);
6368 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
6369 &env->ldt);
6370
6371#ifdef TARGET_X86_64
6372 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
6373 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
6374 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
6375 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
6376#endif
6377 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
6378 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
6379 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
6380 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
6381}
6382
6383void helper_vmsave(int aflag)
6384{
6385 target_ulong addr;
6386 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
6387
6388 if (aflag == 2)
6389 addr = EAX;
6390 else
6391 addr = (uint32_t)EAX;
6392
6393 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6394 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6395 env->segs[R_FS].base);
6396
6397 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
6398 &env->segs[R_FS]);
6399 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
6400 &env->segs[R_GS]);
6401 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
6402 &env->tr);
6403 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
6404 &env->ldt);
6405
6406#ifdef TARGET_X86_64
6407 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
6408 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
6409 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
6410 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
6411#endif
6412 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
6413 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
6414 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
6415 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
6416}
6417
6418void helper_stgi(void)
6419{
6420 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
6421 env->hflags2 |= HF2_GIF_MASK;
6422}
6423
6424void helper_clgi(void)
6425{
6426 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
6427 env->hflags2 &= ~HF2_GIF_MASK;
6428}
6429
6430void helper_skinit(void)
6431{
6432 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
6433 /* XXX: not implemented */
6434 raise_exception(EXCP06_ILLOP);
6435}
6436
6437void helper_invlpga(int aflag)
6438{
6439 target_ulong addr;
6440 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6441
6442 if (aflag == 2)
6443 addr = EAX;
6444 else
6445 addr = (uint32_t)EAX;
6446
6447    /* XXX: could use the ASID to decide whether the flush is actually
6448       needed */
6449 tlb_flush_page(env, addr);
6450}
6451
6452void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6453{
6454 if (likely(!(env->hflags & HF_SVMI_MASK)))
6455 return;
6456#ifndef VBOX
6457 switch(type) {
6458 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
6459 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
6460 helper_vmexit(type, param);
6461 }
6462 break;
6463 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
6464 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
6465 helper_vmexit(type, param);
6466 }
6467 break;
6468 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
6469 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
6470 helper_vmexit(type, param);
6471 }
6472 break;
6473 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
6474 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
6475 helper_vmexit(type, param);
6476 }
6477 break;
6478 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
6479 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
6480 helper_vmexit(type, param);
6481 }
6482 break;
6483 case SVM_EXIT_MSR:
6484 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
6485 /* FIXME: this should be read in at vmrun (faster this way?) */
6486 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
6487 uint32_t t0, t1;
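            /* The MSR permission map keeps two intercept bits per MSR (read
               and write) in three 2K blocks, one per architectural MSR range
               handled below; t1 ends up as the byte offset into the map and
               t0 as the bit offset within that byte, with param selecting
               the read or write bit. */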
6488 switch((uint32_t)ECX) {
6489 case 0 ... 0x1fff:
6490 t0 = (ECX * 2) % 8;
6491 t1 = ECX / 8;
6492 break;
6493 case 0xc0000000 ... 0xc0001fff:
6494 t0 = (8192 + ECX - 0xc0000000) * 2;
6495 t1 = (t0 / 8);
6496 t0 %= 8;
6497 break;
6498 case 0xc0010000 ... 0xc0011fff:
6499 t0 = (16384 + ECX - 0xc0010000) * 2;
6500 t1 = (t0 / 8);
6501 t0 %= 8;
6502 break;
6503 default:
6504 helper_vmexit(type, param);
6505 t0 = 0;
6506 t1 = 0;
6507 break;
6508 }
6509 if (ldub_phys(addr + t1) & ((1 << param) << t0))
6510 helper_vmexit(type, param);
6511 }
6512 break;
6513 default:
6514 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
6515 helper_vmexit(type, param);
6516 }
6517 break;
6518 }
6519#else /* VBOX */
6520 AssertMsgFailed(("We shouldn't be here, HWACCM supported differently!"));
6521#endif /* VBOX */
6522}
6523
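/* IN/OUT intercept check: the I/O permission map holds one intercept bit per
   port, and the access-size bits carried in param widen the mask so that a
   16- or 32-bit access triggers a #VMEXIT if any byte it touches is
   intercepted. */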
6524void helper_svm_check_io(uint32_t port, uint32_t param,
6525 uint32_t next_eip_addend)
6526{
6527 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
6528 /* FIXME: this should be read in at vmrun (faster this way?) */
6529 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
6530 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
6531 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
6532 /* next EIP */
6533 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
6534 env->eip + next_eip_addend);
6535 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
6536 }
6537 }
6538}
6539
6540/* Note: currently only 32 bits of exit_code are used */
6541void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6542{
6543 uint32_t int_ctl;
6544
6545 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6546 exit_code, exit_info_1,
6547 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6548 EIP);
6549
6550 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
6551 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6552 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6553 } else {
6554 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6555 }
6556
6557 /* Save the VM state in the vmcb */
6558 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6559 &env->segs[R_ES]);
6560 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6561 &env->segs[R_CS]);
6562 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6563 &env->segs[R_SS]);
6564 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6565 &env->segs[R_DS]);
6566
6567 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6568 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6569
6570 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6571 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6572
6573 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6574 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6575 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6576 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6577 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6578
6579 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6580 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6581 int_ctl |= env->v_tpr & V_TPR_MASK;
6582 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6583 int_ctl |= V_IRQ_MASK;
6584 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6585
6586 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6587 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6588 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6589 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6590 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6591 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6592 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6593
6594 /* Reload the host state from vm_hsave */
6595 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6596 env->hflags &= ~HF_SVMI_MASK;
6597 env->intercept = 0;
6598 env->intercept_exceptions = 0;
6599 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6600 env->tsc_offset = 0;
6601
6602 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6603 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6604
6605 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6606 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6607
6608 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6609 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6610 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6611 /* we need to set the efer after the crs so the hidden flags get
6612 set properly */
6613 cpu_load_efer(env,
6614 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6615 env->eflags = 0;
6616 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6617 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6618 CC_OP = CC_OP_EFLAGS;
6619
6620 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6621 env, R_ES);
6622 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6623 env, R_CS);
6624 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6625 env, R_SS);
6626 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6627 env, R_DS);
6628
6629 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6630 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6631 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6632
6633 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6634 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6635
6636 /* other setups */
6637 cpu_x86_set_cpl(env, 0);
6638 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6639 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6640
6641 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
6642 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
6643 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
6644 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
6645
6646 env->hflags2 &= ~HF2_GIF_MASK;
6647 /* FIXME: Resets the current ASID register to zero (host ASID). */
6648
6649 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6650
6651 /* Clears the TSC_OFFSET inside the processor. */
6652
6653 /* If the host is in PAE mode, the processor reloads the host's PDPEs
6654       from the page table indicated by the host's CR3. If the PDPEs contain
6655 illegal state, the processor causes a shutdown. */
6656
6657 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6658 env->cr[0] |= CR0_PE_MASK;
6659 env->eflags &= ~VM_MASK;
6660
6661 /* Disables all breakpoints in the host DR7 register. */
6662
6663 /* Checks the reloaded host state for consistency. */
6664
6665 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
6666 host's code segment or non-canonical (in the case of long mode), a
6667       #GP fault is delivered inside the host. */
6668
6669 /* remove any pending exception */
6670 env->exception_index = -1;
6671 env->error_code = 0;
6672 env->old_exception = -1;
6673
6674 cpu_loop_exit();
6675}
6676
6677#endif
6678
6679/* MMX/SSE */
6680/* XXX: optimize by storing fptt and fptags in the static cpu state */
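/* fptags[i] is 1 when FP/MMX register i is empty and 0 when it is valid:
   entering MMX mode marks all eight registers valid and resets the stack
   top, while EMMS tags them all empty again. */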
6681void helper_enter_mmx(void)
6682{
6683 env->fpstt = 0;
6684 *(uint32_t *)(env->fptags) = 0;
6685 *(uint32_t *)(env->fptags + 4) = 0;
6686}
6687
6688void helper_emms(void)
6689{
6690 /* set to empty state */
6691 *(uint32_t *)(env->fptags) = 0x01010101;
6692 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6693}
6694
6695/* XXX: suppress */
6696void helper_movq(void *d, void *s)
6697{
6698 *(uint64_t *)d = *(uint64_t *)s;
6699}
6700
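/* ops_sse.h and helper_template.h are instantiated several times, with SHIFT
   selecting the operand width of the generated helpers: MMX (64-bit) versus
   SSE (128-bit) registers for ops_sse.h, and byte/word/long/quad for
   helper_template.h. */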
6701#define SHIFT 0
6702#include "ops_sse.h"
6703
6704#define SHIFT 1
6705#include "ops_sse.h"
6706
6707#define SHIFT 0
6708#include "helper_template.h"
6709#undef SHIFT
6710
6711#define SHIFT 1
6712#include "helper_template.h"
6713#undef SHIFT
6714
6715#define SHIFT 2
6716#include "helper_template.h"
6717#undef SHIFT
6718
6719#ifdef TARGET_X86_64
6720
6721#define SHIFT 3
6722#include "helper_template.h"
6723#undef SHIFT
6724
6725#endif
6726
6727/* bit operations */
6728target_ulong helper_bsf(target_ulong t0)
6729{
6730 int count;
6731 target_ulong res;
6732
6733 res = t0;
6734 count = 0;
6735 while ((res & 1) == 0) {
6736 count++;
6737 res >>= 1;
6738 }
6739 return count;
6740}
6741
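/* helper_bsf above scans up from bit 0, so it assumes a non-zero operand (a
   zero operand would never terminate); the zero case is expected to be dealt
   with before the helper is invoked. helper_lzcnt returns the leading-zero
   count when a wordsize is given and the index of the most significant set
   bit when wordsize is 0, which is exactly what helper_bsr needs. */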
6742target_ulong helper_lzcnt(target_ulong t0, int wordsize)
6743{
6744 int count;
6745 target_ulong res, mask;
6746
6747 if (wordsize > 0 && t0 == 0) {
6748 return wordsize;
6749 }
6750 res = t0;
6751 count = TARGET_LONG_BITS - 1;
6752 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
6753 while ((res & mask) == 0) {
6754 count--;
6755 res <<= 1;
6756 }
6757 if (wordsize > 0) {
6758 return wordsize - 1 - count;
6759 }
6760 return count;
6761}
6762
6763target_ulong helper_bsr(target_ulong t0)
6764{
6765 return helper_lzcnt(t0, 0);
6766}
6767
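/* Lazy condition-code evaluation: CC_OP remembers which operation last set
   the flags and CC_SRC/CC_DST hold its operands/result, so the dispatchers
   below can reconstruct either all of the arithmetic EFLAGS bits or just CF
   on demand. */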
6768static int compute_all_eflags(void)
6769{
6770 return CC_SRC;
6771}
6772
6773static int compute_c_eflags(void)
6774{
6775 return CC_SRC & CC_C;
6776}
6777
6778uint32_t helper_cc_compute_all(int op)
6779{
6780 switch (op) {
6781 default: /* should never happen */ return 0;
6782
6783 case CC_OP_EFLAGS: return compute_all_eflags();
6784
6785 case CC_OP_MULB: return compute_all_mulb();
6786 case CC_OP_MULW: return compute_all_mulw();
6787 case CC_OP_MULL: return compute_all_mull();
6788
6789 case CC_OP_ADDB: return compute_all_addb();
6790 case CC_OP_ADDW: return compute_all_addw();
6791 case CC_OP_ADDL: return compute_all_addl();
6792
6793 case CC_OP_ADCB: return compute_all_adcb();
6794 case CC_OP_ADCW: return compute_all_adcw();
6795 case CC_OP_ADCL: return compute_all_adcl();
6796
6797 case CC_OP_SUBB: return compute_all_subb();
6798 case CC_OP_SUBW: return compute_all_subw();
6799 case CC_OP_SUBL: return compute_all_subl();
6800
6801 case CC_OP_SBBB: return compute_all_sbbb();
6802 case CC_OP_SBBW: return compute_all_sbbw();
6803 case CC_OP_SBBL: return compute_all_sbbl();
6804
6805 case CC_OP_LOGICB: return compute_all_logicb();
6806 case CC_OP_LOGICW: return compute_all_logicw();
6807 case CC_OP_LOGICL: return compute_all_logicl();
6808
6809 case CC_OP_INCB: return compute_all_incb();
6810 case CC_OP_INCW: return compute_all_incw();
6811 case CC_OP_INCL: return compute_all_incl();
6812
6813 case CC_OP_DECB: return compute_all_decb();
6814 case CC_OP_DECW: return compute_all_decw();
6815 case CC_OP_DECL: return compute_all_decl();
6816
6817 case CC_OP_SHLB: return compute_all_shlb();
6818 case CC_OP_SHLW: return compute_all_shlw();
6819 case CC_OP_SHLL: return compute_all_shll();
6820
6821 case CC_OP_SARB: return compute_all_sarb();
6822 case CC_OP_SARW: return compute_all_sarw();
6823 case CC_OP_SARL: return compute_all_sarl();
6824
6825#ifdef TARGET_X86_64
6826 case CC_OP_MULQ: return compute_all_mulq();
6827
6828 case CC_OP_ADDQ: return compute_all_addq();
6829
6830 case CC_OP_ADCQ: return compute_all_adcq();
6831
6832 case CC_OP_SUBQ: return compute_all_subq();
6833
6834 case CC_OP_SBBQ: return compute_all_sbbq();
6835
6836 case CC_OP_LOGICQ: return compute_all_logicq();
6837
6838 case CC_OP_INCQ: return compute_all_incq();
6839
6840 case CC_OP_DECQ: return compute_all_decq();
6841
6842 case CC_OP_SHLQ: return compute_all_shlq();
6843
6844 case CC_OP_SARQ: return compute_all_sarq();
6845#endif
6846 }
6847}
6848
6849uint32_t helper_cc_compute_c(int op)
6850{
6851 switch (op) {
6852 default: /* should never happen */ return 0;
6853
6854 case CC_OP_EFLAGS: return compute_c_eflags();
6855
6856 case CC_OP_MULB: return compute_c_mull();
6857 case CC_OP_MULW: return compute_c_mull();
6858 case CC_OP_MULL: return compute_c_mull();
6859
6860 case CC_OP_ADDB: return compute_c_addb();
6861 case CC_OP_ADDW: return compute_c_addw();
6862 case CC_OP_ADDL: return compute_c_addl();
6863
6864 case CC_OP_ADCB: return compute_c_adcb();
6865 case CC_OP_ADCW: return compute_c_adcw();
6866 case CC_OP_ADCL: return compute_c_adcl();
6867
6868 case CC_OP_SUBB: return compute_c_subb();
6869 case CC_OP_SUBW: return compute_c_subw();
6870 case CC_OP_SUBL: return compute_c_subl();
6871
6872 case CC_OP_SBBB: return compute_c_sbbb();
6873 case CC_OP_SBBW: return compute_c_sbbw();
6874 case CC_OP_SBBL: return compute_c_sbbl();
6875
6876 case CC_OP_LOGICB: return compute_c_logicb();
6877 case CC_OP_LOGICW: return compute_c_logicw();
6878 case CC_OP_LOGICL: return compute_c_logicl();
6879
6880 case CC_OP_INCB: return compute_c_incl();
6881 case CC_OP_INCW: return compute_c_incl();
6882 case CC_OP_INCL: return compute_c_incl();
6883
6884 case CC_OP_DECB: return compute_c_incl();
6885 case CC_OP_DECW: return compute_c_incl();
6886 case CC_OP_DECL: return compute_c_incl();
6887
6888 case CC_OP_SHLB: return compute_c_shlb();
6889 case CC_OP_SHLW: return compute_c_shlw();
6890 case CC_OP_SHLL: return compute_c_shll();
6891
6892 case CC_OP_SARB: return compute_c_sarl();
6893 case CC_OP_SARW: return compute_c_sarl();
6894 case CC_OP_SARL: return compute_c_sarl();
6895
6896#ifdef TARGET_X86_64
6897 case CC_OP_MULQ: return compute_c_mull();
6898
6899 case CC_OP_ADDQ: return compute_c_addq();
6900
6901 case CC_OP_ADCQ: return compute_c_adcq();
6902
6903 case CC_OP_SUBQ: return compute_c_subq();
6904
6905 case CC_OP_SBBQ: return compute_c_sbbq();
6906
6907 case CC_OP_LOGICQ: return compute_c_logicq();
6908
6909 case CC_OP_INCQ: return compute_c_incl();
6910
6911 case CC_OP_DECQ: return compute_c_incl();
6912
6913 case CC_OP_SHLQ: return compute_c_shlq();
6914
6915 case CC_OP_SARQ: return compute_c_sarl();
6916#endif
6917 }
6918}