VirtualBox

source: vbox/trunk/src/VBox/VMM/include/IEMInternal.h@ 72484

最後變更 在這個檔案從72484是 72484,由 vboxsync 提交於 7 年 前

IEM,NEM: Define minimum CPUMCTX set for IEM and hook it up to NEM for fetching missing bits as needed. bugref:9044

  • 屬性 svn:eol-style 設為 native
  • 屬性 svn:keywords 設為 Author Date Id Revision
檔案大小: 87.4 KB
 
1/* $Id: IEMInternal.h 72484 2018-06-08 17:05:40Z vboxsync $ */
2/** @file
3 * IEM - Internal header file.
4 */
5
6/*
7 * Copyright (C) 2011-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.alldomusa.eu.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18#ifndef ___IEMInternal_h
19#define ___IEMInternal_h
20
21#include <VBox/vmm/cpum.h>
22#include <VBox/vmm/iem.h>
23#include <VBox/vmm/stam.h>
24#include <VBox/param.h>
25
26#include <setjmp.h>
27
28
29RT_C_DECLS_BEGIN
30
31
32/** @defgroup grp_iem_int Internals
33 * @ingroup grp_iem
34 * @internal
35 * @{
36 */
37
38/** For expanding symbol in slickedit and other products tagging and
39 * crossreferencing IEM symbols. */
40#ifndef IEM_STATIC
41# define IEM_STATIC static
42#endif
43
44/** @def IEM_WITH_3DNOW
45 * Includes the 3DNow decoding. */
46#define IEM_WITH_3DNOW
47
48/** @def IEM_WITH_THREE_0F_38
49 * Includes the three byte opcode map for instrs starting with 0x0f 0x38. */
50#define IEM_WITH_THREE_0F_38
51
52/** @def IEM_WITH_THREE_0F_3A
53 * Includes the three byte opcode map for instrs starting with 0x0f 0x38. */
54#define IEM_WITH_THREE_0F_3A
55
56/** @def IEM_WITH_VEX
57 * Includes the VEX decoding. */
58#define IEM_WITH_VEX
59
60
61/** @def IEM_VERIFICATION_MODE_FULL
62 * Shorthand for:
63 * defined(IEM_VERIFICATION_MODE) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
64 */
65#if (defined(IEM_VERIFICATION_MODE) && !defined(IEM_VERIFICATION_MODE_MINIMAL) && !defined(IEM_VERIFICATION_MODE_FULL)) \
66 || defined(DOXYGEN_RUNNING)
67# define IEM_VERIFICATION_MODE_FULL
68#endif
69
70
71/** @def IEM_CFG_TARGET_CPU
72 * The minimum target CPU for the IEM emulation (IEMTARGETCPU_XXX value).
73 *
74 * By default we allow this to be configured by the user via the
75 * CPUM/GuestCpuName config string, but this comes at a slight cost during
76 * decoding. So, for applications of this code where there is no need to
77 * be dynamic wrt target CPU, just modify this define.
78 */
79#if !defined(IEM_CFG_TARGET_CPU) || defined(DOXYGEN_RUNNING)
80# define IEM_CFG_TARGET_CPU IEMTARGETCPU_DYNAMIC
81#endif
82
83
84//#define IEM_WITH_CODE_TLB// - work in progress
85
86
87#if !defined(IN_TSTVMSTRUCT) && !defined(DOXYGEN_RUNNING)
88/** Instruction statistics. */
89typedef struct IEMINSTRSTATS
90{
91# define IEM_DO_INSTR_STAT(a_Name, a_szDesc) uint32_t a_Name;
92# include "IEMInstructionStatisticsTmpl.h"
93# undef IEM_DO_INSTR_STAT
94} IEMINSTRSTATS;
95#else
96struct IEMINSTRSTATS;
97typedef struct IEMINSTRSTATS IEMINSTRSTATS;
98#endif
99/** Pointer to IEM instruction statistics. */
100typedef IEMINSTRSTATS *PIEMINSTRSTATS;
101
102/** Finish and move to types.h */
103typedef union
104{
105 uint32_t u32;
106} RTFLOAT32U;
107typedef RTFLOAT32U *PRTFLOAT32U;
108typedef RTFLOAT32U const *PCRTFLOAT32U;
109
110
111/**
112 * Extended operand mode that includes a representation of 8-bit.
113 *
114 * This is used for packing down modes when invoking some C instruction
115 * implementations.
116 */
117typedef enum IEMMODEX
118{
119 IEMMODEX_16BIT = IEMMODE_16BIT,
120 IEMMODEX_32BIT = IEMMODE_32BIT,
121 IEMMODEX_64BIT = IEMMODE_64BIT,
122 IEMMODEX_8BIT
123} IEMMODEX;
124AssertCompileSize(IEMMODEX, 4);
125
126
127/**
128 * Branch types.
129 */
130typedef enum IEMBRANCH
131{
132 IEMBRANCH_JUMP = 1,
133 IEMBRANCH_CALL,
134 IEMBRANCH_TRAP,
135 IEMBRANCH_SOFTWARE_INT,
136 IEMBRANCH_HARDWARE_INT
137} IEMBRANCH;
138AssertCompileSize(IEMBRANCH, 4);
139
140
141/**
142 * INT instruction types.
143 */
144typedef enum IEMINT
145{
146 /** INT n instruction (opcode 0xcd imm). */
147 IEMINT_INTN = 0,
148 /** Single byte INT3 instruction (opcode 0xcc). */
149 IEMINT_INT3 = IEM_XCPT_FLAGS_BP_INSTR,
150 /** Single byte INTO instruction (opcode 0xce). */
151 IEMINT_INTO = IEM_XCPT_FLAGS_OF_INSTR,
152 /** Single byte INT1 (ICEBP) instruction (opcode 0xf1). */
153 IEMINT_INT1 = IEM_XCPT_FLAGS_ICEBP_INSTR
154} IEMINT;
155AssertCompileSize(IEMINT, 4);
156
157
158/**
159 * A FPU result.
160 */
161typedef struct IEMFPURESULT
162{
163 /** The output value. */
164 RTFLOAT80U r80Result;
165 /** The output status. */
166 uint16_t FSW;
167} IEMFPURESULT;
168AssertCompileMemberOffset(IEMFPURESULT, FSW, 10);
169/** Pointer to a FPU result. */
170typedef IEMFPURESULT *PIEMFPURESULT;
171/** Pointer to a const FPU result. */
172typedef IEMFPURESULT const *PCIEMFPURESULT;
173
174
175/**
176 * A FPU result consisting of two output values and FSW.
177 */
178typedef struct IEMFPURESULTTWO
179{
180 /** The first output value. */
181 RTFLOAT80U r80Result1;
182 /** The output status. */
183 uint16_t FSW;
184 /** The second output value. */
185 RTFLOAT80U r80Result2;
186} IEMFPURESULTTWO;
187AssertCompileMemberOffset(IEMFPURESULTTWO, FSW, 10);
188AssertCompileMemberOffset(IEMFPURESULTTWO, r80Result2, 12);
189/** Pointer to a FPU result consisting of two output values and FSW. */
190typedef IEMFPURESULTTWO *PIEMFPURESULTTWO;
191/** Pointer to a const FPU result consisting of two output values and FSW. */
192typedef IEMFPURESULTTWO const *PCIEMFPURESULTTWO;
193
194
195
196#ifdef IEM_VERIFICATION_MODE_FULL
197
198/**
199 * Verification event type.
200 */
201typedef enum IEMVERIFYEVENT
202{
203 IEMVERIFYEVENT_INVALID = 0,
204 IEMVERIFYEVENT_IOPORT_READ,
205 IEMVERIFYEVENT_IOPORT_WRITE,
206 IEMVERIFYEVENT_IOPORT_STR_READ,
207 IEMVERIFYEVENT_IOPORT_STR_WRITE,
208 IEMVERIFYEVENT_RAM_WRITE,
209 IEMVERIFYEVENT_RAM_READ
210} IEMVERIFYEVENT;
211
212/** Checks if the event type is a RAM read or write. */
213# define IEMVERIFYEVENT_IS_RAM(a_enmType) ((a_enmType) == IEMVERIFYEVENT_RAM_WRITE || (a_enmType) == IEMVERIFYEVENT_RAM_READ)
214
215/**
216 * Verification event record.
217 */
218typedef struct IEMVERIFYEVTREC
219{
220 /** Pointer to the next record in the list. */
221 struct IEMVERIFYEVTREC *pNext;
222 /** The event type. */
223 IEMVERIFYEVENT enmEvent;
224 /** The event data. */
225 union
226 {
227 /** IEMVERIFYEVENT_IOPORT_READ */
228 struct
229 {
230 RTIOPORT Port;
231 uint8_t cbValue;
232 } IOPortRead;
233
234 /** IEMVERIFYEVENT_IOPORT_WRITE */
235 struct
236 {
237 RTIOPORT Port;
238 uint8_t cbValue;
239 uint32_t u32Value;
240 } IOPortWrite;
241
242 /** IEMVERIFYEVENT_IOPORT_STR_READ */
243 struct
244 {
245 RTIOPORT Port;
246 uint8_t cbValue;
247 RTGCUINTREG cTransfers;
248 } IOPortStrRead;
249
250 /** IEMVERIFYEVENT_IOPORT_STR_WRITE */
251 struct
252 {
253 RTIOPORT Port;
254 uint8_t cbValue;
255 RTGCUINTREG cTransfers;
256 } IOPortStrWrite;
257
258 /** IEMVERIFYEVENT_RAM_READ */
259 struct
260 {
261 RTGCPHYS GCPhys;
262 uint32_t cb;
263 } RamRead;
264
265 /** IEMVERIFYEVENT_RAM_WRITE */
266 struct
267 {
268 RTGCPHYS GCPhys;
269 uint32_t cb;
270 uint8_t ab[512];
271 } RamWrite;
272 } u;
273} IEMVERIFYEVTREC;
274/** Pointer to an IEM event verification records. */
275typedef IEMVERIFYEVTREC *PIEMVERIFYEVTREC;
276
277#endif /* IEM_VERIFICATION_MODE_FULL */
278
279
280/**
281 * IEM TLB entry.
282 *
283 * Lookup assembly:
284 * @code{.asm}
285 ; Calculate tag.
286 mov rax, [VA]
287 shl rax, 16
288 shr rax, 16 + X86_PAGE_SHIFT
289 or rax, [uTlbRevision]
290
291 ; Do indexing.
292 movzx ecx, al
293 lea rcx, [pTlbEntries + rcx]
294
295 ; Check tag.
296 cmp [rcx + IEMTLBENTRY.uTag], rax
297 jne .TlbMiss
298
299 ; Check access.
300 movsx rax, ACCESS_FLAGS | MAPPING_R3_NOT_VALID | 0xffffff00
301 and rax, [rcx + IEMTLBENTRY.fFlagsAndPhysRev]
302 cmp rax, [uTlbPhysRev]
303 jne .TlbMiss
304
305 ; Calc address and we're done.
306 mov eax, X86_PAGE_OFFSET_MASK
307 and eax, [VA]
308 or rax, [rcx + IEMTLBENTRY.pMappingR3]
309 %ifdef VBOX_WITH_STATISTICS
310 inc qword [cTlbHits]
311 %endif
312 jmp .Done
313
314 .TlbMiss:
315 mov r8d, ACCESS_FLAGS
316 mov rdx, [VA]
317 mov rcx, [pVCpu]
318 call iemTlbTypeMiss
319 .Done:
320
321 @endcode
322 *
323 */
324typedef struct IEMTLBENTRY
325{
326 /** The TLB entry tag.
327 * Bits 35 thru 0 are made up of the virtual address shifted right 12 bits.
328 * Bits 63 thru 36 are made up of the TLB revision (zero means invalid).
329 *
330 * The TLB lookup code uses the current TLB revision, which won't ever be zero,
331 * enabling an extremely cheap TLB invalidation most of the time. When the TLB
332 * revision wraps around though, the tags needs to be zeroed.
333 *
334 * @note Try use SHRD instruction? After seeing
335 * https://gmplib.org/~tege/x86-timing.pdf, maybe not.
336 */
337 uint64_t uTag;
338 /** Access flags and physical TLB revision.
339 *
340 * - Bit 0 - page tables - not executable (X86_PTE_PAE_NX).
341 * - Bit 1 - page tables - not writable (complemented X86_PTE_RW).
342 * - Bit 2 - page tables - not user (complemented X86_PTE_US).
343 * - Bit 3 - pgm phys/virt - not directly writable.
344 * - Bit 4 - pgm phys page - not directly readable.
345 * - Bit 5 - currently unused.
346 * - Bit 6 - page tables - not dirty (complemented X86_PTE_D).
347 * - Bit 7 - tlb entry - pMappingR3 member not valid.
348 * - Bits 63 thru 8 are used for the physical TLB revision number.
349 *
350 * We're using complemented bit meanings here because it makes it easy to check
351 * whether special action is required. For instance a user mode write access
352 * would do a "TEST fFlags, (X86_PTE_RW | X86_PTE_US | X86_PTE_D)" and a
353 * non-zero result would mean special handling needed because either it wasn't
354 * writable, or it wasn't user, or the page wasn't dirty. A user mode read
355 * access would do "TEST fFlags, X86_PTE_US"; and a kernel mode read wouldn't
356 * need to check any PTE flag.
357 */
358 uint64_t fFlagsAndPhysRev;
359 /** The guest physical page address. */
360 uint64_t GCPhys;
361 /** Pointer to the ring-3 mapping (possibly also valid in ring-0). */
362#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
363 R3PTRTYPE(uint8_t *) pbMappingR3;
364#else
365 R3R0PTRTYPE(uint8_t *) pbMappingR3;
366#endif
367#if HC_ARCH_BITS == 32
368 uint32_t u32Padding1;
369#endif
370} IEMTLBENTRY;
371AssertCompileSize(IEMTLBENTRY, 32);
372/** Pointer to an IEM TLB entry. */
373typedef IEMTLBENTRY *PIEMTLBENTRY;
374
375/** @name IEMTLBE_F_XXX - TLB entry flags (IEMTLBENTRY::fFlagsAndPhysRev)
376 * @{ */
377#define IEMTLBE_F_PT_NO_EXEC RT_BIT_64(0) /**< Page tables: Not executable. */
378#define IEMTLBE_F_PT_NO_WRITE RT_BIT_64(1) /**< Page tables: Not writable. */
379#define IEMTLBE_F_PT_NO_USER RT_BIT_64(2) /**< Page tables: Not user accessible (supervisor only). */
380#define IEMTLBE_F_PG_NO_WRITE RT_BIT_64(3) /**< Phys page: Not writable (access handler, ROM, whatever). */
381#define IEMTLBE_F_PG_NO_READ RT_BIT_64(4) /**< Phys page: Not readable (MMIO / access handler, ROM) */
382#define IEMTLBE_F_PATCH_CODE RT_BIT_64(5) /**< Code TLB: Patch code (PATM). */
383#define IEMTLBE_F_PT_NO_DIRTY RT_BIT_64(6) /**< Page tables: Not dirty (needs to be made dirty on write). */
384#define IEMTLBE_F_NO_MAPPINGR3 RT_BIT_64(7) /**< TLB entry: The IEMTLBENTRY::pMappingR3 member is invalid. */
385#define IEMTLBE_F_PHYS_REV UINT64_C(0xffffffffffffff00) /**< Physical revision mask. */
386/** @} */
387
388
389/**
390 * An IEM TLB.
391 *
392 * We've got two of these, one for data and one for instructions.
393 */
394typedef struct IEMTLB
395{
396 /** The TLB entries.
397 * We've choosen 256 because that way we can obtain the result directly from a
398 * 8-bit register without an additional AND instruction. */
399 IEMTLBENTRY aEntries[256];
400 /** The TLB revision.
401 * This is actually only 28 bits wide (see IEMTLBENTRY::uTag) and is incremented
402 * by adding RT_BIT_64(36) to it. When it wraps around and becomes zero, all
403 * the tags in the TLB must be zeroed and the revision set to RT_BIT_64(36).
404 * (The revision zero indicates an invalid TLB entry.)
405 *
406 * The initial value is choosen to cause an early wraparound. */
407 uint64_t uTlbRevision;
408 /** The TLB physical address revision - shadow of PGM variable.
409 *
410 * This is actually only 56 bits wide (see IEMTLBENTRY::fFlagsAndPhysRev) and is
411 * incremented by adding RT_BIT_64(8). When it wraps around and becomes zero,
412 * a rendezvous is called and each CPU wipe the IEMTLBENTRY::pMappingR3 as well
413 * as IEMTLBENTRY::fFlagsAndPhysRev bits 63 thru 8, 4, and 3.
414 *
415 * The initial value is choosen to cause an early wraparound. */
416 uint64_t volatile uTlbPhysRev;
417
418 /* Statistics: */
419
420 /** TLB hits (VBOX_WITH_STATISTICS only). */
421 uint64_t cTlbHits;
422 /** TLB misses. */
423 uint32_t cTlbMisses;
424 /** Slow read path. */
425 uint32_t cTlbSlowReadPath;
426#if 0
427 /** TLB misses because of tag mismatch. */
428 uint32_t cTlbMissesTag;
429 /** TLB misses because of virtual access violation. */
430 uint32_t cTlbMissesVirtAccess;
431 /** TLB misses because of dirty bit. */
432 uint32_t cTlbMissesDirty;
433 /** TLB misses because of MMIO */
434 uint32_t cTlbMissesMmio;
435 /** TLB misses because of write access handlers. */
436 uint32_t cTlbMissesWriteHandler;
437 /** TLB misses because no r3(/r0) mapping. */
438 uint32_t cTlbMissesMapping;
439#endif
440 /** Alignment padding. */
441 uint32_t au32Padding[3+5];
442} IEMTLB;
443AssertCompileSizeAlignment(IEMTLB, 64);
444/** IEMTLB::uTlbRevision increment. */
445#define IEMTLB_REVISION_INCR RT_BIT_64(36)
446/** IEMTLB::uTlbPhysRev increment. */
447#define IEMTLB_PHYS_REV_INCR RT_BIT_64(8)
448
449
450/**
451 * The per-CPU IEM state.
452 */
453typedef struct IEMCPU
454{
455 /** Info status code that needs to be propagated to the IEM caller.
456 * This cannot be passed internally, as it would complicate all success
457 * checks within the interpreter making the code larger and almost impossible
458 * to get right. Instead, we'll store status codes to pass on here. Each
459 * source of these codes will perform appropriate sanity checks. */
460 int32_t rcPassUp; /* 0x00 */
461
462 /** The current CPU execution mode (CS). */
463 IEMMODE enmCpuMode; /* 0x04 */
464 /** The CPL. */
465 uint8_t uCpl; /* 0x05 */
466
467 /** Whether to bypass access handlers or not. */
468 bool fBypassHandlers; /* 0x06 */
469 /** Indicates that we're interpreting patch code - RC only! */
470 bool fInPatchCode; /* 0x07 */
471
472 /** @name Decoder state.
473 * @{ */
474#ifdef IEM_WITH_CODE_TLB
475 /** The offset of the next instruction byte. */
476 uint32_t offInstrNextByte; /* 0x08 */
477 /** The number of bytes available at pbInstrBuf for the current instruction.
478 * This takes the max opcode length into account so that doesn't need to be
479 * checked separately. */
480 uint32_t cbInstrBuf; /* 0x0c */
481 /** Pointer to the page containing RIP, user specified buffer or abOpcode.
482 * This can be NULL if the page isn't mappable for some reason, in which
483 * case we'll do fallback stuff.
484 *
485 * If we're executing an instruction from a user specified buffer,
486 * IEMExecOneWithPrefetchedByPC and friends, this is not necessarily a page
487 * aligned pointer but pointer to the user data.
488 *
489 * For instructions crossing pages, this will start on the first page and be
490 * advanced to the next page by the time we've decoded the instruction. This
491 * therefore precludes stuff like <tt>pbInstrBuf[offInstrNextByte + cbInstrBuf - cbCurInstr]</tt>
492 */
493 uint8_t const *pbInstrBuf; /* 0x10 */
494# if ARCH_BITS == 32
495 uint32_t uInstrBufHigh; /** The high dword of the host context pbInstrBuf member. */
496# endif
497 /** The program counter corresponding to pbInstrBuf.
498 * This is set to a non-canonical address when we need to invalidate it. */
499 uint64_t uInstrBufPc; /* 0x18 */
500 /** The number of bytes available at pbInstrBuf in total (for IEMExecLots).
501 * This takes the CS segment limit into account. */
502 uint16_t cbInstrBufTotal; /* 0x20 */
503 /** Offset into pbInstrBuf of the first byte of the current instruction.
504 * Can be negative to efficiently handle cross page instructions. */
505 int16_t offCurInstrStart; /* 0x22 */
506
507 /** The prefix mask (IEM_OP_PRF_XXX). */
508 uint32_t fPrefixes; /* 0x24 */
509 /** The extra REX ModR/M register field bit (REX.R << 3). */
510 uint8_t uRexReg; /* 0x28 */
511 /** The extra REX ModR/M r/m field, SIB base and opcode reg bit
512 * (REX.B << 3). */
513 uint8_t uRexB; /* 0x29 */
514 /** The extra REX SIB index field bit (REX.X << 3). */
515 uint8_t uRexIndex; /* 0x2a */
516
517 /** The effective segment register (X86_SREG_XXX). */
518 uint8_t iEffSeg; /* 0x2b */
519
520#else
521 /** The size of what has currently been fetched into abOpcode. */
522 uint8_t cbOpcode; /* 0x08 */
523 /** The current offset into abOpcode. */
524 uint8_t offOpcode; /* 0x09 */
525
526 /** The effective segment register (X86_SREG_XXX). */
527 uint8_t iEffSeg; /* 0x0a */
528
529 /** The extra REX ModR/M register field bit (REX.R << 3). */
530 uint8_t uRexReg; /* 0x0b */
531 /** The prefix mask (IEM_OP_PRF_XXX). */
532 uint32_t fPrefixes; /* 0x0c */
533 /** The extra REX ModR/M r/m field, SIB base and opcode reg bit
534 * (REX.B << 3). */
535 uint8_t uRexB; /* 0x10 */
536 /** The extra REX SIB index field bit (REX.X << 3). */
537 uint8_t uRexIndex; /* 0x11 */
538
539#endif
540
541 /** The effective operand mode. */
542 IEMMODE enmEffOpSize; /* 0x2c, 0x12 */
543 /** The default addressing mode. */
544 IEMMODE enmDefAddrMode; /* 0x2d, 0x13 */
545 /** The effective addressing mode. */
546 IEMMODE enmEffAddrMode; /* 0x2e, 0x14 */
547 /** The default operand mode. */
548 IEMMODE enmDefOpSize; /* 0x2f, 0x15 */
549
550 /** Prefix index (VEX.pp) for two byte and three byte tables. */
551 uint8_t idxPrefix; /* 0x30, 0x16 */
552 /** 3rd VEX/EVEX/XOP register.
553 * Please use IEM_GET_EFFECTIVE_VVVV to access. */
554 uint8_t uVex3rdReg; /* 0x31, 0x17 */
555 /** The VEX/EVEX/XOP length field. */
556 uint8_t uVexLength; /* 0x32, 0x18 */
557 /** Additional EVEX stuff. */
558 uint8_t fEvexStuff; /* 0x33, 0x19 */
559
560 /** The FPU opcode (FOP). */
561 uint16_t uFpuOpcode; /* 0x34, 0x1a */
562
563 /** Explicit alignment padding. */
564#ifdef IEM_WITH_CODE_TLB
565 uint8_t abAlignment2a[2]; /* 0x36 */
566#endif
567
568 /** The opcode bytes. */
569 uint8_t abOpcode[15]; /* 0x48, 0x1c */
570 /** Explicit alignment padding. */
571#ifdef IEM_WITH_CODE_TLB
572 uint8_t abAlignment2c[0x48 - 0x47]; /* 0x37 */
573#else
574 uint8_t abAlignment2c[0x48 - 0x2b]; /* 0x2b */
575#endif
576 /** @} */
577
578
579 /** The flags of the current exception / interrupt. */
580 uint32_t fCurXcpt; /* 0x48, 0x48 */
581 /** The current exception / interrupt. */
582 uint8_t uCurXcpt;
583 /** Exception / interrupt recursion depth. */
584 int8_t cXcptRecursions;
585
586 /** The number of active guest memory mappings. */
587 uint8_t cActiveMappings;
588 /** The next unused mapping index. */
589 uint8_t iNextMapping;
590 /** Records for tracking guest memory mappings. */
591 struct
592 {
593 /** The address of the mapped bytes. */
594 void *pv;
595#if defined(IN_RC) && HC_ARCH_BITS == 64
596 uint32_t u32Alignment3; /**< Alignment padding. */
597#endif
598 /** The access flags (IEM_ACCESS_XXX).
599 * IEM_ACCESS_INVALID if the entry is unused. */
600 uint32_t fAccess;
601#if HC_ARCH_BITS == 64
602 uint32_t u32Alignment4; /**< Alignment padding. */
603#endif
604 } aMemMappings[3];
605
606 /** Locking records for the mapped memory. */
607 union
608 {
609 PGMPAGEMAPLOCK Lock;
610 uint64_t au64Padding[2];
611 } aMemMappingLocks[3];
612
613 /** Bounce buffer info.
614 * This runs in parallel to aMemMappings. */
615 struct
616 {
617 /** The physical address of the first byte. */
618 RTGCPHYS GCPhysFirst;
619 /** The physical address of the second page. */
620 RTGCPHYS GCPhysSecond;
621 /** The number of bytes in the first page. */
622 uint16_t cbFirst;
623 /** The number of bytes in the second page. */
624 uint16_t cbSecond;
625 /** Whether it's unassigned memory. */
626 bool fUnassigned;
627 /** Explicit alignment padding. */
628 bool afAlignment5[3];
629 } aMemBbMappings[3];
630
631 /** Bounce buffer storage.
632 * This runs in parallel to aMemMappings and aMemBbMappings. */
633 struct
634 {
635 uint8_t ab[512];
636 } aBounceBuffers[3];
637
638
639 /** Pointer set jump buffer - ring-3 context. */
640 R3PTRTYPE(jmp_buf *) pJmpBufR3;
641 /** Pointer set jump buffer - ring-0 context. */
642 R0PTRTYPE(jmp_buf *) pJmpBufR0;
643 /** Pointer set jump buffer - raw-mode context. */
644 RCPTRTYPE(jmp_buf *) pJmpBufRC;
645
646 /** @todo Should move this near @a fCurXcpt later. */
647 /** The error code for the current exception / interrupt. */
648 uint32_t uCurXcptErr;
649 /** The CR2 for the current exception / interrupt. */
650 uint64_t uCurXcptCr2;
651
652 /** @name Statistics
653 * @{ */
654 /** The number of instructions we've executed. */
655 uint32_t cInstructions;
656 /** The number of potential exits. */
657 uint32_t cPotentialExits;
658 /** The number of bytes data or stack written (mostly for IEMExecOneEx).
659 * This may contain uncommitted writes. */
660 uint32_t cbWritten;
661 /** Counts the VERR_IEM_INSTR_NOT_IMPLEMENTED returns. */
662 uint32_t cRetInstrNotImplemented;
663 /** Counts the VERR_IEM_ASPECT_NOT_IMPLEMENTED returns. */
664 uint32_t cRetAspectNotImplemented;
665 /** Counts informational statuses returned (other than VINF_SUCCESS). */
666 uint32_t cRetInfStatuses;
667 /** Counts other error statuses returned. */
668 uint32_t cRetErrStatuses;
669 /** Number of times rcPassUp has been used. */
670 uint32_t cRetPassUpStatus;
671 /** Number of times RZ left with instruction commit pending for ring-3. */
672 uint32_t cPendingCommit;
673 /** Number of long jumps. */
674 uint32_t cLongJumps;
675 uint32_t uAlignment6; /**< Alignment padding. */
676#ifdef IEM_VERIFICATION_MODE_FULL
677 /** The Number of I/O port reads that has been performed. */
678 uint32_t cIOReads;
679 /** The Number of I/O port writes that has been performed. */
680 uint32_t cIOWrites;
681 /** Set if no comparison to REM is currently performed.
682 * This is used to skip past really slow bits. */
683 bool fNoRem;
684 /** Saved fNoRem flag used by #iemInitExec and #iemUninitExec. */
685 bool fNoRemSavedByExec;
686 /** Indicates that RAX and RDX differences should be ignored since RDTSC
687 * and RDTSCP are timing sensitive. */
688 bool fIgnoreRaxRdx;
689 /** Indicates that a MOVS instruction with overlapping source and destination
690 * was executed, causing the memory write records to be incorrrect. */
691 bool fOverlappingMovs;
692 /** Set if there are problematic memory accesses (MMIO, write monitored, ++). */
693 bool fProblematicMemory;
694 /** This is used to communicate a CPL changed caused by IEMInjectTrap that
695 * CPUM doesn't yet reflect. */
696 uint8_t uInjectCpl;
697 /** To prevent EMR3HmSingleInstruction from triggering endless recursion via
698 * emR3ExecuteInstruction and iemExecVerificationModeCheck. */
699 uint8_t cVerifyDepth;
700 bool afAlignment7[2];
701 /** Mask of undefined eflags.
702 * The verifier will any difference in these flags. */
703 uint32_t fUndefinedEFlags;
704 /** The CS of the instruction being interpreted. */
705 RTSEL uOldCs;
706 /** The RIP of the instruction being interpreted. */
707 uint64_t uOldRip;
708 /** The physical address corresponding to abOpcodes[0]. */
709 RTGCPHYS GCPhysOpcodes;
710#endif
711 /** @} */
712
713 /** @name Target CPU information.
714 * @{ */
715#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
716 /** The target CPU. */
717 uint32_t uTargetCpu;
718#else
719 uint32_t u32TargetCpuPadding;
720#endif
721 /** The CPU vendor. */
722 CPUMCPUVENDOR enmCpuVendor;
723 /** @} */
724
725 /** @name Host CPU information.
726 * @{ */
727 /** The CPU vendor. */
728 CPUMCPUVENDOR enmHostCpuVendor;
729 /** @} */
730
731 uint32_t au32Alignment8[HC_ARCH_BITS == 64 ? 4 + 8 : 4]; /**< Alignment padding. */
732
733 /** Data TLB.
734 * @remarks Must be 64-byte aligned. */
735 IEMTLB DataTlb;
736 /** Instruction TLB.
737 * @remarks Must be 64-byte aligned. */
738 IEMTLB CodeTlb;
739
740 /** Pointer to the CPU context - ring-3 context.
741 * @todo put inside IEM_VERIFICATION_MODE_FULL++. */
742 R3PTRTYPE(PCPUMCTX) pCtxR3;
743 /** Pointer to the CPU context - ring-0 context. */
744 R0PTRTYPE(PCPUMCTX) pCtxR0;
745 /** Pointer to the CPU context - raw-mode context. */
746 RCPTRTYPE(PCPUMCTX) pCtxRC;
747
748 /** Pointer to instruction statistics for raw-mode context (same as R0). */
749 RCPTRTYPE(PIEMINSTRSTATS) pStatsRC;
750 /** Pointer to instruction statistics for ring-0 context (same as RC). */
751 R0PTRTYPE(PIEMINSTRSTATS) pStatsR0;
752 /** Pointer to instruction statistics for non-ring-3 code. */
753 R3PTRTYPE(PIEMINSTRSTATS) pStatsCCR3;
754 /** Pointer to instruction statistics for ring-3 context. */
755 R3PTRTYPE(PIEMINSTRSTATS) pStatsR3;
756
757#ifdef IEM_VERIFICATION_MODE_FULL
758 /** The event verification records for what IEM did (LIFO). */
759 R3PTRTYPE(PIEMVERIFYEVTREC) pIemEvtRecHead;
760 /** Insertion point for pIemEvtRecHead. */
761 R3PTRTYPE(PIEMVERIFYEVTREC *) ppIemEvtRecNext;
762 /** The event verification records for what the other party did (FIFO). */
763 R3PTRTYPE(PIEMVERIFYEVTREC) pOtherEvtRecHead;
764 /** Insertion point for pOtherEvtRecHead. */
765 R3PTRTYPE(PIEMVERIFYEVTREC *) ppOtherEvtRecNext;
766 /** List of free event records. */
767 R3PTRTYPE(PIEMVERIFYEVTREC) pFreeEvtRec;
768#endif
769} IEMCPU;
770AssertCompileMemberOffset(IEMCPU, fCurXcpt, 0x48);
771AssertCompileMemberAlignment(IEMCPU, DataTlb, 64);
772AssertCompileMemberAlignment(IEMCPU, CodeTlb, 64);
773/** Pointer to the per-CPU IEM state. */
774typedef IEMCPU *PIEMCPU;
775/** Pointer to the const per-CPU IEM state. */
776typedef IEMCPU const *PCIEMCPU;
777
778
779/** @def IEM_GET_CTX
780 * Gets the guest CPU context for the calling EMT.
781 * @returns PCPUMCTX
782 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
783 */
784#if !defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE) \
785 && !defined(IEM_VERIFICATION_MODE_MINIMAL) && defined(VMCPU_INCL_CPUM_GST_CTX)
786# define IEM_GET_CTX(a_pVCpu) (&(a_pVCpu)->cpum.GstCtx)
787#else
788# define IEM_GET_CTX(a_pVCpu) ((a_pVCpu)->iem.s.CTX_SUFF(pCtx))
789#endif
790
791/** @def IEM_CTX_ASSERT
792 * Asserts that the @a a_fExtrnMbz is present in the CPU context.
793 * @param a_pCtx The CPUMCTX structure.
794 * @param a_fExtrnMbz The mask of CPUMCTX_EXTRN_XXX flags that must be zero.
795 */
796#define IEM_CTX_ASSERT(a_pCtx, a_fExtrnMbz) Assert(!((a_pCtx)->fExtrn & (a_fExtrnMbz)))
797
798/** @def IEM_CTX_IMPORT_RET
799 * Makes sure the CPU context bits given by @a a_fExtrnImport are imported.
800 *
801 * Will call the keep to import the bits as needed.
802 *
803 * Returns on import failure.
804 *
805 * @param a_pVCpu The cross context virtual CPU structure.
806 * @param a_pCtx The CPUMCTX structure.
807 * @param a_fExtrnImport The mask of CPUMCTX_EXTRN_XXX flags to import.
808 */
809#define IEM_CTX_IMPORT_RET(a_pVCpu, a_pCtx, a_fExtrnImport) \
810 if (!((a_pCtx)->fExtrn & (a_fExtrnImport))) \
811 { /* likely */ } \
812 else do { \
813 int rcCtxImport = iemCtxImport(a_pVCpu, a_pCtx, a_fExtrnImport); \
814 AssertRCReturn(rcCtxImport, rcCtxImport); \
815 } while (0)
816
817/** @def IEM_CTX_IMPORT_NORET
818 * Makes sure the CPU context bits given by @a a_fExtrnImport are imported.
819 *
820 * Will call the keep to import the bits as needed.
821 *
822 * @param a_pVCpu The cross context virtual CPU structure.
823 * @param a_pCtx The CPUMCTX structure.
824 * @param a_fExtrnImport The mask of CPUMCTX_EXTRN_XXX flags to import.
825 */
826#define IEM_CTX_IMPORT_NORET(a_pVCpu, a_pCtx, a_fExtrnImport) \
827 if (!((a_pCtx)->fExtrn & (a_fExtrnImport))) \
828 { /* likely */ } \
829 else do { \
830 int rcCtxImport = iemCtxImport(a_pVCpu, a_pCtx, a_fExtrnImport); \
831 AssertLogRelRC(rcCtxImport); \
832 } while (0)
833
834/** @def IEM_CTX_IMPORT_JMP
835 * Makes sure the CPU context bits given by @a a_fExtrnImport are imported.
836 *
837 * Will call the keep to import the bits as needed.
838 *
839 * Jumps on import failure.
840 *
841 * @param a_pVCpu The cross context virtual CPU structure.
842 * @param a_pCtx The CPUMCTX structure.
843 * @param a_fExtrnImport The mask of CPUMCTX_EXTRN_XXX flags to import.
844 */
845#define IEM_CTX_IMPORT_JMP(a_pVCpu, a_pCtx, a_fExtrnImport) \
846 if (!((a_pCtx)->fExtrn & (a_fExtrnImport))) \
847 { /* likely */ } \
848 else do { \
849 int rcCtxImport = iemCtxImport(a_pVCpu, a_pCtx, a_fExtrnImport); \
850 AssertRCStmt(rcCtxImport, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), rcCtxImport)); \
851 } while (0)
852
853int iemCtxImport(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fExtrnImport);
854
855
856/** Gets the current IEMTARGETCPU value.
857 * @returns IEMTARGETCPU value.
858 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
859 */
860#if IEM_CFG_TARGET_CPU != IEMTARGETCPU_DYNAMIC
861# define IEM_GET_TARGET_CPU(a_pVCpu) (IEM_CFG_TARGET_CPU)
862#else
863# define IEM_GET_TARGET_CPU(a_pVCpu) ((a_pVCpu)->iem.s.uTargetCpu)
864#endif
865
866/** @def Gets the instruction length. */
867#ifdef IEM_WITH_CODE_TLB
868# define IEM_GET_INSTR_LEN(a_pVCpu) ((a_pVCpu)->iem.s.offInstrNextByte - (uint32_t)(int32_t)(a_pVCpu)->iem.s.offCurInstrStart)
869#else
870# define IEM_GET_INSTR_LEN(a_pVCpu) ((a_pVCpu)->iem.s.offOpcode)
871#endif
872
873
874/** @name IEM_ACCESS_XXX - Access details.
875 * @{ */
876#define IEM_ACCESS_INVALID UINT32_C(0x000000ff)
877#define IEM_ACCESS_TYPE_READ UINT32_C(0x00000001)
878#define IEM_ACCESS_TYPE_WRITE UINT32_C(0x00000002)
879#define IEM_ACCESS_TYPE_EXEC UINT32_C(0x00000004)
880#define IEM_ACCESS_TYPE_MASK UINT32_C(0x00000007)
881#define IEM_ACCESS_WHAT_CODE UINT32_C(0x00000010)
882#define IEM_ACCESS_WHAT_DATA UINT32_C(0x00000020)
883#define IEM_ACCESS_WHAT_STACK UINT32_C(0x00000030)
884#define IEM_ACCESS_WHAT_SYS UINT32_C(0x00000040)
885#define IEM_ACCESS_WHAT_MASK UINT32_C(0x00000070)
886/** The writes are partial, so if initialize the bounce buffer with the
887 * orignal RAM content. */
888#define IEM_ACCESS_PARTIAL_WRITE UINT32_C(0x00000100)
889/** Used in aMemMappings to indicate that the entry is bounce buffered. */
890#define IEM_ACCESS_BOUNCE_BUFFERED UINT32_C(0x00000200)
891/** Bounce buffer with ring-3 write pending, first page. */
892#define IEM_ACCESS_PENDING_R3_WRITE_1ST UINT32_C(0x00000400)
893/** Bounce buffer with ring-3 write pending, second page. */
894#define IEM_ACCESS_PENDING_R3_WRITE_2ND UINT32_C(0x00000800)
895/** Valid bit mask. */
896#define IEM_ACCESS_VALID_MASK UINT32_C(0x00000fff)
897/** Read+write data alias. */
898#define IEM_ACCESS_DATA_RW (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_DATA)
899/** Write data alias. */
900#define IEM_ACCESS_DATA_W (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_DATA)
901/** Read data alias. */
902#define IEM_ACCESS_DATA_R (IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA)
903/** Instruction fetch alias. */
904#define IEM_ACCESS_INSTRUCTION (IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_WHAT_CODE)
905/** Stack write alias. */
906#define IEM_ACCESS_STACK_W (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_STACK)
907/** Stack read alias. */
908#define IEM_ACCESS_STACK_R (IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_STACK)
909/** Stack read+write alias. */
910#define IEM_ACCESS_STACK_RW (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_STACK)
911/** Read system table alias. */
912#define IEM_ACCESS_SYS_R (IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_SYS)
913/** Read+write system table alias. */
914#define IEM_ACCESS_SYS_RW (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_SYS)
915/** @} */
916
917/** @name Prefix constants (IEMCPU::fPrefixes)
918 * @{ */
919#define IEM_OP_PRF_SEG_CS RT_BIT_32(0) /**< CS segment prefix (0x2e). */
920#define IEM_OP_PRF_SEG_SS RT_BIT_32(1) /**< SS segment prefix (0x36). */
921#define IEM_OP_PRF_SEG_DS RT_BIT_32(2) /**< DS segment prefix (0x3e). */
922#define IEM_OP_PRF_SEG_ES RT_BIT_32(3) /**< ES segment prefix (0x26). */
923#define IEM_OP_PRF_SEG_FS RT_BIT_32(4) /**< FS segment prefix (0x64). */
924#define IEM_OP_PRF_SEG_GS RT_BIT_32(5) /**< GS segment prefix (0x65). */
925#define IEM_OP_PRF_SEG_MASK UINT32_C(0x3f)
926
927#define IEM_OP_PRF_SIZE_OP RT_BIT_32(8) /**< Operand size prefix (0x66). */
928#define IEM_OP_PRF_SIZE_REX_W RT_BIT_32(9) /**< REX.W prefix (0x48-0x4f). */
929#define IEM_OP_PRF_SIZE_ADDR RT_BIT_32(10) /**< Address size prefix (0x67). */
930
931#define IEM_OP_PRF_LOCK RT_BIT_32(16) /**< Lock prefix (0xf0). */
932#define IEM_OP_PRF_REPNZ RT_BIT_32(17) /**< Repeat-not-zero prefix (0xf2). */
933#define IEM_OP_PRF_REPZ RT_BIT_32(18) /**< Repeat-if-zero prefix (0xf3). */
934
935#define IEM_OP_PRF_REX RT_BIT_32(24) /**< Any REX prefix (0x40-0x4f). */
936#define IEM_OP_PRF_REX_R RT_BIT_32(25) /**< REX.R prefix (0x44,0x45,0x46,0x47,0x4c,0x4d,0x4e,0x4f). */
937#define IEM_OP_PRF_REX_B RT_BIT_32(26) /**< REX.B prefix (0x41,0x43,0x45,0x47,0x49,0x4b,0x4d,0x4f). */
938#define IEM_OP_PRF_REX_X RT_BIT_32(27) /**< REX.X prefix (0x42,0x43,0x46,0x47,0x4a,0x4b,0x4e,0x4f). */
939/** Mask with all the REX prefix flags.
940 * This is generally for use when needing to undo the REX prefixes when they
941 * are followed legacy prefixes and therefore does not immediately preceed
942 * the first opcode byte.
943 * For testing whether any REX prefix is present, use IEM_OP_PRF_REX instead. */
944#define IEM_OP_PRF_REX_MASK (IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W )
945
946#define IEM_OP_PRF_VEX RT_BIT_32(28) /**< Indiciates VEX prefix. */
947#define IEM_OP_PRF_EVEX RT_BIT_32(29) /**< Indiciates EVEX prefix. */
948#define IEM_OP_PRF_XOP RT_BIT_32(30) /**< Indiciates XOP prefix. */
949/** @} */
950
951/** @name IEMOPFORM_XXX - Opcode forms
952 * @note These are ORed together with IEMOPHINT_XXX.
953 * @{ */
954/** ModR/M: reg, r/m */
955#define IEMOPFORM_RM 0
956/** ModR/M: reg, r/m (register) */
957#define IEMOPFORM_RM_REG (IEMOPFORM_RM | IEMOPFORM_MOD3)
958/** ModR/M: reg, r/m (memory) */
959#define IEMOPFORM_RM_MEM (IEMOPFORM_RM | IEMOPFORM_NOT_MOD3)
960/** ModR/M: r/m, reg */
961#define IEMOPFORM_MR 1
962/** ModR/M: r/m (register), reg */
963#define IEMOPFORM_MR_REG (IEMOPFORM_MR | IEMOPFORM_MOD3)
964/** ModR/M: r/m (memory), reg */
965#define IEMOPFORM_MR_MEM (IEMOPFORM_MR | IEMOPFORM_NOT_MOD3)
966/** ModR/M: r/m only */
967#define IEMOPFORM_M 2
968/** ModR/M: r/m only (register). */
969#define IEMOPFORM_M_REG (IEMOPFORM_M | IEMOPFORM_MOD3)
970/** ModR/M: r/m only (memory). */
971#define IEMOPFORM_M_MEM (IEMOPFORM_M | IEMOPFORM_NOT_MOD3)
972/** ModR/M: reg only */
973#define IEMOPFORM_R 3
974
975/** VEX+ModR/M: reg, r/m */
976#define IEMOPFORM_VEX_RM 4
977/** VEX+ModR/M: reg, r/m (register) */
978#define IEMOPFORM_VEX_RM_REG (IEMOPFORM_VEX_RM | IEMOPFORM_MOD3)
979/** VEX+ModR/M: reg, r/m (memory) */
980#define IEMOPFORM_VEX_RM_MEM (IEMOPFORM_VEX_RM | IEMOPFORM_NOT_MOD3)
981/** VEX+ModR/M: r/m, reg */
982#define IEMOPFORM_VEX_MR 5
983/** VEX+ModR/M: r/m (register), reg */
984#define IEMOPFORM_VEX_MR_REG (IEMOPFORM_VEX_MR | IEMOPFORM_MOD3)
985/** VEX+ModR/M: r/m (memory), reg */
986#define IEMOPFORM_VEX_MR_MEM (IEMOPFORM_VEX_MR | IEMOPFORM_NOT_MOD3)
987/** VEX+ModR/M: r/m only */
988#define IEMOPFORM_VEX_M 6
989/** VEX+ModR/M: r/m only (register). */
990#define IEMOPFORM_VEX_M_REG (IEMOPFORM_VEX_M | IEMOPFORM_MOD3)
991/** VEX+ModR/M: r/m only (memory). */
992#define IEMOPFORM_VEX_M_MEM (IEMOPFORM_VEX_M | IEMOPFORM_NOT_MOD3)
993/** VEX+ModR/M: reg only */
994#define IEMOPFORM_VEX_R 7
995/** VEX+ModR/M: reg, vvvv, r/m */
996#define IEMOPFORM_VEX_RVM 8
997/** VEX+ModR/M: reg, vvvv, r/m (register). */
998#define IEMOPFORM_VEX_RVM_REG (IEMOPFORM_VEX_RVM | IEMOPFORM_MOD3)
999/** VEX+ModR/M: reg, vvvv, r/m (memory). */
1000#define IEMOPFORM_VEX_RVM_MEM (IEMOPFORM_VEX_RVM | IEMOPFORM_NOT_MOD3)
1001/** VEX+ModR/M: r/m, vvvv, reg */
1002#define IEMOPFORM_VEX_MVR 9
1003/** VEX+ModR/M: r/m, vvvv, reg (register) */
1004#define IEMOPFORM_VEX_MVR_REG (IEMOPFORM_VEX_MVR | IEMOPFORM_MOD3)
1005/** VEX+ModR/M: r/m, vvvv, reg (memory) */
1006#define IEMOPFORM_VEX_MVR_MEM (IEMOPFORM_VEX_MVR | IEMOPFORM_NOT_MOD3)
1007
1008/** Fixed register instruction, no R/M. */
1009#define IEMOPFORM_FIXED 16
1010
1011/** The r/m is a register. */
1012#define IEMOPFORM_MOD3 RT_BIT_32(8)
1013/** The r/m is a memory access. */
1014#define IEMOPFORM_NOT_MOD3 RT_BIT_32(9)
1015/** @} */
1016
1017/** @name IEMOPHINT_XXX - Additional Opcode Hints
1018 * @note These are ORed together with IEMOPFORM_XXX.
1019 * @{ */
1020/** Ignores the operand size prefix (66h). */
1021#define IEMOPHINT_IGNORES_OZ_PFX RT_BIT_32(10)
1022/** Ignores REX.W (aka WIG). */
1023#define IEMOPHINT_IGNORES_REXW RT_BIT_32(11)
1024/** Both the operand size prefixes (66h + REX.W) are ignored. */
1025#define IEMOPHINT_IGNORES_OP_SIZES (IEMOPHINT_IGNORES_OZ_PFX | IEMOPHINT_IGNORES_REXW)
1026/** Allowed with the lock prefix. */
1027#define IEMOPHINT_LOCK_ALLOWED RT_BIT_32(11)
1028/** The VEX.L value is ignored (aka LIG). */
1029#define IEMOPHINT_VEX_L_IGNORED RT_BIT_32(12)
1030/** The VEX.L value must be zero (i.e. 128-bit width only). */
1031#define IEMOPHINT_VEX_L_ZERO RT_BIT_32(13)
1032
1033/** Hint to IEMAllInstructionPython.py that this macro should be skipped. */
1034#define IEMOPHINT_SKIP_PYTHON RT_BIT_32(31)
1035/** @} */
1036
1037/**
1038 * Possible hardware task switch sources.
1039 */
1040typedef enum IEMTASKSWITCH
1041{
1042 /** Task switch caused by an interrupt/exception. */
1043 IEMTASKSWITCH_INT_XCPT = 1,
1044 /** Task switch caused by a far CALL. */
1045 IEMTASKSWITCH_CALL,
1046 /** Task switch caused by a far JMP. */
1047 IEMTASKSWITCH_JUMP,
1048 /** Task switch caused by an IRET. */
1049 IEMTASKSWITCH_IRET
1050} IEMTASKSWITCH;
1051AssertCompileSize(IEMTASKSWITCH, 4);
1052
1053/**
1054 * Possible CrX load (write) sources.
1055 */
1056typedef enum IEMACCESSCRX
1057{
1058 /** CrX access caused by 'mov crX' instruction. */
1059 IEMACCESSCRX_MOV_CRX,
1060 /** CrX (CR0) write caused by 'lmsw' instruction. */
1061 IEMACCESSCRX_LMSW,
1062 /** CrX (CR0) write caused by 'clts' instruction. */
1063 IEMACCESSCRX_CLTS,
1064 /** CrX (CR0) read caused by 'smsw' instruction. */
1065 IEMACCESSCRX_SMSW
1066} IEMACCESSCRX;
1067
1068/**
1069 * Tests if verification mode is enabled.
1070 *
1071 * This expands to @c false when IEM_VERIFICATION_MODE is not defined and
1072 * should therefore cause the compiler to eliminate the verification branch
1073 * of an if statement. */
1074#ifdef IEM_VERIFICATION_MODE_FULL
1075# define IEM_VERIFICATION_ENABLED(a_pVCpu) (!(a_pVCpu)->iem.s.fNoRem)
1076#elif defined(IEM_VERIFICATION_MODE_MINIMAL)
1077# define IEM_VERIFICATION_ENABLED(a_pVCpu) (true)
1078#else
1079# define IEM_VERIFICATION_ENABLED(a_pVCpu) (false)
1080#endif
1081
1082/**
1083 * Tests if full verification mode is enabled.
1084 *
1085 * This expands to @c false when IEM_VERIFICATION_MODE_FULL is not defined and
1086 * should therefore cause the compiler to eliminate the verification branch
1087 * of an if statement. */
1088#ifdef IEM_VERIFICATION_MODE_FULL
1089# define IEM_FULL_VERIFICATION_ENABLED(a_pVCpu) (!(a_pVCpu)->iem.s.fNoRem)
1090#else
1091# define IEM_FULL_VERIFICATION_ENABLED(a_pVCpu) (false)
1092#endif
1093
1094/**
1095 * Tests if full verification mode is enabled again REM.
1096 *
1097 * This expands to @c false when IEM_VERIFICATION_MODE_FULL is not defined and
1098 * should therefore cause the compiler to eliminate the verification branch
1099 * of an if statement. */
1100#ifdef IEM_VERIFICATION_MODE_FULL
1101# ifdef IEM_VERIFICATION_MODE_FULL_HM
1102# define IEM_FULL_VERIFICATION_REM_ENABLED(a_pVCpu) (!(a_pVCpu)->iem.s.fNoRem && !HMIsEnabled((a_pVCpu)->CTX_SUFF(pVM)))
1103# else
1104# define IEM_FULL_VERIFICATION_REM_ENABLED(a_pVCpu) (!(a_pVCpu)->iem.s.fNoRem)
1105# endif
1106#else
1107# define IEM_FULL_VERIFICATION_REM_ENABLED(a_pVCpu) (false)
1108#endif
1109
1110/** @def IEM_VERIFICATION_MODE
1111 * Indicates that one of the verfication modes are enabled.
1112 */
1113#if (defined(IEM_VERIFICATION_MODE_FULL) || defined(IEM_VERIFICATION_MODE_MINIMAL)) && !defined(IEM_VERIFICATION_MODE) \
1114 || defined(DOXYGEN_RUNNING)
1115# define IEM_VERIFICATION_MODE
1116#endif
1117
1118/**
1119 * Indicates to the verifier that the given flag set is undefined.
1120 *
1121 * Can be invoked again to add more flags.
1122 *
1123 * This is a NOOP if the verifier isn't compiled in.
1124 */
1125#ifdef IEM_VERIFICATION_MODE_FULL
1126# define IEMOP_VERIFICATION_UNDEFINED_EFLAGS(a_fEfl) do { pVCpu->iem.s.fUndefinedEFlags |= (a_fEfl); } while (0)
1127#else
1128# define IEMOP_VERIFICATION_UNDEFINED_EFLAGS(a_fEfl) do { } while (0)
1129#endif
1130
1131
1132/** @def IEM_DECL_IMPL_TYPE
1133 * For typedef'ing an instruction implementation function.
1134 *
1135 * @param a_RetType The return type.
1136 * @param a_Name The name of the type.
1137 * @param a_ArgList The argument list enclosed in parentheses.
1138 */
1139
1140/** @def IEM_DECL_IMPL_DEF
1141 * For defining an instruction implementation function.
1142 *
1143 * @param a_RetType The return type.
1144 * @param a_Name The name of the type.
1145 * @param a_ArgList The argument list enclosed in parentheses.
1146 */
1147
1148#if defined(__GNUC__) && defined(RT_ARCH_X86)
1149# define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
1150 __attribute__((__fastcall__)) a_RetType (a_Name) a_ArgList
1151# define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
1152 __attribute__((__fastcall__, __nothrow__)) a_RetType a_Name a_ArgList
1153
1154#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
1155# define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
1156 a_RetType (__fastcall a_Name) a_ArgList
1157# define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
1158 a_RetType __fastcall a_Name a_ArgList
1159
1160#else
1161# define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
1162 a_RetType (VBOXCALL a_Name) a_ArgList
1163# define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
1164 a_RetType VBOXCALL a_Name a_ArgList
1165
1166#endif
1167
1168/** @name Arithmetic assignment operations on bytes (binary).
1169 * @{ */
1170typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINU8, (uint8_t *pu8Dst, uint8_t u8Src, uint32_t *pEFlags));
1171typedef FNIEMAIMPLBINU8 *PFNIEMAIMPLBINU8;
1172FNIEMAIMPLBINU8 iemAImpl_add_u8, iemAImpl_add_u8_locked;
1173FNIEMAIMPLBINU8 iemAImpl_adc_u8, iemAImpl_adc_u8_locked;
1174FNIEMAIMPLBINU8 iemAImpl_sub_u8, iemAImpl_sub_u8_locked;
1175FNIEMAIMPLBINU8 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked;
1176FNIEMAIMPLBINU8 iemAImpl_or_u8, iemAImpl_or_u8_locked;
1177FNIEMAIMPLBINU8 iemAImpl_xor_u8, iemAImpl_xor_u8_locked;
1178FNIEMAIMPLBINU8 iemAImpl_and_u8, iemAImpl_and_u8_locked;
1179/** @} */
1180
1181/** @name Arithmetic assignment operations on words (binary).
1182 * @{ */
1183typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINU16, (uint16_t *pu16Dst, uint16_t u16Src, uint32_t *pEFlags));
1184typedef FNIEMAIMPLBINU16 *PFNIEMAIMPLBINU16;
1185FNIEMAIMPLBINU16 iemAImpl_add_u16, iemAImpl_add_u16_locked;
1186FNIEMAIMPLBINU16 iemAImpl_adc_u16, iemAImpl_adc_u16_locked;
1187FNIEMAIMPLBINU16 iemAImpl_sub_u16, iemAImpl_sub_u16_locked;
1188FNIEMAIMPLBINU16 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked;
1189FNIEMAIMPLBINU16 iemAImpl_or_u16, iemAImpl_or_u16_locked;
1190FNIEMAIMPLBINU16 iemAImpl_xor_u16, iemAImpl_xor_u16_locked;
1191FNIEMAIMPLBINU16 iemAImpl_and_u16, iemAImpl_and_u16_locked;
1192/** @} */
1193
1194/** @name Arithmetic assignment operations on double words (binary).
1195 * @{ */
1196typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINU32, (uint32_t *pu32Dst, uint32_t u32Src, uint32_t *pEFlags));
1197typedef FNIEMAIMPLBINU32 *PFNIEMAIMPLBINU32;
1198FNIEMAIMPLBINU32 iemAImpl_add_u32, iemAImpl_add_u32_locked;
1199FNIEMAIMPLBINU32 iemAImpl_adc_u32, iemAImpl_adc_u32_locked;
1200FNIEMAIMPLBINU32 iemAImpl_sub_u32, iemAImpl_sub_u32_locked;
1201FNIEMAIMPLBINU32 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked;
1202FNIEMAIMPLBINU32 iemAImpl_or_u32, iemAImpl_or_u32_locked;
1203FNIEMAIMPLBINU32 iemAImpl_xor_u32, iemAImpl_xor_u32_locked;
1204FNIEMAIMPLBINU32 iemAImpl_and_u32, iemAImpl_and_u32_locked;
1205/** @} */
1206
1207/** @name Arithmetic assignment operations on quad words (binary).
1208 * @{ */
1209typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINU64, (uint64_t *pu64Dst, uint64_t u64Src, uint32_t *pEFlags));
1210typedef FNIEMAIMPLBINU64 *PFNIEMAIMPLBINU64;
1211FNIEMAIMPLBINU64 iemAImpl_add_u64, iemAImpl_add_u64_locked;
1212FNIEMAIMPLBINU64 iemAImpl_adc_u64, iemAImpl_adc_u64_locked;
1213FNIEMAIMPLBINU64 iemAImpl_sub_u64, iemAImpl_sub_u64_locked;
1214FNIEMAIMPLBINU64 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked;
1215FNIEMAIMPLBINU64 iemAImpl_or_u64, iemAImpl_or_u64_locked;
1216FNIEMAIMPLBINU64 iemAImpl_xor_u64, iemAImpl_xor_u64_locked;
1217FNIEMAIMPLBINU64 iemAImpl_and_u64, iemAImpl_and_u64_locked;
1218/** @} */
1219
1220/** @name Compare operations (thrown in with the binary ops).
1221 * @{ */
1222FNIEMAIMPLBINU8 iemAImpl_cmp_u8;
1223FNIEMAIMPLBINU16 iemAImpl_cmp_u16;
1224FNIEMAIMPLBINU32 iemAImpl_cmp_u32;
1225FNIEMAIMPLBINU64 iemAImpl_cmp_u64;
1226/** @} */
1227
1228/** @name Test operations (thrown in with the binary ops).
1229 * @{ */
1230FNIEMAIMPLBINU8 iemAImpl_test_u8;
1231FNIEMAIMPLBINU16 iemAImpl_test_u16;
1232FNIEMAIMPLBINU32 iemAImpl_test_u32;
1233FNIEMAIMPLBINU64 iemAImpl_test_u64;
1234/** @} */
1235
1236/** @name Bit operations operations (thrown in with the binary ops).
1237 * @{ */
1238FNIEMAIMPLBINU16 iemAImpl_bt_u16, iemAImpl_bt_u16_locked;
1239FNIEMAIMPLBINU32 iemAImpl_bt_u32, iemAImpl_bt_u32_locked;
1240FNIEMAIMPLBINU64 iemAImpl_bt_u64, iemAImpl_bt_u64_locked;
1241FNIEMAIMPLBINU16 iemAImpl_btc_u16, iemAImpl_btc_u16_locked;
1242FNIEMAIMPLBINU32 iemAImpl_btc_u32, iemAImpl_btc_u32_locked;
1243FNIEMAIMPLBINU64 iemAImpl_btc_u64, iemAImpl_btc_u64_locked;
1244FNIEMAIMPLBINU16 iemAImpl_btr_u16, iemAImpl_btr_u16_locked;
1245FNIEMAIMPLBINU32 iemAImpl_btr_u32, iemAImpl_btr_u32_locked;
1246FNIEMAIMPLBINU64 iemAImpl_btr_u64, iemAImpl_btr_u64_locked;
1247FNIEMAIMPLBINU16 iemAImpl_bts_u16, iemAImpl_bts_u16_locked;
1248FNIEMAIMPLBINU32 iemAImpl_bts_u32, iemAImpl_bts_u32_locked;
1249FNIEMAIMPLBINU64 iemAImpl_bts_u64, iemAImpl_bts_u64_locked;
1250/** @} */
1251
1252/** @name Exchange memory with register operations.
1253 * @{ */
1254IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u8, (uint8_t *pu8Mem, uint8_t *pu8Reg));
1255IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u16,(uint16_t *pu16Mem, uint16_t *pu16Reg));
1256IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u32,(uint32_t *pu32Mem, uint32_t *pu32Reg));
1257IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u64,(uint64_t *pu64Mem, uint64_t *pu64Reg));
1258/** @} */
1259
1260/** @name Exchange and add operations.
1261 * @{ */
1262IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u8, (uint8_t *pu8Dst, uint8_t *pu8Reg, uint32_t *pEFlags));
1263IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u16,(uint16_t *pu16Dst, uint16_t *pu16Reg, uint32_t *pEFlags));
1264IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u32,(uint32_t *pu32Dst, uint32_t *pu32Reg, uint32_t *pEFlags));
1265IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u64,(uint64_t *pu64Dst, uint64_t *pu64Reg, uint32_t *pEFlags));
1266IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u8_locked, (uint8_t *pu8Dst, uint8_t *pu8Reg, uint32_t *pEFlags));
1267IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u16_locked,(uint16_t *pu16Dst, uint16_t *pu16Reg, uint32_t *pEFlags));
1268IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u32_locked,(uint32_t *pu32Dst, uint32_t *pu32Reg, uint32_t *pEFlags));
1269IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u64_locked,(uint64_t *pu64Dst, uint64_t *pu64Reg, uint32_t *pEFlags));
1270/** @} */
1271
1272/** @name Compare and exchange.
1273 * @{ */
1274IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u8, (uint8_t *pu8Dst, uint8_t *puAl, uint8_t uSrcReg, uint32_t *pEFlags));
1275IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u8_locked, (uint8_t *pu8Dst, uint8_t *puAl, uint8_t uSrcReg, uint32_t *pEFlags));
1276IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u16, (uint16_t *pu16Dst, uint16_t *puAx, uint16_t uSrcReg, uint32_t *pEFlags));
1277IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u16_locked,(uint16_t *pu16Dst, uint16_t *puAx, uint16_t uSrcReg, uint32_t *pEFlags));
1278IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u32, (uint32_t *pu32Dst, uint32_t *puEax, uint32_t uSrcReg, uint32_t *pEFlags));
1279IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u32_locked,(uint32_t *pu32Dst, uint32_t *puEax, uint32_t uSrcReg, uint32_t *pEFlags));
1280#ifdef RT_ARCH_X86
1281IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u64, (uint64_t *pu64Dst, uint64_t *puRax, uint64_t *puSrcReg, uint32_t *pEFlags));
1282IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u64_locked,(uint64_t *pu64Dst, uint64_t *puRax, uint64_t *puSrcReg, uint32_t *pEFlags));
1283#else
1284IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u64, (uint64_t *pu64Dst, uint64_t *puRax, uint64_t uSrcReg, uint32_t *pEFlags));
1285IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u64_locked,(uint64_t *pu64Dst, uint64_t *puRax, uint64_t uSrcReg, uint32_t *pEFlags));
1286#endif
1287IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg8b,(uint64_t *pu64Dst, PRTUINT64U pu64EaxEdx, PRTUINT64U pu64EbxEcx,
1288 uint32_t *pEFlags));
1289IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg8b_locked,(uint64_t *pu64Dst, PRTUINT64U pu64EaxEdx, PRTUINT64U pu64EbxEcx,
1290 uint32_t *pEFlags));
1291IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg16b,(PRTUINT128U pu128Dst, PRTUINT128U pu128RaxRdx, PRTUINT128U pu128RbxRcx,
1292 uint32_t *pEFlags));
1293IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg16b_locked,(PRTUINT128U pu128Dst, PRTUINT128U pu128RaxRdx, PRTUINT128U pu128RbxRcx,
1294 uint32_t *pEFlags));
1295IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg16b_fallback,(PRTUINT128U pu128Dst, PRTUINT128U pu128RaxRdx,
1296 PRTUINT128U pu128RbxRcx, uint32_t *pEFlags));
1297/** @} */
1298
1299/** @name Memory ordering
1300 * @{ */
1301typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEMFENCE,(void));
1302typedef FNIEMAIMPLMEMFENCE *PFNIEMAIMPLMEMFENCE;
1303IEM_DECL_IMPL_DEF(void, iemAImpl_mfence,(void));
1304IEM_DECL_IMPL_DEF(void, iemAImpl_sfence,(void));
1305IEM_DECL_IMPL_DEF(void, iemAImpl_lfence,(void));
1306IEM_DECL_IMPL_DEF(void, iemAImpl_alt_mem_fence,(void));
1307/** @} */
1308
1309/** @name Double precision shifts
1310 * @{ */
1311typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTDBLU16,(uint16_t *pu16Dst, uint16_t u16Src, uint8_t cShift, uint32_t *pEFlags));
1312typedef FNIEMAIMPLSHIFTDBLU16 *PFNIEMAIMPLSHIFTDBLU16;
1313typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTDBLU32,(uint32_t *pu32Dst, uint32_t u32Src, uint8_t cShift, uint32_t *pEFlags));
1314typedef FNIEMAIMPLSHIFTDBLU32 *PFNIEMAIMPLSHIFTDBLU32;
1315typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTDBLU64,(uint64_t *pu64Dst, uint64_t u64Src, uint8_t cShift, uint32_t *pEFlags));
1316typedef FNIEMAIMPLSHIFTDBLU64 *PFNIEMAIMPLSHIFTDBLU64;
1317FNIEMAIMPLSHIFTDBLU16 iemAImpl_shld_u16;
1318FNIEMAIMPLSHIFTDBLU32 iemAImpl_shld_u32;
1319FNIEMAIMPLSHIFTDBLU64 iemAImpl_shld_u64;
1320FNIEMAIMPLSHIFTDBLU16 iemAImpl_shrd_u16;
1321FNIEMAIMPLSHIFTDBLU32 iemAImpl_shrd_u32;
1322FNIEMAIMPLSHIFTDBLU64 iemAImpl_shrd_u64;
1323/** @} */
1324
1325
1326/** @name Bit search operations (thrown in with the binary ops).
1327 * @{ */
1328FNIEMAIMPLBINU16 iemAImpl_bsf_u16;
1329FNIEMAIMPLBINU32 iemAImpl_bsf_u32;
1330FNIEMAIMPLBINU64 iemAImpl_bsf_u64;
1331FNIEMAIMPLBINU16 iemAImpl_bsr_u16;
1332FNIEMAIMPLBINU32 iemAImpl_bsr_u32;
1333FNIEMAIMPLBINU64 iemAImpl_bsr_u64;
1334/** @} */
1335
1336/** @name Signed multiplication operations (thrown in with the binary ops).
1337 * @{ */
1338FNIEMAIMPLBINU16 iemAImpl_imul_two_u16;
1339FNIEMAIMPLBINU32 iemAImpl_imul_two_u32;
1340FNIEMAIMPLBINU64 iemAImpl_imul_two_u64;
1341/** @} */
1342
1343/** @name Arithmetic assignment operations on bytes (unary).
1344 * @{ */
1345typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLUNARYU8, (uint8_t *pu8Dst, uint32_t *pEFlags));
1346typedef FNIEMAIMPLUNARYU8 *PFNIEMAIMPLUNARYU8;
1347FNIEMAIMPLUNARYU8 iemAImpl_inc_u8, iemAImpl_inc_u8_locked;
1348FNIEMAIMPLUNARYU8 iemAImpl_dec_u8, iemAImpl_dec_u8_locked;
1349FNIEMAIMPLUNARYU8 iemAImpl_not_u8, iemAImpl_not_u8_locked;
1350FNIEMAIMPLUNARYU8 iemAImpl_neg_u8, iemAImpl_neg_u8_locked;
1351/** @} */
1352
1353/** @name Arithmetic assignment operations on words (unary).
1354 * @{ */
1355typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLUNARYU16, (uint16_t *pu16Dst, uint32_t *pEFlags));
1356typedef FNIEMAIMPLUNARYU16 *PFNIEMAIMPLUNARYU16;
1357FNIEMAIMPLUNARYU16 iemAImpl_inc_u16, iemAImpl_inc_u16_locked;
1358FNIEMAIMPLUNARYU16 iemAImpl_dec_u16, iemAImpl_dec_u16_locked;
1359FNIEMAIMPLUNARYU16 iemAImpl_not_u16, iemAImpl_not_u16_locked;
1360FNIEMAIMPLUNARYU16 iemAImpl_neg_u16, iemAImpl_neg_u16_locked;
1361/** @} */
1362
1363/** @name Arithmetic assignment operations on double words (unary).
1364 * @{ */
1365typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLUNARYU32, (uint32_t *pu32Dst, uint32_t *pEFlags));
1366typedef FNIEMAIMPLUNARYU32 *PFNIEMAIMPLUNARYU32;
1367FNIEMAIMPLUNARYU32 iemAImpl_inc_u32, iemAImpl_inc_u32_locked;
1368FNIEMAIMPLUNARYU32 iemAImpl_dec_u32, iemAImpl_dec_u32_locked;
1369FNIEMAIMPLUNARYU32 iemAImpl_not_u32, iemAImpl_not_u32_locked;
1370FNIEMAIMPLUNARYU32 iemAImpl_neg_u32, iemAImpl_neg_u32_locked;
1371/** @} */
1372
1373/** @name Arithmetic assignment operations on quad words (unary).
1374 * @{ */
1375typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLUNARYU64, (uint64_t *pu64Dst, uint32_t *pEFlags));
1376typedef FNIEMAIMPLUNARYU64 *PFNIEMAIMPLUNARYU64;
1377FNIEMAIMPLUNARYU64 iemAImpl_inc_u64, iemAImpl_inc_u64_locked;
1378FNIEMAIMPLUNARYU64 iemAImpl_dec_u64, iemAImpl_dec_u64_locked;
1379FNIEMAIMPLUNARYU64 iemAImpl_not_u64, iemAImpl_not_u64_locked;
1380FNIEMAIMPLUNARYU64 iemAImpl_neg_u64, iemAImpl_neg_u64_locked;
1381/** @} */
1382
1383
1384/** @name Shift operations on bytes (Group 2).
1385 * @{ */
1386typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTU8,(uint8_t *pu8Dst, uint8_t cShift, uint32_t *pEFlags));
1387typedef FNIEMAIMPLSHIFTU8 *PFNIEMAIMPLSHIFTU8;
1388FNIEMAIMPLSHIFTU8 iemAImpl_rol_u8;
1389FNIEMAIMPLSHIFTU8 iemAImpl_ror_u8;
1390FNIEMAIMPLSHIFTU8 iemAImpl_rcl_u8;
1391FNIEMAIMPLSHIFTU8 iemAImpl_rcr_u8;
1392FNIEMAIMPLSHIFTU8 iemAImpl_shl_u8;
1393FNIEMAIMPLSHIFTU8 iemAImpl_shr_u8;
1394FNIEMAIMPLSHIFTU8 iemAImpl_sar_u8;
1395/** @} */
1396
1397/** @name Shift operations on words (Group 2).
1398 * @{ */
1399typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTU16,(uint16_t *pu16Dst, uint8_t cShift, uint32_t *pEFlags));
1400typedef FNIEMAIMPLSHIFTU16 *PFNIEMAIMPLSHIFTU16;
1401FNIEMAIMPLSHIFTU16 iemAImpl_rol_u16;
1402FNIEMAIMPLSHIFTU16 iemAImpl_ror_u16;
1403FNIEMAIMPLSHIFTU16 iemAImpl_rcl_u16;
1404FNIEMAIMPLSHIFTU16 iemAImpl_rcr_u16;
1405FNIEMAIMPLSHIFTU16 iemAImpl_shl_u16;
1406FNIEMAIMPLSHIFTU16 iemAImpl_shr_u16;
1407FNIEMAIMPLSHIFTU16 iemAImpl_sar_u16;
1408/** @} */
1409
1410/** @name Shift operations on double words (Group 2).
1411 * @{ */
1412typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTU32,(uint32_t *pu32Dst, uint8_t cShift, uint32_t *pEFlags));
1413typedef FNIEMAIMPLSHIFTU32 *PFNIEMAIMPLSHIFTU32;
1414FNIEMAIMPLSHIFTU32 iemAImpl_rol_u32;
1415FNIEMAIMPLSHIFTU32 iemAImpl_ror_u32;
1416FNIEMAIMPLSHIFTU32 iemAImpl_rcl_u32;
1417FNIEMAIMPLSHIFTU32 iemAImpl_rcr_u32;
1418FNIEMAIMPLSHIFTU32 iemAImpl_shl_u32;
1419FNIEMAIMPLSHIFTU32 iemAImpl_shr_u32;
1420FNIEMAIMPLSHIFTU32 iemAImpl_sar_u32;
1421/** @} */
1422
1423/** @name Shift operations on words (Group 2).
1424 * @{ */
1425typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTU64,(uint64_t *pu64Dst, uint8_t cShift, uint32_t *pEFlags));
1426typedef FNIEMAIMPLSHIFTU64 *PFNIEMAIMPLSHIFTU64;
1427FNIEMAIMPLSHIFTU64 iemAImpl_rol_u64;
1428FNIEMAIMPLSHIFTU64 iemAImpl_ror_u64;
1429FNIEMAIMPLSHIFTU64 iemAImpl_rcl_u64;
1430FNIEMAIMPLSHIFTU64 iemAImpl_rcr_u64;
1431FNIEMAIMPLSHIFTU64 iemAImpl_shl_u64;
1432FNIEMAIMPLSHIFTU64 iemAImpl_shr_u64;
1433FNIEMAIMPLSHIFTU64 iemAImpl_sar_u64;
1434/** @} */
1435
1436/** @name Multiplication and division operations.
1437 * @{ */
1438typedef IEM_DECL_IMPL_TYPE(int, FNIEMAIMPLMULDIVU8,(uint16_t *pu16AX, uint8_t u8FactorDivisor, uint32_t *pEFlags));
1439typedef FNIEMAIMPLMULDIVU8 *PFNIEMAIMPLMULDIVU8;
1440FNIEMAIMPLMULDIVU8 iemAImpl_mul_u8, iemAImpl_imul_u8;
1441FNIEMAIMPLMULDIVU8 iemAImpl_div_u8, iemAImpl_idiv_u8;
1442
1443typedef IEM_DECL_IMPL_TYPE(int, FNIEMAIMPLMULDIVU16,(uint16_t *pu16AX, uint16_t *pu16DX, uint16_t u16FactorDivisor, uint32_t *pEFlags));
1444typedef FNIEMAIMPLMULDIVU16 *PFNIEMAIMPLMULDIVU16;
1445FNIEMAIMPLMULDIVU16 iemAImpl_mul_u16, iemAImpl_imul_u16;
1446FNIEMAIMPLMULDIVU16 iemAImpl_div_u16, iemAImpl_idiv_u16;
1447
1448typedef IEM_DECL_IMPL_TYPE(int, FNIEMAIMPLMULDIVU32,(uint32_t *pu32EAX, uint32_t *pu32EDX, uint32_t u32FactorDivisor, uint32_t *pEFlags));
1449typedef FNIEMAIMPLMULDIVU32 *PFNIEMAIMPLMULDIVU32;
1450FNIEMAIMPLMULDIVU32 iemAImpl_mul_u32, iemAImpl_imul_u32;
1451FNIEMAIMPLMULDIVU32 iemAImpl_div_u32, iemAImpl_idiv_u32;
1452
1453typedef IEM_DECL_IMPL_TYPE(int, FNIEMAIMPLMULDIVU64,(uint64_t *pu64RAX, uint64_t *pu64RDX, uint64_t u64FactorDivisor, uint32_t *pEFlags));
1454typedef FNIEMAIMPLMULDIVU64 *PFNIEMAIMPLMULDIVU64;
1455FNIEMAIMPLMULDIVU64 iemAImpl_mul_u64, iemAImpl_imul_u64;
1456FNIEMAIMPLMULDIVU64 iemAImpl_div_u64, iemAImpl_idiv_u64;
1457/** @} */
1458
1459/** @name Byte Swap.
1460 * @{ */
1461IEM_DECL_IMPL_TYPE(void, iemAImpl_bswap_u16,(uint32_t *pu32Dst)); /* Yes, 32-bit register access. */
1462IEM_DECL_IMPL_TYPE(void, iemAImpl_bswap_u32,(uint32_t *pu32Dst));
1463IEM_DECL_IMPL_TYPE(void, iemAImpl_bswap_u64,(uint64_t *pu64Dst));
1464/** @} */
1465
1466/** @name Misc.
1467 * @{ */
1468FNIEMAIMPLBINU16 iemAImpl_arpl;
1469/** @} */
1470
1471
1472/** @name FPU operations taking a 32-bit float argument
1473 * @{ */
1474typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR32FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
1475 PCRTFLOAT80U pr80Val1, PCRTFLOAT32U pr32Val2));
1476typedef FNIEMAIMPLFPUR32FSW *PFNIEMAIMPLFPUR32FSW;
1477
1478typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR32,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
1479 PCRTFLOAT80U pr80Val1, PCRTFLOAT32U pr32Val2));
1480typedef FNIEMAIMPLFPUR32 *PFNIEMAIMPLFPUR32;
1481
1482FNIEMAIMPLFPUR32FSW iemAImpl_fcom_r80_by_r32;
1483FNIEMAIMPLFPUR32 iemAImpl_fadd_r80_by_r32;
1484FNIEMAIMPLFPUR32 iemAImpl_fmul_r80_by_r32;
1485FNIEMAIMPLFPUR32 iemAImpl_fsub_r80_by_r32;
1486FNIEMAIMPLFPUR32 iemAImpl_fsubr_r80_by_r32;
1487FNIEMAIMPLFPUR32 iemAImpl_fdiv_r80_by_r32;
1488FNIEMAIMPLFPUR32 iemAImpl_fdivr_r80_by_r32;
1489
1490IEM_DECL_IMPL_DEF(void, iemAImpl_fld_r32_to_r80,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTFLOAT32U pr32Val));
1491IEM_DECL_IMPL_DEF(void, iemAImpl_fst_r80_to_r32,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
1492 PRTFLOAT32U pr32Val, PCRTFLOAT80U pr80Val));
1493/** @} */
1494
1495/** @name FPU operations taking a 64-bit float argument
1496 * @{ */
1497typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR64,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
1498 PCRTFLOAT80U pr80Val1, PCRTFLOAT64U pr64Val2));
1499typedef FNIEMAIMPLFPUR64 *PFNIEMAIMPLFPUR64;
1500
1501FNIEMAIMPLFPUR64 iemAImpl_fadd_r80_by_r64;
1502FNIEMAIMPLFPUR64 iemAImpl_fmul_r80_by_r64;
1503FNIEMAIMPLFPUR64 iemAImpl_fsub_r80_by_r64;
1504FNIEMAIMPLFPUR64 iemAImpl_fsubr_r80_by_r64;
1505FNIEMAIMPLFPUR64 iemAImpl_fdiv_r80_by_r64;
1506FNIEMAIMPLFPUR64 iemAImpl_fdivr_r80_by_r64;
1507
1508IEM_DECL_IMPL_DEF(void, iemAImpl_fcom_r80_by_r64,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
1509 PCRTFLOAT80U pr80Val1, PCRTFLOAT64U pr64Val2));
1510IEM_DECL_IMPL_DEF(void, iemAImpl_fld_r64_to_r80,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTFLOAT64U pr64Val));
1511IEM_DECL_IMPL_DEF(void, iemAImpl_fst_r80_to_r64,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
1512 PRTFLOAT64U pr32Val, PCRTFLOAT80U pr80Val));
1513/** @} */
1514
1515/** @name FPU operations taking a 80-bit float argument
1516 * @{ */
1517typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
1518 PCRTFLOAT80U pr80Val1, PCRTFLOAT80U pr80Val2));
1519typedef FNIEMAIMPLFPUR80 *PFNIEMAIMPLFPUR80;
1520FNIEMAIMPLFPUR80 iemAImpl_fadd_r80_by_r80;
1521FNIEMAIMPLFPUR80 iemAImpl_fmul_r80_by_r80;
1522FNIEMAIMPLFPUR80 iemAImpl_fsub_r80_by_r80;
1523FNIEMAIMPLFPUR80 iemAImpl_fsubr_r80_by_r80;
1524FNIEMAIMPLFPUR80 iemAImpl_fdiv_r80_by_r80;
1525FNIEMAIMPLFPUR80 iemAImpl_fdivr_r80_by_r80;
1526FNIEMAIMPLFPUR80 iemAImpl_fprem_r80_by_r80;
1527FNIEMAIMPLFPUR80 iemAImpl_fprem1_r80_by_r80;
1528FNIEMAIMPLFPUR80 iemAImpl_fscale_r80_by_r80;
1529
1530FNIEMAIMPLFPUR80 iemAImpl_fpatan_r80_by_r80;
1531FNIEMAIMPLFPUR80 iemAImpl_fyl2x_r80_by_r80;
1532FNIEMAIMPLFPUR80 iemAImpl_fyl2xp1_r80_by_r80;
1533
1534typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
1535 PCRTFLOAT80U pr80Val1, PCRTFLOAT80U pr80Val2));
1536typedef FNIEMAIMPLFPUR80FSW *PFNIEMAIMPLFPUR80FSW;
1537FNIEMAIMPLFPUR80FSW iemAImpl_fcom_r80_by_r80;
1538FNIEMAIMPLFPUR80FSW iemAImpl_fucom_r80_by_r80;
1539
1540typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLFPUR80EFL,(PCX86FXSTATE pFpuState, uint16_t *pu16Fsw,
1541 PCRTFLOAT80U pr80Val1, PCRTFLOAT80U pr80Val2));
1542typedef FNIEMAIMPLFPUR80EFL *PFNIEMAIMPLFPUR80EFL;
1543FNIEMAIMPLFPUR80EFL iemAImpl_fcomi_r80_by_r80;
1544FNIEMAIMPLFPUR80EFL iemAImpl_fucomi_r80_by_r80;
1545
1546typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80UNARY,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTFLOAT80U pr80Val));
1547typedef FNIEMAIMPLFPUR80UNARY *PFNIEMAIMPLFPUR80UNARY;
1548FNIEMAIMPLFPUR80UNARY iemAImpl_fabs_r80;
1549FNIEMAIMPLFPUR80UNARY iemAImpl_fchs_r80;
1550FNIEMAIMPLFPUR80UNARY iemAImpl_f2xm1_r80;
1551FNIEMAIMPLFPUR80UNARY iemAImpl_fsqrt_r80;
1552FNIEMAIMPLFPUR80UNARY iemAImpl_frndint_r80;
1553FNIEMAIMPLFPUR80UNARY iemAImpl_fsin_r80;
1554FNIEMAIMPLFPUR80UNARY iemAImpl_fcos_r80;
1555
1556typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80UNARYFSW,(PCX86FXSTATE pFpuState, uint16_t *pu16Fsw, PCRTFLOAT80U pr80Val));
1557typedef FNIEMAIMPLFPUR80UNARYFSW *PFNIEMAIMPLFPUR80UNARYFSW;
1558FNIEMAIMPLFPUR80UNARYFSW iemAImpl_ftst_r80;
1559FNIEMAIMPLFPUR80UNARYFSW iemAImpl_fxam_r80;
1560
1561typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80LDCONST,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes));
1562typedef FNIEMAIMPLFPUR80LDCONST *PFNIEMAIMPLFPUR80LDCONST;
1563FNIEMAIMPLFPUR80LDCONST iemAImpl_fld1;
1564FNIEMAIMPLFPUR80LDCONST iemAImpl_fldl2t;
1565FNIEMAIMPLFPUR80LDCONST iemAImpl_fldl2e;
1566FNIEMAIMPLFPUR80LDCONST iemAImpl_fldpi;
1567FNIEMAIMPLFPUR80LDCONST iemAImpl_fldlg2;
1568FNIEMAIMPLFPUR80LDCONST iemAImpl_fldln2;
1569FNIEMAIMPLFPUR80LDCONST iemAImpl_fldz;
1570
1571typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80UNARYTWO,(PCX86FXSTATE pFpuState, PIEMFPURESULTTWO pFpuResTwo,
1572 PCRTFLOAT80U pr80Val));
1573typedef FNIEMAIMPLFPUR80UNARYTWO *PFNIEMAIMPLFPUR80UNARYTWO;
1574FNIEMAIMPLFPUR80UNARYTWO iemAImpl_fptan_r80_r80;
1575FNIEMAIMPLFPUR80UNARYTWO iemAImpl_fxtract_r80_r80;
1576FNIEMAIMPLFPUR80UNARYTWO iemAImpl_fsincos_r80_r80;
1577
1578IEM_DECL_IMPL_DEF(void, iemAImpl_fld_r80_from_r80,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTFLOAT80U pr80Val));
1579IEM_DECL_IMPL_DEF(void, iemAImpl_fst_r80_to_r80,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
1580 PRTFLOAT80U pr80Dst, PCRTFLOAT80U pr80Src));
1581
1582/** @} */
1583
1584/** @name FPU operations taking a 16-bit signed integer argument
1585 * @{ */
1586typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUI16,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
1587 PCRTFLOAT80U pr80Val1, int16_t const *pi16Val2));
1588typedef FNIEMAIMPLFPUI16 *PFNIEMAIMPLFPUI16;
1589
1590FNIEMAIMPLFPUI16 iemAImpl_fiadd_r80_by_i16;
1591FNIEMAIMPLFPUI16 iemAImpl_fimul_r80_by_i16;
1592FNIEMAIMPLFPUI16 iemAImpl_fisub_r80_by_i16;
1593FNIEMAIMPLFPUI16 iemAImpl_fisubr_r80_by_i16;
1594FNIEMAIMPLFPUI16 iemAImpl_fidiv_r80_by_i16;
1595FNIEMAIMPLFPUI16 iemAImpl_fidivr_r80_by_i16;
1596
1597IEM_DECL_IMPL_DEF(void, iemAImpl_ficom_r80_by_i16,(PCX86FXSTATE pFpuState, uint16_t *pu16Fsw,
1598 PCRTFLOAT80U pr80Val1, int16_t const *pi16Val2));
1599
1600IEM_DECL_IMPL_DEF(void, iemAImpl_fild_i16_to_r80,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, int16_t const *pi16Val));
1601IEM_DECL_IMPL_DEF(void, iemAImpl_fist_r80_to_i16,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
1602 int16_t *pi16Val, PCRTFLOAT80U pr80Val));
1603IEM_DECL_IMPL_DEF(void, iemAImpl_fistt_r80_to_i16,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
1604 int16_t *pi16Val, PCRTFLOAT80U pr80Val));
1605/** @} */
1606
1607/** @name FPU operations taking a 32-bit signed integer argument
1608 * @{ */
1609typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUI32,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
1610 PCRTFLOAT80U pr80Val1, int32_t const *pi32Val2));
1611typedef FNIEMAIMPLFPUI32 *PFNIEMAIMPLFPUI32;
1612
1613FNIEMAIMPLFPUI32 iemAImpl_fiadd_r80_by_i32;
1614FNIEMAIMPLFPUI32 iemAImpl_fimul_r80_by_i32;
1615FNIEMAIMPLFPUI32 iemAImpl_fisub_r80_by_i32;
1616FNIEMAIMPLFPUI32 iemAImpl_fisubr_r80_by_i32;
1617FNIEMAIMPLFPUI32 iemAImpl_fidiv_r80_by_i32;
1618FNIEMAIMPLFPUI32 iemAImpl_fidivr_r80_by_i32;
1619
1620IEM_DECL_IMPL_DEF(void, iemAImpl_ficom_r80_by_i32,(PCX86FXSTATE pFpuState, uint16_t *pu16Fsw,
1621 PCRTFLOAT80U pr80Val1, int32_t const *pi32Val2));
1622
1623IEM_DECL_IMPL_DEF(void, iemAImpl_fild_i32_to_r80,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, int32_t const *pi32Val));
1624IEM_DECL_IMPL_DEF(void, iemAImpl_fist_r80_to_i32,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
1625 int32_t *pi32Val, PCRTFLOAT80U pr80Val));
1626IEM_DECL_IMPL_DEF(void, iemAImpl_fistt_r80_to_i32,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
1627 int32_t *pi32Val, PCRTFLOAT80U pr80Val));
1628/** @} */
1629
1630/** @name FPU operations taking a 64-bit signed integer argument
1631 * @{ */
1632typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUI64,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
1633 PCRTFLOAT80U pr80Val1, int64_t const *pi64Val2));
1634typedef FNIEMAIMPLFPUI64 *PFNIEMAIMPLFPUI64;
1635
1636FNIEMAIMPLFPUI64 iemAImpl_fiadd_r80_by_i64;
1637FNIEMAIMPLFPUI64 iemAImpl_fimul_r80_by_i64;
1638FNIEMAIMPLFPUI64 iemAImpl_fisub_r80_by_i64;
1639FNIEMAIMPLFPUI64 iemAImpl_fisubr_r80_by_i64;
1640FNIEMAIMPLFPUI64 iemAImpl_fidiv_r80_by_i64;
1641FNIEMAIMPLFPUI64 iemAImpl_fidivr_r80_by_i64;
1642
1643IEM_DECL_IMPL_DEF(void, iemAImpl_ficom_r80_by_i64,(PCX86FXSTATE pFpuState, uint16_t *pu16Fsw,
1644 PCRTFLOAT80U pr80Val1, int64_t const *pi64Val2));
1645
1646IEM_DECL_IMPL_DEF(void, iemAImpl_fild_i64_to_r80,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, int64_t const *pi64Val));
1647IEM_DECL_IMPL_DEF(void, iemAImpl_fist_r80_to_i64,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
1648 int64_t *pi64Val, PCRTFLOAT80U pr80Val));
1649IEM_DECL_IMPL_DEF(void, iemAImpl_fistt_r80_to_i64,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
1650 int64_t *pi32Val, PCRTFLOAT80U pr80Val));
1651/** @} */
1652
1653
1654/** Temporary type representing a 256-bit vector register. */
1655typedef struct {uint64_t au64[4]; } IEMVMM256;
1656/** Temporary type pointing to a 256-bit vector register. */
1657typedef IEMVMM256 *PIEMVMM256;
1658/** Temporary type pointing to a const 256-bit vector register. */
1659typedef IEMVMM256 *PCIEMVMM256;
1660
1661
1662/** @name Media (SSE/MMX/AVX) operations: full1 + full2 -> full1.
1663 * @{ */
1664typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF2U64,(PCX86FXSTATE pFpuState, uint64_t *pu64Dst, uint64_t const *pu64Src));
1665typedef FNIEMAIMPLMEDIAF2U64 *PFNIEMAIMPLMEDIAF2U64;
1666typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF2U128,(PCX86FXSTATE pFpuState, PRTUINT128U pu128Dst, PCRTUINT128U pu128Src));
1667typedef FNIEMAIMPLMEDIAF2U128 *PFNIEMAIMPLMEDIAF2U128;
1668FNIEMAIMPLMEDIAF2U64 iemAImpl_pxor_u64, iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqd_u64;
1669FNIEMAIMPLMEDIAF2U128 iemAImpl_pxor_u128, iemAImpl_pcmpeqb_u128, iemAImpl_pcmpeqw_u128, iemAImpl_pcmpeqd_u128;
1670/** @} */
1671
1672/** @name Media (SSE/MMX/AVX) operations: lowhalf1 + lowhalf1 -> full1.
1673 * @{ */
1674typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF1L1U64,(PCX86FXSTATE pFpuState, uint64_t *pu64Dst, uint32_t const *pu32Src));
1675typedef FNIEMAIMPLMEDIAF1L1U64 *PFNIEMAIMPLMEDIAF1L1U64;
1676typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF1L1U128,(PCX86FXSTATE pFpuState, PRTUINT128U pu128Dst, uint64_t const *pu64Src));
1677typedef FNIEMAIMPLMEDIAF1L1U128 *PFNIEMAIMPLMEDIAF1L1U128;
1678FNIEMAIMPLMEDIAF1L1U64 iemAImpl_punpcklbw_u64, iemAImpl_punpcklwd_u64, iemAImpl_punpckldq_u64;
1679FNIEMAIMPLMEDIAF1L1U128 iemAImpl_punpcklbw_u128, iemAImpl_punpcklwd_u128, iemAImpl_punpckldq_u128, iemAImpl_punpcklqdq_u128;
1680/** @} */
1681
1682/** @name Media (SSE/MMX/AVX) operations: hihalf1 + hihalf2 -> full1.
1683 * @{ */
1684typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF1H1U64,(PCX86FXSTATE pFpuState, uint64_t *pu64Dst, uint64_t const *pu64Src));
1685typedef FNIEMAIMPLMEDIAF2U64 *PFNIEMAIMPLMEDIAF1H1U64;
1686typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF1H1U128,(PCX86FXSTATE pFpuState, PRTUINT128U pu128Dst, PCRTUINT128U pu128Src));
1687typedef FNIEMAIMPLMEDIAF2U128 *PFNIEMAIMPLMEDIAF1H1U128;
1688FNIEMAIMPLMEDIAF1H1U64 iemAImpl_punpckhbw_u64, iemAImpl_punpckhwd_u64, iemAImpl_punpckhdq_u64;
1689FNIEMAIMPLMEDIAF1H1U128 iemAImpl_punpckhbw_u128, iemAImpl_punpckhwd_u128, iemAImpl_punpckhdq_u128, iemAImpl_punpckhqdq_u128;
1690/** @} */
1691
1692/** @name Media (SSE/MMX/AVX) operation: Packed Shuffle Stuff (evil)
1693 * @{ */
1694typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHUF,(PCX86FXSTATE pFpuState, PRTUINT128U pu128Dst,
1695 PCRTUINT128U pu128Src, uint8_t bEvil));
1696typedef FNIEMAIMPLMEDIAPSHUF *PFNIEMAIMPLMEDIAPSHUF;
1697FNIEMAIMPLMEDIAPSHUF iemAImpl_pshufhw, iemAImpl_pshuflw, iemAImpl_pshufd;
1698IEM_DECL_IMPL_DEF(void, iemAImpl_pshufw,(PCX86FXSTATE pFpuState, uint64_t *pu64Dst, uint64_t const *pu64Src, uint8_t bEvil));
1699/** @} */
1700
1701/** @name Media (SSE/MMX/AVX) operation: Move Byte Mask
1702 * @{ */
1703IEM_DECL_IMPL_DEF(void, iemAImpl_pmovmskb_u64,(PCX86FXSTATE pFpuState, uint64_t *pu64Dst, uint64_t const *pu64Src));
1704IEM_DECL_IMPL_DEF(void, iemAImpl_pmovmskb_u128,(PCX86FXSTATE pFpuState, uint64_t *pu64Dst, PCRTUINT128U pu128Src));
1705/** @} */
1706
1707/** @name Media (SSE/MMX/AVX) operation: Sort this later
1708 * @{ */
1709IEM_DECL_IMPL_DEF(void, iemAImpl_movsldup,(PCX86FXSTATE pFpuState, PRTUINT128U puDst, PCRTUINT128U puSrc));
1710IEM_DECL_IMPL_DEF(void, iemAImpl_movshdup,(PCX86FXSTATE pFpuState, PRTUINT128U puDst, PCRTUINT128U puSrc));
1711IEM_DECL_IMPL_DEF(void, iemAImpl_movddup,(PCX86FXSTATE pFpuState, PRTUINT128U puDst, uint64_t uSrc));
1712
1713IEM_DECL_IMPL_DEF(void, iemAImpl_vmovsldup_256_rr,(PX86XSAVEAREA pXState, uint8_t iYRegDst, uint8_t iYRegSrc));
1714IEM_DECL_IMPL_DEF(void, iemAImpl_vmovsldup_256_rm,(PX86XSAVEAREA pXState, uint8_t iYRegDst, PCRTUINT256U pSrc));
1715IEM_DECL_IMPL_DEF(void, iemAImpl_vmovddup_256_rr,(PX86XSAVEAREA pXState, uint8_t iYRegDst, uint8_t iYRegSrc));
1716IEM_DECL_IMPL_DEF(void, iemAImpl_vmovddup_256_rm,(PX86XSAVEAREA pXState, uint8_t iYRegDst, PCRTUINT256U pSrc));
1717
1718/** @} */
1719
1720
1721/** @name Function tables.
1722 * @{
1723 */
1724
1725/**
1726 * Function table for a binary operator providing implementation based on
1727 * operand size.
1728 */
1729typedef struct IEMOPBINSIZES
1730{
1731 PFNIEMAIMPLBINU8 pfnNormalU8, pfnLockedU8;
1732 PFNIEMAIMPLBINU16 pfnNormalU16, pfnLockedU16;
1733 PFNIEMAIMPLBINU32 pfnNormalU32, pfnLockedU32;
1734 PFNIEMAIMPLBINU64 pfnNormalU64, pfnLockedU64;
1735} IEMOPBINSIZES;
1736/** Pointer to a binary operator function table. */
1737typedef IEMOPBINSIZES const *PCIEMOPBINSIZES;
1738
1739
1740/**
1741 * Function table for a unary operator providing implementation based on
1742 * operand size.
1743 */
1744typedef struct IEMOPUNARYSIZES
1745{
1746 PFNIEMAIMPLUNARYU8 pfnNormalU8, pfnLockedU8;
1747 PFNIEMAIMPLUNARYU16 pfnNormalU16, pfnLockedU16;
1748 PFNIEMAIMPLUNARYU32 pfnNormalU32, pfnLockedU32;
1749 PFNIEMAIMPLUNARYU64 pfnNormalU64, pfnLockedU64;
1750} IEMOPUNARYSIZES;
1751/** Pointer to a unary operator function table. */
1752typedef IEMOPUNARYSIZES const *PCIEMOPUNARYSIZES;
1753
1754
1755/**
1756 * Function table for a shift operator providing implementation based on
1757 * operand size.
1758 */
1759typedef struct IEMOPSHIFTSIZES
1760{
1761 PFNIEMAIMPLSHIFTU8 pfnNormalU8;
1762 PFNIEMAIMPLSHIFTU16 pfnNormalU16;
1763 PFNIEMAIMPLSHIFTU32 pfnNormalU32;
1764 PFNIEMAIMPLSHIFTU64 pfnNormalU64;
1765} IEMOPSHIFTSIZES;
1766/** Pointer to a shift operator function table. */
1767typedef IEMOPSHIFTSIZES const *PCIEMOPSHIFTSIZES;
1768
1769
1770/**
1771 * Function table for a multiplication or division operation.
1772 */
1773typedef struct IEMOPMULDIVSIZES
1774{
1775 PFNIEMAIMPLMULDIVU8 pfnU8;
1776 PFNIEMAIMPLMULDIVU16 pfnU16;
1777 PFNIEMAIMPLMULDIVU32 pfnU32;
1778 PFNIEMAIMPLMULDIVU64 pfnU64;
1779} IEMOPMULDIVSIZES;
1780/** Pointer to a multiplication or division operation function table. */
1781typedef IEMOPMULDIVSIZES const *PCIEMOPMULDIVSIZES;
1782
1783
1784/**
1785 * Function table for a double precision shift operator providing implementation
1786 * based on operand size.
1787 */
1788typedef struct IEMOPSHIFTDBLSIZES
1789{
1790 PFNIEMAIMPLSHIFTDBLU16 pfnNormalU16;
1791 PFNIEMAIMPLSHIFTDBLU32 pfnNormalU32;
1792 PFNIEMAIMPLSHIFTDBLU64 pfnNormalU64;
1793} IEMOPSHIFTDBLSIZES;
1794/** Pointer to a double precision shift function table. */
1795typedef IEMOPSHIFTDBLSIZES const *PCIEMOPSHIFTDBLSIZES;
1796
1797
1798/**
1799 * Function table for media instruction taking two full sized media registers,
1800 * optionally the 2nd being a memory reference (only modifying the first op.)
1801 */
1802typedef struct IEMOPMEDIAF2
1803{
1804 PFNIEMAIMPLMEDIAF2U64 pfnU64;
1805 PFNIEMAIMPLMEDIAF2U128 pfnU128;
1806} IEMOPMEDIAF2;
1807/** Pointer to a media operation function table for full sized ops. */
1808typedef IEMOPMEDIAF2 const *PCIEMOPMEDIAF2;
1809
1810/**
1811 * Function table for media instruction taking taking one full and one lower
1812 * half media register.
1813 */
1814typedef struct IEMOPMEDIAF1L1
1815{
1816 PFNIEMAIMPLMEDIAF1L1U64 pfnU64;
1817 PFNIEMAIMPLMEDIAF1L1U128 pfnU128;
1818} IEMOPMEDIAF1L1;
1819/** Pointer to a media operation function table for lowhalf+lowhalf -> full. */
1820typedef IEMOPMEDIAF1L1 const *PCIEMOPMEDIAF1L1;
1821
1822/**
1823 * Function table for media instruction taking taking one full and one high half
1824 * media register.
1825 */
1826typedef struct IEMOPMEDIAF1H1
1827{
1828 PFNIEMAIMPLMEDIAF1H1U64 pfnU64;
1829 PFNIEMAIMPLMEDIAF1H1U128 pfnU128;
1830} IEMOPMEDIAF1H1;
1831/** Pointer to a media operation function table for hihalf+hihalf -> full. */
1832typedef IEMOPMEDIAF1H1 const *PCIEMOPMEDIAF1H1;
1833
1834
1835/** @} */
1836
1837
1838/** @name C instruction implementations for anything slightly complicated.
1839 * @{ */
1840
1841/**
1842 * For typedef'ing or declaring a C instruction implementation function taking
1843 * no extra arguments.
1844 *
1845 * @param a_Name The name of the type.
1846 */
1847# define IEM_CIMPL_DECL_TYPE_0(a_Name) \
1848 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPU pVCpu, uint8_t cbInstr))
1849/**
1850 * For defining a C instruction implementation function taking no extra
1851 * arguments.
1852 *
1853 * @param a_Name The name of the function
1854 */
1855# define IEM_CIMPL_DEF_0(a_Name) \
1856 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPU pVCpu, uint8_t cbInstr))
1857/**
1858 * For calling a C instruction implementation function taking no extra
1859 * arguments.
1860 *
1861 * This special call macro adds default arguments to the call and allow us to
1862 * change these later.
1863 *
1864 * @param a_fn The name of the function.
1865 */
1866# define IEM_CIMPL_CALL_0(a_fn) a_fn(pVCpu, cbInstr)
1867
1868/**
1869 * For typedef'ing or declaring a C instruction implementation function taking
1870 * one extra argument.
1871 *
1872 * @param a_Name The name of the type.
1873 * @param a_Type0 The argument type.
1874 * @param a_Arg0 The argument name.
1875 */
1876# define IEM_CIMPL_DECL_TYPE_1(a_Name, a_Type0, a_Arg0) \
1877 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPU pVCpu, uint8_t cbInstr, a_Type0 a_Arg0))
1878/**
1879 * For defining a C instruction implementation function taking one extra
1880 * argument.
1881 *
1882 * @param a_Name The name of the function
1883 * @param a_Type0 The argument type.
1884 * @param a_Arg0 The argument name.
1885 */
1886# define IEM_CIMPL_DEF_1(a_Name, a_Type0, a_Arg0) \
1887 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPU pVCpu, uint8_t cbInstr, a_Type0 a_Arg0))
1888/**
1889 * For calling a C instruction implementation function taking one extra
1890 * argument.
1891 *
1892 * This special call macro adds default arguments to the call and allow us to
1893 * change these later.
1894 *
1895 * @param a_fn The name of the function.
1896 * @param a0 The name of the 1st argument.
1897 */
1898# define IEM_CIMPL_CALL_1(a_fn, a0) a_fn(pVCpu, cbInstr, (a0))
1899
1900/**
1901 * For typedef'ing or declaring a C instruction implementation function taking
1902 * two extra arguments.
1903 *
1904 * @param a_Name The name of the type.
1905 * @param a_Type0 The type of the 1st argument
1906 * @param a_Arg0 The name of the 1st argument.
1907 * @param a_Type1 The type of the 2nd argument.
1908 * @param a_Arg1 The name of the 2nd argument.
1909 */
1910# define IEM_CIMPL_DECL_TYPE_2(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1) \
1911 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPU pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1))
1912/**
1913 * For defining a C instruction implementation function taking two extra
1914 * arguments.
1915 *
1916 * @param a_Name The name of the function.
1917 * @param a_Type0 The type of the 1st argument
1918 * @param a_Arg0 The name of the 1st argument.
1919 * @param a_Type1 The type of the 2nd argument.
1920 * @param a_Arg1 The name of the 2nd argument.
1921 */
1922# define IEM_CIMPL_DEF_2(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1) \
1923 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPU pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1))
1924/**
1925 * For calling a C instruction implementation function taking two extra
1926 * arguments.
1927 *
1928 * This special call macro adds default arguments to the call and allow us to
1929 * change these later.
1930 *
1931 * @param a_fn The name of the function.
1932 * @param a0 The name of the 1st argument.
1933 * @param a1 The name of the 2nd argument.
1934 */
1935# define IEM_CIMPL_CALL_2(a_fn, a0, a1) a_fn(pVCpu, cbInstr, (a0), (a1))
1936
1937/**
1938 * For typedef'ing or declaring a C instruction implementation function taking
1939 * three extra arguments.
1940 *
1941 * @param a_Name The name of the type.
1942 * @param a_Type0 The type of the 1st argument
1943 * @param a_Arg0 The name of the 1st argument.
1944 * @param a_Type1 The type of the 2nd argument.
1945 * @param a_Arg1 The name of the 2nd argument.
1946 * @param a_Type2 The type of the 3rd argument.
1947 * @param a_Arg2 The name of the 3rd argument.
1948 */
1949# define IEM_CIMPL_DECL_TYPE_3(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2) \
1950 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPU pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2))
1951/**
1952 * For defining a C instruction implementation function taking three extra
1953 * arguments.
1954 *
1955 * @param a_Name The name of the function.
1956 * @param a_Type0 The type of the 1st argument
1957 * @param a_Arg0 The name of the 1st argument.
1958 * @param a_Type1 The type of the 2nd argument.
1959 * @param a_Arg1 The name of the 2nd argument.
1960 * @param a_Type2 The type of the 3rd argument.
1961 * @param a_Arg2 The name of the 3rd argument.
1962 */
1963# define IEM_CIMPL_DEF_3(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2) \
1964 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPU pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2))
1965/**
1966 * For calling a C instruction implementation function taking three extra
1967 * arguments.
1968 *
1969 * This special call macro adds default arguments to the call and allow us to
1970 * change these later.
1971 *
1972 * @param a_fn The name of the function.
1973 * @param a0 The name of the 1st argument.
1974 * @param a1 The name of the 2nd argument.
1975 * @param a2 The name of the 3rd argument.
1976 */
1977# define IEM_CIMPL_CALL_3(a_fn, a0, a1, a2) a_fn(pVCpu, cbInstr, (a0), (a1), (a2))
1978
1979
1980/**
1981 * For typedef'ing or declaring a C instruction implementation function taking
1982 * four extra arguments.
1983 *
1984 * @param a_Name The name of the type.
1985 * @param a_Type0 The type of the 1st argument
1986 * @param a_Arg0 The name of the 1st argument.
1987 * @param a_Type1 The type of the 2nd argument.
1988 * @param a_Arg1 The name of the 2nd argument.
1989 * @param a_Type2 The type of the 3rd argument.
1990 * @param a_Arg2 The name of the 3rd argument.
1991 * @param a_Type3 The type of the 4th argument.
1992 * @param a_Arg3 The name of the 4th argument.
1993 */
1994# define IEM_CIMPL_DECL_TYPE_4(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3) \
1995 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPU pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2, a_Type3 a_Arg3))
1996/**
1997 * For defining a C instruction implementation function taking four extra
1998 * arguments.
1999 *
2000 * @param a_Name The name of the function.
2001 * @param a_Type0 The type of the 1st argument
2002 * @param a_Arg0 The name of the 1st argument.
2003 * @param a_Type1 The type of the 2nd argument.
2004 * @param a_Arg1 The name of the 2nd argument.
2005 * @param a_Type2 The type of the 3rd argument.
2006 * @param a_Arg2 The name of the 3rd argument.
2007 * @param a_Type3 The type of the 4th argument.
2008 * @param a_Arg3 The name of the 4th argument.
2009 */
2010# define IEM_CIMPL_DEF_4(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3) \
2011 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPU pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \
2012 a_Type2 a_Arg2, a_Type3 a_Arg3))
2013/**
2014 * For calling a C instruction implementation function taking four extra
2015 * arguments.
2016 *
2017 * This special call macro adds default arguments to the call and allow us to
2018 * change these later.
2019 *
2020 * @param a_fn The name of the function.
2021 * @param a0 The name of the 1st argument.
2022 * @param a1 The name of the 2nd argument.
2023 * @param a2 The name of the 3rd argument.
2024 * @param a3 The name of the 4th argument.
2025 */
2026# define IEM_CIMPL_CALL_4(a_fn, a0, a1, a2, a3) a_fn(pVCpu, cbInstr, (a0), (a1), (a2), (a3))
2027
2028
2029/**
2030 * For typedef'ing or declaring a C instruction implementation function taking
2031 * five extra arguments.
2032 *
2033 * @param a_Name The name of the type.
2034 * @param a_Type0 The type of the 1st argument
2035 * @param a_Arg0 The name of the 1st argument.
2036 * @param a_Type1 The type of the 2nd argument.
2037 * @param a_Arg1 The name of the 2nd argument.
2038 * @param a_Type2 The type of the 3rd argument.
2039 * @param a_Arg2 The name of the 3rd argument.
2040 * @param a_Type3 The type of the 4th argument.
2041 * @param a_Arg3 The name of the 4th argument.
2042 * @param a_Type4 The type of the 5th argument.
2043 * @param a_Arg4 The name of the 5th argument.
2044 */
2045# define IEM_CIMPL_DECL_TYPE_5(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3, a_Type4, a_Arg4) \
2046 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPU pVCpu, uint8_t cbInstr, \
2047 a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2, \
2048 a_Type3 a_Arg3, a_Type4 a_Arg4))
2049/**
2050 * For defining a C instruction implementation function taking five extra
2051 * arguments.
2052 *
2053 * @param a_Name The name of the function.
2054 * @param a_Type0 The type of the 1st argument
2055 * @param a_Arg0 The name of the 1st argument.
2056 * @param a_Type1 The type of the 2nd argument.
2057 * @param a_Arg1 The name of the 2nd argument.
2058 * @param a_Type2 The type of the 3rd argument.
2059 * @param a_Arg2 The name of the 3rd argument.
2060 * @param a_Type3 The type of the 4th argument.
2061 * @param a_Arg3 The name of the 4th argument.
2062 * @param a_Type4 The type of the 5th argument.
2063 * @param a_Arg4 The name of the 5th argument.
2064 */
2065# define IEM_CIMPL_DEF_5(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3, a_Type4, a_Arg4) \
2066 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPU pVCpu, uint8_t cbInstr, \
2067 a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2, \
2068 a_Type3 a_Arg3, a_Type4 a_Arg4))
2069/**
2070 * For calling a C instruction implementation function taking five extra
2071 * arguments.
2072 *
2073 * This special call macro adds default arguments to the call and allow us to
2074 * change these later.
2075 *
2076 * @param a_fn The name of the function.
2077 * @param a0 The name of the 1st argument.
2078 * @param a1 The name of the 2nd argument.
2079 * @param a2 The name of the 3rd argument.
2080 * @param a3 The name of the 4th argument.
2081 * @param a4 The name of the 5th argument.
2082 */
2083# define IEM_CIMPL_CALL_5(a_fn, a0, a1, a2, a3, a4) a_fn(pVCpu, cbInstr, (a0), (a1), (a2), (a3), (a4))
2084
2085/** @} */
2086
2087
2088/** @} */
2089
2090RT_C_DECLS_END
2091
2092#endif
2093
注意: 瀏覽 TracBrowser 來幫助您使用儲存庫瀏覽器

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette