VirtualBox

source: vbox/trunk/src/VBox/VMM/include/IEMInternal-armv8.h@ 108186

Last change on this file since 108186 was 108186, checked in by vboxsync, 4 weeks ago

VMM/IEM: Removed memory write stats since nobody is using them anymore (consumer was PATM); mark APIs as internal where possible. jiraref:VBP-1431

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 59.3 KB
 
1/* $Id: IEMInternal-armv8.h 108186 2025-02-12 15:35:15Z vboxsync $ */
2/** @file
3 * IEM - Internal header file, ARMv8 variant.
4 */
5
6/*
7 * Copyright (C) 2023-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.alldomusa.eu.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28#ifndef VMM_INCLUDED_SRC_include_IEMInternal_armv8_h
29#define VMM_INCLUDED_SRC_include_IEMInternal_armv8_h
30#ifndef RT_WITHOUT_PRAGMA_ONCE
31# pragma once
32#endif
33
34#include <VBox/vmm/cpum.h>
35#include <VBox/vmm/iem.h>
36#include <VBox/vmm/pgm.h>
37#include <VBox/vmm/stam.h>
38#include <VBox/param.h>
39
40#include <iprt/setjmp-without-sigmask.h>
41
42
43RT_C_DECLS_BEGIN
44
45
46/** @defgroup grp_iem_int Internals
47 * @ingroup grp_iem
48 * @internal
49 * @{
50 */
51
52/** For expanding symbols in SlickEdit and other products tagging and
53 * cross-referencing IEM symbols. */
54#ifndef IEM_STATIC
55# define IEM_STATIC static
56#endif
57
58/** @def IEM_WITH_SETJMP
59 * Enables alternative status code handling using setjmps.
60 *
61 * This adds a bit of expense via the setjmp() call since it saves all the
62 * non-volatile registers. However, it eliminates return code checks and allows
63 * for more optimal return value passing (return regs instead of stack buffer).
64 */
65#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
66# define IEM_WITH_SETJMP
67#endif
68
69/** @def IEM_WITH_THROW_CATCH
70 * Enables using C++ throw/catch as an alternative to setjmp/longjmp in user
71 * mode code when IEM_WITH_SETJMP is in effect.
72 *
73 * With GCC 11.3.1 and code TLB on linux, using throw/catch instead of
74 * setjmp/longjmp resulted in bs2-test-1 running 3.00% faster and all but one test
75 * result value improving by more than 1%. (Best out of three.)
76 *
77 * With Visual C++ 2019 and code TLB on windows, using throw/catch instead of
78 * setjmp/longjmp resulted in bs2-test-1 running 3.68% faster and all but some of
79 * the MMIO and CPUID tests ran noticeably faster. Variation is greater than on
80 * Linux, but it should be quite a bit faster for normal code.
81 */
82#if (defined(IEM_WITH_SETJMP) && defined(IN_RING3) && (defined(__GNUC__) || defined(_MSC_VER))) \
83 || defined(DOXYGEN_RUNNING)
84# define IEM_WITH_THROW_CATCH
85#endif
86
87/** @def IEM_DO_LONGJMP
88 *
89 * Wrapper around longjmp / throw.
90 *
91 * @param a_pVCpu The CPU handle.
92 * @param a_rc The status code jump back with / throw.
93 */
94#if defined(IEM_WITH_SETJMP) || defined(DOXYGEN_RUNNING)
95# ifdef IEM_WITH_THROW_CATCH
96# define IEM_DO_LONGJMP(a_pVCpu, a_rc) throw int(a_rc)
97# else
98# define IEM_DO_LONGJMP(a_pVCpu, a_rc) longjmp(*(a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf), (a_rc))
99# endif
100#endif
101
102/** For use with IEM function that may do a longjmp (when enabled).
103 *
104 * Visual C++ has trouble longjmp'ing from/over functions with the noexcept
105 * attribute. So, we indicate that functions that may be part of a longjmp may
106 * throw "exceptions" and that the compiler should definitely not generate any
107 * std::terminate calling unwind code.
108 *
109 * Here is one example of this ending in std::terminate:
110 * @code{.txt}
111 * 00 00000041`cadfda10 00007ffc`5d5a1f9f ucrtbase!abort+0x4e
112 * 01 00000041`cadfda40 00007ffc`57af229a ucrtbase!terminate+0x1f
113 * 02 00000041`cadfda70 00007ffb`eec91030 VCRUNTIME140!__std_terminate+0xa [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\ehhelpers.cpp @ 192]
114 * 03 00000041`cadfdaa0 00007ffb`eec92c6d VCRUNTIME140_1!_CallSettingFrame+0x20 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\amd64\handlers.asm @ 50]
115 * 04 00000041`cadfdad0 00007ffb`eec93ae5 VCRUNTIME140_1!__FrameHandler4::FrameUnwindToState+0x241 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\frame.cpp @ 1085]
116 * 05 00000041`cadfdc00 00007ffb`eec92258 VCRUNTIME140_1!__FrameHandler4::FrameUnwindToEmptyState+0x2d [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\risctrnsctrl.cpp @ 218]
117 * 06 00000041`cadfdc30 00007ffb`eec940e9 VCRUNTIME140_1!__InternalCxxFrameHandler<__FrameHandler4>+0x194 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\frame.cpp @ 304]
118 * 07 00000041`cadfdcd0 00007ffc`5f9f249f VCRUNTIME140_1!__CxxFrameHandler4+0xa9 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\risctrnsctrl.cpp @ 290]
119 * 08 00000041`cadfdd40 00007ffc`5f980939 ntdll!RtlpExecuteHandlerForUnwind+0xf
120 * 09 00000041`cadfdd70 00007ffc`5f9a0edd ntdll!RtlUnwindEx+0x339
121 * 0a 00000041`cadfe490 00007ffc`57aff976 ntdll!RtlUnwind+0xcd
122 * 0b 00000041`cadfea00 00007ffb`e1b5de01 VCRUNTIME140!__longjmp_internal+0xe6 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\amd64\longjmp.asm @ 140]
123 * 0c (Inline Function) --------`-------- VBoxVMM!iemOpcodeGetNextU8SlowJmp+0x95 [L:\vbox-intern\src\VBox\VMM\VMMAll\IEMAll.cpp @ 1155]
124 * 0d 00000041`cadfea50 00007ffb`e1b60f6b VBoxVMM!iemOpcodeGetNextU8Jmp+0xc1 [L:\vbox-intern\src\VBox\VMM\include\IEMInline.h @ 402]
125 * 0e 00000041`cadfea90 00007ffb`e1cc6201 VBoxVMM!IEMExecForExits+0xdb [L:\vbox-intern\src\VBox\VMM\VMMAll\IEMAll.cpp @ 10185]
126 * 0f 00000041`cadfec70 00007ffb`e1d0df8d VBoxVMM!EMHistoryExec+0x4f1 [L:\vbox-intern\src\VBox\VMM\VMMAll\EMAll.cpp @ 452]
127 * 10 00000041`cadfed60 00007ffb`e1d0d4c0 VBoxVMM!nemR3WinHandleExitCpuId+0x79d [L:\vbox-intern\src\VBox\VMM\VMMAll\NEMAllNativeTemplate-win.cpp.h @ 1829]
128 * @endcode
129 *
130 * @see https://developercommunity.visualstudio.com/t/fragile-behavior-of-longjmp-called-from-noexcept-f/1532859
131 */
132#if defined(IEM_WITH_SETJMP) && (defined(_MSC_VER) || defined(IEM_WITH_THROW_CATCH))
133# define IEM_NOEXCEPT_MAY_LONGJMP RT_NOEXCEPT_EX(false)
134#else
135# define IEM_NOEXCEPT_MAY_LONGJMP RT_NOEXCEPT
136#endif
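/* Editorial sketch (not part of the original header): how IEM_DO_LONGJMP and
 * IEM_NOEXCEPT_MAY_LONGJMP are meant to combine in a raise-style helper. The
 * function name is hypothetical; assumes IEM_WITH_SETJMP is in effect. */
#if 0 /* illustration only */
DECL_NO_RETURN(void) iemExampleBailOut(PVMCPUCC pVCpu, int rc) IEM_NOEXCEPT_MAY_LONGJMP
{
    /* Either throws an int (IEM_WITH_THROW_CATCH builds) or longjmps back to
       the jump buffer established by the interpreter loop. */
    IEM_DO_LONGJMP(pVCpu, rc);
}
#endif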
137
138/** @def IEM_CFG_TARGET_CPU
139 * The minimum target CPU for the IEM emulation (IEMTARGETCPU_XXX value).
140 *
141 * By default we allow this to be configured by the user via the
142 * CPUM/GuestCpuName config string, but this comes at a slight cost during
143 * decoding. So, for applications of this code where there is no need to
144 * be dynamic wrt target CPU, just modify this define.
145 */
146#if !defined(IEM_CFG_TARGET_CPU) || defined(DOXYGEN_RUNNING)
147# define IEM_CFG_TARGET_CPU IEMTARGETCPU_DYNAMIC
148#endif
149
150//#define IEM_WITH_CODE_TLB // - work in progress
151//#define IEM_WITH_DATA_TLB // - work in progress
152
153
154//#define IEM_LOG_MEMORY_WRITES
155
156#if !defined(IN_TSTVMSTRUCT) && !defined(DOXYGEN_RUNNING)
157/** Instruction statistics. */
158typedef struct IEMINSTRSTATS
159{
160# define IEM_DO_INSTR_STAT(a_Name, a_szDesc) uint32_t a_Name;
161/** @todo # include "IEMInstructionStatisticsTmpl.h" */
162 uint8_t bDummy;
163# undef IEM_DO_INSTR_STAT
164} IEMINSTRSTATS;
165#else
166struct IEMINSTRSTATS;
167typedef struct IEMINSTRSTATS IEMINSTRSTATS;
168#endif
169/** Pointer to IEM instruction statistics. */
170typedef IEMINSTRSTATS *PIEMINSTRSTATS;
171
172
173/** @name IEMTARGETCPU_EFL_BEHAVIOR_XXX - IEMCPU::aidxTargetCpuEflFlavour
174 * @{ */
175#define IEMTARGETCPU_EFL_BEHAVIOR_NATIVE 0 /**< Native result; Intel EFLAGS when on non-x86 hosts. */
176#define IEMTARGETCPU_EFL_BEHAVIOR_RESERVED 1 /**< Reserved/dummy entry slot that's the same as 0. */
177#define IEMTARGETCPU_EFL_BEHAVIOR_MASK 1 /**< For masking the index before use. */
178/** Selects the right variant from a_aArray.
179 * pVCpu is implicit in the caller context. */
180#define IEMTARGETCPU_EFL_BEHAVIOR_SELECT(a_aArray) \
181 (a_aArray[pVCpu->iem.s.aidxTargetCpuEflFlavour[1] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
182/** Variation of IEMTARGETCPU_EFL_BEHAVIOR_SELECT for when no native worker can
183 * be used because the host CPU does not support the operation. */
184#define IEMTARGETCPU_EFL_BEHAVIOR_SELECT_NON_NATIVE(a_aArray) \
185 (a_aArray[pVCpu->iem.s.aidxTargetCpuEflFlavour[0] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
186/** Variation of IEMTARGETCPU_EFL_BEHAVIOR_SELECT for a two-dimensional
187 * array paralleling IEMCPU::aidxTargetCpuEflFlavour and a single bit index
188 * into the two.
189 * @sa IEM_SELECT_NATIVE_OR_FALLBACK */
190#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
191# define IEMTARGETCPU_EFL_BEHAVIOR_SELECT_EX(a_aaArray, a_fNative) \
192 (a_aaArray[a_fNative][pVCpu->iem.s.aidxTargetCpuEflFlavour[a_fNative] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
193#else
194# define IEMTARGETCPU_EFL_BEHAVIOR_SELECT_EX(a_aaArray, a_fNative) \
195 (a_aaArray[0][pVCpu->iem.s.aidxTargetCpuEflFlavour[0] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
196#endif
197/** @} */
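/* Editorial sketch (not part of the original header): selecting a worker with
 * IEMTARGETCPU_EFL_BEHAVIOR_SELECT. The worker typedef, table and function
 * below are hypothetical; the table is indexed by the EFLAGS flavour. */
#if 0 /* illustration only */
typedef uint32_t (*PFNIEMEXAMPLEEFLWORKER)(uint32_t fEFlagsIn);
extern PFNIEMEXAMPLEEFLWORKER const g_apfnIemExampleEflWorkers[2];

static uint32_t iemExampleCalcEFlags(PVMCPUCC pVCpu, uint32_t fEFlagsIn)
{
    /* Uses aidxTargetCpuEflFlavour[1] (the "native worker available" slot),
       masked by IEMTARGETCPU_EFL_BEHAVIOR_MASK, as the array index. */
    return IEMTARGETCPU_EFL_BEHAVIOR_SELECT(g_apfnIemExampleEflWorkers)(fEFlagsIn);
}
#endif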
198
199/**
200 * Branch types.
201 */
202typedef enum IEMBRANCH
203{
204 IEMBRANCH_JUMP = 1,
205 IEMBRANCH_CALL,
206 IEMBRANCH_TRAP,
207 IEMBRANCH_SOFTWARE_INT,
208 IEMBRANCH_HARDWARE_INT
209} IEMBRANCH;
210AssertCompileSize(IEMBRANCH, 4);
211
212
213/**
214 * INT instruction types.
215 */
216typedef enum IEMINT
217{
218 /** INT n instruction (opcode 0xcd imm). */
219 IEMINT_INTN = 0,
220 /** Single byte INT3 instruction (opcode 0xcc). */
221 IEMINT_INT3 = IEM_XCPT_FLAGS_BP_INSTR,
222 /** Single byte INTO instruction (opcode 0xce). */
223 IEMINT_INTO = IEM_XCPT_FLAGS_OF_INSTR,
224 /** Single byte INT1 (ICEBP) instruction (opcode 0xf1). */
225 IEMINT_INT1 = IEM_XCPT_FLAGS_ICEBP_INSTR
226} IEMINT;
227AssertCompileSize(IEMINT, 4);
228
229
230typedef struct IEMTLBENTRY
231{
232 /** The TLB entry tag.
233 * Bits 35 thru 0 are made up of the virtual address shifted right 12 bits; this
234 * is ASSUMING a virtual address width of 48 bits.
235 *
236 * Bits 63 thru 36 are made up of the TLB revision (zero means invalid).
237 *
238 * The TLB lookup code uses the current TLB revision, which won't ever be zero,
239 * enabling an extremely cheap TLB invalidation most of the time. When the TLB
240 * revision wraps around though, the tags need to be zeroed.
241 *
242 * @note Try using the SHRD instruction? After seeing
243 * https://gmplib.org/~tege/x86-timing.pdf, maybe not.
244 *
245 * @todo This will need to be reorganized for 57-bit wide virtual address and
246 * PCID (currently 12 bits) and ASID (currently 6 bits) support. We'll
247 * have to move the TLB entry versioning entirely to the
248 * fFlagsAndPhysRev member then, 57 bit wide VAs means we'll only have
249 * 19 bits left (64 - 57 + 12 = 19) and they'll almost entirely be
250 * consumed by PCID and ASID (12 + 6 = 18).
251 */
252 uint64_t uTag;
253 /** Access flags and physical TLB revision.
254 *
255 * - Bit 0 - page tables - not executable (X86_PTE_PAE_NX).
256 * - Bit 1 - page tables - not writable (complemented X86_PTE_RW).
257 * - Bit 2 - page tables - not user (complemented X86_PTE_US).
258 * - Bit 3 - pgm phys/virt - not directly writable.
259 * - Bit 4 - pgm phys page - not directly readable.
260 * - Bit 5 - page tables - not accessed (complemented X86_PTE_A).
261 * - Bit 6 - page tables - not dirty (complemented X86_PTE_D).
262 * - Bit 7 - tlb entry - pbMappingR3 member not valid.
263 * - Bits 63 thru 8 are used for the physical TLB revision number.
264 *
265 * We're using complemented bit meanings here because it makes it easy to check
266 * whether special action is required. For instance a user mode write access
267 * would do a "TEST fFlags, (X86_PTE_RW | X86_PTE_US | X86_PTE_D)" and a
268 * non-zero result would mean special handling needed because either it wasn't
269 * writable, or it wasn't user, or the page wasn't dirty. A user mode read
270 * access would do "TEST fFlags, X86_PTE_US"; and a kernel mode read wouldn't
271 * need to check any PTE flag.
272 */
273 uint64_t fFlagsAndPhysRev;
274 /** The guest physical page address. */
275 uint64_t GCPhys;
276 /** Pointer to the ring-3 mapping. */
277 R3PTRTYPE(uint8_t *) pbMappingR3;
278#if HC_ARCH_BITS == 32
279 uint32_t u32Padding1;
280#endif
281} IEMTLBENTRY;
282AssertCompileSize(IEMTLBENTRY, 32);
283/** Pointer to an IEM TLB entry. */
284typedef IEMTLBENTRY *PIEMTLBENTRY;
285
286/** @name IEMTLBE_F_XXX - TLB entry flags (IEMTLBENTRY::fFlagsAndPhysRev)
287 * @{ */
288#define IEMTLBE_F_PT_NO_EXEC RT_BIT_64(0) /**< Page tables: Not executable. */
289#define IEMTLBE_F_PT_NO_WRITE RT_BIT_64(1) /**< Page tables: Not writable. */
290#define IEMTLBE_F_PT_NO_USER RT_BIT_64(2) /**< Page tables: Not user accessible (supervisor only). */
291#define IEMTLBE_F_PG_NO_WRITE RT_BIT_64(3) /**< Phys page: Not writable (access handler, ROM, whatever). */
292#define IEMTLBE_F_PG_NO_READ RT_BIT_64(4) /**< Phys page: Not readable (MMIO / access handler, ROM) */
293#define IEMTLBE_F_PT_NO_ACCESSED RT_BIT_64(5) /**< Page tables: Not accessed (need to be marked accessed). */
294#define IEMTLBE_F_PT_NO_DIRTY RT_BIT_64(6) /**< Page tables: Not dirty (needs to be made dirty on write). */
295#define IEMTLBE_F_PT_LARGE_PAGE RT_BIT_64(7) /**< Page tables: Large 2 or 4 MiB page (for flushing). */
296#define IEMTLBE_F_NO_MAPPINGR3 RT_BIT_64(8) /**< TLB entry: The IEMTLBENTRY::pbMappingR3 member is invalid. */
297#define IEMTLBE_F_PG_UNASSIGNED RT_BIT_64(9) /**< Phys page: Unassigned memory (not RAM, ROM, MMIO2 or MMIO). */
298#define IEMTLBE_F_PG_CODE_PAGE RT_BIT_64(10) /**< Phys page: Code page. */
299#define IEMTLBE_F_PHYS_REV UINT64_C(0xfffffffffffff800) /**< Physical revision mask. @sa IEMTLB_PHYS_REV_INCR */
300/** @} */
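/* Editorial sketch (not part of the original header): the complemented-bit
 * test described for IEMTLBENTRY::fFlagsAndPhysRev, spelled out with the flag
 * defines above. The function name and flag selection are illustrative. */
#if 0 /* illustration only */
static bool iemExampleDataWriteNeedsSlowPath(IEMTLBENTRY const *pTlbe, uint64_t uTlbPhysRev)
{
    /* Because the NO_xxx bits are complemented, an unrestricted write hits the
       fast path only when nothing but the current physical revision is set. */
    return (pTlbe->fFlagsAndPhysRev
            & (IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PG_NO_WRITE
               | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PHYS_REV))
        != uTlbPhysRev;
}
#endif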
301
302
303/** The TLB size (power of two).
304 * We initially chose 256 because that way we can obtain the result directly
305 * from an 8-bit register without an additional AND instruction.
306 * See also @bugref{10687}. */
307#define IEMTLB_ENTRY_COUNT 256
308#define IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO 8
309
310/** TLB slot format spec (assumes uint32_t or unsigned value). */
311#if IEMTLB_ENTRY_COUNT <= 0x100 / 2
312# define IEMTLB_SLOT_FMT "%02x"
313#elif IEMTLB_ENTRY_COUNT <= 0x1000 / 2
314# define IEMTLB_SLOT_FMT "%03x"
315#elif IEMTLB_ENTRY_COUNT <= 0x10000 / 2
316# define IEMTLB_SLOT_FMT "%04x"
317#else
318# define IEMTLB_SLOT_FMT "%05x"
319#endif
320
321
322/**
323 * An IEM TLB.
324 *
325 * We've got two of these, one for data and one for instructions.
326 */
327typedef struct IEMTLB
328{
329 /** The TLB entries.
330 * We've chosen 256 because that way we can obtain the result directly from an
331 * 8-bit register without an additional AND instruction. */
332 IEMTLBENTRY aEntries[IEMTLB_ENTRY_COUNT];
333 /** The TLB revision.
334 * This is actually only 28 bits wide (see IEMTLBENTRY::uTag) and is incremented
335 * by adding RT_BIT_64(36) to it. When it wraps around and becomes zero, all
336 * the tags in the TLB must be zeroed and the revision set to RT_BIT_64(36).
337 * (The revision zero indicates an invalid TLB entry.)
338 *
339 * The initial value is chosen to cause an early wraparound. */
340 uint64_t uTlbRevision;
341 /** The TLB physical address revision - shadow of PGM variable.
342 *
343 * This is actually only 56 bits wide (see IEMTLBENTRY::fFlagsAndPhysRev) and is
344 * incremented by adding RT_BIT_64(8). When it wraps around and becomes zero,
345 * a rendezvous is called and each CPU wipes the IEMTLBENTRY::pbMappingR3 as well
346 * as IEMTLBENTRY::fFlagsAndPhysRev bits 63 thru 8, 4, and 3.
347 *
348 * The initial value is chosen to cause an early wraparound. */
349 uint64_t volatile uTlbPhysRev;
350
351 /* Statistics: */
352
353 /** TLB hits (VBOX_WITH_STATISTICS only). */
354 uint64_t cTlbHits;
355 /** TLB misses. */
356 uint32_t cTlbMisses;
357 /** Slow read path. */
358 uint32_t cTlbSlowCodeReadPath;
359#if 0
360 /** TLB misses because of tag mismatch. */
361 uint32_t cTlbMissesTag;
362 /** TLB misses because of virtual access violation. */
363 uint32_t cTlbMissesVirtAccess;
364 /** TLB misses because of dirty bit. */
365 uint32_t cTlbMissesDirty;
366 /** TLB misses because of MMIO */
367 uint32_t cTlbMissesMmio;
368 /** TLB misses because of write access handlers. */
369 uint32_t cTlbMissesWriteHandler;
370 /** TLB misses because no r3(/r0) mapping. */
371 uint32_t cTlbMissesMapping;
372#endif
373 /** Alignment padding. */
374 uint32_t au32Padding[3+5];
375} IEMTLB;
376AssertCompileSizeAlignment(IEMTLB, 64);
377/** The width (in bits) of the address portion of the TLB tag. */
378#define IEMTLB_TAG_ADDR_WIDTH 36
379/** IEMTLB::uTlbRevision increment. */
380#define IEMTLB_REVISION_INCR RT_BIT_64(IEMTLB_TAG_ADDR_WIDTH)
381/** IEMTLB::uTlbRevision mask. */
382#define IEMTLB_REVISION_MASK (~(RT_BIT_64(IEMTLB_TAG_ADDR_WIDTH) - 1))
383
384/** IEMTLB::uTlbPhysRev increment.
385 * @sa IEMTLBE_F_PHYS_REV */
386#define IEMTLB_PHYS_REV_INCR RT_BIT_64(10)
387/**
388 * Calculates the TLB tag for a virtual address.
389 * @returns Tag value for indexing and comparing with IEMTLB::uTag.
390 * @param a_pTlb The TLB.
391 * @param a_GCPtr The virtual address.
392 */
393#define IEMTLB_CALC_TAG(a_pTlb, a_GCPtr) ( IEMTLB_CALC_TAG_NO_REV(a_GCPtr) | (a_pTlb)->uTlbRevision )
394/**
395 * Calculates the TLB tag for a virtual address but without TLB revision.
396 * @returns Tag value for indexing and comparing with IEMTLB::uTag.
397 * @param a_GCPtr The virtual address.
398 */
399#define IEMTLB_CALC_TAG_NO_REV(a_GCPtr) ( (((a_GCPtr) << 16) >> (GUEST_PAGE_SHIFT + 16)) )
400/**
401 * Converts a TLB tag value into a TLB index.
402 * @returns Index into IEMTLB::aEntries.
403 * @param a_uTag Value returned by IEMTLB_CALC_TAG.
404 */
405#define IEMTLB_TAG_TO_INDEX(a_uTag) ( (uint8_t)(a_uTag) )
406/**
407 * Converts a TLB tag value into a pointer to the corresponding TLB entry.
408 * @returns Pointer to the entry in IEMTLB::aEntries.
409 * @param a_pTlb The TLB.
410 * @param a_uTag Value returned by IEMTLB_CALC_TAG.
411 */
412#define IEMTLB_TAG_TO_ENTRY(a_pTlb, a_uTag) ( &(a_pTlb)->aEntries[IEMTLB_TAG_TO_INDEX(a_uTag)] )
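/* Editorial sketch (not part of the original header): how the tag and index
 * macros above combine into a lookup. The function name is hypothetical and
 * the refill path is omitted. */
#if 0 /* illustration only */
static PIEMTLBENTRY iemExampleTlbLookup(IEMTLB *pTlb, RTGCPTR GCPtrMem)
{
    uint64_t const uTag  = IEMTLB_CALC_TAG(pTlb, GCPtrMem);  /* page number | current revision */
    PIEMTLBENTRY   pTlbe = IEMTLB_TAG_TO_ENTRY(pTlb, uTag);  /* low 8 bits of the tag select the slot */
    if (pTlbe->uTag == uTag)
        return pTlbe;   /* hit: revision and page number both match */
    return NULL;        /* miss: the caller walks the translation tables and refills the entry */
}
#endif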
413
414
415/**
416 * The per-CPU IEM state.
417 *
418 * @todo This is just a STUB currently!
419 */
420typedef struct IEMCPU
421{
422 /** Info status code that needs to be propagated to the IEM caller.
423 * This cannot be passed internally, as it would complicate all success
424 * checks within the interpreter making the code larger and almost impossible
425 * to get right. Instead, we'll store status codes to pass on here. Each
426 * source of these codes will perform appropriate sanity checks. */
427 int32_t rcPassUp; /* 0x00 */
428
429 /** The current CPU execution mode (CS). */
430 IEMMODE enmCpuMode; /* 0x04 */
431 /** The Exception Level (EL). */
432 uint8_t uEl; /* 0x05 */
433
434 /** Whether to bypass access handlers or not. */
435 bool fBypassHandlers : 1; /* 0x06.0 */
436 /** Whether there are pending hardware instruction breakpoints. */
437 bool fPendingInstructionBreakpoints : 1; /* 0x06.2 */
438 /** Whether there are pending hardware data breakpoints. */
439 bool fPendingDataBreakpoints : 1; /* 0x06.3 */
440
441 /* Unused/padding */
442 bool fUnused; /* 0x07 */
443
444 /** @name Decoder state.
445 * @{ */
446#ifndef IEM_WITH_OPAQUE_DECODER_STATE
447 /** The current instruction being executed. */
448 uint32_t u32Insn;
449 uint8_t abOpaqueDecoder[0x48 - 0x4 - 0x8];
450#else /* IEM_WITH_OPAQUE_DECODER_STATE */
451 uint8_t abOpaqueDecoder[0x48 - 0x8];
452#endif /* IEM_WITH_OPAQUE_DECODER_STATE */
453 /** @} */
454
455
456 /** The flags of the current exception / interrupt. */
457 uint32_t fCurXcpt; /* 0x48, 0x48 */
458 /** The current exception / interrupt. */
459 uint8_t uCurXcpt;
460 /** Exception / interrupt recursion depth. */
461 int8_t cXcptRecursions;
462
463 /** The number of active guest memory mappings. */
464 uint8_t cActiveMappings;
465 /** The next unused mapping index. */
466 uint8_t iNextMapping;
467 /** Records for tracking guest memory mappings. */
468 struct
469 {
470 /** The address of the mapped bytes. */
471 void *pv;
472 /** The access flags (IEM_ACCESS_XXX).
473 * IEM_ACCESS_INVALID if the entry is unused. */
474 uint32_t fAccess;
475#if HC_ARCH_BITS == 64
476 uint32_t u32Alignment4; /**< Alignment padding. */
477#endif
478 } aMemMappings[3];
479
480 /** Locking records for the mapped memory. */
481 union
482 {
483 PGMPAGEMAPLOCK Lock;
484 uint64_t au64Padding[2];
485 } aMemMappingLocks[3];
486
487 /** Bounce buffer info.
488 * This runs in parallel to aMemMappings. */
489 struct
490 {
491 /** The physical address of the first byte. */
492 RTGCPHYS GCPhysFirst;
493 /** The physical address of the second page. */
494 RTGCPHYS GCPhysSecond;
495 /** The number of bytes in the first page. */
496 uint16_t cbFirst;
497 /** The number of bytes in the second page. */
498 uint16_t cbSecond;
499 /** Whether it's unassigned memory. */
500 bool fUnassigned;
501 /** Explicit alignment padding. */
502 bool afAlignment5[3];
503 } aMemBbMappings[3];
504
505 /* Ensure that aBounceBuffers are aligned at a 32 byte boundary. */
506 uint64_t abAlignment7[1];
507
508 /** Bounce buffer storage.
509 * This runs in parallel to aMemMappings and aMemBbMappings. */
510 struct
511 {
512 uint8_t ab[512];
513 } aBounceBuffers[3];
514
515
516 /** Pointer set jump buffer - ring-3 context. */
517 R3PTRTYPE(jmp_buf *) pJmpBufR3;
518
519 /** The error code for the current exception / interrupt. */
520 uint32_t uCurXcptErr;
521
522 /** @name Statistics
523 * @{ */
524 /** The number of instructions we've executed. */
525 uint32_t cInstructions;
526 /** The number of potential exits. */
527 uint32_t cPotentialExits;
528 /** Counts the VERR_IEM_INSTR_NOT_IMPLEMENTED returns. */
529 uint32_t cRetInstrNotImplemented;
530 /** Counts the VERR_IEM_ASPECT_NOT_IMPLEMENTED returns. */
531 uint32_t cRetAspectNotImplemented;
532 /** Counts informational statuses returned (other than VINF_SUCCESS). */
533 uint32_t cRetInfStatuses;
534 /** Counts other error statuses returned. */
535 uint32_t cRetErrStatuses;
536 /** Number of times rcPassUp has been used. */
537 uint32_t cRetPassUpStatus;
538 /** Number of times RZ left with instruction commit pending for ring-3. */
539 uint32_t cPendingCommit;
540 /** Number of long jumps. */
541 uint32_t cLongJumps;
542 /** @} */
543
544 /** @name Target CPU information.
545 * @{ */
546#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
547 /** The target CPU. */
548 uint8_t uTargetCpu;
549#else
550 uint8_t bTargetCpuPadding;
551#endif
552 /** For selecting assembly workers matching the target CPU EFLAGS behaviour, see
553 * IEMTARGETCPU_EFL_BEHAVIOR_XXX for values, with the 1st entry for when no
554 * native host support and the 2nd for when there is.
555 *
556 * The two values are typically indexed by a g_CpumHostFeatures bit.
557 *
558 * This is for instance used for the BSF & BSR instructions where AMD and
559 * Intel CPUs produce different EFLAGS. */
560 uint8_t aidxTargetCpuEflFlavour[2];
561
562 uint8_t bPadding[5];
563
564 /** The CPU vendor. */
565 CPUMCPUVENDOR enmCpuVendor;
566 /** @} */
567
568 /** @name Host CPU information.
569 * @{ */
570 /** The CPU vendor. */
571 CPUMCPUVENDOR enmHostCpuVendor;
572 /** @} */
573
574 /** Data TLB.
575 * @remarks Must be 64-byte aligned. */
576 IEMTLB DataTlb;
577 /** Instruction TLB.
578 * @remarks Must be 64-byte aligned. */
579 IEMTLB CodeTlb;
580
581 /** Exception statistics. */
582 STAMCOUNTER aStatXcpts[32];
583 /** Interrupt statistics. */
584 uint32_t aStatInts[256];
585
586#if defined(VBOX_WITH_STATISTICS) && !defined(IN_TSTVMSTRUCT) && !defined(DOXYGEN_RUNNING)
587 /** Instruction statistics for ring-3. */
588 IEMINSTRSTATS StatsR3;
589#endif
590} IEMCPU;
591AssertCompileMemberOffset(IEMCPU, fCurXcpt, 0x48);
592AssertCompileMemberAlignment(IEMCPU, aBounceBuffers, 8);
593AssertCompileMemberAlignment(IEMCPU, aBounceBuffers, 16);
594AssertCompileMemberAlignment(IEMCPU, aBounceBuffers, 32);
595AssertCompileMemberAlignment(IEMCPU, aBounceBuffers, 64);
596AssertCompileMemberAlignment(IEMCPU, DataTlb, 64);
597AssertCompileMemberAlignment(IEMCPU, CodeTlb, 64);
598
599/** Pointer to the per-CPU IEM state. */
600typedef IEMCPU *PIEMCPU;
601/** Pointer to the const per-CPU IEM state. */
602typedef IEMCPU const *PCIEMCPU;
603
604
605/** @def IEM_GET_CTX
606 * Gets the guest CPU context for the calling EMT.
607 * @returns PCPUMCTX
608 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
609 */
610#define IEM_GET_CTX(a_pVCpu) (&(a_pVCpu)->cpum.GstCtx)
611
612/** @def IEM_CTX_ASSERT
613 * Asserts that the @a a_fExtrnMbz is present in the CPU context.
614 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
615 * @param a_fExtrnMbz The mask of CPUMCTX_EXTRN_XXX flags that must be zero.
616 */
617#define IEM_CTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
618 ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", (a_pVCpu)->cpum.GstCtx.fExtrn, \
619 (a_fExtrnMbz)))
620
621/** @def IEM_CTX_IMPORT_RET
622 * Makes sure the CPU context bits given by @a a_fExtrnImport are imported.
623 *
624 * Will call CPUMImportGuestStateOnDemand to import the bits as needed.
625 *
626 * Returns on import failure.
627 *
628 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
629 * @param a_fExtrnImport The mask of CPUMCTX_EXTRN_XXX flags to import.
630 */
631#define IEM_CTX_IMPORT_RET(a_pVCpu, a_fExtrnImport) \
632 do { \
633 if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
634 { /* likely */ } \
635 else \
636 { \
637 int rcCtxImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
638 AssertRCReturn(rcCtxImport, rcCtxImport); \
639 } \
640 } while (0)
641
642/** @def IEM_CTX_IMPORT_NORET
643 * Makes sure the CPU context bits given by @a a_fExtrnImport are imported.
644 *
645 * Will call CPUMImportGuestStateOnDemand to import the bits as needed.
646 *
647 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
648 * @param a_fExtrnImport The mask of CPUMCTX_EXTRN_XXX flags to import.
649 */
650#define IEM_CTX_IMPORT_NORET(a_pVCpu, a_fExtrnImport) \
651 do { \
652 if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
653 { /* likely */ } \
654 else \
655 { \
656 int rcCtxImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
657 AssertLogRelRC(rcCtxImport); \
658 } \
659 } while (0)
660
661/** @def IEM_CTX_IMPORT_JMP
662 * Makes sure the CPU context bits given by @a a_fExtrnImport are imported.
663 *
664 * Will call CPUMImportGuestStateOnDemand to import the bits as needed.
665 *
666 * Jumps on import failure.
667 *
668 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
669 * @param a_fExtrnImport The mask of CPUMCTX_EXTRN_XXX flags to import.
670 */
671#define IEM_CTX_IMPORT_JMP(a_pVCpu, a_fExtrnImport) \
672 do { \
673 if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
674 { /* likely */ } \
675 else \
676 { \
677 int rcCtxImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
678 AssertRCStmt(rcCtxImport, IEM_DO_LONGJMP(pVCpu, rcCtxImport)); \
679 } \
680 } while (0)
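/* Editorial sketch (not part of the original header): typical pairing of the
 * import and assert macros above. The function name is hypothetical; the
 * CPUMCTX_EXTRN_XXX mask is left as a parameter. */
#if 0 /* illustration only */
static VBOXSTRICTRC iemExampleEnsureImported(PVMCPUCC pVCpu, uint64_t fExtrnNeeded)
{
    IEM_CTX_IMPORT_RET(pVCpu, fExtrnNeeded);  /* imports on demand, returns on failure */
    IEM_CTX_ASSERT(pVCpu, fExtrnNeeded);      /* the bits must no longer be marked external */
    return VINF_SUCCESS;
}
#endif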
681
682
683
684/** @def IEM_GET_TARGET_CPU
685 * Gets the current IEMTARGETCPU value.
686 * @returns IEMTARGETCPU value.
687 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
688 */
689#if IEM_CFG_TARGET_CPU != IEMTARGETCPU_DYNAMIC
690# define IEM_GET_TARGET_CPU(a_pVCpu) (IEM_CFG_TARGET_CPU)
691#else
692# define IEM_GET_TARGET_CPU(a_pVCpu) ((a_pVCpu)->iem.s.uTargetCpu)
693#endif
694
695/** @def IEM_GET_INSTR_LEN
696 * Gets the instruction length. */
697/** @todo Thumb mode. */
698#ifdef IEM_WITH_CODE_TLB
699# define IEM_GET_INSTR_LEN(a_pVCpu) (sizeof(uint32_t))
700#else
701# define IEM_GET_INSTR_LEN(a_pVCpu) (sizeof(uint32_t))
702#endif
703
704
705/**
706 * Shared per-VM IEM data.
707 */
708typedef struct IEM
709{
710 uint8_t bDummy;
711} IEM;
712
713
714
715/** @name IEM_ACCESS_XXX - Access details.
716 * @{ */
717#define IEM_ACCESS_INVALID UINT32_C(0x000000ff)
718#define IEM_ACCESS_TYPE_READ UINT32_C(0x00000001)
719#define IEM_ACCESS_TYPE_WRITE UINT32_C(0x00000002)
720#define IEM_ACCESS_TYPE_EXEC UINT32_C(0x00000004)
721#define IEM_ACCESS_TYPE_MASK UINT32_C(0x00000007)
722#define IEM_ACCESS_WHAT_CODE UINT32_C(0x00000010)
723#define IEM_ACCESS_WHAT_DATA UINT32_C(0x00000020)
724#define IEM_ACCESS_WHAT_STACK UINT32_C(0x00000030)
725#define IEM_ACCESS_WHAT_SYS UINT32_C(0x00000040)
726#define IEM_ACCESS_WHAT_MASK UINT32_C(0x00000070)
727/** The writes are partial, so initialize the bounce buffer with the
728 * original RAM content. */
729#define IEM_ACCESS_PARTIAL_WRITE UINT32_C(0x00000100)
730/** Used in aMemMappings to indicate that the entry is bounce buffered. */
731#define IEM_ACCESS_BOUNCE_BUFFERED UINT32_C(0x00000200)
732/** Bounce buffer with ring-3 write pending, first page. */
733#define IEM_ACCESS_PENDING_R3_WRITE_1ST UINT32_C(0x00000400)
734/** Bounce buffer with ring-3 write pending, second page. */
735#define IEM_ACCESS_PENDING_R3_WRITE_2ND UINT32_C(0x00000800)
736/** Not locked, accessed via the TLB. */
737#define IEM_ACCESS_NOT_LOCKED UINT32_C(0x00001000)
738/** Valid bit mask. */
739#define IEM_ACCESS_VALID_MASK UINT32_C(0x00001fff)
740/** Shift count for the TLB flags (upper word). */
741#define IEM_ACCESS_SHIFT_TLB_FLAGS 16
742
743/** Read+write data alias. */
744#define IEM_ACCESS_DATA_RW (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_DATA)
745/** Write data alias. */
746#define IEM_ACCESS_DATA_W (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_DATA)
747/** Read data alias. */
748#define IEM_ACCESS_DATA_R (IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA)
749/** Instruction fetch alias. */
750#define IEM_ACCESS_INSTRUCTION (IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_WHAT_CODE)
751/** Stack write alias. */
752#define IEM_ACCESS_STACK_W (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_STACK)
753/** Stack read alias. */
754#define IEM_ACCESS_STACK_R (IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_STACK)
755/** Stack read+write alias. */
756#define IEM_ACCESS_STACK_RW (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_STACK)
757/** Read system table alias. */
758#define IEM_ACCESS_SYS_R (IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_SYS)
759/** Read+write system table alias. */
760#define IEM_ACCESS_SYS_RW (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_SYS)
761/** @} */
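/* Editorial sketch (not part of the original header): composing and testing
 * the IEM_ACCESS_XXX bits above. The helper name is hypothetical. */
#if 0 /* illustration only */
static bool iemExampleIsStackWrite(uint32_t fAccess)
{
    /* IEM_ACCESS_STACK_W is simply IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_STACK. */
    return (fAccess & IEM_ACCESS_TYPE_WRITE)
        && (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_STACK;
}
#endif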
762
763/** Hint to IEMAllInstructionPython.py that this macro should be skipped. */
764#define IEMOPHINT_SKIP_PYTHON RT_BIT_32(31)
765
766/** @def IEM_DECL_IMPL_TYPE
767 * For typedef'ing an instruction implementation function.
768 *
769 * @param a_RetType The return type.
770 * @param a_Name The name of the type.
771 * @param a_ArgList The argument list enclosed in parentheses.
772 */
773
774/** @def IEM_DECL_IMPL_DEF
775 * For defining an instruction implementation function.
776 *
777 * @param a_RetType The return type.
778 * @param a_Name The name of the type.
779 * @param a_ArgList The argument list enclosed in parentheses.
780 */
781
782#if __cplusplus >= 201700 /* P0012R1 support */
783# define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
784 a_RetType (VBOXCALL a_Name) a_ArgList RT_NOEXCEPT
785# define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
786 a_RetType VBOXCALL a_Name a_ArgList RT_NOEXCEPT
787# define IEM_DECL_IMPL_PROTO(a_RetType, a_Name, a_ArgList) \
788 a_RetType VBOXCALL a_Name a_ArgList RT_NOEXCEPT
789
790#else
791# define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
792 a_RetType (VBOXCALL a_Name) a_ArgList
793# define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
794 a_RetType VBOXCALL a_Name a_ArgList
795# define IEM_DECL_IMPL_PROTO(a_RetType, a_Name, a_ArgList) \
796 a_RetType VBOXCALL a_Name a_ArgList
797
798#endif
799
800/** @name C instruction implementations for anything slightly complicated.
801 * @{ */
802
803/**
804 * For typedef'ing or declaring a C instruction implementation function taking
805 * no extra arguments.
806 *
807 * @param a_Name The name of the type.
808 */
809# define IEM_CIMPL_DECL_TYPE_0(a_Name) \
810 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr))
811/**
812 * For defining a C instruction implementation function taking no extra
813 * arguments.
814 *
815 * @param a_Name The name of the function
816 */
817# define IEM_CIMPL_DEF_0(a_Name) \
818 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr))
819/**
820 * Prototype version of IEM_CIMPL_DEF_0.
821 */
822# define IEM_CIMPL_PROTO_0(a_Name) \
823 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr))
824/**
825 * For calling a C instruction implementation function taking no extra
826 * arguments.
827 *
828 * This special call macro adds default arguments to the call and allows us to
829 * change these later.
830 *
831 * @param a_fn The name of the function.
832 */
833# define IEM_CIMPL_CALL_0(a_fn) a_fn(pVCpu, cbInstr)
834
835/**
836 * For typedef'ing or declaring a C instruction implementation function taking
837 * one extra argument.
838 *
839 * @param a_Name The name of the type.
840 * @param a_Type0 The argument type.
841 * @param a_Arg0 The argument name.
842 */
843# define IEM_CIMPL_DECL_TYPE_1(a_Name, a_Type0, a_Arg0) \
844 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0))
845/**
846 * For defining a C instruction implementation function taking one extra
847 * argument.
848 *
849 * @param a_Name The name of the function
850 * @param a_Type0 The argument type.
851 * @param a_Arg0 The argument name.
852 */
853# define IEM_CIMPL_DEF_1(a_Name, a_Type0, a_Arg0) \
854 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0))
855/**
856 * Prototype version of IEM_CIMPL_DEF_1.
857 */
858# define IEM_CIMPL_PROTO_1(a_Name, a_Type0, a_Arg0) \
859 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0))
860/**
861 * For calling a C instruction implementation function taking one extra
862 * argument.
863 *
864 * This special call macro adds default arguments to the call and allows us to
865 * change these later.
866 *
867 * @param a_fn The name of the function.
868 * @param a0 The name of the 1st argument.
869 */
870# define IEM_CIMPL_CALL_1(a_fn, a0) a_fn(pVCpu, cbInstr, (a0))
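/* Editorial sketch (not part of the original header): declaring, defining and
 * calling a one-argument C implementation with the macros above. The function
 * name and argument are hypothetical and the body is a stub. */
#if 0 /* illustration only */
IEM_CIMPL_PROTO_1(iemCImplExampleHint, uint32_t, uImmHint);

IEM_CIMPL_DEF_1(iemCImplExampleHint, uint32_t, uImmHint)
{
    /* ... emulate the instruction, then advance the guest PC by cbInstr ... */
    RT_NOREF(pVCpu, cbInstr, uImmHint);
    return VINF_SUCCESS;
}
/* From a decoder function: IEM_CIMPL_CALL_1(iemCImplExampleHint, uImmHint); */
#endif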
871
872/**
873 * For typedef'ing or declaring a C instruction implementation function taking
874 * two extra arguments.
875 *
876 * @param a_Name The name of the type.
877 * @param a_Type0 The type of the 1st argument
878 * @param a_Arg0 The name of the 1st argument.
879 * @param a_Type1 The type of the 2nd argument.
880 * @param a_Arg1 The name of the 2nd argument.
881 */
882# define IEM_CIMPL_DECL_TYPE_2(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1) \
883 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1))
884/**
885 * For defining a C instruction implementation function taking two extra
886 * arguments.
887 *
888 * @param a_Name The name of the function.
889 * @param a_Type0 The type of the 1st argument
890 * @param a_Arg0 The name of the 1st argument.
891 * @param a_Type1 The type of the 2nd argument.
892 * @param a_Arg1 The name of the 2nd argument.
893 */
894# define IEM_CIMPL_DEF_2(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1) \
895 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1))
896/**
897 * Prototype version of IEM_CIMPL_DEF_2.
898 */
899# define IEM_CIMPL_PROTO_2(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1) \
900 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1))
901/**
902 * For calling a C instruction implementation function taking two extra
903 * arguments.
904 *
905 * This special call macro adds default arguments to the call and allows us to
906 * change these later.
907 *
908 * @param a_fn The name of the function.
909 * @param a0 The name of the 1st argument.
910 * @param a1 The name of the 2nd argument.
911 */
912# define IEM_CIMPL_CALL_2(a_fn, a0, a1) a_fn(pVCpu, cbInstr, (a0), (a1))
913
914/**
915 * For typedef'ing or declaring a C instruction implementation function taking
916 * three extra arguments.
917 *
918 * @param a_Name The name of the type.
919 * @param a_Type0 The type of the 1st argument
920 * @param a_Arg0 The name of the 1st argument.
921 * @param a_Type1 The type of the 2nd argument.
922 * @param a_Arg1 The name of the 2nd argument.
923 * @param a_Type2 The type of the 3rd argument.
924 * @param a_Arg2 The name of the 3rd argument.
925 */
926# define IEM_CIMPL_DECL_TYPE_3(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2) \
927 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2))
928/**
929 * For defining a C instruction implementation function taking three extra
930 * arguments.
931 *
932 * @param a_Name The name of the function.
933 * @param a_Type0 The type of the 1st argument
934 * @param a_Arg0 The name of the 1st argument.
935 * @param a_Type1 The type of the 2nd argument.
936 * @param a_Arg1 The name of the 2nd argument.
937 * @param a_Type2 The type of the 3rd argument.
938 * @param a_Arg2 The name of the 3rd argument.
939 */
940# define IEM_CIMPL_DEF_3(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2) \
941 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2))
942/**
943 * Prototype version of IEM_CIMPL_DEF_3.
944 */
945# define IEM_CIMPL_PROTO_3(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2) \
946 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2))
947/**
948 * For calling a C instruction implementation function taking three extra
949 * arguments.
950 *
951 * This special call macro adds default arguments to the call and allows us to
952 * change these later.
953 *
954 * @param a_fn The name of the function.
955 * @param a0 The name of the 1st argument.
956 * @param a1 The name of the 2nd argument.
957 * @param a2 The name of the 3rd argument.
958 */
959# define IEM_CIMPL_CALL_3(a_fn, a0, a1, a2) a_fn(pVCpu, cbInstr, (a0), (a1), (a2))
960
961
962/**
963 * For typedef'ing or declaring a C instruction implementation function taking
964 * four extra arguments.
965 *
966 * @param a_Name The name of the type.
967 * @param a_Type0 The type of the 1st argument
968 * @param a_Arg0 The name of the 1st argument.
969 * @param a_Type1 The type of the 2nd argument.
970 * @param a_Arg1 The name of the 2nd argument.
971 * @param a_Type2 The type of the 3rd argument.
972 * @param a_Arg2 The name of the 3rd argument.
973 * @param a_Type3 The type of the 4th argument.
974 * @param a_Arg3 The name of the 4th argument.
975 */
976# define IEM_CIMPL_DECL_TYPE_4(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3) \
977 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2, a_Type3 a_Arg3))
978/**
979 * For defining a C instruction implementation function taking four extra
980 * arguments.
981 *
982 * @param a_Name The name of the function.
983 * @param a_Type0 The type of the 1st argument
984 * @param a_Arg0 The name of the 1st argument.
985 * @param a_Type1 The type of the 2nd argument.
986 * @param a_Arg1 The name of the 2nd argument.
987 * @param a_Type2 The type of the 3rd argument.
988 * @param a_Arg2 The name of the 3rd argument.
989 * @param a_Type3 The type of the 4th argument.
990 * @param a_Arg3 The name of the 4th argument.
991 */
992# define IEM_CIMPL_DEF_4(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3) \
993 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \
994 a_Type2 a_Arg2, a_Type3 a_Arg3))
995/**
996 * Prototype version of IEM_CIMPL_DEF_4.
997 */
998# define IEM_CIMPL_PROTO_4(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3) \
999 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \
1000 a_Type2 a_Arg2, a_Type3 a_Arg3))
1001/**
1002 * For calling a C instruction implementation function taking four extra
1003 * arguments.
1004 *
1005 * This special call macro adds default arguments to the call and allows us to
1006 * change these later.
1007 *
1008 * @param a_fn The name of the function.
1009 * @param a0 The name of the 1st argument.
1010 * @param a1 The name of the 2nd argument.
1011 * @param a2 The name of the 3rd argument.
1012 * @param a3 The name of the 4th argument.
1013 */
1014# define IEM_CIMPL_CALL_4(a_fn, a0, a1, a2, a3) a_fn(pVCpu, cbInstr, (a0), (a1), (a2), (a3))
1015
1016
1017/**
1018 * For typedef'ing or declaring a C instruction implementation function taking
1019 * five extra arguments.
1020 *
1021 * @param a_Name The name of the type.
1022 * @param a_Type0 The type of the 1st argument
1023 * @param a_Arg0 The name of the 1st argument.
1024 * @param a_Type1 The type of the 2nd argument.
1025 * @param a_Arg1 The name of the 2nd argument.
1026 * @param a_Type2 The type of the 3rd argument.
1027 * @param a_Arg2 The name of the 3rd argument.
1028 * @param a_Type3 The type of the 4th argument.
1029 * @param a_Arg3 The name of the 4th argument.
1030 * @param a_Type4 The type of the 5th argument.
1031 * @param a_Arg4 The name of the 5th argument.
1032 */
1033# define IEM_CIMPL_DECL_TYPE_5(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3, a_Type4, a_Arg4) \
1034 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, \
1035 a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2, \
1036 a_Type3 a_Arg3, a_Type4 a_Arg4))
1037/**
1038 * For defining a C instruction implementation function taking five extra
1039 * arguments.
1040 *
1041 * @param a_Name The name of the function.
1042 * @param a_Type0 The type of the 1st argument
1043 * @param a_Arg0 The name of the 1st argument.
1044 * @param a_Type1 The type of the 2nd argument.
1045 * @param a_Arg1 The name of the 2nd argument.
1046 * @param a_Type2 The type of the 3rd argument.
1047 * @param a_Arg2 The name of the 3rd argument.
1048 * @param a_Type3 The type of the 4th argument.
1049 * @param a_Arg3 The name of the 4th argument.
1050 * @param a_Type4 The type of the 5th argument.
1051 * @param a_Arg4 The name of the 5th argument.
1052 */
1053# define IEM_CIMPL_DEF_5(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3, a_Type4, a_Arg4) \
1054 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \
1055 a_Type2 a_Arg2, a_Type3 a_Arg3, a_Type4 a_Arg4))
1056/**
1057 * Prototype version of IEM_CIMPL_DEF_5.
1058 */
1059# define IEM_CIMPL_PROTO_5(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3, a_Type4, a_Arg4) \
1060 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \
1061 a_Type2 a_Arg2, a_Type3 a_Arg3, a_Type4 a_Arg4))
1062/**
1063 * For calling a C instruction implementation function taking five extra
1064 * arguments.
1065 *
1066 * This special call macro adds default arguments to the call and allows us to
1067 * change these later.
1068 *
1069 * @param a_fn The name of the function.
1070 * @param a0 The name of the 1st argument.
1071 * @param a1 The name of the 2nd argument.
1072 * @param a2 The name of the 3rd argument.
1073 * @param a3 The name of the 4th argument.
1074 * @param a4 The name of the 5th argument.
1075 */
1076# define IEM_CIMPL_CALL_5(a_fn, a0, a1, a2, a3, a4) a_fn(pVCpu, cbInstr, (a0), (a1), (a2), (a3), (a4))
1077
1078/** @} */
1079
1080
1081/** @name Opcode Decoder Function Types.
1082 * @{ */
1083
1084# if 0 /** @todo r=bird: This upsets doxygen. Generally, these macros and types probably won't change with the target arch.
1085 * Nor will probably the TLB definitions. So, we need some better splitting of this code. */
1086/** @typedef PFNIEMOP
1087 * Pointer to an opcode decoder function.
1088 */
1089
1090/** @def FNIEMOP_DEF
1091 * Define an opcode decoder function.
1092 *
1093 * We're using macros for this so that adding and removing parameters as well as
1094 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL
1095 *
1096 * @param a_Name The function name.
1097 */
1098#endif
1099
1100#if defined(__GNUC__) && defined(RT_ARCH_X86)
1101typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPUCC pVCpu);
1102# define FNIEMOP_DEF(a_Name) \
1103 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu)
1104# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
1105 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
1106# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
1107 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
1108
1109#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
1110typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPUCC pVCpu);
1111# define FNIEMOP_DEF(a_Name) \
1112 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1113# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
1114 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) IEM_NOEXCEPT_MAY_LONGJMP
1115# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
1116 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) IEM_NOEXCEPT_MAY_LONGJMP
1117
1118#elif defined(__GNUC__) && !defined(IEM_WITH_THROW_CATCH)
1119typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
1120# define FNIEMOP_DEF(a_Name) \
1121 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu)
1122# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
1123 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
1124# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
1125 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
1126
1127#else
1128typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
1129# define FNIEMOP_DEF(a_Name) \
1130 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1131# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
1132 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) IEM_NOEXCEPT_MAY_LONGJMP
1133# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
1134 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) IEM_NOEXCEPT_MAY_LONGJMP
1135
1136#endif
1137
1138/**
1139 * Call an opcode decoder function.
1140 *
1141 * We're using macros for this so that adding and removing parameters can be
1142 * done as we please. See FNIEMOP_DEF.
1143 */
1144#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
1145
1146/**
1147 * Call a common opcode decoder function taking one extra argument.
1148 *
1149 * We're using macros for this so that adding and removing parameters can be
1150 * done as we please. See FNIEMOP_DEF_1.
1151 */
1152#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
1153
1154/**
1155 * Call a common opcode decoder function taking two extra arguments.
1156 *
1157 * We're using macros for this so that adding and removing parameters can be
1158 * done as we please. See FNIEMOP_DEF_2.
1159 */
1160#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
1161/** @} */
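/* Editorial sketch (not part of the original header): an opcode decoder stub
 * declared with FNIEMOP_DEF and dispatched with FNIEMOP_CALL. The name is
 * hypothetical. */
#if 0 /* illustration only */
FNIEMOP_DEF(iemOpExampleNop)
{
    RT_NOREF(pVCpu);        /* nothing to decode or emulate in this stub */
    return VINF_SUCCESS;
}
/* From the dispatcher (pVCpu in scope): VBOXSTRICTRC rcStrict = FNIEMOP_CALL(iemOpExampleNop); */
#endif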
1162
1163
1164/** @name Misc Helpers
1165 * @{ */
1166
1167/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
1168 * due to GCC lacking knowledge about the value range of a switch. */
1169#if RT_CPLUSPLUS_PREREQ(202000)
1170# define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: [[unlikely]] AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
1171#else
1172# define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
1173#endif
1174
1175/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
1176#if RT_CPLUSPLUS_PREREQ(202000)
1177# define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: [[unlikely]] AssertFailedReturn(a_RetValue)
1178#else
1179# define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
1180#endif
1181
1182/**
1183 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
1184 * occasion.
1185 */
1186#ifdef LOG_ENABLED
1187# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
1188 do { \
1189 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
1190 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
1191 } while (0)
1192#else
1193# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
1194 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
1195#endif
1196
1197/**
1198 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
1199 * occasion using the supplied logger statement.
1200 *
1201 * @param a_LoggerArgs What to log on failure.
1202 */
1203#ifdef LOG_ENABLED
1204# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
1205 do { \
1206 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
1207 /*LogFunc(a_LoggerArgs);*/ \
1208 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
1209 } while (0)
1210#else
1211# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
1212 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
1213#endif
1214
1215/** @} */
1216
1217void iemInitPendingBreakpointsSlow(PVMCPUCC pVCpu);
1218
1219
1220/** @name Raising Exceptions.
1221 * @{ */
1222VBOXSTRICTRC iemRaiseXcptOrInt(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t u8Vector, uint32_t fFlags,
1223 uint16_t uErr, uint64_t uCr2) RT_NOEXCEPT;
1224#ifdef IEM_WITH_SETJMP
1225DECL_NO_RETURN(void) iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t u8Vector,
1226 uint32_t fFlags, uint16_t uErr, uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP;
1227#endif
1228VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT;
1229VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT;
1230VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT;
1231VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT;
1232#ifdef IEM_WITH_SETJMP
1233DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP;
1234#endif
1235VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT;
1236VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT;
1237#ifdef IEM_WITH_SETJMP
1238DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
1239#endif
1240VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT;
1241
1242IEM_CIMPL_DEF_0(iemCImplRaiseDivideError);
1243IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode);
1244
1245/**
1246 * Macro for calling iemCImplRaiseDivideError().
1247 *
1248 * This enables us to add/remove arguments and force different levels of
1249 * inlining as we wish.
1250 *
1251 * @return Strict VBox status code.
1252 */
1253#define IEMOP_RAISE_DIVIDE_ERROR_RET() IEM_MC_DEFER_TO_CIMPL_0_RET(iemCImplRaiseDivideError)
1254
1255/**
1256 * Macro for calling iemCImplRaiseInvalidOpcode().
1257 *
1258 * This enables us to add/remove arguments and force different levels of
1259 * inlining as we wish.
1260 *
1261 * @return Strict VBox status code.
1262 */
1263#define IEMOP_RAISE_INVALID_OPCODE_RET() IEM_MC_DEFER_TO_CIMPL_0_RET(iemCImplRaiseInvalidOpcode)
1264/** @} */
1265
1266/** @name Memory access.
1267 * @{ */
1268
1269VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
1270 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT;
1271VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT;
1272#ifndef IN_RING3
1273VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT;
1274#endif
1275void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT;
1276VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT;
1277VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT;
1278VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess, uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT;
1279
1280VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
1281VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
1282VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
1283VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
1284VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
1285VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
1286VBOXSTRICTRC iemMemFetchDataD80(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
1287VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
1288VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
1289#ifdef IEM_WITH_SETJMP
1290uint8_t iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
1291uint16_t iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
1292uint32_t iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
1293uint64_t iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
1294void iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
1295void iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
1296void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
1297void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
1298#endif
1299
1300VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
1301VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
1302VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
1303VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
1304
1305VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT;
1306VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT;
1307VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT;
1308VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT;
1309VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT;
1310VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT;
1311#ifdef IEM_WITH_SETJMP
1312void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) IEM_NOEXCEPT_MAY_LONGJMP;
1313void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) IEM_NOEXCEPT_MAY_LONGJMP;
1314void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) IEM_NOEXCEPT_MAY_LONGJMP;
1315void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) IEM_NOEXCEPT_MAY_LONGJMP;
1316void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP;
1317void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
1318#endif
1319
1320VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
1321 void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT;
1322VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT;
1323VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value) RT_NOEXCEPT;
1324VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT;
1325VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value) RT_NOEXCEPT;
1326VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
1327VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
1328VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
1329VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT;
1330VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
1331 void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT;
1332VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
1333 void const **ppvMem, uint64_t uCurNewRsp) RT_NOEXCEPT;
1334VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) RT_NOEXCEPT;
1335VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value) RT_NOEXCEPT;
1336VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value) RT_NOEXCEPT;
1337VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value) RT_NOEXCEPT;
1338VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
1339VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
1340VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
1341/** @} */
1342
1343/** @} */
1344
1345RT_C_DECLS_END
1346
1347#endif /* !VMM_INCLUDED_SRC_include_IEMInternal_armv8_h */
1348
注意: 瀏覽 TracBrowser 來幫助您使用儲存庫瀏覽器

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette