VirtualBox

source: vbox/trunk/src/VBox/VMM/include/IEMInternal-armv8.h

最後變更這個檔案的修訂版本是 108278,由 vboxsync 於 4 週前提交

VMM/IEM: Removed the #ifndef IEM_WITH_SETJMP code. We've had IEM_WITH_SETJMP defined unconditionally since 7.0 and the code probably doesn't even compile w/o it, so best remove the unused code. jiraref:VBP-1531

  • 屬性 svn:eol-style 設為 native
  • 屬性 svn:keywords 設為 Author Date Id Revision
檔案大小: 58.6 KB
 
1/* $Id: IEMInternal-armv8.h 108278 2025-02-18 15:46:53Z vboxsync $ */
2/** @file
3 * IEM - Internal header file, ARMv8 variant.
4 */
5
6/*
7 * Copyright (C) 2023-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.alldomusa.eu.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28#ifndef VMM_INCLUDED_SRC_include_IEMInternal_armv8_h
29#define VMM_INCLUDED_SRC_include_IEMInternal_armv8_h
30#ifndef RT_WITHOUT_PRAGMA_ONCE
31# pragma once
32#endif
33
34#include <VBox/vmm/cpum.h>
35#include <VBox/vmm/iem.h>
36#include <VBox/vmm/pgm.h>
37#include <VBox/vmm/stam.h>
38#include <VBox/param.h>
39
40#include <iprt/setjmp-without-sigmask.h>
41
42
43RT_C_DECLS_BEGIN
44
45
46/** @defgroup grp_iem_int Internals
47 * @ingroup grp_iem
48 * @internal
49 * @{
50 */
51
/** For expanding symbols in SlickEdit and other products tagging and
 * cross-referencing IEM symbols. */
#ifndef IEM_STATIC
# define IEM_STATIC static
#endif

/** @def IEM_WITH_THROW_CATCH
 * Enables using C++ throw/catch as an alternative to setjmp/longjmp in user
 * mode code.
 *
 * With GCC 11.3.1 and code TLB on linux, using throw/catch instead of
 * setjmp/longjmp resulted in bs2-test-1 running 3.00% faster and all but one
 * test result value improving by more than 1%. (Best out of three.)
 *
 * With Visual C++ 2019 and code TLB on windows, using throw/catch instead of
 * setjmp/longjmp resulted in bs2-test-1 running 3.68% faster and all but some
 * of the MMIO and CPUID tests ran noticeably faster. Variation is greater
 * than on Linux, but it should be quite a bit faster for normal code.
 */
#if (defined(IN_RING3) && (defined(__GNUC__) || defined(_MSC_VER))) \
 || defined(DOXYGEN_RUNNING)
# define IEM_WITH_THROW_CATCH
#endif

/** @def IEM_DO_LONGJMP
 *
 * Wrapper around longjmp / throw.
 *
 * @param a_pVCpu The CPU handle. (Unused by the throw variant.)
 * @param a_rc The status code to jump back with / throw.
 */
#ifdef IEM_WITH_THROW_CATCH
# define IEM_DO_LONGJMP(a_pVCpu, a_rc) throw int(a_rc)
#else
# define IEM_DO_LONGJMP(a_pVCpu, a_rc) longjmp(*(a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf), (a_rc))
#endif
88
89/** For use with IEM function that may do a longjmp (when enabled).
90 *
91 * Visual C++ has trouble longjmp'ing from/over functions with the noexcept
92 * attribute. So, we indicate that function that may be part of a longjmp may
93 * throw "exceptions" and that the compiler should definitely not generate and
94 * std::terminate calling unwind code.
95 *
96 * Here is one example of this ending in std::terminate:
97 * @code{.txt}
9800 00000041`cadfda10 00007ffc`5d5a1f9f ucrtbase!abort+0x4e
9901 00000041`cadfda40 00007ffc`57af229a ucrtbase!terminate+0x1f
10002 00000041`cadfda70 00007ffb`eec91030 VCRUNTIME140!__std_terminate+0xa [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\ehhelpers.cpp @ 192]
10103 00000041`cadfdaa0 00007ffb`eec92c6d VCRUNTIME140_1!_CallSettingFrame+0x20 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\amd64\handlers.asm @ 50]
10204 00000041`cadfdad0 00007ffb`eec93ae5 VCRUNTIME140_1!__FrameHandler4::FrameUnwindToState+0x241 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\frame.cpp @ 1085]
10305 00000041`cadfdc00 00007ffb`eec92258 VCRUNTIME140_1!__FrameHandler4::FrameUnwindToEmptyState+0x2d [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\risctrnsctrl.cpp @ 218]
10406 00000041`cadfdc30 00007ffb`eec940e9 VCRUNTIME140_1!__InternalCxxFrameHandler<__FrameHandler4>+0x194 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\frame.cpp @ 304]
10507 00000041`cadfdcd0 00007ffc`5f9f249f VCRUNTIME140_1!__CxxFrameHandler4+0xa9 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\risctrnsctrl.cpp @ 290]
10608 00000041`cadfdd40 00007ffc`5f980939 ntdll!RtlpExecuteHandlerForUnwind+0xf
10709 00000041`cadfdd70 00007ffc`5f9a0edd ntdll!RtlUnwindEx+0x339
1080a 00000041`cadfe490 00007ffc`57aff976 ntdll!RtlUnwind+0xcd
1090b 00000041`cadfea00 00007ffb`e1b5de01 VCRUNTIME140!__longjmp_internal+0xe6 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\amd64\longjmp.asm @ 140]
1100c (Inline Function) --------`-------- VBoxVMM!iemOpcodeGetNextU8SlowJmp+0x95 [L:\vbox-intern\src\VBox\VMM\VMMAll\IEMAll.cpp @ 1155]
1110d 00000041`cadfea50 00007ffb`e1b60f6b VBoxVMM!iemOpcodeGetNextU8Jmp+0xc1 [L:\vbox-intern\src\VBox\VMM\include\IEMInline.h @ 402]
1120e 00000041`cadfea90 00007ffb`e1cc6201 VBoxVMM!IEMExecForExits+0xdb [L:\vbox-intern\src\VBox\VMM\VMMAll\IEMAll.cpp @ 10185]
1130f 00000041`cadfec70 00007ffb`e1d0df8d VBoxVMM!EMHistoryExec+0x4f1 [L:\vbox-intern\src\VBox\VMM\VMMAll\EMAll.cpp @ 452]
11410 00000041`cadfed60 00007ffb`e1d0d4c0 VBoxVMM!nemR3WinHandleExitCpuId+0x79d [L:\vbox-intern\src\VBox\VMM\VMMAll\NEMAllNativeTemplate-win.cpp.h @ 1829] @encode
115 @endcode
116 *
117 * @see https://developercommunity.visualstudio.com/t/fragile-behavior-of-longjmp-called-from-noexcept-f/1532859
118 */
/* See the long IEM_NOEXCEPT_MAY_LONGJMP discussion above: MSC cannot safely
   longjmp over noexcept frames, and the throw/catch variant needs unwind
   info, so both get the RT_NOEXCEPT_EX(false) treatment. */
#if defined(_MSC_VER) || defined(IEM_WITH_THROW_CATCH)
# define IEM_NOEXCEPT_MAY_LONGJMP RT_NOEXCEPT_EX(false)
#else
# define IEM_NOEXCEPT_MAY_LONGJMP RT_NOEXCEPT
#endif

/** @def IEM_CFG_TARGET_CPU
 * The minimum target CPU for the IEM emulation (IEMTARGETCPU_XXX value).
 *
 * By default we allow this to be configured by the user via the
 * CPUM/GuestCpuName config string, but this comes at a slight cost during
 * decoding. So, for applications of this code where there is no need to
 * be dynamic wrt target CPU, just modify this define.
 */
#if !defined(IEM_CFG_TARGET_CPU) || defined(DOXYGEN_RUNNING)
# define IEM_CFG_TARGET_CPU IEMTARGETCPU_DYNAMIC
#endif
136
137//#define IEM_WITH_CODE_TLB // - work in progress
138//#define IEM_WITH_DATA_TLB // - work in progress
139
140
141//#define IEM_LOG_MEMORY_WRITES
142
#if !defined(IN_TSTVMSTRUCT) && !defined(DOXYGEN_RUNNING)
/** Instruction statistics.
 * X-macro pattern: each IEM_DO_INSTR_STAT invocation in the statistics
 * template expands to one counter member. */
typedef struct IEMINSTRSTATS
{
# define IEM_DO_INSTR_STAT(a_Name, a_szDesc) uint32_t a_Name;
/** @todo # include "IEMInstructionStatisticsTmpl.h" */
    /** Placeholder member until the statistics template above is included. */
    uint8_t bDummy;
# undef IEM_DO_INSTR_STAT
#else
/* Opaque forward declaration for the struct testcase and doxygen. */
struct IEMINSTRSTATS;
typedef struct IEMINSTRSTATS IEMINSTRSTATS;
#endif
/** Pointer to IEM instruction statistics. */
typedef IEMINSTRSTATS *PIEMINSTRSTATS;
158
159
/** @name IEMTARGETCPU_EFL_BEHAVIOR_XXX - IEMCPU::aidxTargetCpuEflFlavour
 * @{ */
#define IEMTARGETCPU_EFL_BEHAVIOR_NATIVE 0 /**< Native result; Intel EFLAGS when on non-x86 hosts. */
#define IEMTARGETCPU_EFL_BEHAVIOR_RESERVED 1 /**< Reserved/dummy entry slot that's the same as 0. */
#define IEMTARGETCPU_EFL_BEHAVIOR_MASK 1 /**< For masking the index before use. */
/** Selects the right variant from a_aArray.
 * pVCpu is implicit in the caller context.
 * Uses aidxTargetCpuEflFlavour[1], the entry for when the host natively
 * supports the operation (see IEMCPU::aidxTargetCpuEflFlavour). */
#define IEMTARGETCPU_EFL_BEHAVIOR_SELECT(a_aArray) \
    (a_aArray[pVCpu->iem.s.aidxTargetCpuEflFlavour[1] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
/** Variation of IEMTARGETCPU_EFL_BEHAVIOR_SELECT for when no native worker can
 * be used because the host CPU does not support the operation
 * (uses aidxTargetCpuEflFlavour[0]). */
#define IEMTARGETCPU_EFL_BEHAVIOR_SELECT_NON_NATIVE(a_aArray) \
    (a_aArray[pVCpu->iem.s.aidxTargetCpuEflFlavour[0] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
/** Variation of IEMTARGETCPU_EFL_BEHAVIOR_SELECT for a two dimensional
 * array paralleling IEMCPU::aidxTargetCpuEflFlavour and a single bit index
 * into the two.
 * @sa IEM_SELECT_NATIVE_OR_FALLBACK */
#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
# define IEMTARGETCPU_EFL_BEHAVIOR_SELECT_EX(a_aaArray, a_fNative) \
    (a_aaArray[a_fNative][pVCpu->iem.s.aidxTargetCpuEflFlavour[a_fNative] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
#else
# define IEMTARGETCPU_EFL_BEHAVIOR_SELECT_EX(a_aaArray, a_fNative) \
    (a_aaArray[0][pVCpu->iem.s.aidxTargetCpuEflFlavour[0] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
#endif
/** @} */
185
/**
 * Branch types.
 * @note Values start at 1, presumably so that zero can serve as "no branch"
 * - TODO confirm against users of this enum.
 */
typedef enum IEMBRANCH
{
    IEMBRANCH_JUMP = 1,
    IEMBRANCH_CALL,
    IEMBRANCH_TRAP,
    IEMBRANCH_SOFTWARE_INT,
    IEMBRANCH_HARDWARE_INT
} IEMBRANCH;
AssertCompileSize(IEMBRANCH, 4);
198
199
/**
 * INT instruction types.
 * @note The non-zero values are the corresponding IEM_XCPT_FLAGS_XXX bits,
 * presumably so the enum can be OR'ed straight into the exception flags
 * - TODO confirm against the IEM_XCPT_FLAGS_XXX users.
 */
typedef enum IEMINT
{
    /** INT n instruction (opcode 0xcd imm). */
    IEMINT_INTN = 0,
    /** Single byte INT3 instruction (opcode 0xcc). */
    IEMINT_INT3 = IEM_XCPT_FLAGS_BP_INSTR,
    /** Single byte INTO instruction (opcode 0xce). */
    IEMINT_INTO = IEM_XCPT_FLAGS_OF_INSTR,
    /** Single byte INT1 (ICEBP) instruction (opcode 0xf1). */
    IEMINT_INT1 = IEM_XCPT_FLAGS_ICEBP_INSTR
} IEMINT;
AssertCompileSize(IEMINT, 4);
215
216
/** An entry in the IEM TLB (see IEMTLB). */
typedef struct IEMTLBENTRY
{
    /** The TLB entry tag.
     * Bits 35 thru 0 are made up of the virtual address shifted right 12 bits, this
     * is ASSUMING a virtual address width of 48 bits.
     *
     * Bits 63 thru 36 are made up of the TLB revision (zero means invalid).
     *
     * The TLB lookup code uses the current TLB revision, which won't ever be zero,
     * enabling an extremely cheap TLB invalidation most of the time. When the TLB
     * revision wraps around though, the tags needs to be zeroed.
     *
     * @note Try use SHRD instruction? After seeing
     * https://gmplib.org/~tege/x86-timing.pdf, maybe not.
     *
     * @todo This will need to be reorganized for 57-bit wide virtual address and
     * PCID (currently 12 bits) and ASID (currently 6 bits) support. We'll
     * have to move the TLB entry versioning entirely to the
     * fFlagsAndPhysRev member then, 57 bit wide VAs means we'll only have
     * 19 bits left (64 - 57 + 12 = 19) and they'll almost entire be
     * consumed by PCID and ASID (12 + 6 = 18).
     */
    uint64_t uTag;
    /** Access flags and physical TLB revision.
     *
     * The authoritative bit layout is given by the IEMTLBE_F_XXX defines below:
     * - Bit 0 - page tables - not executable (X86_PTE_PAE_NX).
     * - Bit 1 - page tables - not writable (complemented X86_PTE_RW).
     * - Bit 2 - page tables - not user (complemented X86_PTE_US).
     * - Bit 3 - pgm phys/virt - not directly writable.
     * - Bit 4 - pgm phys page - not directly readable.
     * - Bit 5 - page tables - not accessed (complemented X86_PTE_A).
     * - Bit 6 - page tables - not dirty (complemented X86_PTE_D).
     * - Bit 7 - page tables - large page (for flushing).
     * - Bit 8 - tlb entry - pbMappingR3 member not valid.
     * - Bit 9 - phys page - unassigned memory.
     * - Bit 10 - phys page - code page.
     * - Bits 63 thru 11 are used for the physical TLB revision number.
     *
     * We're using complemented bit meanings here because it makes it easy to check
     * whether special action is required. For instance a user mode write access
     * would do a "TEST fFlags, (X86_PTE_RW | X86_PTE_US | X86_PTE_D)" and a
     * non-zero result would mean special handling needed because either it wasn't
     * writable, or it wasn't user, or the page wasn't dirty. A user mode read
     * access would do "TEST fFlags, X86_PTE_US"; and a kernel mode read wouldn't
     * need to check any PTE flag.
     */
    uint64_t fFlagsAndPhysRev;
    /** The guest physical page address. */
    uint64_t GCPhys;
    /** Pointer to the ring-3 mapping. */
    R3PTRTYPE(uint8_t *) pbMappingR3;
#if HC_ARCH_BITS == 32
    uint32_t u32Padding1;
#endif
} IEMTLBENTRY;
AssertCompileSize(IEMTLBENTRY, 32);
/** Pointer to an IEM TLB entry. */
typedef IEMTLBENTRY *PIEMTLBENTRY;
272
/** @name IEMTLBE_F_XXX - TLB entry flags (IEMTLBENTRY::fFlagsAndPhysRev)
 * @{ */
#define IEMTLBE_F_PT_NO_EXEC RT_BIT_64(0) /**< Page tables: Not executable. */
#define IEMTLBE_F_PT_NO_WRITE RT_BIT_64(1) /**< Page tables: Not writable. */
#define IEMTLBE_F_PT_NO_USER RT_BIT_64(2) /**< Page tables: Not user accessible (supervisor only). */
#define IEMTLBE_F_PG_NO_WRITE RT_BIT_64(3) /**< Phys page: Not writable (access handler, ROM, whatever). */
#define IEMTLBE_F_PG_NO_READ RT_BIT_64(4) /**< Phys page: Not readable (MMIO / access handler, ROM) */
#define IEMTLBE_F_PT_NO_ACCESSED RT_BIT_64(5) /**< Page tables: Not accessed (need to be marked accessed). */
#define IEMTLBE_F_PT_NO_DIRTY RT_BIT_64(6) /**< Page tables: Not dirty (needs to be made dirty on write). */
#define IEMTLBE_F_PT_LARGE_PAGE RT_BIT_64(7) /**< Page tables: Large 2 or 4 MiB page (for flushing). */
#define IEMTLBE_F_NO_MAPPINGR3 RT_BIT_64(8) /**< TLB entry: The IEMTLBENTRY::pbMappingR3 member is invalid. */
#define IEMTLBE_F_PG_UNASSIGNED RT_BIT_64(9) /**< Phys page: Unassigned memory (not RAM, ROM, MMIO2 or MMIO). */
#define IEMTLBE_F_PG_CODE_PAGE RT_BIT_64(10) /**< Phys page: Code page. */
#define IEMTLBE_F_PHYS_REV UINT64_C(0xfffffffffffff800) /**< Physical revision mask (bits 63:11). @sa IEMTLB_PHYS_REV_INCR */
/** @} */
288
289
/** The TLB size (power of two).
 * We initially chose 256 because that way we can obtain the result directly
 * from a 8-bit register without an additional AND instruction.
 * See also @bugref{10687}. */
#define IEMTLB_ENTRY_COUNT 256
/** log2(IEMTLB_ENTRY_COUNT) - must be kept in sync with the above. */
#define IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO 8

/** TLB slot format spec (assumes uint32_t or unsigned value).
 * Picks a hex field width wide enough for any valid slot index. */
#if IEMTLB_ENTRY_COUNT <= 0x100 / 2
# define IEMTLB_SLOT_FMT "%02x"
#elif IEMTLB_ENTRY_COUNT <= 0x1000 / 2
# define IEMTLB_SLOT_FMT "%03x"
#elif IEMTLB_ENTRY_COUNT <= 0x10000 / 2
# define IEMTLB_SLOT_FMT "%04x"
#else
# define IEMTLB_SLOT_FMT "%05x"
#endif
307
308
/**
 * An IEM TLB.
 *
 * We've got two of these, one for data and one for instructions.
 */
typedef struct IEMTLB
{
    /** The TLB entries.
     * We've chosen 256 because that way we can obtain the result directly from a
     * 8-bit register without an additional AND instruction. */
    IEMTLBENTRY aEntries[IEMTLB_ENTRY_COUNT];
    /** The TLB revision.
     * This is actually only 28 bits wide (see IEMTLBENTRY::uTag) and is incremented
     * by adding RT_BIT_64(36) to it. When it wraps around and becomes zero, all
     * the tags in the TLB must be zeroed and the revision set to RT_BIT_64(36).
     * (The revision zero indicates an invalid TLB entry.)
     *
     * The initial value is chosen to cause an early wraparound. */
    uint64_t uTlbRevision;
    /** The TLB physical address revision - shadow of PGM variable.
     *
     * This lives in the upper bits of IEMTLBENTRY::fFlagsAndPhysRev (see
     * IEMTLBE_F_PHYS_REV) and is incremented by adding IEMTLB_PHYS_REV_INCR.
     * When it wraps around and becomes zero, a rendezvous is called and each
     * CPU wipes the IEMTLBENTRY::pbMappingR3 as well as the
     * IEMTLBE_F_PHYS_REV portion of IEMTLBENTRY::fFlagsAndPhysRev plus
     * bits 4 and 3.
     *
     * The initial value is chosen to cause an early wraparound. */
    uint64_t volatile uTlbPhysRev;

    /* Statistics: */

    /** TLB hits (VBOX_WITH_STATISTICS only). */
    uint64_t cTlbHits;
    /** TLB misses. */
    uint32_t cTlbMisses;
    /** Slow read path. */
    uint32_t cTlbSlowCodeReadPath;
#if 0
    /** TLB misses because of tag mismatch. */
    uint32_t cTlbMissesTag;
    /** TLB misses because of virtual access violation. */
    uint32_t cTlbMissesVirtAccess;
    /** TLB misses because of dirty bit. */
    uint32_t cTlbMissesDirty;
    /** TLB misses because of MMIO */
    uint32_t cTlbMissesMmio;
    /** TLB misses because of write access handlers. */
    uint32_t cTlbMissesWriteHandler;
    /** TLB misses because no r3(/r0) mapping. */
    uint32_t cTlbMissesMapping;
#endif
    /** Alignment padding. */
    uint32_t au32Padding[3+5];
} IEMTLB;
AssertCompileSizeAlignment(IEMTLB, 64);
/** The width (in bits) of the address portion of the TLB tag. */
#define IEMTLB_TAG_ADDR_WIDTH 36
/** IEMTLB::uTlbRevision increment. */
#define IEMTLB_REVISION_INCR RT_BIT_64(IEMTLB_TAG_ADDR_WIDTH)
/** IEMTLB::uTlbRevision mask. */
#define IEMTLB_REVISION_MASK (~(RT_BIT_64(IEMTLB_TAG_ADDR_WIDTH) - 1))

/** IEMTLB::uTlbPhysRev increment.
 * This must be the lowest bit of the IEMTLBE_F_PHYS_REV mask, i.e. bit 11.
 * (The previous RT_BIT_64(10) collided with IEMTLBE_F_PG_CODE_PAGE and lay
 * outside IEMTLBE_F_PHYS_REV, so an increment would have corrupted the flag
 * bits rather than bumping the revision.)
 * @sa IEMTLBE_F_PHYS_REV */
#define IEMTLB_PHYS_REV_INCR RT_BIT_64(11)
/**
 * Calculates the TLB tag for a virtual address.
 * @returns Tag value for indexing and comparing with IEMTLBENTRY::uTag.
 * @param a_pTlb The TLB.
 * @param a_GCPtr The virtual address.
 */
#define IEMTLB_CALC_TAG(a_pTlb, a_GCPtr) ( IEMTLB_CALC_TAG_NO_REV(a_GCPtr) | (a_pTlb)->uTlbRevision )
/**
 * Calculates the TLB tag for a virtual address but without TLB revision.
 * The shift pair discards the top 16 address bits and the page offset,
 * assuming a 48-bit virtual address width (see IEMTLBENTRY::uTag).
 * @returns Tag value for indexing and comparing with IEMTLBENTRY::uTag.
 * @param a_GCPtr The virtual address.
 */
#define IEMTLB_CALC_TAG_NO_REV(a_GCPtr) ( (((a_GCPtr) << 16) >> (GUEST_PAGE_SHIFT + 16)) )
/**
 * Converts a TLB tag value into a TLB index.
 * (The uint8_t cast relies on IEMTLB_ENTRY_COUNT being 256.)
 * @returns Index into IEMTLB::aEntries.
 * @param a_uTag Value returned by IEMTLB_CALC_TAG.
 */
#define IEMTLB_TAG_TO_INDEX(a_uTag) ( (uint8_t)(a_uTag) )
/**
 * Converts a TLB tag value into a pointer to the corresponding TLB entry.
 * @returns Pointer into IEMTLB::aEntries.
 * @param a_pTlb The TLB.
 * @param a_uTag Value returned by IEMTLB_CALC_TAG.
 */
#define IEMTLB_TAG_TO_ENTRY(a_pTlb, a_uTag) ( &(a_pTlb)->aEntries[IEMTLB_TAG_TO_INDEX(a_uTag)] )
400
401
/**
 * The per-CPU IEM state.
 *
 * @todo This is just a STUB currently!
 */
typedef struct IEMCPU
{
    /** Info status code that needs to be propagated to the IEM caller.
     * This cannot be passed internally, as it would complicate all success
     * checks within the interpreter making the code larger and almost impossible
     * to get right. Instead, we'll store status codes to pass on here. Each
     * source of these codes will perform appropriate sanity checks. */
    int32_t rcPassUp; /* 0x00 */

    /** The current CPU execution mode (CS). */
    IEMMODE enmCpuMode; /* 0x04 */
    /** The Exception Level (EL). */
    uint8_t uEl; /* 0x05 */

    /** Whether to bypass access handlers or not. */
    bool fBypassHandlers : 1; /* 0x06.0 */
    /** Whether there are pending hardware instruction breakpoints.
     * @note The offset comments skip 0x06.1 - presumably a removed or
     * never-added flag; TODO confirm. */
    bool fPendingInstructionBreakpoints : 1; /* 0x06.2 */
    /** Whether there are pending hardware data breakpoints. */
    bool fPendingDataBreakpoints : 1; /* 0x06.3 */

    /* Unused/padding */
    bool fUnused; /* 0x07 */

    /** @name Decoder state.
     * @{ */
#ifndef IEM_WITH_OPAQUE_DECODER_STATE
    /** The current instruction being executed. */
    uint32_t u32Insn;
    /** Opaque filler up to offset 0x48 (checked by the
     * AssertCompileMemberOffset on fCurXcpt below). */
    uint8_t abOpaqueDecoder[0x48 - 0x4 - 0x8];
#else /* IEM_WITH_OPAQUE_DECODER_STATE */
    /** Opaque filler up to offset 0x48. */
    uint8_t abOpaqueDecoder[0x48 - 0x8];
#endif /* IEM_WITH_OPAQUE_DECODER_STATE */
    /** @} */


    /** The flags of the current exception / interrupt. */
    uint32_t fCurXcpt; /* 0x48, 0x48 */
    /** The current exception / interrupt. */
    uint8_t uCurXcpt;
    /** Exception / interrupt recursion depth. */
    int8_t cXcptRecursions;

    /** The number of active guest memory mappings. */
    uint8_t cActiveMappings;
    /** The next unused mapping index. */
    uint8_t iNextMapping;
    /** Records for tracking guest memory mappings. */
    struct
    {
        /** The address of the mapped bytes. */
        void *pv;
        /** The access flags (IEM_ACCESS_XXX).
         * IEM_ACCESS_INVALID if the entry is unused. */
        uint32_t fAccess;
#if HC_ARCH_BITS == 64
        uint32_t u32Alignment4; /**< Alignment padding. */
#endif
    } aMemMappings[3];

    /** Locking records for the mapped memory. */
    union
    {
        PGMPAGEMAPLOCK Lock;
        uint64_t au64Padding[2];
    } aMemMappingLocks[3];

    /** Bounce buffer info.
     * This runs in parallel to aMemMappings. */
    struct
    {
        /** The physical address of the first byte. */
        RTGCPHYS GCPhysFirst;
        /** The physical address of the second page. */
        RTGCPHYS GCPhysSecond;
        /** The number of bytes in the first page. */
        uint16_t cbFirst;
        /** The number of bytes in the second page. */
        uint16_t cbSecond;
        /** Whether it's unassigned memory. */
        bool fUnassigned;
        /** Explicit alignment padding. */
        bool afAlignment5[3];
    } aMemBbMappings[3];

    /* Padding so that aBounceBuffers lands on a 64 byte boundary (verified
       by the AssertCompileMemberAlignment checks below). */
    uint64_t abAlignment7[1];

    /** Bounce buffer storage.
     * This runs in parallel to aMemMappings and aMemBbMappings. */
    struct
    {
        uint8_t ab[512];
    } aBounceBuffers[3];


    /** Pointer set jump buffer - ring-3 context. */
    R3PTRTYPE(jmp_buf *) pJmpBufR3;

    /** The error code for the current exception / interrupt. */
    uint32_t uCurXcptErr;

    /** @name Statistics
     * @{ */
    /** The number of instructions we've executed. */
    uint32_t cInstructions;
    /** The number of potential exits. */
    uint32_t cPotentialExits;
    /** Counts the VERR_IEM_INSTR_NOT_IMPLEMENTED returns. */
    uint32_t cRetInstrNotImplemented;
    /** Counts the VERR_IEM_ASPECT_NOT_IMPLEMENTED returns. */
    uint32_t cRetAspectNotImplemented;
    /** Counts informational statuses returned (other than VINF_SUCCESS). */
    uint32_t cRetInfStatuses;
    /** Counts other error statuses returned. */
    uint32_t cRetErrStatuses;
    /** Number of times rcPassUp has been used. */
    uint32_t cRetPassUpStatus;
    /** Number of times RZ left with instruction commit pending for ring-3. */
    uint32_t cPendingCommit;
    /** Number of long jumps. */
    uint32_t cLongJumps;
    /** @} */

    /** @name Target CPU information.
     * @{ */
#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
    /** The target CPU. */
    uint8_t uTargetCpu;
#else
    /** Padding for when the target CPU is fixed at compile time. */
    uint8_t bTargetCpuPadding;
#endif
    /** For selecting assembly works matching the target CPU EFLAGS behaviour, see
     * IEMTARGETCPU_EFL_BEHAVIOR_XXX for values, with the 1st entry for when no
     * native host support and the 2nd for when there is.
     *
     * The two values are typically indexed by a g_CpumHostFeatures bit.
     *
     * This is for instance used for the BSF & BSR instructions where AMD and
     * Intel CPUs produce different EFLAGS. */
    uint8_t aidxTargetCpuEflFlavour[2];

    /** Explicit alignment padding. */
    uint8_t bPadding[5];

    /** The CPU vendor. */
    CPUMCPUVENDOR enmCpuVendor;
    /** @} */

    /** @name Host CPU information.
     * @{ */
    /** The CPU vendor. */
    CPUMCPUVENDOR enmHostCpuVendor;
    /** @} */

    /** Data TLB.
     * @remarks Must be 64-byte aligned. */
    IEMTLB DataTlb;
    /** Instruction TLB.
     * @remarks Must be 64-byte aligned. */
    IEMTLB CodeTlb;

    /** Exception statistics. */
    STAMCOUNTER aStatXcpts[32];
    /** Interrupt statistics. */
    uint32_t aStatInts[256];

#if defined(VBOX_WITH_STATISTICS) && !defined(IN_TSTVMSTRUCT) && !defined(DOXYGEN_RUNNING)
    /** Instruction statistics for ring-3. */
    IEMINSTRSTATS StatsR3;
#endif
} IEMCPU;
AssertCompileMemberOffset(IEMCPU, fCurXcpt, 0x48);
AssertCompileMemberAlignment(IEMCPU, aBounceBuffers, 8);
AssertCompileMemberAlignment(IEMCPU, aBounceBuffers, 16);
AssertCompileMemberAlignment(IEMCPU, aBounceBuffers, 32);
AssertCompileMemberAlignment(IEMCPU, aBounceBuffers, 64);
AssertCompileMemberAlignment(IEMCPU, DataTlb, 64);
AssertCompileMemberAlignment(IEMCPU, CodeTlb, 64);

/** Pointer to the per-CPU IEM state. */
typedef IEMCPU *PIEMCPU;
/** Pointer to the const per-CPU IEM state. */
typedef IEMCPU const *PCIEMCPU;
590
591
/** @def IEM_GET_CTX
 * Gets the guest CPU context for the calling EMT.
 * @returns PCPUMCTX
 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
 */
#define IEM_GET_CTX(a_pVCpu) (&(a_pVCpu)->cpum.GstCtx)

/** @def IEM_CTX_ASSERT
 * Asserts that the state corresponding to @a a_fExtrnMbz is present in the
 * CPU context, i.e. that none of those CPUMCTX_EXTRN_XXX bits are still set
 * (external) in CPUMCTX::fExtrn.
 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
 * @param a_fExtrnMbz The mask of CPUMCTX_EXTRN_XXX flags that must be zero.
 */
#define IEM_CTX_ASSERT(a_pVCpu, a_fExtrnMbz) AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
    ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", (a_pVCpu)->cpum.GstCtx.fExtrn, \
     (a_fExtrnMbz)))
607
/** @def IEM_CTX_IMPORT_RET
 * Makes sure the CPU context bits given by @a a_fExtrnImport are imported.
 *
 * Calls CPUMImportGuestStateOnDemand to import the bits as needed.
 *
 * Returns on import failure.
 *
 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
 * @param a_fExtrnImport The mask of CPUMCTX_EXTRN_XXX flags to import.
 */
#define IEM_CTX_IMPORT_RET(a_pVCpu, a_fExtrnImport) \
    do { \
        if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
        { /* likely */ } \
        else \
        { \
            int rcCtxImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
            AssertRCReturn(rcCtxImport, rcCtxImport); \
        } \
    } while (0)
628
/** @def IEM_CTX_IMPORT_NORET
 * Makes sure the CPU context bits given by @a a_fExtrnImport are imported.
 *
 * Calls CPUMImportGuestStateOnDemand to import the bits as needed.
 * Import failures are only logged (AssertLogRelRC); execution continues.
 *
 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
 * @param a_fExtrnImport The mask of CPUMCTX_EXTRN_XXX flags to import.
 */
#define IEM_CTX_IMPORT_NORET(a_pVCpu, a_fExtrnImport) \
    do { \
        if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
        { /* likely */ } \
        else \
        { \
            int rcCtxImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
            AssertLogRelRC(rcCtxImport); \
        } \
    } while (0)
647
/** @def IEM_CTX_IMPORT_JMP
 * Makes sure the CPU context bits given by @a a_fExtrnImport are imported.
 *
 * Calls CPUMImportGuestStateOnDemand to import the bits as needed.
 *
 * Jumps on import failure.
 *
 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
 * @param a_fExtrnImport The mask of CPUMCTX_EXTRN_XXX flags to import.
 */
#define IEM_CTX_IMPORT_JMP(a_pVCpu, a_fExtrnImport) \
    do { \
        if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
        { /* likely */ } \
        else \
        { \
            int rcCtxImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
            /* Use the macro parameter, not a bare pVCpu from the caller's \
               scope, for proper macro hygiene (matches the sibling macros). */ \
            AssertRCStmt(rcCtxImport, IEM_DO_LONGJMP(a_pVCpu, rcCtxImport)); \
        } \
    } while (0)
668
669
670
/** @def IEM_GET_TARGET_CPU
 * Gets the current IEMTARGETCPU value.
 * @returns IEMTARGETCPU value.
 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
 */
#if IEM_CFG_TARGET_CPU != IEMTARGETCPU_DYNAMIC
# define IEM_GET_TARGET_CPU(a_pVCpu) (IEM_CFG_TARGET_CPU)
#else
# define IEM_GET_TARGET_CPU(a_pVCpu) ((a_pVCpu)->iem.s.uTargetCpu)
#endif

/** @def IEM_GET_INSTR_LEN
 * Gets the instruction length.
 * @note Both branches are currently identical (4 bytes, presumably the fixed
 * A64 encoding); the split is kept for when the code TLB variant needs
 * to differ. */
/** @todo Thumb mode. */
#ifdef IEM_WITH_CODE_TLB
# define IEM_GET_INSTR_LEN(a_pVCpu) (sizeof(uint32_t))
#else
# define IEM_GET_INSTR_LEN(a_pVCpu) (sizeof(uint32_t))
#endif
690
691
/**
 * Shared per-VM IEM data.
 * Currently just a placeholder; no shared state is needed yet.
 */
typedef struct IEM
{
    /** Dummy member giving the structure a non-zero size. */
    uint8_t bDummy;
} IEM;
699
700
701
/** @name IEM_ACCESS_XXX - Access details.
 * @{ */
#define IEM_ACCESS_INVALID UINT32_C(0x000000ff) /**< Marks an unused aMemMappings entry. */
#define IEM_ACCESS_TYPE_READ UINT32_C(0x00000001)
#define IEM_ACCESS_TYPE_WRITE UINT32_C(0x00000002)
#define IEM_ACCESS_TYPE_EXEC UINT32_C(0x00000004)
#define IEM_ACCESS_TYPE_MASK UINT32_C(0x00000007)
#define IEM_ACCESS_WHAT_CODE UINT32_C(0x00000010)
#define IEM_ACCESS_WHAT_DATA UINT32_C(0x00000020)
#define IEM_ACCESS_WHAT_STACK UINT32_C(0x00000030)
#define IEM_ACCESS_WHAT_SYS UINT32_C(0x00000040)
#define IEM_ACCESS_WHAT_MASK UINT32_C(0x00000070)
/** The writes are partial, so initialize the bounce buffer with the
 * original RAM content. */
#define IEM_ACCESS_PARTIAL_WRITE UINT32_C(0x00000100)
/** Used in aMemMappings to indicate that the entry is bounce buffered. */
#define IEM_ACCESS_BOUNCE_BUFFERED UINT32_C(0x00000200)
/** Bounce buffer with ring-3 write pending, first page. */
#define IEM_ACCESS_PENDING_R3_WRITE_1ST UINT32_C(0x00000400)
/** Bounce buffer with ring-3 write pending, second page. */
#define IEM_ACCESS_PENDING_R3_WRITE_2ND UINT32_C(0x00000800)
/** Not locked, accessed via the TLB. */
#define IEM_ACCESS_NOT_LOCKED UINT32_C(0x00001000)
/** Valid bit mask. */
#define IEM_ACCESS_VALID_MASK UINT32_C(0x00001fff)
/** Shift count for the TLB flags (upper word). */
#define IEM_ACCESS_SHIFT_TLB_FLAGS 16

/* Convenience aliases combining an access type with what is accessed: */

/** Read+write data alias. */
#define IEM_ACCESS_DATA_RW (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_DATA)
/** Write data alias. */
#define IEM_ACCESS_DATA_W (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_DATA)
/** Read data alias. */
#define IEM_ACCESS_DATA_R (IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA)
/** Instruction fetch alias. */
#define IEM_ACCESS_INSTRUCTION (IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_WHAT_CODE)
/** Stack write alias. */
#define IEM_ACCESS_STACK_W (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_STACK)
/** Stack read alias. */
#define IEM_ACCESS_STACK_R (IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_STACK)
/** Stack read+write alias. */
#define IEM_ACCESS_STACK_RW (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_STACK)
/** Read system table alias. */
#define IEM_ACCESS_SYS_R (IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_SYS)
/** Read+write system table alias. */
#define IEM_ACCESS_SYS_RW (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_SYS)
/** @} */
752
/** @def IEM_DECL_IMPL_TYPE
 * For typedef'ing an instruction implementation function.
 *
 * @param a_RetType The return type.
 * @param a_Name The name of the type.
 * @param a_ArgList The argument list enclosed in parentheses.
 */

/** @def IEM_DECL_IMPL_DEF
 * For defining an instruction implementation function.
 *
 * @param a_RetType The return type.
 * @param a_Name The name of the function.
 * @param a_ArgList The argument list enclosed in parentheses.
 */

/** @def IEM_DECL_IMPL_PROTO
 * For prototyping an instruction implementation function.
 *
 * @param a_RetType The return type.
 * @param a_Name The name of the function.
 * @param a_ArgList The argument list enclosed in parentheses.
 */

/* C++17 (P0012R1) made 'noexcept' part of the function type, so the
   RT_NOEXCEPT variants are only usable from C++17 onwards. */
#if __cplusplus >= 201700 /* P0012R1 support */
# define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
    a_RetType (VBOXCALL a_Name) a_ArgList RT_NOEXCEPT
# define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
    a_RetType VBOXCALL a_Name a_ArgList RT_NOEXCEPT
# define IEM_DECL_IMPL_PROTO(a_RetType, a_Name, a_ArgList) \
    a_RetType VBOXCALL a_Name a_ArgList RT_NOEXCEPT

#else
# define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
    a_RetType (VBOXCALL a_Name) a_ArgList
# define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
    a_RetType VBOXCALL a_Name a_ArgList
# define IEM_DECL_IMPL_PROTO(a_RetType, a_Name, a_ArgList) \
    a_RetType VBOXCALL a_Name a_ArgList

#endif
786
/** @name C instruction implementations for anything slightly complicated.
 *
 * All C implementation functions receive the VCpu handle and the instruction
 * length as their implicit first two arguments.
 * @{ */

/**
 * For typedef'ing or declaring a C instruction implementation function taking
 * no extra arguments.
 *
 * @param a_Name The name of the type.
 */
# define IEM_CIMPL_DECL_TYPE_0(a_Name) \
    IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr))
/**
 * For defining a C instruction implementation function taking no extra
 * arguments.
 *
 * @param a_Name The name of the function.
 */
# define IEM_CIMPL_DEF_0(a_Name) \
    IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr))
/**
 * Prototype version of IEM_CIMPL_DEF_0.
 */
# define IEM_CIMPL_PROTO_0(a_Name) \
    IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr))
/**
 * For calling a C instruction implementation function taking no extra
 * arguments.
 *
 * This special call macro adds default arguments to the call and allow us to
 * change these later.
 *
 * @param a_fn The name of the function.
 */
# define IEM_CIMPL_CALL_0(a_fn) a_fn(pVCpu, cbInstr)
821
822/**
823 * For typedef'ing or declaring a C instruction implementation function taking
824 * one extra argument.
825 *
826 * @param a_Name The name of the type.
827 * @param a_Type0 The argument type.
828 * @param a_Arg0 The argument name.
829 */
830# define IEM_CIMPL_DECL_TYPE_1(a_Name, a_Type0, a_Arg0) \
831 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0))
832/**
833 * For defining a C instruction implementation function taking one extra
834 * argument.
835 *
836 * @param a_Name The name of the function
837 * @param a_Type0 The argument type.
838 * @param a_Arg0 The argument name.
839 */
840# define IEM_CIMPL_DEF_1(a_Name, a_Type0, a_Arg0) \
841 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0))
842/**
843 * Prototype version of IEM_CIMPL_DEF_1.
844 */
845# define IEM_CIMPL_PROTO_1(a_Name, a_Type0, a_Arg0) \
846 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0))
847/**
848 * For calling a C instruction implementation function taking one extra
849 * argument.
850 *
851 * This special call macro adds default arguments to the call and allow us to
852 * change these later.
853 *
854 * @param a_fn The name of the function.
855 * @param a0 The name of the 1st argument.
856 */
857# define IEM_CIMPL_CALL_1(a_fn, a0) a_fn(pVCpu, cbInstr, (a0))
858
859/**
860 * For typedef'ing or declaring a C instruction implementation function taking
861 * two extra arguments.
862 *
863 * @param a_Name The name of the type.
864 * @param a_Type0 The type of the 1st argument
865 * @param a_Arg0 The name of the 1st argument.
866 * @param a_Type1 The type of the 2nd argument.
867 * @param a_Arg1 The name of the 2nd argument.
868 */
869# define IEM_CIMPL_DECL_TYPE_2(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1) \
870 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1))
871/**
872 * For defining a C instruction implementation function taking two extra
873 * arguments.
874 *
875 * @param a_Name The name of the function.
876 * @param a_Type0 The type of the 1st argument
877 * @param a_Arg0 The name of the 1st argument.
878 * @param a_Type1 The type of the 2nd argument.
879 * @param a_Arg1 The name of the 2nd argument.
880 */
881# define IEM_CIMPL_DEF_2(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1) \
882 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1))
883/**
884 * Prototype version of IEM_CIMPL_DEF_2.
885 */
886# define IEM_CIMPL_PROTO_2(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1) \
887 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1))
888/**
889 * For calling a C instruction implementation function taking two extra
890 * arguments.
891 *
892 * This special call macro adds default arguments to the call and allow us to
893 * change these later.
894 *
895 * @param a_fn The name of the function.
896 * @param a0 The name of the 1st argument.
897 * @param a1 The name of the 2nd argument.
898 */
899# define IEM_CIMPL_CALL_2(a_fn, a0, a1) a_fn(pVCpu, cbInstr, (a0), (a1))
900
901/**
902 * For typedef'ing or declaring a C instruction implementation function taking
903 * three extra arguments.
904 *
905 * @param a_Name The name of the type.
906 * @param a_Type0 The type of the 1st argument
907 * @param a_Arg0 The name of the 1st argument.
908 * @param a_Type1 The type of the 2nd argument.
909 * @param a_Arg1 The name of the 2nd argument.
910 * @param a_Type2 The type of the 3rd argument.
911 * @param a_Arg2 The name of the 3rd argument.
912 */
913# define IEM_CIMPL_DECL_TYPE_3(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2) \
914 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2))
915/**
916 * For defining a C instruction implementation function taking three extra
917 * arguments.
918 *
919 * @param a_Name The name of the function.
920 * @param a_Type0 The type of the 1st argument
921 * @param a_Arg0 The name of the 1st argument.
922 * @param a_Type1 The type of the 2nd argument.
923 * @param a_Arg1 The name of the 2nd argument.
924 * @param a_Type2 The type of the 3rd argument.
925 * @param a_Arg2 The name of the 3rd argument.
926 */
927# define IEM_CIMPL_DEF_3(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2) \
928 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2))
929/**
930 * Prototype version of IEM_CIMPL_DEF_3.
931 */
932# define IEM_CIMPL_PROTO_3(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2) \
933 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2))
934/**
935 * For calling a C instruction implementation function taking three extra
936 * arguments.
937 *
938 * This special call macro adds default arguments to the call and allow us to
939 * change these later.
940 *
941 * @param a_fn The name of the function.
942 * @param a0 The name of the 1st argument.
943 * @param a1 The name of the 2nd argument.
944 * @param a2 The name of the 3rd argument.
945 */
946# define IEM_CIMPL_CALL_3(a_fn, a0, a1, a2) a_fn(pVCpu, cbInstr, (a0), (a1), (a2))
947
948
949/**
950 * For typedef'ing or declaring a C instruction implementation function taking
951 * four extra arguments.
952 *
953 * @param a_Name The name of the type.
954 * @param a_Type0 The type of the 1st argument
955 * @param a_Arg0 The name of the 1st argument.
956 * @param a_Type1 The type of the 2nd argument.
957 * @param a_Arg1 The name of the 2nd argument.
958 * @param a_Type2 The type of the 3rd argument.
959 * @param a_Arg2 The name of the 3rd argument.
960 * @param a_Type3 The type of the 4th argument.
961 * @param a_Arg3 The name of the 4th argument.
962 */
963# define IEM_CIMPL_DECL_TYPE_4(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3) \
964 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2, a_Type3 a_Arg3))
965/**
966 * For defining a C instruction implementation function taking four extra
967 * arguments.
968 *
969 * @param a_Name The name of the function.
970 * @param a_Type0 The type of the 1st argument
971 * @param a_Arg0 The name of the 1st argument.
972 * @param a_Type1 The type of the 2nd argument.
973 * @param a_Arg1 The name of the 2nd argument.
974 * @param a_Type2 The type of the 3rd argument.
975 * @param a_Arg2 The name of the 3rd argument.
976 * @param a_Type3 The type of the 4th argument.
977 * @param a_Arg3 The name of the 4th argument.
978 */
979# define IEM_CIMPL_DEF_4(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3) \
980 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \
981 a_Type2 a_Arg2, a_Type3 a_Arg3))
982/**
983 * Prototype version of IEM_CIMPL_DEF_4.
984 */
985# define IEM_CIMPL_PROTO_4(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3) \
986 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \
987 a_Type2 a_Arg2, a_Type3 a_Arg3))
988/**
989 * For calling a C instruction implementation function taking four extra
990 * arguments.
991 *
992 * This special call macro adds default arguments to the call and allow us to
993 * change these later.
994 *
995 * @param a_fn The name of the function.
996 * @param a0 The name of the 1st argument.
997 * @param a1 The name of the 2nd argument.
998 * @param a2 The name of the 3rd argument.
999 * @param a3 The name of the 4th argument.
1000 */
1001# define IEM_CIMPL_CALL_4(a_fn, a0, a1, a2, a3) a_fn(pVCpu, cbInstr, (a0), (a1), (a2), (a3))
1002
1003
1004/**
1005 * For typedef'ing or declaring a C instruction implementation function taking
1006 * five extra arguments.
1007 *
1008 * @param a_Name The name of the type.
1009 * @param a_Type0 The type of the 1st argument
1010 * @param a_Arg0 The name of the 1st argument.
1011 * @param a_Type1 The type of the 2nd argument.
1012 * @param a_Arg1 The name of the 2nd argument.
1013 * @param a_Type2 The type of the 3rd argument.
1014 * @param a_Arg2 The name of the 3rd argument.
1015 * @param a_Type3 The type of the 4th argument.
1016 * @param a_Arg3 The name of the 4th argument.
1017 * @param a_Type4 The type of the 5th argument.
1018 * @param a_Arg4 The name of the 5th argument.
1019 */
1020# define IEM_CIMPL_DECL_TYPE_5(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3, a_Type4, a_Arg4) \
1021 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, \
1022 a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2, \
1023 a_Type3 a_Arg3, a_Type4 a_Arg4))
1024/**
1025 * For defining a C instruction implementation function taking five extra
1026 * arguments.
1027 *
1028 * @param a_Name The name of the function.
1029 * @param a_Type0 The type of the 1st argument
1030 * @param a_Arg0 The name of the 1st argument.
1031 * @param a_Type1 The type of the 2nd argument.
1032 * @param a_Arg1 The name of the 2nd argument.
1033 * @param a_Type2 The type of the 3rd argument.
1034 * @param a_Arg2 The name of the 3rd argument.
1035 * @param a_Type3 The type of the 4th argument.
1036 * @param a_Arg3 The name of the 4th argument.
1037 * @param a_Type4 The type of the 5th argument.
1038 * @param a_Arg4 The name of the 5th argument.
1039 */
1040# define IEM_CIMPL_DEF_5(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3, a_Type4, a_Arg4) \
1041 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \
1042 a_Type2 a_Arg2, a_Type3 a_Arg3, a_Type4 a_Arg4))
1043/**
1044 * Prototype version of IEM_CIMPL_DEF_5.
1045 */
1046# define IEM_CIMPL_PROTO_5(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3, a_Type4, a_Arg4) \
1047 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \
1048 a_Type2 a_Arg2, a_Type3 a_Arg3, a_Type4 a_Arg4))
1049/**
1050 * For calling a C instruction implementation function taking five extra
1051 * arguments.
1052 *
1053 * This special call macro adds default arguments to the call and allow us to
1054 * change these later.
1055 *
1056 * @param a_fn The name of the function.
1057 * @param a0 The name of the 1st argument.
1058 * @param a1 The name of the 2nd argument.
1059 * @param a2 The name of the 3rd argument.
1060 * @param a3 The name of the 4th argument.
1061 * @param a4 The name of the 5th argument.
1062 */
1063# define IEM_CIMPL_CALL_5(a_fn, a0, a1, a2, a3, a4) a_fn(pVCpu, cbInstr, (a0), (a1), (a2), (a3), (a4))
1064
1065/** @} */
1066
1067
/** @name Opcode Decoder Function Types.
 * @{ */

# if 0 /** @todo r=bird: This upsets doxygen. Generally, these macros and types probably won't change with the target arch.
 * Nor will probably the TLB definitions. So, we need some better splitting of this code. */
/** @typedef PFNIEMOP
 * Pointer to an opcode decoder function.
 */

/** @def FNIEMOP_DEF
 * Define an opcode decoder function.
 *
 * We're using macros for this so that adding and removing parameters as well as
 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL
 *
 * @param a_Name The function name.
 */
#endif
1086
#if defined(__GNUC__) && defined(RT_ARCH_X86)
/* GCC on 32-bit x86: use the fastcall calling convention and mark the
   decoder functions as non-throwing. */
typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPUCC pVCpu);
# define FNIEMOP_DEF(a_Name) \
    IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu)
# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
    IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
    IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)

#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
/* MSC on 32-bit x86: fastcall convention; the functions may longjmp. */
typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPUCC pVCpu);
# define FNIEMOP_DEF(a_Name) \
    IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
    IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) IEM_NOEXCEPT_MAY_LONGJMP
# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
    IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) IEM_NOEXCEPT_MAY_LONGJMP

#elif defined(__GNUC__) && !defined(IEM_WITH_THROW_CATCH)
/* GCC elsewhere, longjmp (not throw/catch) based: default convention,
   marked non-throwing. */
typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
# define FNIEMOP_DEF(a_Name) \
    IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu)
# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
    IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
    IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)

#else
/* Generic fallback (incl. IEM_WITH_THROW_CATCH builds). */
typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
# define FNIEMOP_DEF(a_Name) \
    IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
    IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) IEM_NOEXCEPT_MAY_LONGJMP
# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
    IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) IEM_NOEXCEPT_MAY_LONGJMP

#endif
1124
/**
 * Call an opcode decoder function.
 *
 * We're using macros for this so that adding and removing parameters can be
 * done as we please. See FNIEMOP_DEF.
 */
#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)

/**
 * Call a common opcode decoder function taking one extra argument.
 *
 * We're using macros for this so that adding and removing parameters can be
 * done as we please. See FNIEMOP_DEF_1.
 */
#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)

/**
 * Call a common opcode decoder function taking two extra arguments.
 *
 * We're using macros for this so that adding and removing parameters can be
 * done as we please. See FNIEMOP_DEF_2.
 */
#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
/** @} */
1149
1150
/** @name Misc Helpers
 * @{ */

/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
 * due to GCC lacking knowledge about the value range of a switch. */
#if RT_CPLUSPLUS_PREREQ(202000)
# define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: [[unlikely]] AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
#else
# define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
#endif

/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
#if RT_CPLUSPLUS_PREREQ(202000)
# define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: [[unlikely]] AssertFailedReturn(a_RetValue)
#else
# define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
#endif

/**
 * Returns VERR_IEM_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
 * occasion.
 */
#ifdef LOG_ENABLED
# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
    do { \
        /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
        return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
    } while (0)
#else
# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
    return VERR_IEM_ASPECT_NOT_IMPLEMENTED
#endif

/**
 * Returns VERR_IEM_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
 * occasion using the supplied logger statement.
 *
 * @param a_LoggerArgs What to log on failure.
 */
#ifdef LOG_ENABLED
# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
    do { \
        LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
        /*LogFunc(a_LoggerArgs);*/ \
        return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
    } while (0)
#else
# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
    return VERR_IEM_ASPECT_NOT_IMPLEMENTED
#endif

/** @} */
1203
/** Slow-path worker for refreshing the pending-breakpoint state of @a pVCpu.
 *  NOTE(review): purpose inferred from the name only -- confirm against the
 *  definition in the IEM implementation files. */
void iemInitPendingBreakpointsSlow(PVMCPUCC pVCpu);
1205
1206
/** @name Raising Exceptions.
 *
 * NOTE(review): these prototypes still use x86 terminology (u8Vector, uErr,
 * uCr2, math/SIMD FP faults) even though this is the ARMv8 header --
 * presumably carried over from the x86 variant; confirm against the ARMv8
 * implementation.
 * @{ */
VBOXSTRICTRC iemRaiseXcptOrInt(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t u8Vector, uint32_t fFlags,
                               uint16_t uErr, uint64_t uCr2) RT_NOEXCEPT;
/* Longjmp'ing variant of iemRaiseXcptOrInt (never returns). */
DECL_NO_RETURN(void) iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t u8Vector,
                                          uint32_t fFlags, uint16_t uErr, uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP;
VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT;
VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT;
VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT;
VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT;
DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP;
VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT;
VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT;
DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT;

/* C implementation workers used by the IEMOP_RAISE_XXX macros below. */
IEM_CIMPL_DEF_0(iemCImplRaiseDivideError);
IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode);

/**
 * Macro for calling iemCImplRaiseDivideError().
 *
 * This enables us to add/remove arguments and force different levels of
 * inlining as we wish.
 *
 * @return Strict VBox status code.
 */
#define IEMOP_RAISE_DIVIDE_ERROR_RET() IEM_MC_DEFER_TO_CIMPL_0_RET(iemCImplRaiseDivideError)

/**
 * Macro for calling iemCImplRaiseInvalidOpcode().
 *
 * This enables us to add/remove arguments and force different levels of
 * inlining as we wish.
 *
 * @return Strict VBox status code.
 */
#define IEMOP_RAISE_INVALID_OPCODE_RET() IEM_MC_DEFER_TO_CIMPL_0_RET(iemCImplRaiseInvalidOpcode)
/** @} */
1246
/** @name Memory access.
 *
 * Functions suffixed 'Jmp' are declared IEM_NOEXCEPT_MAY_LONGJMP and report
 * failure by longjmp'ing instead of returning a VBOXSTRICTRC.
 * @{ */

/* Mapping, committing and rollback of guest memory accesses. */
VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
                       uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT;
VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT;
#ifndef IN_RING3
VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT;
#endif
void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT;
VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT;
VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess, uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT;

/* Data fetchers, status-code returning variants. */
VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchDataD80(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
/* Data fetchers, longjmp'ing variants. */
uint8_t iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint16_t iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint32_t iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint64_t iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;

/* System table fetchers. */
VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;

/* Data stores, status-code returning variants. */
VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT;
/* Data stores, longjmp'ing variants. */
void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;

/* Stack push/pop helpers. */
VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
                                         void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
                                        void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
                                           void const **ppvMem, uint64_t uCurNewRsp) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
/** @} */
1321
1322RT_C_DECLS_END
1323
1324#endif /* !VMM_INCLUDED_SRC_include_IEMInternal_armv8_h */
1325
注意: 瀏覽 TracBrowser 來幫助您使用儲存庫瀏覽器

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette