VirtualBox

source: vbox/trunk/src/VBox/VMM/include/IEMN8veRecompilerTlbLookup.h @ 105167

Last change on this file since 105167 was 105167, checked in by vboxsync, 9 months ago

VMM/IEM: Fixed incorrect type for the fAlignMaskAndClt parameter of iemNativeEmitTlbLookup(), introduced in r163594. bugref:10687

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 60.4 KB
 
1/* $Id: IEMN8veRecompilerTlbLookup.h 105167 2024-07-05 21:33:33Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - Native Recompiler TLB Lookup Code Emitter.
4 */
5
6/*
7 * Copyright (C) 2023-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28#ifndef VMM_INCLUDED_SRC_include_IEMN8veRecompilerTlbLookup_h
29#define VMM_INCLUDED_SRC_include_IEMN8veRecompilerTlbLookup_h
30#ifndef RT_WITHOUT_PRAGMA_ONCE
31# pragma once
32#endif
33
34#include "IEMN8veRecompiler.h"
35#include "IEMN8veRecompilerEmit.h"
36
37
38/** @defgroup grp_iem_n8ve_re_tlblookup Native Recompiler TLB Lookup Code Emitter
39 * @ingroup grp_iem_n8ve_re
40 * @{
41 */
42
43/*
44 * TLB Lookup config.
45 */
46#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_ARM64)
47# define IEMNATIVE_WITH_TLB_LOOKUP
48#endif
49#ifdef IEMNATIVE_WITH_TLB_LOOKUP
50# define IEMNATIVE_WITH_TLB_LOOKUP_FETCH
51#endif
52#ifdef IEMNATIVE_WITH_TLB_LOOKUP
53# define IEMNATIVE_WITH_TLB_LOOKUP_STORE
54#endif
55#ifdef IEMNATIVE_WITH_TLB_LOOKUP
56# define IEMNATIVE_WITH_TLB_LOOKUP_MAPPED
57#endif
58#ifdef IEMNATIVE_WITH_TLB_LOOKUP
59# define IEMNATIVE_WITH_TLB_LOOKUP_PUSH
60#endif
61#ifdef IEMNATIVE_WITH_TLB_LOOKUP
62# define IEMNATIVE_WITH_TLB_LOOKUP_POP
63#endif
64
65
66/**
67 * This must be instantiated *before* branching off to the lookup code,
68 * so that register spilling and whatnot happens for everyone.
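 *
 * Illustrative usage sketch (editor's addition; the exact label handling and
 * the tlbmiss fallback path live in the callers and are only hinted at here):
 * @code
 *    IEMNATIVEEMITTLBSTATE const TlbState(pReNative, &off, idxVarGCPtrMem, iSegReg, cbMem);
 *    // ... define idxLabelTlbLookup/idxLabelTlbMiss and emit the tlbmiss fallback ...
 *    off = iemNativeEmitTlbLookup<true>(pReNative, off, &TlbState, iSegReg, cbMem,  // data TLB
 *                                       fAlignMaskAndCtl, fAccess, idxLabelTlbLookup,
 *                                       idxLabelTlbMiss, idxRegMemResult);
 *    // ... emit the actual memory access using idxRegMemResult ...
 *    TlbState.freeRegsAndReleaseVars(pReNative, idxVarGCPtrMem);
 * @endcode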
69 */
70typedef struct IEMNATIVEEMITTLBSTATE
71{
72 bool const fSkip;
73 uint8_t const idxRegPtrHlp; /**< We don't support immediate variables with register assignment, so this is a tmp reg alloc. */
74 uint8_t const idxRegPtr;
75 uint8_t const idxRegSegBase;
76 uint8_t const idxRegSegLimit;
77 uint8_t const idxRegSegAttrib;
78 uint8_t const idxReg1;
79 uint8_t const idxReg2;
80#if defined(RT_ARCH_ARM64)
81 uint8_t const idxReg3;
82/** @def IEMNATIVE_WITH_TLB_LOOKUP_LOAD_STORE_PAIR
83 * Use LDP and STP to reduce the number of instructions accessing memory at the
84 * cost of using more registers. This will typically reduce the number of
85 * instructions emitted as well.
86 * @todo Profile this and ensure that it performs the same or better.
87 */
88# define IEMNATIVE_WITH_TLB_LOOKUP_LOAD_STORE_PAIR
89# ifdef IEMNATIVE_WITH_TLB_LOOKUP_LOAD_STORE_PAIR
90 uint8_t const idxReg4;
91 uint8_t const idxReg5;
92# endif
93#endif
94 uint64_t const uAbsPtr;
95
96 IEMNATIVEEMITTLBSTATE(PIEMRECOMPILERSTATE a_pReNative, uint32_t *a_poff, uint8_t a_idxVarGCPtrMem,
97 uint8_t a_iSegReg, uint8_t a_cbMem, uint8_t a_offDisp = 0)
98#ifdef IEMNATIVE_WITH_TLB_LOOKUP
99 /* 32-bit and 64-bit wraparound will require special handling, so skip these for absolute addresses. */
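 /* (Editor's illustrative example: an immediate GCPtrMem of 0xfffffffffffffff8 with
 cbMem=16 would wrap in 64-bit mode, so fSkip becomes true and the lookup is skipped.) */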
100 : fSkip( a_pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(a_idxVarGCPtrMem)].enmKind
101 == kIemNativeVarKind_Immediate
102 && ( (a_pReNative->fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT
103 ? (uint64_t)(UINT32_MAX - a_cbMem - a_offDisp)
104 : (uint64_t)(UINT64_MAX - a_cbMem - a_offDisp))
105 < a_pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(a_idxVarGCPtrMem)].u.uValue)
106#else
107 : fSkip(true)
108#endif
109#if defined(RT_ARCH_AMD64) /* got good immediate encoding, otherwise we just load the address in a reg immediately. */
110 , idxRegPtrHlp(UINT8_MAX)
111#else
112 , idxRegPtrHlp( a_pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(a_idxVarGCPtrMem)].enmKind
113 != kIemNativeVarKind_Immediate
114 || fSkip
115 ? UINT8_MAX
116 : iemNativeRegAllocTmpImm(a_pReNative, a_poff,
117 a_pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(a_idxVarGCPtrMem)].u.uValue))
118#endif
119 , idxRegPtr( a_pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(a_idxVarGCPtrMem)].enmKind
120 != kIemNativeVarKind_Immediate
121 && !fSkip
122 ? iemNativeVarRegisterAcquire(a_pReNative, a_idxVarGCPtrMem, a_poff,
123 true /*fInitialized*/, IEMNATIVE_CALL_ARG2_GREG)
124 : idxRegPtrHlp)
125 , idxRegSegBase(a_iSegReg == UINT8_MAX || fSkip
126 ? UINT8_MAX
127 : iemNativeRegAllocTmpForGuestReg(a_pReNative, a_poff, IEMNATIVEGSTREG_SEG_BASE(a_iSegReg)))
128 , idxRegSegLimit((a_iSegReg == UINT8_MAX || (a_pReNative->fExec & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_64BIT) || fSkip
129 ? UINT8_MAX
130 : iemNativeRegAllocTmpForGuestReg(a_pReNative, a_poff, IEMNATIVEGSTREG_SEG_LIMIT(a_iSegReg)))
131 , idxRegSegAttrib((a_iSegReg == UINT8_MAX || (a_pReNative->fExec & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_64BIT) || fSkip
132 ? UINT8_MAX
133 : iemNativeRegAllocTmpForGuestReg(a_pReNative, a_poff, IEMNATIVEGSTREG_SEG_ATTRIB(a_iSegReg)))
134 , idxReg1(!fSkip ? iemNativeRegAllocTmp(a_pReNative, a_poff) : UINT8_MAX)
135 , idxReg2(!fSkip ? iemNativeRegAllocTmp(a_pReNative, a_poff) : UINT8_MAX)
136#if defined(RT_ARCH_ARM64)
137 , idxReg3(!fSkip ? iemNativeRegAllocTmp(a_pReNative, a_poff) : UINT8_MAX)
138# ifdef IEMNATIVE_WITH_TLB_LOOKUP_LOAD_STORE_PAIR
139 , idxReg4(!fSkip ? iemNativeRegAllocTmp(a_pReNative, a_poff) : UINT8_MAX)
140 , idxReg5(!fSkip ? iemNativeRegAllocTmp(a_pReNative, a_poff) : UINT8_MAX)
141# endif
142#endif
143 , uAbsPtr( a_pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(a_idxVarGCPtrMem)].enmKind
144 != kIemNativeVarKind_Immediate
145 || fSkip
146 ? UINT64_MAX
147 : a_pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(a_idxVarGCPtrMem)].u.uValue)
148
149 {
150 RT_NOREF(a_cbMem, a_offDisp);
151 }
152
153 /* Alternative constructor for PUSH and POP where we don't have a GCPtrMem
154 variable, only a register derived from the guest RSP. */
155 IEMNATIVEEMITTLBSTATE(PIEMRECOMPILERSTATE a_pReNative, uint8_t a_idxRegPtr, uint32_t *a_poff,
156 uint8_t a_iSegReg, uint8_t a_cbMem)
157#ifdef IEMNATIVE_WITH_TLB_LOOKUP
158 : fSkip(false)
159#else
160 : fSkip(true)
161#endif
162 , idxRegPtrHlp(UINT8_MAX)
163 , idxRegPtr(a_idxRegPtr)
164 , idxRegSegBase(a_iSegReg == UINT8_MAX || fSkip
165 ? UINT8_MAX
166 : iemNativeRegAllocTmpForGuestReg(a_pReNative, a_poff, IEMNATIVEGSTREG_SEG_BASE(a_iSegReg)))
167 , idxRegSegLimit((a_iSegReg == UINT8_MAX || (a_pReNative->fExec & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_64BIT) || fSkip
168 ? UINT8_MAX
169 : iemNativeRegAllocTmpForGuestReg(a_pReNative, a_poff, IEMNATIVEGSTREG_SEG_LIMIT(a_iSegReg)))
170 , idxRegSegAttrib((a_iSegReg == UINT8_MAX || (a_pReNative->fExec & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_64BIT) || fSkip
171 ? UINT8_MAX
172 : iemNativeRegAllocTmpForGuestReg(a_pReNative, a_poff, IEMNATIVEGSTREG_SEG_ATTRIB(a_iSegReg)))
173 , idxReg1(!fSkip ? iemNativeRegAllocTmp(a_pReNative, a_poff) : UINT8_MAX)
174 , idxReg2(!fSkip ? iemNativeRegAllocTmp(a_pReNative, a_poff) : UINT8_MAX)
175#if defined(RT_ARCH_ARM64)
176 , idxReg3(!fSkip ? iemNativeRegAllocTmp(a_pReNative, a_poff) : UINT8_MAX)
177# ifdef IEMNATIVE_WITH_TLB_LOOKUP_LOAD_STORE_PAIR
178 , idxReg4(!fSkip ? iemNativeRegAllocTmp(a_pReNative, a_poff) : UINT8_MAX)
179 , idxReg5(!fSkip ? iemNativeRegAllocTmp(a_pReNative, a_poff) : UINT8_MAX)
180# endif
181#endif
182 , uAbsPtr(UINT64_MAX)
183
184 {
185 RT_NOREF_PV(a_cbMem);
186 }
187
188 /* Alternative constructor for code TLB lookups where we implicitly use the
189 guest RIP (PC) register rather than a GCPtrMem variable. */
190 IEMNATIVEEMITTLBSTATE(PIEMRECOMPILERSTATE a_pReNative, bool a_fFlat, uint32_t *a_poff)
191#ifdef IEMNATIVE_WITH_TLB_LOOKUP
192 : fSkip(false)
193#else
194 : fSkip(true)
195#endif
196 , idxRegPtrHlp(UINT8_MAX)
197 , idxRegPtr(iemNativeRegAllocTmpForGuestReg(a_pReNative, a_poff, kIemNativeGstReg_Pc))
198 , idxRegSegBase(a_fFlat || fSkip
199 ? UINT8_MAX
200 : iemNativeRegAllocTmpForGuestReg(a_pReNative, a_poff, IEMNATIVEGSTREG_SEG_BASE(X86_SREG_CS)))
201 , idxRegSegLimit(/*a_fFlat || fSkip
202 ? UINT8_MAX
203 : iemNativeRegAllocTmpForGuestReg(a_pReNative, a_poff, IEMNATIVEGSTREG_SEG_LIMIT(X86_SREG_CS))*/
204 UINT8_MAX)
205 , idxRegSegAttrib(UINT8_MAX)
206 , idxReg1(!fSkip ? iemNativeRegAllocTmp(a_pReNative, a_poff) : UINT8_MAX)
207 , idxReg2(!fSkip ? iemNativeRegAllocTmp(a_pReNative, a_poff) : UINT8_MAX)
208#if defined(RT_ARCH_ARM64)
209 , idxReg3(!fSkip ? iemNativeRegAllocTmp(a_pReNative, a_poff) : UINT8_MAX)
210# ifdef IEMNATIVE_WITH_TLB_LOOKUP_LOAD_STORE_PAIR
211 , idxReg4(!fSkip ? iemNativeRegAllocTmp(a_pReNative, a_poff) : UINT8_MAX)
212 , idxReg5(!fSkip ? iemNativeRegAllocTmp(a_pReNative, a_poff) : UINT8_MAX)
213# endif
214#endif
215 , uAbsPtr(UINT64_MAX)
216
217 {
218 }
219
220 void freeRegsAndReleaseVars(PIEMRECOMPILERSTATE a_pReNative, uint8_t idxVarGCPtrMem = UINT8_MAX, bool fIsCode = false) const
221 {
222 if (!fIsCode)
223 {
224 if (idxRegPtr != UINT8_MAX)
225 {
226 if (idxRegPtrHlp == UINT8_MAX)
227 {
228 if (idxVarGCPtrMem != UINT8_MAX)
229 iemNativeVarRegisterRelease(a_pReNative, idxVarGCPtrMem);
230 }
231 else
232 {
233 Assert(idxRegPtrHlp == idxRegPtr);
234 iemNativeRegFreeTmpImm(a_pReNative, idxRegPtrHlp);
235 }
236 }
237 else
238 Assert(idxRegPtrHlp == UINT8_MAX);
239 }
240 else
241 {
242 Assert(idxVarGCPtrMem == UINT8_MAX);
243 Assert(idxRegPtrHlp == UINT8_MAX);
244 iemNativeRegFreeTmp(a_pReNative, idxRegPtr); /* RIP */
245 }
246 if (idxRegSegBase != UINT8_MAX)
247 iemNativeRegFreeTmp(a_pReNative, idxRegSegBase);
248 if (idxRegSegLimit != UINT8_MAX)
249 iemNativeRegFreeTmp(a_pReNative, idxRegSegLimit);
250 if (idxRegSegAttrib != UINT8_MAX)
251 iemNativeRegFreeTmp(a_pReNative, idxRegSegAttrib);
252#if defined(RT_ARCH_ARM64)
253# ifdef IEMNATIVE_WITH_TLB_LOOKUP_LOAD_STORE_PAIR
254 iemNativeRegFreeTmp(a_pReNative, idxReg5);
255 iemNativeRegFreeTmp(a_pReNative, idxReg4);
256# endif
257 iemNativeRegFreeTmp(a_pReNative, idxReg3);
258#endif
259 iemNativeRegFreeTmp(a_pReNative, idxReg2);
260 iemNativeRegFreeTmp(a_pReNative, idxReg1);
261
262 }
263
264 uint32_t getRegsNotToSave() const
265 {
266 if (!fSkip)
267 return RT_BIT_32(idxReg1)
268 | RT_BIT_32(idxReg2)
269#if defined(RT_ARCH_ARM64)
270 | RT_BIT_32(idxReg3)
271# ifdef IEMNATIVE_WITH_TLB_LOOKUP_LOAD_STORE_PAIR
272 | RT_BIT_32(idxReg4)
273 | RT_BIT_32(idxReg5)
274# endif
275#endif
276 ;
277 return 0;
278 }
279
280 /** This is only for avoiding assertions. */
281 uint32_t getActiveRegsWithShadows(bool fCode = false) const
282 {
283#ifdef VBOX_STRICT
284 if (!fSkip)
285 return (idxRegSegBase != UINT8_MAX ? RT_BIT_32(idxRegSegBase) : 0)
286 | (idxRegSegLimit != UINT8_MAX ? RT_BIT_32(idxRegSegLimit) : 0)
287 | (idxRegSegAttrib != UINT8_MAX ? RT_BIT_32(idxRegSegAttrib) : 0)
288 | (fCode ? RT_BIT_32(idxRegPtr) : 0);
289#else
290 RT_NOREF_PV(fCode);
291#endif
292 return 0;
293 }
294} IEMNATIVEEMITTLBSTATE;
295
296DECLASM(void) iemNativeHlpAsmSafeWrapCheckTlbLookup(void);
297
298
299#ifdef IEMNATIVE_WITH_TLB_LOOKUP
300/**
301 *
302 * @returns New @a off value.
 303 * @param pReNative The native recompiler state.
 304 * @param off The current offset into the instruction buffer.
 305 * @param pTlbState The TLB lookup state set up by the caller.
 306 * @param iSegReg The segment register, UINT8_MAX if not applicable.
 307 * @param cbMem The size of the access in bytes.
 308 * @param fAlignMaskAndCtl The low 8 bits are the alignment mask, i.e. a
 309 * 128-bit aligned access passes 15. This is only
 310 * applied to ring-3 code, when dictated by the
 311 * control bits and for atomic accesses.
 312 *
 313 * The other bits are used for alignment control:
 314 * - IEM_MEMMAP_F_ALIGN_GP
 315 * - IEM_MEMMAP_F_ALIGN_SSE
 316 * - IEM_MEMMAP_F_ALIGN_GP_OR_AC
 317 * Any non-zero upper bits mean we will go to
 318 * tlbmiss on anything out of alignment according
 319 * to the mask in the low 8 bits (see the note below).
 320 * @param fAccess The access flags (IEM_ACCESS_XXX).
 321 * @param idxLabelTlbLookup The TLB lookup code label, defined by this function.
 322 * @param idxLabelTlbMiss The TLB miss code label to jump to on failure.
 323 * @param idxRegMemResult The register to return the result in (a host address for data TLB lookups, a guest physical address for code TLB lookups).
 324 * @param offDisp Optional displacement to add to the address, defaults to zero.
 325 * @tparam a_fDataTlb Whether this is a data TLB lookup (true) or a code TLB lookup (false).
 326 * @tparam a_fNoReturn Whether to skip producing a result in idxRegMemResult (used by iemNativeEmitBltLoadTlbAfterBranch).
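 *
 * @note Editor's illustrative examples (assumed, not taken from the callers):
 *       a 128-bit SSE access requiring 16 byte alignment would pass
 *       15 | IEM_MEMMAP_F_ALIGN_SSE, while a plain 4 byte access would pass
 *       just 3, which is only honoured for atomics or when AC applies.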
327 */
328template<bool const a_fDataTlb, bool const a_fNoReturn = false>
329DECL_INLINE_THROW(uint32_t)
330iemNativeEmitTlbLookup(PIEMRECOMPILERSTATE pReNative, uint32_t off, IEMNATIVEEMITTLBSTATE const * const pTlbState,
331 uint8_t iSegReg, uint8_t cbMem, uint32_t fAlignMaskAndCtl, uint32_t fAccess,
332 uint32_t idxLabelTlbLookup, uint32_t idxLabelTlbMiss, uint8_t idxRegMemResult,
333 uint8_t offDisp = 0)
334{
335 Assert(!pTlbState->fSkip);
336 uint32_t const offVCpuTlb = a_fDataTlb ? RT_UOFFSETOF(VMCPUCC, iem.s.DataTlb) : RT_UOFFSETOF(VMCPUCC, iem.s.CodeTlb);
337# if defined(RT_ARCH_AMD64)
338 uint8_t * const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 512);
339# elif defined(RT_ARCH_ARM64)
340 uint32_t * const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 96);
341# endif
342
343 /*
344 * The expand down check isn't used all that much, so we emit it here to keep
345 * the lookup straighter.
346 */
347 /* check_expand_down: ; complicated! */
348 uint32_t const offCheckExpandDown = off;
349 uint32_t offFixupLimitDone = 0;
350 if (a_fDataTlb && iSegReg != UINT8_MAX && (pReNative->fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
351 {
352 off = iemNativeEmitBrkEx(pCodeBuf, off, 1); /** @todo this needs testing */
353 /* cmp seglim, regptr */
354 if (pTlbState->idxRegPtr != UINT8_MAX && offDisp == 0)
355 off = iemNativeEmitCmpGpr32WithGprEx(pCodeBuf, off, pTlbState->idxRegSegLimit, pTlbState->idxRegPtr);
356 else if (pTlbState->idxRegPtr == UINT8_MAX)
357 off = iemNativeEmitCmpGpr32WithImmEx(pCodeBuf, off, pTlbState->idxRegSegLimit,
358 (uint32_t)(pTlbState->uAbsPtr + offDisp));
359 else if (cbMem == 1)
360 off = iemNativeEmitCmpGpr32WithGprEx(pCodeBuf, off, pTlbState->idxRegSegLimit, pTlbState->idxReg2);
361 else
362 { /* use idxRegMemResult to calc the displaced address. */
363 off = iemNativeEmitGpr32EqGprPlusImmEx(pCodeBuf, off, idxRegMemResult, pTlbState->idxRegPtr, offDisp);
364 off = iemNativeEmitCmpGpr32WithGprEx(pCodeBuf, off, pTlbState->idxRegSegLimit, idxRegMemResult);
365 }
366 /* ja tlbmiss */
367 off = iemNativeEmitJccToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss, kIemNativeInstrCond_nbe);
368
369 /* reg1 = segattr & X86DESCATTR_D (0x4000) */
370 off = iemNativeEmitGpr32EqGprAndImmEx(pCodeBuf, off, pTlbState->idxReg1, pTlbState->idxRegSegAttrib, X86DESCATTR_D);
371 /* xor reg1, X86DESCATTR_D */
372 off = iemNativeEmitXorGpr32ByImmEx(pCodeBuf, off, pTlbState->idxReg1, X86DESCATTR_D);
373 /* shl reg1, 2 (16 - 14) */
374 AssertCompile((X86DESCATTR_D << 2) == UINT32_C(0x10000));
375 off = iemNativeEmitShiftGpr32LeftEx(pCodeBuf, off, pTlbState->idxReg1, 2);
376 /* dec reg1 (=> 0xffff if D=0; 0xffffffff if D=1) */
377 off = iemNativeEmitSubGpr32ImmEx(pCodeBuf, off, pTlbState->idxReg1, 1);
378 /* cmp reg1, reg2 (64-bit) / imm (32-bit) */
379 if (pTlbState->idxRegPtr != UINT8_MAX)
380 off = iemNativeEmitCmpGprWithGprEx(pCodeBuf, off, pTlbState->idxReg1,
381 cbMem > 1 || offDisp != 0 ? pTlbState->idxReg2 : pTlbState->idxRegPtr);
382 else
383 off = iemNativeEmitCmpGpr32WithImmEx(pCodeBuf, off, pTlbState->idxReg1,
384 (uint32_t)(pTlbState->uAbsPtr + offDisp + cbMem - 1)); /* fSkip=true on overflow. */
385 /* jbe tlbmiss */
386 off = iemNativeEmitJccToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss, kIemNativeInstrCond_be);
387 /* jmp limitdone */
388 offFixupLimitDone = off;
389 off = iemNativeEmitJmpToFixedEx(pCodeBuf, off, off + 256 /* force near */);
390 }
391
392 /*
393 * Snippet for checking whether misaligned accesses are within the
394 * page (see step 2).
395 *
396 * This sequence is 1 instruction longer than the strict alignment test,
397 * and since most accesses are correctly aligned it is better to do it
398 * this way. Runs of r163597 seem to indicate there was a regression
399 * when placing this code in the main code flow.
400 */
401 uint8_t const idxRegFlatPtr = iSegReg != UINT8_MAX || pTlbState->idxRegPtr == UINT8_MAX || offDisp != 0
402 ? idxRegMemResult : pTlbState->idxRegPtr; /* (not immediately ready for tlblookup use) */
403 uint8_t const fAlignMask = a_fDataTlb ? (uint8_t)fAlignMaskAndCtl : 0;
404 if (a_fDataTlb)
405 {
406 Assert(!(fAlignMaskAndCtl & ~(UINT32_C(0xff) | IEM_MEMMAP_F_ALIGN_SSE | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_GP_OR_AC)));
407 Assert(RT_IS_POWER_OF_TWO(fAlignMask + 1U));
408 Assert(cbMem == fAlignMask + 1U || !(fAccess & IEM_ACCESS_ATOMIC));
409 Assert(cbMem < 128); /* alignment test assumptions */
410 }
411
412 uint32_t offMisalignedAccess = UINT32_MAX;
413 uint32_t offFixupMisalignedAccessJmpBack = UINT32_MAX;
414 if ( a_fDataTlb
415 && !(fAlignMaskAndCtl & ~UINT32_C(0xff))
416 && !(fAccess & IEM_ACCESS_ATOMIC)
417 && cbMem > 1
418 && RT_IS_POWER_OF_TWO(cbMem)
419 && !(pReNative->fExec & IEM_F_X86_AC))
420 {
421 /* tlbmisaligned: */
422 offMisalignedAccess = off;
423 /* reg1 = regflat & 0xfff */
424 off = iemNativeEmitGpr32EqGprAndImmEx(pCodeBuf, off, pTlbState->idxReg1,/*=*/ idxRegFlatPtr,/*&*/ GUEST_PAGE_OFFSET_MASK);
425 /* cmp reg1, GUEST_PAGE_SIZE - cbMem */
426 off = iemNativeEmitCmpGpr32WithImmEx(pCodeBuf, off, pTlbState->idxReg1, GUEST_PAGE_SIZE - cbMem);
427 /* jbe short jmpback */
428 offFixupMisalignedAccessJmpBack = off;
429 off = iemNativeEmitJccToFixedEx(pCodeBuf, off, off + 256 /*near*/, kIemNativeInstrCond_be);
430# ifdef IEM_WITH_TLB_STATISTICS
431 off = iemNativeEmitIncU32CounterInVCpuEx(pCodeBuf, off, pTlbState->idxReg1, pTlbState->idxReg2,
432 offVCpuTlb + RT_UOFFSETOF(IEMTLB, cTlbNativeMissCrossPage));
433# endif
434 off = iemNativeEmitJmpToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss);
435 }
436
437 /* The ODD TLB entry is checked last when CR4.PGE=0 or when not in ring-0. */
438 bool const fEvenFirst = (pReNative->fExec & IEM_F_X86_CPL_MASK) != 0
439 || !(pReNative->pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE);
440 bool const fIncCheckAltTlbe = (pReNative->fExec & IEM_F_X86_CPL_MASK) == 0;
441
442 /*
443 * Snippet for checking the alternative TLBE entry when CR4.PGE=1 and
444 * for doing statistics.
445 *
446 * This code assists step 3c, so look down there for register assignments.
447 */
448 /* checkalttlbe_and_missedtagstats: */
449 uint32_t const offCheckAltTlbeAndMissedTagStats = off;
450 uint32_t offFixupCheckAltTlbeJmpBack = UINT32_MAX / 2;
451 if (fIncCheckAltTlbe)
452 {
453# ifdef RT_ARCH_AMD64
454 /* Update pTlbe: reg2 = fEvenFirst ? reg2 + sizeof(IEMTLBENTRY) : reg2 - sizeof(IEMTLBENTRY); */
455 pCodeBuf[off++] = X86_OP_REX_W | (pTlbState->idxReg2 < 8 ? 0 : X86_OP_REX_R | X86_OP_REX_B);
456 pCodeBuf[off++] = 0x8d; /* LEA r64,m64 */
457 off = iemNativeEmitGprByGprDisp(pCodeBuf, off, pTlbState->idxReg2, pTlbState->idxReg2,
458 fEvenFirst ? (int32_t)sizeof(IEMTLBENTRY) : -(int32_t)sizeof(IEMTLBENTRY));
459
460 /* reg1 = reg1 & ~IEMTLB_REVISION_MASK; */
461 off = iemNativeEmitShiftGprLeftEx(pCodeBuf, off, pTlbState->idxReg1, 16 + GUEST_PAGE_SHIFT);
462 off = iemNativeEmitShiftGprRightEx(pCodeBuf, off, pTlbState->idxReg1, 16 + GUEST_PAGE_SHIFT);
463 /* or reg1, [qword pVCpu->iem.s.DataTlb.uTlbRevisionGlobal/uTlbRevision] */
464 pCodeBuf[off++] = pTlbState->idxReg1 < 8 ? X86_OP_REX_W : X86_OP_REX_W | X86_OP_REX_R;
465 pCodeBuf[off++] = 0x0b; /* OR r64,r/m64 */
466 off = iemNativeEmitGprByVCpuDisp(pCodeBuf, off, pTlbState->idxReg1,
467 fEvenFirst ? offVCpuTlb + RT_UOFFSETOF(IEMTLB, uTlbRevisionGlobal)
468 : offVCpuTlb + RT_UOFFSETOF(IEMTLB, uTlbRevision));
469
470 /* cmp reg1, [reg2] */
471 pCodeBuf[off++] = X86_OP_REX_W | (pTlbState->idxReg1 < 8 ? 0 : X86_OP_REX_R) | (pTlbState->idxReg2 < 8 ? 0 : X86_OP_REX_B);
472 pCodeBuf[off++] = 0x3b;
473 off = iemNativeEmitGprByGprDisp(pCodeBuf, off, pTlbState->idxReg1, pTlbState->idxReg2, RT_UOFFSETOF(IEMTLBENTRY, uTag));
474
475# elif defined(RT_ARCH_ARM64)
476 /* reg3 = uTlbRevision/uTlbRevisionGlobal; (We've ditched reg4 already, so have to get it via pVCpu.) */
477 off = iemNativeEmitLoadGprFromVCpuU64Ex(pCodeBuf, off, pTlbState->idxReg3,
478 fEvenFirst ? offVCpuTlb + RT_UOFFSETOF(IEMTLB, uTlbRevisionGlobal)
479 : offVCpuTlb + RT_UOFFSETOF(IEMTLB, uTlbRevision));
480
481 /* reg1 = reg1 & ~IEMTLB_REVISION_MASK; */
482 AssertCompile(UINT64_C(0x0000000fffffffff) == ~IEMTLB_REVISION_MASK);
483 Assert(Armv8A64ConvertImmRImmS2Mask64(0x63, 0) == ~IEMTLB_REVISION_MASK);
484 pCodeBuf[off++] = Armv8A64MkInstrAndImm(pTlbState->idxReg1, pTlbState->idxReg1, 0x63, 0);
485
486 /* reg1 |= reg3 (uTlbRevision/uTlbRevisionGlobal); */
487 pCodeBuf[off++] = Armv8A64MkInstrOrr(pTlbState->idxReg1, pTlbState->idxReg1, pTlbState->idxReg3);
488
489 /* reg2 = reg2 +/- sizeof(IEMTLBENTRY); via preindexing.
490 reg3 = uTag; [pair: reg4 = fFlagsAndPhysRev;] */
491 AssertCompileMemberOffset(IEMTLBENTRY, uTag, 0);
492# ifdef IEMNATIVE_WITH_TLB_LOOKUP_LOAD_STORE_PAIR
493 AssertCompileAdjacentMembers(IEMTLBENTRY, uTag, fFlagsAndPhysRev);
494 pCodeBuf[off++] = Armv8A64MkInstrLdPairGpr(pTlbState->idxReg3, pTlbState->idxReg4, pTlbState->idxReg2,
495 fEvenFirst ? (int)sizeof(IEMTLBENTRY) / 8 : -(int)sizeof(IEMTLBENTRY) / 8,
496 kArm64InstrStLdPairType_PreIndex);
497# else
498 pCodeBuf[off++] = Armv8A64MkInstrStrLdrPreIndex9(kArmv8A64InstrLdStType_Ld_Dword, pTlbState->idxReg3, pTlbState->idxReg2,
499 fEvenFirst ? (int)sizeof(IEMTLBENTRY) / 8 : -(int)sizeof(IEMTLBENTRY) / 8);
500# endif
501 /* cmp reg1, reg3; (uRev | Hash(FlatPtr), IEMTLBENTRY::uTag)*/
502 off = iemNativeEmitCmpGprWithGprEx(pCodeBuf, off, pTlbState->idxReg1, pTlbState->idxReg3);
503
504# else
505# error "portme"
506# endif
507 /* je near jumpback_checkalttlbe */
508 offFixupCheckAltTlbeJmpBack = off;
509 off = iemNativeEmitJccToFixedEx(pCodeBuf, off, off + 256, kIemNativeInstrCond_e);
510 }
511
512# ifdef IEM_WITH_TLB_STATISTICS
513 /* inc stat */
514 off = iemNativeEmitIncStamCounterInVCpuEx(pCodeBuf, off, pTlbState->idxReg1, pTlbState->idxReg2,
515 offVCpuTlb + RT_UOFFSETOF(IEMTLB, cTlbNativeMissTag));
516# endif
517# ifndef IEM_WITH_TLB_STATISTICS
518 if (fIncCheckAltTlbe)
519# endif
520 off = iemNativeEmitJmpToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss);
521 off = iemNativeEmitBrkEx(pCodeBuf, off, 0x7679);
522
523 /*
524 * tlblookup:
525 */
526 iemNativeLabelDefine(pReNative, idxLabelTlbLookup, off);
527# if defined(RT_ARCH_ARM64) && 0
528 off = iemNativeEmitBrkEx(pCodeBuf, off, 0);
529# endif
530
531 /*
532 * 1. Segmentation.
533 *
534 * 1a. Check segment limit and attributes if non-flat 32-bit code. This is complicated.
535 *
536 * This can be skipped for code TLB lookups because limit is checked by jmp, call,
537 * ret, and iret prior to making it. It is also checked by the helpers prior to
538 * doing TLB loading.
539 */
540 if (a_fDataTlb && iSegReg != UINT8_MAX && (pReNative->fExec & IEM_F_MODE_CPUMODE_MASK) != IEMMODE_64BIT)
541 {
542 /* Check that we've got a segment loaded and that it allows the access.
543 For write access this means a writable data segment.
544 For read-only accesses this means a readable code segment or any data segment. */
545 if (fAccess & IEM_ACCESS_TYPE_WRITE)
546 {
547 uint32_t const fMustBe1 = X86DESCATTR_P | X86DESCATTR_DT | X86_SEL_TYPE_WRITE;
548 uint32_t const fMustBe0 = X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE;
549 /* reg1 = segattrs & (must1|must0) */
550 off = iemNativeEmitGpr32EqGprAndImmEx(pCodeBuf, off, pTlbState->idxReg1,
551 pTlbState->idxRegSegAttrib, fMustBe1 | fMustBe0);
552 /* cmp reg1, must1 */
553 AssertCompile(fMustBe1 <= UINT16_MAX);
554 off = iemNativeEmitCmpGpr32WithImmEx(pCodeBuf, off, pTlbState->idxReg1, fMustBe1);
555 /* jne tlbmiss */
556 off = iemNativeEmitJccToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss, kIemNativeInstrCond_ne);
557 }
558 else
559 {
560 /* U | !P |!DT |!CD | RW |
561 16 | 8 | 4 | 3 | 1 |
562 -------------------------------
563 0 | 0 | 0 | 0 | 0 | execute-only code segment. - must be excluded
564 0 | 0 | 0 | 0 | 1 | execute-read code segment.
565 0 | 0 | 0 | 1 | 0 | read-only data segment.
566 0 | 0 | 0 | 1 | 1 | read-write data segment. - last valid combination
567 */
568 /* reg1 = segattrs & (relevant attributes) */
569 off = iemNativeEmitGpr32EqGprAndImmEx(pCodeBuf, off, pTlbState->idxReg1, pTlbState->idxRegSegAttrib,
570 X86DESCATTR_UNUSABLE | X86DESCATTR_P | X86DESCATTR_DT
571 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE);
572 /* xor reg1, X86DESCATTR_P | X86DESCATTR_DT | X86_SEL_TYPE_CODE ; place C=1 RW=0 at the bottom & limit the range.
573 ; EO-code=0, ER-code=2, RO-data=8, RW-data=10 */
574#ifdef RT_ARCH_ARM64
575 off = iemNativeEmitXorGpr32ByImmEx(pCodeBuf, off, pTlbState->idxReg1, X86DESCATTR_DT | X86_SEL_TYPE_CODE);
576 off = iemNativeEmitXorGpr32ByImmEx(pCodeBuf, off, pTlbState->idxReg1, X86DESCATTR_P);
577#else
578 off = iemNativeEmitXorGpr32ByImmEx(pCodeBuf, off, pTlbState->idxReg1,
579 X86DESCATTR_P | X86DESCATTR_DT | X86_SEL_TYPE_CODE);
580#endif
581 /* sub reg1, X86_SEL_TYPE_WRITE ; EO-code=-2, ER-code=0, RO-data=6, RW-data=8 */
582 off = iemNativeEmitSubGpr32ImmEx(pCodeBuf, off, pTlbState->idxReg1, X86_SEL_TYPE_WRITE /* ER-code */);
583 /* cmp reg1, X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE */
584 AssertCompile(X86_SEL_TYPE_CODE == 8);
585 off = iemNativeEmitCmpGpr32WithImmEx(pCodeBuf, off, pTlbState->idxReg1, X86_SEL_TYPE_CODE);
586 /* ja tlbmiss */
587 off = iemNativeEmitJccToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss, kIemNativeInstrCond_nbe);
588 }
589
590 /* If we're accessing more than one byte or if we're working with a non-zero offDisp,
591 put the last address we'll be accessing in idxReg2 (64-bit). */
592 if ((cbMem > 1 || offDisp != 0) && pTlbState->idxRegPtr != UINT8_MAX)
593 {
594 if (!offDisp)
595 /* reg2 = regptr + cbMem - 1; 64-bit result so we can fend off wraparounds/overflows. */
596 off = iemNativeEmitGprEqGprPlusImmEx(pCodeBuf, off, pTlbState->idxReg2,/*=*/ pTlbState->idxRegPtr,/*+*/ cbMem - 1);
597 else
598 {
599 /* reg2 = (uint32_t)(regptr + offDisp) + cbMem - 1;. */
600 off = iemNativeEmitGpr32EqGprPlusImmEx(pCodeBuf, off,
601 pTlbState->idxReg2,/*=*/ pTlbState->idxRegPtr,/*+*/ + offDisp);
602 off = iemNativeEmitAddGprImmEx(pCodeBuf, off, pTlbState->idxReg2, cbMem - 1);
603 }
604 }
605
606 /*
607 * Check the limit. If this is a write access, we know that it's a
608 * data segment and includes the expand_down bit. For read-only accesses
609 * we need to check that code/data=0 and expanddown=1 before continuing.
610 */
611 if (fAccess & IEM_ACCESS_TYPE_WRITE)
612 {
613 /* test segattrs, X86_SEL_TYPE_DOWN */
614 AssertCompile(X86_SEL_TYPE_DOWN < 128);
615 off = iemNativeEmitTestAnyBitsInGpr8Ex(pCodeBuf, off, pTlbState->idxRegSegAttrib, X86_SEL_TYPE_DOWN);
616 /* jnz check_expand_down */
617 off = iemNativeEmitJccToFixedEx(pCodeBuf, off, offCheckExpandDown, kIemNativeInstrCond_ne);
618 }
619 else
620 {
621 /* reg1 = segattr & (code | down) */
622 off = iemNativeEmitGpr32EqGprAndImmEx(pCodeBuf, off, pTlbState->idxReg1,
623 pTlbState->idxRegSegAttrib, X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN);
624 /* cmp reg1, down */
625 off = iemNativeEmitCmpGpr32WithImmEx(pCodeBuf, off, pTlbState->idxReg1, X86_SEL_TYPE_DOWN);
626 /* je check_expand_down */
627 off = iemNativeEmitJccToFixedEx(pCodeBuf, off, offCheckExpandDown, kIemNativeInstrCond_e);
628 }
629
630 /* expand_up:
631 cmp seglim, regptr/reg2/imm */
632 if (pTlbState->idxRegPtr != UINT8_MAX)
633 off = iemNativeEmitCmpGprWithGprEx(pCodeBuf, off, pTlbState->idxRegSegLimit,
634 cbMem > 1 || offDisp != 0 ? pTlbState->idxReg2 : pTlbState->idxRegPtr);
635 else
636 off = iemNativeEmitCmpGpr32WithImmEx(pCodeBuf, off, pTlbState->idxRegSegLimit,
637 (uint32_t)pTlbState->uAbsPtr + offDisp + cbMem - 1U); /* fSkip=true on overflow. */
638 /* jbe tlbmiss */
639 off = iemNativeEmitJccToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss, kIemNativeInstrCond_be);
640
641 /* limitdone: */
642 iemNativeFixupFixedJump(pReNative, offFixupLimitDone, off);
643 }
644
645 /* 1b. Add the segment base. We use idxRegMemResult for the ptr register if
646 this step is required or if the address is a constant (simplicity) or
647 if offDisp is non-zero. */
648 if (iSegReg != UINT8_MAX)
649 {
650 Assert(idxRegFlatPtr != pTlbState->idxRegPtr);
651 /* regflat = segbase + regptr/imm */
652 if ((pReNative->fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_64BIT)
653 {
654 Assert(iSegReg >= X86_SREG_FS);
655 if (pTlbState->idxRegPtr != UINT8_MAX)
656 {
657 off = iemNativeEmitGprEqGprPlusGprEx(pCodeBuf, off, idxRegFlatPtr, pTlbState->idxRegSegBase, pTlbState->idxRegPtr);
658 if (offDisp != 0)
659 off = iemNativeEmitAddGprImmEx(pCodeBuf, off, idxRegFlatPtr, offDisp);
660 }
661 else
662 off = iemNativeEmitGprEqGprPlusImmEx(pCodeBuf, off, idxRegFlatPtr, pTlbState->idxRegSegBase,
663 pTlbState->uAbsPtr + offDisp);
664 }
665 else if (pTlbState->idxRegPtr != UINT8_MAX)
666 {
667 off = iemNativeEmitGpr32EqGprPlusGprEx(pCodeBuf, off, idxRegFlatPtr, pTlbState->idxRegSegBase, pTlbState->idxRegPtr);
668 if (offDisp != 0)
669 off = iemNativeEmitAddGpr32ImmEx(pCodeBuf, off, idxRegFlatPtr, offDisp);
670 }
671 else
672 off = iemNativeEmitGpr32EqGprPlusImmEx(pCodeBuf, off, idxRegFlatPtr,
673 pTlbState->idxRegSegBase, (uint32_t)pTlbState->uAbsPtr + offDisp);
674 }
675 else if (pTlbState->idxRegPtr == UINT8_MAX)
676 {
677 if ((pReNative->fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_64BIT)
678 off = iemNativeEmitLoadGprImmEx(pCodeBuf, off, idxRegFlatPtr, pTlbState->uAbsPtr + offDisp);
679 else
680 off = iemNativeEmitLoadGpr32ImmEx(pCodeBuf, off, idxRegFlatPtr, (uint32_t)pTlbState->uAbsPtr + offDisp);
681 }
682 else if (offDisp != 0)
683 {
684 Assert(idxRegFlatPtr != pTlbState->idxRegPtr);
685 if ((pReNative->fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_64BIT)
686 off = iemNativeEmitGprEqGprPlusImmEx(pCodeBuf, off, idxRegFlatPtr, pTlbState->idxRegPtr, offDisp);
687 else
688 off = iemNativeEmitGpr32EqGprPlusImmEx(pCodeBuf, off, idxRegFlatPtr, pTlbState->idxRegPtr, offDisp);
689 }
690 else
691 Assert(idxRegFlatPtr == pTlbState->idxRegPtr);
692
693 /*
694 * 2. Check that the address doesn't cross a page boundary and doesn't
695 * have alignment issues (not applicable to code).
696 *
697 * For regular accesses (non-SSE/AVX & atomic stuff) we only need to
698 * check for #AC in ring-3 code. To simplify this, the need for AC
699 * checking is indicated by IEM_F_X86_AC in IEMCPU::fExec.
700 *
701 * The caller informs us about SSE/AVX aligned accesses via the
702 * upper bits of fAlignMaskAndCtl and atomic accesses via fAccess.
703 */
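 /* (Editor's note: concretely, the access stays within one page iff
 (flatptr & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - cbMem, which is what
 the compares against GUEST_PAGE_SIZE - cbMem below implement.) */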
704 if (a_fDataTlb)
705 {
706 if (offMisalignedAccess != UINT32_MAX)
707 {
708#ifdef RT_ARCH_ARM64
709 if (cbMem == 2)
710 {
711 /* tbnz regflatptr, #0, tlbmiss */
712 pCodeBuf[off++] = Armv8A64MkInstrTbnz((int32_t)offMisalignedAccess - (int32_t)off, idxRegFlatPtr, 0);
713 }
714 else
715#endif
716 {
717 /* test regflat, fAlignMask */
718 off = iemNativeEmitTestAnyBitsInGpr8Ex(pCodeBuf, off, idxRegFlatPtr, cbMem - 1);
719 /* jnz tlbmiss */
720 off = iemNativeEmitJccToFixedEx(pCodeBuf, off, offMisalignedAccess, kIemNativeInstrCond_ne);
721 }
722 /** @todo ARM64: two byte access checks can be reduced to a single instruction */
723 iemNativeFixupFixedJump(pReNative, offFixupMisalignedAccessJmpBack, off);
724 }
725 else
726 {
727 /*
728 * 2a. Strict alignment check using fAlignMask for atomic, strictly
729 * aligned stuff (SSE & AVX) and AC=1 (ring-3).
730 */
731 bool const fStrictAlignmentCheck = fAlignMask
732 && ( (fAlignMaskAndCtl & ~UINT32_C(0xff))
733 || (fAccess & IEM_ACCESS_ATOMIC)
734 || (pReNative->fExec & IEM_F_X86_AC) );
735 if (fStrictAlignmentCheck)
736 {
737 /* test regflat, fAlignMask */
738 off = iemNativeEmitTestAnyBitsInGpr8Ex(pCodeBuf, off, idxRegFlatPtr, fAlignMask);
739
740#ifndef IEM_WITH_TLB_STATISTICS
741 /* jnz tlbmiss */
742 off = iemNativeEmitJccToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss, kIemNativeInstrCond_ne);
743#else
744 /* jz 1F; inc stat; jmp tlbmiss */
745 uint32_t const offFixup1 = off;
746 off = iemNativeEmitJccToFixedEx(pCodeBuf, off, off + 16, kIemNativeInstrCond_e);
747 off = iemNativeEmitIncStamCounterInVCpuEx(pCodeBuf, off, pTlbState->idxReg1, pTlbState->idxReg2,
748 offVCpuTlb + RT_UOFFSETOF(IEMTLB, cTlbNativeMissAlignment));
749 off = iemNativeEmitJmpToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss);
750 iemNativeFixupFixedJump(pReNative, offFixup1, off);
751#endif
752 }
753
754 /*
755 * 2b. Check that it's not crossing a page boundary if the access is
756 * larger than the alignment mask or if we didn't do the strict
757 * alignment check above.
758 */
759 if ( cbMem > 1
760 && ( !fStrictAlignmentCheck
761 || cbMem > fAlignMask + 1U))
762 {
763 /* reg1 = regflat & 0xfff */
764 off = iemNativeEmitGpr32EqGprAndImmEx(pCodeBuf, off, pTlbState->idxReg1,/*=*/ idxRegFlatPtr,/*&*/ GUEST_PAGE_OFFSET_MASK);
765 /* cmp reg1, GUEST_PAGE_SIZE - cbMem */
766 off = iemNativeEmitCmpGpr32WithImmEx(pCodeBuf, off, pTlbState->idxReg1, GUEST_PAGE_SIZE - cbMem);
767#ifndef IEM_WITH_TLB_STATISTICS
768 /* ja tlbmiss */
769 off = iemNativeEmitJccToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss, kIemNativeInstrCond_nbe);
770#else
771 /* jbe 1F; inc stat; jmp tlbmiss */
772 uint32_t const offFixup1 = off;
773 off = iemNativeEmitJccToFixedEx(pCodeBuf, off, off + 16, kIemNativeInstrCond_be);
774 off = iemNativeEmitIncU32CounterInVCpuEx(pCodeBuf, off, pTlbState->idxReg1, pTlbState->idxReg2,
775 offVCpuTlb + RT_UOFFSETOF(IEMTLB, cTlbNativeMissCrossPage));
776 off = iemNativeEmitJmpToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss);
777 iemNativeFixupFixedJump(pReNative, offFixup1, off);
778#endif
779 }
780 }
781 }
782 else
783 Assert(fAlignMaskAndCtl == 0);
784
785 /*
786 * 3. TLB lookup.
787 *
788 * 3a. Calculate the TLB tag value (IEMTLB_CALC_TAG_NO_REV).
789 * In 64-bit mode we will also check for non-canonical addresses here.
790 */
791 if ((pReNative->fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_64BIT)
792 {
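 /* (Editor's note: the sequences below branch to tlbmiss unless bits 63:48 of
 the flat address are all zeroes or all ones, i.e. the quick non-canonical
 address check.) */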
793# if defined(RT_ARCH_AMD64)
794 /* mov reg1, regflat */
795 off = iemNativeEmitLoadGprFromGprEx(pCodeBuf, off, pTlbState->idxReg1, idxRegFlatPtr);
796 /* rol reg1, 16 */
797 off = iemNativeEmitRotateGprLeftEx(pCodeBuf, off, pTlbState->idxReg1, 16);
798 /** @todo Would 'movsx reg2, word reg1' and working on reg2 in dwords be faster? */
799 /* inc word reg1 */
800 pCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
801 if (pTlbState->idxReg1 >= 8)
802 pCodeBuf[off++] = X86_OP_REX_B;
803 pCodeBuf[off++] = 0xff;
804 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, pTlbState->idxReg1 & 7);
805 /* cmp word reg1, 1 */
806 pCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
807 if (pTlbState->idxReg1 >= 8)
808 pCodeBuf[off++] = X86_OP_REX_B;
809 pCodeBuf[off++] = 0x83;
810 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 7, pTlbState->idxReg1 & 7);
811 pCodeBuf[off++] = 1;
812# ifndef IEM_WITH_TLB_STATISTICS
813 /* ja tlbmiss */
814 off = iemNativeEmitJccToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss, kIemNativeInstrCond_nbe);
815# else
816 /* jbe 1F; inc stat; jmp tlbmiss */
817 uint32_t const offFixup1 = off;
818 off = iemNativeEmitJccToFixedEx(pCodeBuf, off, off + 16, kIemNativeInstrCond_be);
819 off = iemNativeEmitIncU32CounterInVCpuEx(pCodeBuf, off, pTlbState->idxReg1, pTlbState->idxReg2,
820 offVCpuTlb + RT_UOFFSETOF(IEMTLB, cTlbNativeMissNonCanonical));
821 off = iemNativeEmitJmpToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss);
822 iemNativeFixupFixedJump(pReNative, offFixup1, off);
823# endif
824 /* shr reg1, 16 + GUEST_PAGE_SHIFT */
825 off = iemNativeEmitShiftGprRightEx(pCodeBuf, off, pTlbState->idxReg1, 16 + GUEST_PAGE_SHIFT);
826
827# elif defined(RT_ARCH_ARM64)
828 /* lsr reg1, regflat, #48 */
829 pCodeBuf[off++] = Armv8A64MkInstrLsrImm(pTlbState->idxReg1, idxRegFlatPtr, 48);
830 /* add reg1, reg1, #1 */
831 pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(pTlbState->idxReg1, pTlbState->idxReg1, 1, false /*f64Bit*/);
832 /* tst reg1, #0xfffe */
833 Assert(Armv8A64ConvertImmRImmS2Mask32(14, 31) == 0xfffe);
834 pCodeBuf[off++] = Armv8A64MkInstrTstImm(pTlbState->idxReg1, 14, 31, false /*f64Bit*/);
835# ifndef IEM_WITH_TLB_STATISTICS
836 /* b.ne tlbmiss */
837 off = iemNativeEmitJccToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss, kIemNativeInstrCond_ne);
838# else
839 /* b.eq 1F; inc stat; jmp tlbmiss */
840 uint32_t const offFixup1 = off;
841 off = iemNativeEmitJccToFixedEx(pCodeBuf, off, off + 16, kIemNativeInstrCond_e);
842 off = iemNativeEmitIncU32CounterInVCpuEx(pCodeBuf, off, pTlbState->idxReg1, pTlbState->idxReg2,
843 offVCpuTlb + RT_UOFFSETOF(IEMTLB, cTlbNativeMissNonCanonical));
844 off = iemNativeEmitJmpToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss);
845 iemNativeFixupFixedJump(pReNative, offFixup1, off);
846# endif
847
848 /* ubfx reg1, regflat, #12, #36 */
849 pCodeBuf[off++] = Armv8A64MkInstrUbfx(pTlbState->idxReg1, idxRegFlatPtr, GUEST_PAGE_SHIFT, 48 - GUEST_PAGE_SHIFT);
850# else
851# error "Port me"
852# endif
853 }
854 else
855 {
856 /* reg1 = (uint32_t)(regflat >> 12) */
857 off = iemNativeEmitGpr32EqGprShiftRightImmEx(pCodeBuf, off, pTlbState->idxReg1, idxRegFlatPtr, GUEST_PAGE_SHIFT);
858 }
859
860 /* or reg1, [qword pVCpu->iem.s.DataTlb.uTlbRevision] */
861# if defined(RT_ARCH_AMD64)
862 pCodeBuf[off++] = pTlbState->idxReg1 < 8 ? X86_OP_REX_W : X86_OP_REX_W | X86_OP_REX_R;
863 pCodeBuf[off++] = 0x0b; /* OR r64,r/m64 */
864 off = iemNativeEmitGprByVCpuDisp(pCodeBuf, off, pTlbState->idxReg1,
865 fEvenFirst ? offVCpuTlb + RT_UOFFSETOF(IEMTLB, uTlbRevision)
866 : offVCpuTlb + RT_UOFFSETOF(IEMTLB, uTlbRevisionGlobal));
867# else
868# ifdef IEMNATIVE_WITH_TLB_LOOKUP_LOAD_STORE_PAIR
869 /* Load uTlbRevision[Global] into reg3 and uTlbPhysRev into reg5.
870 We load the pointer for IEMTLB::aEntries[!fEvenFirst] into reg4 and use
871 it for addressing here and later when calculating pTlbe (saves one
872 instruction, simplifies odd-first). */
873 AssertCompileMemberAlignment(IEMTLB, uTlbRevision, 16); /* It is said that misaligned pair loads don't perform well. */
874 AssertCompileAdjacentMembers(IEMTLB, uTlbRevision, uTlbPhysRev);
875 AssertCompileAdjacentMembers(IEMTLB, uTlbPhysRev, uTlbRevisionGlobal);
876 AssertCompile(RTASSERT_OFFSET_OF(IEMTLB, uTlbPhysRev) < RTASSERT_OFFSET_OF(IEMTLB, aEntries));
877 AssertCompile(RTASSERT_OFFSET_OF(VMCPUCC, iem.s.DataTlb.aEntries) < _64K);
878 uint32_t const offEntries = offVCpuTlb + RT_UOFFSETOF(IEMTLB, aEntries) + (fEvenFirst ? 0 : sizeof(IEMTLBENTRY));
879 if (offEntries < _64K)
880 {
881 pCodeBuf[off++] = Armv8A64MkInstrMovZ(pTlbState->idxReg4, offEntries);
882 pCodeBuf[off++] = Armv8A64MkInstrAddReg(pTlbState->idxReg4, IEMNATIVE_REG_FIXED_PVMCPU, pTlbState->idxReg4);
883 }
884 else
885 {
886 AssertCompileMemberAlignment(VMCPUCC, iem.s.CodeTlb.aEntries, 32);
887 AssertCompileMemberAlignment(IEMTLB, aEntries, 32);
888 AssertCompileSizeAlignment(IEMTLBENTRY, 32);
889 AssertCompile(RTASSERT_OFFSET_OF(VMCPUCC, iem.s.CodeTlb.aEntries) < _64K*32U);
890
891 pCodeBuf[off++] = Armv8A64MkInstrMovZ(pTlbState->idxReg4, offEntries >> 5);
892 pCodeBuf[off++] = Armv8A64MkInstrAddReg(pTlbState->idxReg4, IEMNATIVE_REG_FIXED_PVMCPU, pTlbState->idxReg4,
893 true /*64Bit*/, false /*fSetFlags*/, 5 /*cShift*/, kArmv8A64InstrShift_Lsl);
894 }
895 AssertCompile(RTASSERT_OFFSET_OF(IEMTLB, aEntries) < 64U*8U - sizeof(IEMTLBENTRY));
896 if (fEvenFirst)
897 pCodeBuf[off++] = Armv8A64MkInstrLdPairGpr(pTlbState->idxReg3, pTlbState->idxReg5, pTlbState->idxReg4,
898 (RT_OFFSETOF(IEMTLB, uTlbRevision) - RT_OFFSETOF(IEMTLB, aEntries)) / 8);
899 else /* This isn't 128-bit aligned, hope that doesn't hurt too much... */
900 pCodeBuf[off++] = Armv8A64MkInstrLdPairGpr(pTlbState->idxReg5, pTlbState->idxReg3, pTlbState->idxReg4,
901 ( RT_OFFSETOF(IEMTLB, uTlbPhysRev) - RT_OFFSETOF(IEMTLB, aEntries)
902 - (int)sizeof(IEMTLBENTRY)) / 8);
903# else
904 off = iemNativeEmitLoadGprFromVCpuU64Ex(pCodeBuf, off, pTlbState->idxReg3,
905 fEvenFirst ? offVCpuTlb + RT_UOFFSETOF(IEMTLB, uTlbRevision)
906 : offVCpuTlb + RT_UOFFSETOF(IEMTLB, uTlbRevisionGlobal));
907# endif
908 off = iemNativeEmitOrGprByGprEx(pCodeBuf, off, pTlbState->idxReg1, pTlbState->idxReg3);
909# endif
910
911 /*
912 * 3b. Calc pTlbe.
913 */
914# if !defined(RT_ARCH_ARM64) || !defined(IEMNATIVE_WITH_TLB_LOOKUP_LOAD_STORE_PAIR)
915 uint32_t const offTlbEntriesAdjusted = offVCpuTlb + RT_UOFFSETOF(IEMTLB, aEntries) + (fEvenFirst ? 0 : sizeof(IEMTLBENTRY));
916# endif
917# if defined(RT_ARCH_AMD64)
918# if IEMTLB_ENTRY_COUNT == 256
919 /* movzx reg2, byte reg1 */
920 off = iemNativeEmitLoadGprFromGpr8Ex(pCodeBuf, off, pTlbState->idxReg2, pTlbState->idxReg1);
921# else
922 /* mov reg2, reg1 */
923 off = iemNativeEmitLoadGprFromGpr32Ex(pCodeBuf, off, pTlbState->idxReg2, pTlbState->idxReg1);
924 /* and reg2, IEMTLB_ENTRY_COUNT - 1U */
925 off = iemNativeEmitAndGpr32ByImmEx(pCodeBuf, off, pTlbState->idxReg2, IEMTLB_ENTRY_COUNT - 1U);
926# endif
927 /* shl reg2, 6 ; reg2 *= sizeof(IEMTLBENTRY) * 2 */
928 AssertCompileSize(IEMTLBENTRY, 32);
929 off = iemNativeEmitShiftGprLeftEx(pCodeBuf, off, pTlbState->idxReg2, 6);
930 /* lea reg2, [&pVCpu->iem.s.DataTlb.aEntries[!fEvenFirst] + reg2] */
931 AssertCompile(IEMNATIVE_REG_FIXED_PVMCPU < 8);
932 pCodeBuf[off++] = pTlbState->idxReg2 < 8 ? X86_OP_REX_W : X86_OP_REX_W | X86_OP_REX_X | X86_OP_REX_R;
933 pCodeBuf[off++] = 0x8d;
934 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM4, pTlbState->idxReg2 & 7, 4 /*SIB*/);
935 pCodeBuf[off++] = X86_SIB_MAKE(IEMNATIVE_REG_FIXED_PVMCPU & 7, pTlbState->idxReg2 & 7, 0);
936 pCodeBuf[off++] = RT_BYTE1(offTlbEntriesAdjusted);
937 pCodeBuf[off++] = RT_BYTE2(offTlbEntriesAdjusted);
938 pCodeBuf[off++] = RT_BYTE3(offTlbEntriesAdjusted);
939 pCodeBuf[off++] = RT_BYTE4(offTlbEntriesAdjusted);
940
941# elif defined(RT_ARCH_ARM64)
942 /* reg2 = (reg1 & tlbmask) << 6 */
943 AssertCompileSize(IEMTLBENTRY, 32);
944 pCodeBuf[off++] = Armv8A64MkInstrUbfiz(pTlbState->idxReg2, pTlbState->idxReg1, 6, IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO);
945# ifdef IEMNATIVE_WITH_TLB_LOOKUP_LOAD_STORE_PAIR
946 /* reg2 += &pVCpu->iem.s.[Data|Code]Tlb.aEntries[!fEvenFirst] */
947 pCodeBuf[off++] = Armv8A64MkInstrAddReg(pTlbState->idxReg2, pTlbState->idxReg2, pTlbState->idxReg4);
948# else
949 /* reg2 += offsetof(VMCPUCC, iem.s.DataTlb.aEntries[!fEvenFirst]) */
950 off = iemNativeEmitAddGprImmEx(pCodeBuf, off, pTlbState->idxReg2, offTlbEntriesAdjusted, pTlbState->idxReg3 /*iGprTmp*/);
951 /* reg2 += pVCpu */
952 off = iemNativeEmitAddTwoGprsEx(pCodeBuf, off, pTlbState->idxReg2, IEMNATIVE_REG_FIXED_PVMCPU);
953# endif
954# else
955# error "Port me"
956# endif
957
958 /*
959 * 3c. Compare the TLBE.uTag with the one from 2a (reg1).
960 */
961# if defined(RT_ARCH_AMD64)
962 /* cmp reg1, [reg2] */
963 pCodeBuf[off++] = X86_OP_REX_W | (pTlbState->idxReg1 < 8 ? 0 : X86_OP_REX_R) | (pTlbState->idxReg2 < 8 ? 0 : X86_OP_REX_B);
964 pCodeBuf[off++] = 0x3b;
965 off = iemNativeEmitGprByGprDisp(pCodeBuf, off, pTlbState->idxReg1, pTlbState->idxReg2, RT_UOFFSETOF(IEMTLBENTRY, uTag));
966# elif defined(RT_ARCH_ARM64)
967 /* reg3 = uTag; [pair: reg4 = fFlagsAndPhysRev;] */
968# ifdef IEMNATIVE_WITH_TLB_LOOKUP_LOAD_STORE_PAIR
969 AssertCompileMemberAlignment(IEMTLBENTRY, uTag, 16); /* It is said that misaligned pair loads don't perform well. */
970 AssertCompile(RT_UOFFSETOF(IEMTLBENTRY, uTag) + sizeof(uint64_t) == RT_UOFFSETOF(IEMTLBENTRY, fFlagsAndPhysRev));
971 pCodeBuf[off++] = Armv8A64MkInstrLdPairGpr(pTlbState->idxReg3, pTlbState->idxReg4,
972 pTlbState->idxReg2, RT_UOFFSETOF(IEMTLBENTRY, uTag) / 8);
973# else
974 off = iemNativeEmitLoadGprByGprU64Ex(pCodeBuf, off, pTlbState->idxReg3, pTlbState->idxReg2, RT_UOFFSETOF(IEMTLBENTRY, uTag));
975# endif
976 off = iemNativeEmitCmpGprWithGprEx(pCodeBuf, off, pTlbState->idxReg1, pTlbState->idxReg3);
977# else
978# error "Port me"
979# endif
980 /* jne checkalttlbe_and_missedtagstats */
981# ifndef IEM_WITH_TLB_STATISTICS
982 if (!fIncCheckAltTlbe)
983 off = iemNativeEmitJccToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss, kIemNativeInstrCond_ne);
984 else
985# endif
986 {
987 off = iemNativeEmitJccToFixedEx(pCodeBuf, off, offCheckAltTlbeAndMissedTagStats, kIemNativeInstrCond_ne);
988 if (fIncCheckAltTlbe)
989 iemNativeFixupFixedJump(pReNative, offFixupCheckAltTlbeJmpBack, off);
990 }
991
992 /*
993 * 4. Check TLB page table level access flags and physical page revision #.
994 */
995 /* mov reg1, mask */
996 AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
997 uint64_t const fNoUser = (((pReNative->fExec >> IEM_F_X86_CPL_SHIFT) & IEM_F_X86_CPL_SMASK) + 1) & IEMTLBE_F_PT_NO_USER;
998 uint64_t fTlbe = IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PT_NO_ACCESSED
999 | fNoUser;
1000 if (fAccess & IEM_ACCESS_TYPE_EXEC)
1001 fTlbe |= IEMTLBE_F_PT_NO_EXEC /*| IEMTLBE_F_PG_NO_READ?*/;
1002 if (fAccess & IEM_ACCESS_TYPE_READ)
1003 fTlbe |= IEMTLBE_F_PG_NO_READ;
1004 if (fAccess & IEM_ACCESS_TYPE_WRITE)
1005 fTlbe |= IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY;
1006 off = iemNativeEmitLoadGprImmEx(pCodeBuf, off, pTlbState->idxReg1, fTlbe);
1007# if defined(RT_ARCH_AMD64)
1008 /* and reg1, [reg2->fFlagsAndPhysRev] */
1009 pCodeBuf[off++] = X86_OP_REX_W | (pTlbState->idxReg1 < 8 ? 0 : X86_OP_REX_R) | (pTlbState->idxReg2 < 8 ? 0 : X86_OP_REX_B);
1010 pCodeBuf[off++] = 0x23;
1011 off = iemNativeEmitGprByGprDisp(pCodeBuf, off, pTlbState->idxReg1,
1012 pTlbState->idxReg2, RT_UOFFSETOF(IEMTLBENTRY, fFlagsAndPhysRev));
1013
1014 /* cmp reg1, [pVCpu->iem.s.DataTlb.uTlbPhysRev] */
1015 pCodeBuf[off++] = X86_OP_REX_W | (pTlbState->idxReg1 < 8 ? 0 : X86_OP_REX_R);
1016 pCodeBuf[off++] = 0x3b;
1017 off = iemNativeEmitGprByGprDisp(pCodeBuf, off, pTlbState->idxReg1, IEMNATIVE_REG_FIXED_PVMCPU,
1018 offVCpuTlb + RT_UOFFSETOF(IEMTLB, uTlbPhysRev));
1019# elif defined(RT_ARCH_ARM64)
1020# ifdef IEMNATIVE_WITH_TLB_LOOKUP_LOAD_STORE_PAIR
1021 pCodeBuf[off++] = Armv8A64MkInstrAnd(pTlbState->idxReg1, pTlbState->idxReg1, pTlbState->idxReg4);
1022 off = iemNativeEmitCmpGprWithGprEx(pCodeBuf, off, pTlbState->idxReg1, pTlbState->idxReg5);
1023# else
1024 off = iemNativeEmitLoadGprByGprU64Ex(pCodeBuf, off, pTlbState->idxReg3,
1025 pTlbState->idxReg2, RT_UOFFSETOF(IEMTLBENTRY, fFlagsAndPhysRev));
1026 pCodeBuf[off++] = Armv8A64MkInstrAnd(pTlbState->idxReg1, pTlbState->idxReg1, pTlbState->idxReg3);
1027 off = iemNativeEmitLoadGprFromVCpuU64Ex(pCodeBuf, off, pTlbState->idxReg3, offVCpuTlb + RT_UOFFSETOF(IEMTLB, uTlbPhysRev));
1028 off = iemNativeEmitCmpGprWithGprEx(pCodeBuf, off, pTlbState->idxReg1, pTlbState->idxReg3);
1029# endif
1030# else
1031# error "Port me"
1032# endif
1033# ifndef IEM_WITH_TLB_STATISTICS
1034 /* jne tlbmiss */
1035 off = iemNativeEmitJccToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss, kIemNativeInstrCond_ne);
1036# else
1037 /* je 2F; inc stat; jmp tlbmiss */
1038 uint32_t const offFixup2 = off;
1039 off = iemNativeEmitJccToFixedEx(pCodeBuf, off, off + 16, kIemNativeInstrCond_e);
1040 off = iemNativeEmitIncStamCounterInVCpuEx(pCodeBuf, off, pTlbState->idxReg1, pTlbState->idxReg2,
1041 offVCpuTlb + RT_UOFFSETOF(IEMTLB, cTlbNativeMissFlagsAndPhysRev));
1042 off = iemNativeEmitJmpToLabelEx(pReNative, pCodeBuf, off, idxLabelTlbMiss);
1043 iemNativeFixupFixedJump(pReNative, offFixup2, off);
1044# endif
1045
1046 /*
1047 * 5. Check that pbMappingR3 isn't NULL (paranoia) and calculate the
1048 * resulting pointer.
1049 *
1050 * For code TLB lookups we have some more work to do here to set various
1051 * IEMCPU members and we return a GCPhys address rather than a host pointer.
1052 */
1053# if defined(RT_ARCH_ARM64) && defined(IEMNATIVE_WITH_TLB_LOOKUP_LOAD_STORE_PAIR)
1054 if (!a_fDataTlb)
1055 {
1056 /* ldp reg4, reg1, [reg2->GCPhys+pbMappingR3] */
1057 AssertCompileMemberAlignment(IEMTLBENTRY, GCPhys, 16);
1058 AssertCompileAdjacentMembers(IEMTLBENTRY, GCPhys, pbMappingR3);
1059 pCodeBuf[off++] = Armv8A64MkInstrLdPairGpr(pTlbState->idxReg4, pTlbState->idxReg1,
1060 pTlbState->idxReg2, RT_UOFFSETOF(IEMTLBENTRY, GCPhys) / 8);
1061 }
1062 else
1063# endif
1064 {
1065 /* mov reg1, [reg2->pbMappingR3] */
1066 off = iemNativeEmitLoadGprByGprU64Ex(pCodeBuf, off, pTlbState->idxReg1, pTlbState->idxReg2,
1067 RT_UOFFSETOF(IEMTLBENTRY, pbMappingR3));
1068 }
1069 /* if (!reg1) goto tlbmiss; */
1070 /** @todo eliminate the need for this test? */
1071 off = iemNativeEmitTestIfGprIsZeroAndJmpToLabelEx(pReNative, pCodeBuf, off, pTlbState->idxReg1,
1072 true /*f64Bit*/, idxLabelTlbMiss);
1073
1074 if (a_fDataTlb)
1075 {
1076 if (idxRegFlatPtr == idxRegMemResult) /* See step 1b. */
1077 {
1078 /* and result, 0xfff */
1079 off = iemNativeEmitAndGpr32ByImmEx(pCodeBuf, off, idxRegMemResult, GUEST_PAGE_OFFSET_MASK);
1080 }
1081 else
1082 {
1083 Assert(idxRegFlatPtr == pTlbState->idxRegPtr);
1084 /* result = regflat & 0xfff */
1085 off = iemNativeEmitGpr32EqGprAndImmEx(pCodeBuf, off, idxRegMemResult, idxRegFlatPtr, GUEST_PAGE_OFFSET_MASK);
1086 }
1087
1088 /* add result, reg1 */
1089 off = iemNativeEmitAddTwoGprsEx(pCodeBuf, off, idxRegMemResult, pTlbState->idxReg1);
1090 }
1091 else
1092 {
1093 /*
1094 * Code TLB use a la iemOpcodeFetchBytesJmp - keep reg2 pointing to the TLBE.
1095 *
1096 * Note. We do not need to set offCurInstrStart or offInstrNextByte.
1097 */
1098# if !defined(RT_ARCH_ARM64) || !defined(IEMNATIVE_WITH_TLB_LOOKUP_LOAD_STORE_PAIR)
1099# ifdef RT_ARCH_AMD64
1100 uint8_t const idxReg3 = UINT8_MAX;
1101# else
1102 uint8_t const idxReg3 = pTlbState->idxReg3;
1103# endif
1104 /* Set pbInstrBuf first since we've got it loaded already. */
1105 off = iemNativeEmitStoreGprToVCpuU64Ex(pCodeBuf, off, pTlbState->idxReg1,
1106 RT_UOFFSETOF(VMCPUCC, iem.s.pbInstrBuf), idxReg3);
1107 /* Set uInstrBufPc to (FlatPC & ~GUEST_PAGE_OFFSET_MASK). */
1108 off = iemNativeEmitGprEqGprAndImmEx(pCodeBuf, off, pTlbState->idxReg1, idxRegFlatPtr, ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK);
1109 off = iemNativeEmitStoreGprToVCpuU64Ex(pCodeBuf, off, pTlbState->idxReg1,
1110 RT_UOFFSETOF(VMCPUCC, iem.s.uInstrBufPc), idxReg3);
1111 /* Set cbInstrBufTotal to GUEST_PAGE_SIZE. */ /** @todo this is a simplification. Calc right size using CS.LIM and EIP? */
1112 off = iemNativeEmitStoreImmToVCpuU16Ex(pCodeBuf, off, GUEST_PAGE_SIZE, RT_UOFFSETOF(VMCPUCC, iem.s.cbInstrBufTotal),
1113 pTlbState->idxReg1, idxReg3);
1114 /* Now set GCPhysInstrBuf last as we'll be returning it in idxRegMemResult. */
1115# if defined(RT_ARCH_ARM64) && defined(IEMNATIVE_WITH_TLB_LOOKUP_LOAD_STORE_PAIR)
1116 off = iemNativeEmitStoreGprToVCpuU64Ex(pCodeBuf, off, pTlbState->idxReg4,
1117 RT_UOFFSETOF(VMCPUCC, iem.s.GCPhysInstrBuf), idxReg3);
1118# else
1119 off = iemNativeEmitLoadGprByGprU64Ex(pCodeBuf, off, pTlbState->idxReg1,
1120 pTlbState->idxReg2, RT_UOFFSETOF(IEMTLBENTRY, GCPhys));
1121 off = iemNativeEmitStoreGprToVCpuU64Ex(pCodeBuf, off, pTlbState->idxReg1,
1122 RT_UOFFSETOF(VMCPUCC, iem.s.GCPhysInstrBuf), idxReg3);
1123# endif
1124# else
1125 /* ARM64: Same as above but using STP. This ASSUMES that we can trash
1126 the 6 bytes following iem.s.cbInstrBufTotal! */
1127 AssertCompileMemberAlignment(VMCPUCC, iem.s.pbInstrBuf, 16);
1128 AssertCompileAdjacentMembers(VMCPUCC, iem.s.pbInstrBuf, iem.s.uInstrBufPc);
1129 AssertCompile(RT_UOFFSETOF(VMCPUCC, iem.s.GCPhysInstrBuf) < 512);
1130 /* idxReg1 = reg2->pbMappingR3 (see previous LDP) */
1131 /* idxReg3 = FlatPC & ~GUEST_PAGE_OFFSET_MASK. */
1132 off = iemNativeEmitGprEqGprAndImmEx(pCodeBuf, off, pTlbState->idxReg3, idxRegFlatPtr, ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK);
1133 pCodeBuf[off++] = Armv8A64MkInstrStPairGpr(pTlbState->idxReg1, pTlbState->idxReg3,
1134 IEMNATIVE_REG_FIXED_PVMCPU, RT_UOFFSETOF(VMCPUCC, iem.s.pbInstrBuf) / 8);
1135
1136 AssertCompileMemberAlignment(VMCPUCC, iem.s.GCPhysInstrBuf, 16);
1137 AssertCompileAdjacentMembers(VMCPUCC, iem.s.GCPhysInstrBuf, iem.s.cbInstrBufTotal);
1138 AssertCompile(RT_UOFFSETOF(VMCPUCC, iem.s.GCPhysInstrBuf) < 512);
1139# ifndef IEM_WITH_OPAQUE_DECODER_STATE
1140 AssertCompileAdjacentMembers(VMCPUCC, iem.s.cbInstrBufTotal, iem.s.offCurInstrStart);
1141 AssertCompileAdjacentMembers(VMCPUCC, iem.s.offCurInstrStart, iem.s.fPrefixes); /* these two will be set to ~0. */
1142# endif
1143 /* idxReg4 = reg2->GCPhys (see previous LDP) */
1144 /* idxReg3 = GUEST_PAGE_SIZE | UINT64_C(0xffffffffffff0000) */
1145 pCodeBuf[off++] = Armv8A64MkInstrMovN(pTlbState->idxReg3, ~GUEST_PAGE_SIZE & 0xffff);
1146 pCodeBuf[off++] = Armv8A64MkInstrStPairGpr(pTlbState->idxReg4, pTlbState->idxReg3,
1147 IEMNATIVE_REG_FIXED_PVMCPU, RT_UOFFSETOF(VMCPUCC, iem.s.GCPhysInstrBuf) / 8);
1148# endif
1149 if (!a_fNoReturn) /* (We skip this for iemNativeEmitBltLoadTlbAfterBranch.) */
1150 {
1151 /* Set idxRegMemResult. */
1152 if (idxRegFlatPtr == idxRegMemResult) /* See step 1b. */
1153 off = iemNativeEmitAndGpr32ByImmEx(pCodeBuf, off, idxRegMemResult, GUEST_PAGE_OFFSET_MASK);
1154 else
1155 off = iemNativeEmitGpr32EqGprAndImmEx(pCodeBuf, off, idxRegMemResult, idxRegFlatPtr, GUEST_PAGE_OFFSET_MASK);
1156# if defined(RT_ARCH_ARM64) && defined(IEMNATIVE_WITH_TLB_LOOKUP_LOAD_STORE_PAIR)
1157 off = iemNativeEmitAddTwoGprsEx(pCodeBuf, off, idxRegMemResult, pTlbState->idxReg4);
1158# else
1159 off = iemNativeEmitAddTwoGprsEx(pCodeBuf, off, idxRegMemResult, pTlbState->idxReg1);
1160# endif
1161 }
1162 }
1163
1164# if 0
1165 /*
1166 * To verify the result we call a helper function.
1167 *
1168 * It's like the state logging, so parameters are passed on the stack.
1169 * iemNativeHlpAsmSafeWrapCheckTlbLookup(pVCpu, result, addr, seg | (cbMem << 8) | (fAccess << 16))
1170 */
1171# ifdef RT_ARCH_AMD64
1172 if (a_fDataTlb)
1173 {
1174 /* push seg | (cbMem << 8) | (fAccess << 16) */
1175 pCodeBuf[off++] = 0x68;
1176 pCodeBuf[off++] = iSegReg;
1177 pCodeBuf[off++] = cbMem;
1178 pCodeBuf[off++] = RT_BYTE1(fAccess);
1179 pCodeBuf[off++] = RT_BYTE2(fAccess);
1180 /* push pTlbState->idxRegPtr / immediate address. */
1181 if (pTlbState->idxRegPtr != UINT8_MAX)
1182 {
1183 if (pTlbState->idxRegPtr >= 8)
1184 pCodeBuf[off++] = X86_OP_REX_B;
1185 pCodeBuf[off++] = 0x50 + (pTlbState->idxRegPtr & 7);
1186 }
1187 else
1188 {
1189 off = iemNativeEmitLoadGprImmEx(pCodeBuf, off, pTlbState->idxReg1, pTlbState->uAbsPtr);
1190 if (pTlbState->idxReg1 >= 8)
1191 pCodeBuf[off++] = X86_OP_REX_B;
1192 pCodeBuf[off++] = 0x50 + (pTlbState->idxReg1 & 7);
1193 }
1194 /* push idxRegMemResult */
1195 if (idxRegMemResult >= 8)
1196 pCodeBuf[off++] = X86_OP_REX_B;
1197 pCodeBuf[off++] = 0x50 + (idxRegMemResult & 7);
1198 /* push pVCpu */
1199 pCodeBuf[off++] = 0x50 + IEMNATIVE_REG_FIXED_PVMCPU;
1200 /* mov reg1, helper */
1201 off = iemNativeEmitLoadGprImmEx(pCodeBuf, off, pTlbState->idxReg1, (uintptr_t)iemNativeHlpAsmSafeWrapCheckTlbLookup);
1202 /* call [reg1] */
1203 pCodeBuf[off++] = X86_OP_REX_W | (pTlbState->idxReg1 < 8 ? 0 : X86_OP_REX_B);
1204 pCodeBuf[off++] = 0xff;
1205 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 2, pTlbState->idxReg1 & 7);
1206 /* The stack is cleaned up by helper function. */
1207 }
1208
1209# else
1210# error "Port me"
1211# endif
1212# endif
1213
1214 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1215
1216 return off;
1217}
1218#endif /* IEMNATIVE_WITH_TLB_LOOKUP */
1219
1220
1221/** @} */
1222
1223#endif /* !VMM_INCLUDED_SRC_include_IEMN8veRecompilerTlbLookup_h */
1224