VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllThreadedFunctionsBltIn.cpp@ 100731

Last change on this file since 100731 was 100731, checked in by vboxsync, 20 months ago

VMM/IEM: More on recompiling branch instruction. bugref:10369

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 23.4 KB
 
/* $Id: IEMAllThreadedFunctionsBltIn.cpp 100731 2023-07-28 22:22:22Z vboxsync $ */
/** @file
 * IEM - Instruction Decoding and Emulation, Built-in Threaded Functions.
 *
 * This is separate from IEMThreadedFunctions.cpp because it doesn't work
 * with IEM_WITH_OPAQUE_DECODER_STATE defined.
 */

/*
 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_IEM_RE_THREADED
#define VMCPU_INCL_CPUM_GST_CTX
#include <VBox/vmm/iem.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/apic.h>
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/gim.h>
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
# include <VBox/vmm/em.h>
# include <VBox/vmm/hm_svm.h>
#endif
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
# include <VBox/vmm/hmvmxinline.h>
#endif
#include <VBox/vmm/tm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/dbgftrace.h>
#include "IEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/log.h>
#include <VBox/err.h>
#include <VBox/param.h>
#include <VBox/dis.h>
#include <VBox/disopcode-x86-amd64.h>
#include <iprt/asm-math.h>
#include <iprt/assert.h>
#include <iprt/string.h>
#include <iprt/x86.h>

#include "IEMInline.h"



static VBOXSTRICTRC iemThreadeFuncWorkerObsoleteTb(PVMCPUCC pVCpu)
{
    iemThreadedTbObsolete(pVCpu, pVCpu->iem.s.pCurTbR3);
    return VINF_IEM_REEXEC_MODE_CHANGED; /** @todo different status code... */
}


/**
 * Built-in function that compares the fExec mask against uParam0.
 */
IEM_DECL_IMPL_DEF(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckMode,
                  (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2))
{
    uint32_t const fExpectedExec = (uint32_t)uParam0;
    if (pVCpu->iem.s.fExec == fExpectedExec)
        return VINF_SUCCESS;
    LogFlow(("Mode changed at %04x:%08RX64: %#x -> %#x (xor: %#x)\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
             fExpectedExec, pVCpu->iem.s.fExec, fExpectedExec ^ pVCpu->iem.s.fExec));
    RT_NOREF(uParam1, uParam2);
    return VINF_IEM_REEXEC_MODE_CHANGED;
}
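
/*
 * Context note: uParam0 carries the IEMCPU::fExec value captured when the TB
 * was compiled, so a single 32-bit compare revalidates the execution-mode
 * assumptions (CPU mode, ring, and similar flags) the translation was built
 * under; on mismatch, VINF_IEM_REEXEC_MODE_CHANGED makes execution leave the
 * TB.  (This comment is editorial context inferred from the code above, not
 * part of the upstream file.)
 */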


DECL_FORCE_INLINE(RTGCPHYS) iemTbGetRangePhysPageAddr(PCIEMTB pTb, uint8_t idxRange)
{
    Assert(idxRange < RT_MIN(pTb->cRanges, RT_ELEMENTS(pTb->aRanges)));
    uint8_t const idxPage = pTb->aRanges[idxRange].idxPhysPage;
    Assert(idxPage <= RT_ELEMENTS(pTb->aGCPhysPages));
    if (idxPage == 0)
        return pTb->GCPhysPc & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
    Assert(!(pTb->aGCPhysPages[idxPage - 1] & GUEST_PAGE_OFFSET_MASK));
    return pTb->aGCPhysPages[idxPage - 1];
}
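
/*
 * Illustrative sketch (not upstream code): a TB's opcode ranges refer to
 * guest physical pages via a small index.  Index 0 means "the page the TB
 * starts on" (derived from GCPhysPc), while indexes 1..N are biased by one
 * into the extra-pages array.  The EXAMPLETB type below is a hypothetical,
 * simplified stand-in for IEMTB.
 */
#if 0 /* example only */
#include <stdint.h>

#define EXAMPLE_PAGE_OFFSET_MASK 0xfffu

typedef struct EXAMPLETB
{
    uint64_t GCPhysPc;          /* guest physical address of the first instruction */
    uint64_t aGCPhysPages[2];   /* page-aligned addresses of any extra pages */
} EXAMPLETB;

static uint64_t exampleGetRangePhysPageAddr(EXAMPLETB const *pTb, unsigned idxPage)
{
    if (idxPage == 0)
        return pTb->GCPhysPc & ~(uint64_t)EXAMPLE_PAGE_OFFSET_MASK;
    return pTb->aGCPhysPages[idxPage - 1]; /* array entry 0 holds page index 1 */
}
#endif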


/**
 * Macro that implements the 16/32-bit CS.LIM check, as this is done by a
 * number of functions.
 */
#define BODY_CHECK_CS_LIM(a_cbInstr) do { \
        if (RT_LIKELY(pVCpu->cpum.GstCtx.eip - pVCpu->cpum.GstCtx.cs.u32Limit >= cbInstr)) \
        { /* likely */ } \
        else \
        { \
            Log7(("EIP out of bounds at %04x:%08RX32 LB %u - CS.LIM=%#RX32\n", \
                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, (a_cbInstr), pVCpu->cpum.GstCtx.cs.u32Limit)); \
            return iemRaiseGeneralProtectionFault0(pVCpu); \
        } \
    } while(0)
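
/*
 * Illustrative sketch (not upstream code): conceptually, the CS.LIM check
 * verifies that every byte of the instruction lies within the code segment
 * limit, i.e. eip + cbInstr - 1 <= CS.LIM.  A plain formulation, doing the
 * arithmetic in 64 bits to dodge 32-bit overflow:
 */
#if 0 /* example only */
#include <stdint.h>

static int exampleIsWithinCsLim(uint32_t eip, uint32_t cbInstr, uint32_t u32Limit)
{
    /* cbInstr is at least 1 for any real x86 instruction. */
    return (uint64_t)eip + cbInstr - 1 <= u32Limit; /* 1 if fully in bounds */
}
#endif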

/**
 * Macro that implements opcode (re-)checking.
 */
#define BODY_CHECK_OPCODES(a_pTb, a_idxRange, a_offRange, a_cbInstr) do { \
        Assert((a_idxRange) < (a_pTb)->cRanges && (a_pTb)->cRanges <= RT_ELEMENTS((a_pTb)->aRanges)); \
        Assert((a_offRange) < (a_pTb)->aRanges[(a_idxRange)].cbOpcodes); \
        /* We can use pbInstrBuf here as it will be updated when branching (and prior to executing a TB). */ \
        if (RT_LIKELY(memcmp(&pVCpu->iem.s.pbInstrBuf[(a_pTb)->aRanges[(a_idxRange)].offPhysPage + (a_offRange)], \
                             &(a_pTb)->pabOpcodes[   (a_pTb)->aRanges[(a_idxRange)].offOpcodes + (a_offRange)], \
                             (a_pTb)->aRanges[(a_idxRange)].cbOpcodes - (a_offRange)) == 0)) \
        { /* likely */ } \
        else \
        { \
            Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; range %u, off %#x LB %#x + %#x; #%u\n", (a_pTb), \
                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), (a_idxRange), \
                  (a_pTb)->aRanges[(a_idxRange)].offOpcodes, (a_pTb)->aRanges[(a_idxRange)].cbOpcodes, (a_offRange), __LINE__)); \
            RT_NOREF(a_cbInstr); \
            return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
        } \
    } while(0)
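
/*
 * Illustrative sketch (not upstream code): the opcode re-check above is in
 * essence a memcmp() of the bytes currently mapped at the guest PC against
 * the bytes the TB was compiled from; any difference means self-modifying or
 * remapped code, and the TB has to be thrown away.  Simplified:
 */
#if 0 /* example only */
#include <stddef.h>
#include <string.h>

static int exampleOpcodesStillValid(uint8_t const *pbCurrent, uint8_t const *pbCompiled, size_t cb)
{
    return memcmp(pbCurrent, pbCompiled, cb) == 0; /* 1 => bytes unchanged, TB still valid */
}
#endif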

/**
 * Macro that implements TLB loading and pbInstrBuf updating for an
 * instruction crossing into a new page.
 *
 * This may long jump if we're raising a \#PF, \#GP or similar trouble.
 */
#define BODY_LOAD_TLB_FOR_NEW_PAGE(a_pTb, a_offInstr, a_idxRange, a_cbInstr) do { \
        pVCpu->iem.s.pbInstrBuf       = NULL; \
        pVCpu->iem.s.offCurInstrStart = GUEST_PAGE_SIZE - (a_offInstr); \
        pVCpu->iem.s.offInstrNextByte = GUEST_PAGE_SIZE; \
        iemOpcodeFetchBytesJmp(pVCpu, 0, NULL); \
        \
        RTGCPHYS const GCPhysNewPage = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange); \
        if (RT_LIKELY(   pVCpu->iem.s.GCPhysInstrBuf == GCPhysNewPage \
                      && pVCpu->iem.s.pbInstrBuf)) \
        { /* likely */ } \
        else \
        { \
            Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; crossing at %#x; GCPhys=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), (a_offInstr), \
                  pVCpu->iem.s.GCPhysInstrBuf, GCPhysNewPage, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
            RT_NOREF(a_cbInstr); \
            return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
        } \
    } while(0)

/**
 * Macro that implements TLB loading and pbInstrBuf updating when branching or
 * when crossing a page on an instruction boundary.
 *
 * This differs from BODY_LOAD_TLB_FOR_NEW_PAGE in that it will first check if
 * it is an inter-page branch and also check the page offset.
 *
 * This may long jump if we're raising a \#PF, \#GP or similar trouble.
 */
#define BODY_LOAD_TLB_AFTER_BRANCH(a_pTb, a_idxRange, a_cbInstr) do { \
        /* Is RIP within the current code page? */ \
        Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_IS_64BIT_CODE(pVCpu)); \
        uint64_t const uPc = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base; \
        uint64_t const off = uPc - pVCpu->iem.s.uInstrBufPc; \
        if (off < pVCpu->iem.s.cbInstrBufTotal) \
        { \
            Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK)); \
            Assert(pVCpu->iem.s.pbInstrBuf); \
            RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                     | pTb->aRanges[(a_idxRange)].offPhysPage; \
            if (GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + off) \
            { /* we're good */ } \
            else if (pTb->aRanges[(a_idxRange)].offPhysPage != off) \
            { \
                Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; branching/1; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                return VINF_IEM_REEXEC_MODE_CHANGED; /** @todo new status code? */ \
            } \
            else \
            { \
                Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; branching/1; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
            } \
        } \
        else \
        { \
            /* Must translate new RIP. */ \
            pVCpu->iem.s.pbInstrBuf       = NULL; \
            pVCpu->iem.s.offCurInstrStart = 0; \
            pVCpu->iem.s.offInstrNextByte = 0; \
            iemOpcodeFetchBytesJmp(pVCpu, 0, NULL); \
            Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK) || !pVCpu->iem.s.pbInstrBuf); \
            \
            RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                     | pTb->aRanges[(a_idxRange)].offPhysPage; \
            uint64_t const offNew = uPc - pVCpu->iem.s.uInstrBufPc; \
            if (   GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + offNew \
                && pVCpu->iem.s.pbInstrBuf) \
            { /* likely */ } \
            else if (   pTb->aRanges[(a_idxRange)].offPhysPage != offNew \
                     && pVCpu->iem.s.pbInstrBuf) \
            { \
                Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; branching/2; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + offNew, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                return VINF_IEM_REEXEC_MODE_CHANGED; /** @todo new status code? */ \
            } \
            else \
            { \
                Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; branching/2; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + offNew, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
            } \
        } \
    } while(0)
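
/*
 * Illustrative sketch (not upstream code): the "is the new PC still inside
 * the current instruction buffer" test above relies on unsigned wrap-around,
 * so a single compare covers both "before the buffer" and "past the end":
 */
#if 0 /* example only */
#include <stdint.h>

static int examplePcInInstrBuf(uint64_t uPc, uint64_t uInstrBufPc, uint64_t cbInstrBufTotal)
{
    uint64_t const off = uPc - uInstrBufPc; /* wraps to a huge value if uPc < uInstrBufPc */
    return off < cbInstrBufTotal;           /* 1 if the buffer still covers uPc */
}
#endif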

/**
 * Macro that implements PC check after a conditional branch.
 */
#define BODY_CHECK_PC_AFTER_BRANCH(a_pTb, a_idxRange, a_cbInstr) do { \
        /* Is RIP within the current code page? */ \
        Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_IS_64BIT_CODE(pVCpu)); \
        uint64_t const uPc = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base; \
        uint64_t const off = uPc - pVCpu->iem.s.uInstrBufPc; \
        Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK)); \
        RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                 | pTb->aRanges[(a_idxRange)].offPhysPage; \
        if (   GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + off \
            && off < pVCpu->iem.s.cbInstrBufTotal) \
        { /* we're good */ } \
        else \
        { \
            Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; GCPhysWithOffset=%RGp hoped for %RGp, pbInstrBuf=%p - #%u\n", \
                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                  pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
            RT_NOREF(a_cbInstr); \
            return VINF_IEM_REEXEC_MODE_CHANGED; /** @todo new status code? */ \
        } \
    } while(0)


/**
 * Built-in function that checks that EIP/IP + uParam0 is within CS.LIM,
 * raising a \#GP(0) if this isn't the case.
 */
IEM_DECL_IMPL_DEF(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLim,
                  (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2))
{
    uint32_t const cbInstr = (uint32_t)uParam0;
    RT_NOREF(uParam1, uParam2);
    BODY_CHECK_CS_LIM(cbInstr);
    return VINF_SUCCESS;
}
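
/*
 * Illustrative sketch (not upstream code): every built-in threaded function
 * shares the (pVCpu, uParam0, uParam1, uParam2) signature, which lets the
 * threaded recompiler drive a TB as a flat array of {function, parameters}
 * entries.  EXAMPLECALL and exampleRunTb below are hypothetical stand-ins
 * for the real IEM dispatch machinery:
 */
#if 0 /* example only */
#include <stdint.h>

typedef int FNEXAMPLEFUNC(void *pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2);

typedef struct EXAMPLECALL
{
    FNEXAMPLEFUNC *pfn;
    uint64_t       auParams[3];
} EXAMPLECALL;

static int exampleRunTb(void *pVCpu, EXAMPLECALL const *paCalls, unsigned cCalls)
{
    for (unsigned i = 0; i < cCalls; i++)
    {
        int rc = paCalls[i].pfn(pVCpu, paCalls[i].auParams[0], paCalls[i].auParams[1], paCalls[i].auParams[2]);
        if (rc != 0) /* any non-zero status stops the TB (e.g. mode changed, TB obsolete) */
            return rc;
    }
    return 0;
}
#endif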


/**
 * Built-in function for re-checking opcodes and CS.LIM after an instruction
 * that may have modified them.
 */
IEM_DECL_IMPL_DEF(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndOpcodes,
                  (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2))
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for re-checking opcodes after an instruction that may have
 * modified them.
 */
IEM_DECL_IMPL_DEF(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckOpcodes,
                  (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2))
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    return VINF_SUCCESS;
}


/*
 * Post-branching checkers.
 */

/**
 * Built-in function for checking CS.LIM, checking the PC and checking opcodes
 * after conditional branching within the same page.
 *
 * @see iemThreadedFunc_BltIn_CheckPcAndOpcodes
 */
IEM_DECL_IMPL_DEF(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes,
                  (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2))
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking the PC and checking opcodes after conditional
 * branching within the same page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes
 */
IEM_DECL_IMPL_DEF(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckPcAndOpcodes,
                  (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2))
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * transitioning to a different code page.
 *
 * The code page transition can either be natural, flowing over onto the next
 * page (with the instruction starting at page offset zero), or effected by
 * means of branching.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb
 */
IEM_DECL_IMPL_DEF(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb,
                  (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2))
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes when transitioning to
 * a different code page.
 *
 * The code page transition can either be natural, flowing over onto the next
 * page (with the instruction starting at page offset zero), or effected by
 * means of branching.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb
 */
IEM_DECL_IMPL_DEF(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb,
                  (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2))
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}



/*
 * Natural page crossing checkers.
 */

/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes on
 * both pages when transitioning to a different code page.
 *
 * This is used when the previous instruction requires revalidation of opcode
 * bytes and the current instruction straddles a page boundary with opcode
 * bytes in both the old and new page.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb
 */
IEM_DECL_IMPL_DEF(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb,
                  (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2))
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    return VINF_SUCCESS;
}
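
/*
 * Illustrative sketch (not upstream code): for the cross-page variants the
 * translator packs two 32-bit values into uParam0 -- the instruction length
 * in the low half, and the number of instruction bytes that fall on the
 * first page in the high half.  The unpacking in the function above mirrors
 * this hypothetical helper:
 */
#if 0 /* example only */
#include <stdint.h>

static uint64_t examplePackParam0(uint32_t cbInstr, uint32_t cbStartPage)
{
    return (uint64_t)cbStartPage << 32 | cbInstr;
}
/* Unpacking:
 *   cbInstr     = (uint32_t)uParam0;
 *   cbStartPage = (uint32_t)(uParam0 >> 32);
 */
#endif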

/**
 * Built-in function for loading TLB and checking opcodes on both pages when
 * transitioning to a different code page.
 *
 * This is used when the previous instruction requires revalidation of opcode
 * bytes and the current instruction straddles a page boundary with opcode
 * bytes in both the old and new page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb
 */
IEM_DECL_IMPL_DEF(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb,
                  (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2))
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * advancing naturally to a different code page.
 *
 * Only opcodes on the new page are checked.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb
 */
IEM_DECL_IMPL_DEF(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb,
                  (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2))
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    //uint32_t const offRange1 = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    RT_NOREF(uParam2);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes when advancing
 * naturally to a different code page.
 *
 * Only opcodes on the new page are checked.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb
 */
IEM_DECL_IMPL_DEF(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb,
                  (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2))
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    //uint32_t const offRange1 = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    RT_NOREF(uParam2);
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * advancing naturally to a different code page with first instr at byte 0.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb
 */
IEM_DECL_IMPL_DEF(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb,
                  (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2))
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    Assert(uParam2 == 0 /*offRange*/); RT_NOREF(uParam2);
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
    Assert(pVCpu->iem.s.offCurInstrStart == 0);
    BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes when advancing
 * naturally to a different code page with first instr at byte 0.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb
 */
IEM_DECL_IMPL_DEF(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb,
                  (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2))
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    Assert(uParam2 == 0 /*offRange*/); RT_NOREF(uParam2);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
    Assert(pVCpu->iem.s.offCurInstrStart == 0);
    BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
    return VINF_SUCCESS;
}