VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllMemRWTmpl.cpp.h@ 102876

最後變更 在這個檔案從102876是 102790,由 vboxsync 提交於 15 月 前

VMM/IEM: Emit TLB lookup for POP GPR instructions. bugref:10371

  • 屬性 svn:eol-style 設為 native
  • 屬性 svn:keywords 設為 Author Date Id Revision
檔案大小: 25.7 KB
 
1/* $Id: IEMAllMemRWTmpl.cpp.h 102790 2024-01-09 01:41:28Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - R/W Memory Functions Template.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
/* Check template parameters.  This file is included multiple times with
   different TMPL_MEM_* settings (one per access size/type). */
#ifndef TMPL_MEM_TYPE
# error "TMPL_MEM_TYPE is undefined"
#endif
#ifndef TMPL_MEM_TYPE_ALIGN
/* Default alignment requirement: natural alignment, expressed as a mask (size - 1). */
# define TMPL_MEM_TYPE_ALIGN (sizeof(TMPL_MEM_TYPE) - 1)
#endif
#ifndef TMPL_MEM_FN_SUFF
# error "TMPL_MEM_FN_SUFF is undefined"
#endif
#ifndef TMPL_MEM_FMT_TYPE
# error "TMPL_MEM_FMT_TYPE is undefined"
#endif
#ifndef TMPL_MEM_FMT_DESC
# error "TMPL_MEM_FMT_DESC is undefined"
#endif
45
46
47/**
48 * Standard fetch function.
49 *
50 * This is used by CImpl code, so it needs to be kept even when IEM_WITH_SETJMP
51 * is defined.
52 */
53VBOXSTRICTRC RT_CONCAT(iemMemFetchData,TMPL_MEM_FN_SUFF)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *puDst,
54 uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
55{
56 /* The lazy approach for now... */
57 uint8_t bUnmapInfo;
58 TMPL_MEM_TYPE const *puSrc;
59 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, &bUnmapInfo, sizeof(*puSrc), iSegReg, GCPtrMem,
60 IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN);
61 if (rc == VINF_SUCCESS)
62 {
63 *puDst = *puSrc;
64 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
65 Log2(("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, *puDst));
66 }
67 return rc;
68}
69
70
#ifdef IEM_WITH_SETJMP
/**
 * Safe/fallback fetch function that longjmps on error.
 *
 * Used when the inlined TLB-based fetch path cannot handle the access; any
 * failure is raised via longjmp instead of a status code.
 */
# ifdef TMPL_MEM_BY_REF
void
RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *pDst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeReadPath++; /* statistics: safe (non-TLB) read path taken */
# endif
    uint8_t bUnmapInfo;
    TMPL_MEM_TYPE const *pSrc = (TMPL_MEM_TYPE const *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pSrc), iSegReg, GCPtrMem,
                                                                    IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN);
    *pDst = *pSrc;
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    /* NOTE(review): pDst (the pointer) is passed to the log here; presumably
       TMPL_MEM_FMT_TYPE is a by-reference format specifier for these larger
       types -- confirm against the including file. */
    Log2(("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, pDst));
}
# else /* !TMPL_MEM_BY_REF */
TMPL_MEM_TYPE
RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeReadPath++; /* statistics: safe (non-TLB) read path taken */
# endif
    uint8_t bUnmapInfo;
    TMPL_MEM_TYPE const *puSrc = (TMPL_MEM_TYPE const *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*puSrc), iSegReg, GCPtrMem,
                                                                     IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN);
    TMPL_MEM_TYPE const uRet = *puSrc;
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
    Log2(("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, uRet));
    return uRet;
}
# endif /* !TMPL_MEM_BY_REF */
#endif /* IEM_WITH_SETJMP */
106
107
108
/**
 * Standard store function.
 *
 * This is used by CImpl code, so it needs to be kept even when IEM_WITH_SETJMP
 * is defined.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg     The index of the segment register to use for
 *                      this access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 * @param   uValue      The value to store (passed by pointer as @a pValue
 *                      when TMPL_MEM_BY_REF is defined).
 */
VBOXSTRICTRC RT_CONCAT(iemMemStoreData,TMPL_MEM_FN_SUFF)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
#ifdef TMPL_MEM_BY_REF
                                                         TMPL_MEM_TYPE const *pValue) RT_NOEXCEPT
#else
                                                         TMPL_MEM_TYPE uValue) RT_NOEXCEPT
#endif
{
    /* The lazy approach for now: map the destination, write, commit+unmap. */
    uint8_t bUnmapInfo;
    TMPL_MEM_TYPE *puDst;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puDst, &bUnmapInfo, sizeof(*puDst),
                                iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, TMPL_MEM_TYPE_ALIGN);
    if (rc == VINF_SUCCESS)
    {
#ifdef TMPL_MEM_BY_REF
        *puDst = *pValue;
#else
        *puDst = uValue;
#endif
        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
#ifdef TMPL_MEM_BY_REF
        Log6(("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, pValue));
#else
        Log6(("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, uValue));
#endif
    }
    return rc;
}
143
144
#ifdef IEM_WITH_SETJMP
/**
 * Safe/fallback store function that longjmps on error.
 *
 * (The original doc comment said "data byte", but this template stores a
 * TMPL_MEM_TYPE sized value.)
 *
 * @param pVCpu     The cross context virtual CPU structure of the calling thread.
 * @param iSegReg   The index of the segment register to use for
 *                  this access.  The base and limits are checked.
 * @param GCPtrMem  The address of the guest memory.
 * @param uValue    The value to store (passed by pointer as @a pValue when
 *                  TMPL_MEM_BY_REF is defined).
 */
void RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
#ifdef TMPL_MEM_BY_REF
                                                          TMPL_MEM_TYPE const *pValue) IEM_NOEXCEPT_MAY_LONGJMP
#else
                                                          TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
#endif
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++; /* statistics: safe (non-TLB) write path taken */
# endif
    /* Log before mapping, as iemMemMapJmp may longjmp out on failure. */
#ifdef TMPL_MEM_BY_REF
    Log6(("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, pValue));
#else
    Log6(("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, uValue));
#endif
    uint8_t bUnmapInfo;
    TMPL_MEM_TYPE *puDst = (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*puDst), iSegReg, GCPtrMem,
                                                         IEM_ACCESS_DATA_W, TMPL_MEM_TYPE_ALIGN);
#ifdef TMPL_MEM_BY_REF
    *puDst = *pValue;
#else
    *puDst = uValue;
#endif
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
}
#endif /* IEM_WITH_SETJMP */
181
182
183#ifdef IEM_WITH_SETJMP
184
185/**
186 * Maps a data buffer for read+write direct access (or via a bounce buffer),
187 * longjmp on error.
188 *
189 * @param pVCpu The cross context virtual CPU structure of the calling thread.
190 * @param pbUnmapInfo Pointer to unmap info variable.
191 * @param iSegReg The index of the segment register to use for
192 * this access. The base and limits are checked.
193 * @param GCPtrMem The address of the guest memory.
194 */
195TMPL_MEM_TYPE *
196RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwSafeJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
197 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
198{
199# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
200 pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
201# endif
202 Log8(("IEM RW/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem));
203 *pbUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4); /* zero is for the TLB hit */
204 return (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, pbUnmapInfo, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem,
205 IEM_ACCESS_DATA_RW, TMPL_MEM_TYPE_ALIGN);
206}
207
208
209/**
210 * Maps a data buffer for writeonly direct access (or via a bounce buffer),
211 * longjmp on error.
212 *
213 * @param pVCpu The cross context virtual CPU structure of the calling thread.
214 * @param pbUnmapInfo Pointer to unmap info variable.
215 * @param iSegReg The index of the segment register to use for
216 * this access. The base and limits are checked.
217 * @param GCPtrMem The address of the guest memory.
218 */
219TMPL_MEM_TYPE *
220RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,WoSafeJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
221 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
222{
223# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
224 pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
225# endif
226 Log8(("IEM WO/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem));
227 *pbUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); /* zero is for the TLB hit */
228 return (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, pbUnmapInfo, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem,
229 IEM_ACCESS_DATA_W, TMPL_MEM_TYPE_ALIGN);
230}
231
232
233/**
234 * Maps a data buffer for readonly direct access (or via a bounce buffer),
235 * longjmp on error.
236 *
237 * @param pVCpu The cross context virtual CPU structure of the calling thread.
238 * @param pbUnmapInfo Pointer to unmap info variable.
239 * @param iSegReg The index of the segment register to use for
240 * this access. The base and limits are checked.
241 * @param GCPtrMem The address of the guest memory.
242 */
243TMPL_MEM_TYPE const *
244RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RoSafeJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
245 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
246{
247# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
248 pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
249# endif
250 Log4(("IEM RO/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem));
251 *pbUnmapInfo = 1 | (IEM_ACCESS_TYPE_READ << 4); /* zero is for the TLB hit */
252 return (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, pbUnmapInfo, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem,
253 IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN);
254}
255
256#endif /* IEM_WITH_SETJMP */
257
258
259#ifdef TMPL_MEM_WITH_STACK
260
261/**
262 * Pops a general purpose register off the stack.
263 *
264 * @returns Strict VBox status code.
265 * @param pVCpu The cross context virtual CPU structure of the
266 * calling thread.
267 * @param iGReg The GREG to load the popped value into.
268 */
269VBOXSTRICTRC RT_CONCAT(iemMemStackPopGReg,TMPL_MEM_FN_SUFF)(PVMCPUCC pVCpu, uint8_t iGReg) RT_NOEXCEPT
270{
271 Assert(iGReg < 16);
272
273 /* Increment the stack pointer. */
274 uint64_t uNewRsp;
275 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
276
277 /* Load the word the lazy way. */
278 uint8_t bUnmapInfo;
279 TMPL_MEM_TYPE const *puSrc;
280 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
281 IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN);
282 if (rc == VINF_SUCCESS)
283 {
284 TMPL_MEM_TYPE const uValue = *puSrc;
285 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
286
287 /* Commit the register and new RSP values. */
288 if (rc == VINF_SUCCESS)
289 {
290 Log10(("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " (r%u)\n",
291 GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue, iGReg));
292 pVCpu->cpum.GstCtx.rsp = uNewRsp;
293 if (sizeof(TMPL_MEM_TYPE) != sizeof(uint16_t))
294 pVCpu->cpum.GstCtx.aGRegs[iGReg].u = uValue;
295 else
296 pVCpu->cpum.GstCtx.aGRegs[iGReg].u16 = uValue;
297 return VINF_SUCCESS;
298 }
299 }
300 return rc;
301}
302
303
304/**
305 * Pushes an item onto the stack, regular version.
306 *
307 * @returns Strict VBox status code.
308 * @param pVCpu The cross context virtual CPU structure of the
309 * calling thread.
310 * @param uValue The value to push.
311 */
312VBOXSTRICTRC RT_CONCAT(iemMemStackPush,TMPL_MEM_FN_SUFF)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) RT_NOEXCEPT
313{
314 /* Increment the stack pointer. */
315 uint64_t uNewRsp;
316 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
317
318 /* Write the dword the lazy way. */
319 uint8_t bUnmapInfo;
320 TMPL_MEM_TYPE *puDst;
321 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puDst, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
322 IEM_ACCESS_STACK_W, TMPL_MEM_TYPE_ALIGN);
323 if (rc == VINF_SUCCESS)
324 {
325 *puDst = uValue;
326 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
327
328 /* Commit the new RSP value unless we an access handler made trouble. */
329 if (rc == VINF_SUCCESS)
330 {
331 Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE "\n",
332 GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
333 pVCpu->cpum.GstCtx.rsp = uNewRsp;
334 return VINF_SUCCESS;
335 }
336 }
337
338 return rc;
339}
340
341
342/**
343 * Pops a generic item off the stack, regular version.
344 *
345 * This is used by C-implementation code.
346 *
347 * @returns Strict VBox status code.
348 * @param pVCpu The cross context virtual CPU structure of the
349 * calling thread.
350 * @param puValue Where to store the popped value.
351 */
352VBOXSTRICTRC RT_CONCAT(iemMemStackPop,TMPL_MEM_FN_SUFF)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *puValue) RT_NOEXCEPT
353{
354 /* Increment the stack pointer. */
355 uint64_t uNewRsp;
356 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
357
358 /* Write the word the lazy way. */
359 uint8_t bUnmapInfo;
360 TMPL_MEM_TYPE const *puSrc;
361 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
362 IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN);
363 if (rc == VINF_SUCCESS)
364 {
365 *puValue = *puSrc;
366 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
367
368 /* Commit the new RSP value. */
369 if (rc == VINF_SUCCESS)
370 {
371 Log10(("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE "\n",
372 GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, *puValue));
373 pVCpu->cpum.GstCtx.rsp = uNewRsp;
374 return VINF_SUCCESS;
375 }
376 }
377 return rc;
378}
379
380
381/**
382 * Pushes an item onto the stack, using a temporary stack pointer.
383 *
384 * @returns Strict VBox status code.
385 * @param pVCpu The cross context virtual CPU structure of the
386 * calling thread.
387 * @param uValue The value to push.
388 * @param pTmpRsp Pointer to the temporary stack pointer.
389 */
390VBOXSTRICTRC RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,Ex)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue, PRTUINT64U pTmpRsp) RT_NOEXCEPT
391{
392 /* Increment the stack pointer. */
393 RTUINT64U NewRsp = *pTmpRsp;
394 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, sizeof(TMPL_MEM_TYPE));
395
396 /* Write the word the lazy way. */
397 uint8_t bUnmapInfo;
398 TMPL_MEM_TYPE *puDst;
399 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puDst, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
400 IEM_ACCESS_STACK_W, TMPL_MEM_TYPE_ALIGN);
401 if (rc == VINF_SUCCESS)
402 {
403 *puDst = uValue;
404 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
405
406 /* Commit the new RSP value unless we an access handler made trouble. */
407 if (rc == VINF_SUCCESS)
408 {
409 Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " [ex]\n",
410 GCPtrTop, pTmpRsp->u, NewRsp.u, uValue));
411 *pTmpRsp = NewRsp;
412 return VINF_SUCCESS;
413 }
414 }
415 return rc;
416}
417
418
419/**
420 * Pops an item off the stack, using a temporary stack pointer.
421 *
422 * @returns Strict VBox status code.
423 * @param pVCpu The cross context virtual CPU structure of the
424 * calling thread.
425 * @param puValue Where to store the popped value.
426 * @param pTmpRsp Pointer to the temporary stack pointer.
427 */
428VBOXSTRICTRC
429RT_CONCAT3(iemMemStackPop,TMPL_MEM_FN_SUFF,Ex)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *puValue, PRTUINT64U pTmpRsp) RT_NOEXCEPT
430{
431 /* Increment the stack pointer. */
432 RTUINT64U NewRsp = *pTmpRsp;
433 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, sizeof(TMPL_MEM_TYPE));
434
435 /* Write the word the lazy way. */
436 uint8_t bUnmapInfo;
437 TMPL_MEM_TYPE const *puSrc;
438 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
439 IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN);
440 if (rc == VINF_SUCCESS)
441 {
442 *puValue = *puSrc;
443 rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
444
445 /* Commit the new RSP value. */
446 if (rc == VINF_SUCCESS)
447 {
448 Log10(("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " [ex]\n",
449 GCPtrTop, pTmpRsp->u, NewRsp.u, *puValue));
450 *pTmpRsp = NewRsp;
451 return VINF_SUCCESS;
452 }
453 }
454 return rc;
455}
456
457
458# ifdef IEM_WITH_SETJMP
459
460/**
461 * Safe/fallback stack store function that longjmps on error.
462 */
463void RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem,
464 TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
465{
466# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
467 pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
468# endif
469
470 uint8_t bUnmapInfo;
471 TMPL_MEM_TYPE *puDst = (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrMem,
472 IEM_ACCESS_STACK_W, TMPL_MEM_TYPE_ALIGN);
473 *puDst = uValue;
474 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
475
476 Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrMem, uValue));
477}
478
479
# ifdef TMPL_WITH_PUSH_SREG
/**
 * Safe/fallback stack SREG store function that longjmps on error.
 *
 * Segment register pushes have CPU dependent write-width quirks, hence the
 * special casing below instead of a plain TMPL_MEM_TYPE sized store.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   GCPtrMem    The stack address to store at (SS relative).
 * @param   uValue      The segment register value to store.
 */
void RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SRegSafeJmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem,
                                                               TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
# endif

    /* bs3-cpu-weird-1 explores this instruction.  AMD 3990X does it by the book,
       with a zero extended DWORD write.  While my Intel 10980XE goes all weird
       in real mode where it will write a DWORD with the top word of EFLAGS in
       the top half.  In all other modes it does a WORD access. */

    /** @todo Docs indicate the behavior changed maybe in Pentium or Pentium Pro.
     *        Check ancient hardware when it actually did change. */
    uint8_t bUnmapInfo;
    if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
    {
        if (!IEM_IS_REAL_MODE(pVCpu))
        {
            /* WORD per intel specs. */
            uint16_t *puDst = (uint16_t *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(uint16_t), X86_SREG_SS, GCPtrMem,
                                                       IEM_ACCESS_STACK_W, sizeof(uint16_t) - 1); /** @todo 2 or 4 alignment check for PUSH SS? */
            *puDst = (uint16_t)uValue;
            iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
            Log12(("IEM WR 'word' SS|%RGv: %#06x [sreg/i]\n", GCPtrMem, (uint16_t)uValue));
        }
        else
        {
            /* DWORD real mode weirness observed on 10980XE. */
            /** @todo Check this on other intel CPUs and when pushing registers other
             *        than FS (which all that bs3-cpu-weird-1 does atm).  (Maybe this is
             *        something for the CPU profile... Hope not.) */
            uint32_t *puDst = (uint32_t *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(uint32_t), X86_SREG_SS, GCPtrMem,
                                                       IEM_ACCESS_STACK_W, sizeof(uint32_t) - 1);
            /* Low word: selector value; high word: live EFLAGS bits (RAZ bits masked out). */
            *puDst = (uint16_t)uValue | (pVCpu->cpum.GstCtx.eflags.u & (UINT32_C(0xffff0000) & ~X86_EFL_RAZ_MASK));
            iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
            Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv: " TMPL_MEM_FMT_TYPE " [sreg/ir]\n", GCPtrMem, uValue));
        }
    }
    else
    {
        /* DWORD per spec. */
        uint32_t *puDst = (uint32_t *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(uint32_t), X86_SREG_SS, GCPtrMem,
                                                   IEM_ACCESS_STACK_W, sizeof(uint32_t) - 1);
        *puDst = uValue;
        iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
        Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv: " TMPL_MEM_FMT_TYPE " [sreg]\n", GCPtrMem, uValue));
    }
}
# endif /* TMPL_WITH_PUSH_SREG */
534
535
536/**
537 * Safe/fallback stack fetch function that longjmps on error.
538 */
539TMPL_MEM_TYPE RT_CONCAT3(iemMemFetchStack,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
540{
541# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
542 pVCpu->iem.s.DataTlb.cTlbSafeReadPath++;
543# endif
544
545 /* Read the data. */
546 uint8_t bUnmapInfo;
547 TMPL_MEM_TYPE const *puSrc = (TMPL_MEM_TYPE const *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS,
548 GCPtrMem, IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN);
549 TMPL_MEM_TYPE const uValue = *puSrc;
550 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
551
552 /* Commit the register and RSP values. */
553 Log10(("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrMem, uValue));
554 return uValue;
555}
556
557
558/**
559 * Safe/fallback stack push function that longjmps on error.
560 */
561void RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
562{
563# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
564 pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
565# endif
566
567 /* Decrement the stack pointer (prep). */
568 uint64_t uNewRsp;
569 RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
570
571 /* Write the data. */
572 uint8_t bUnmapInfo;
573 TMPL_MEM_TYPE *puDst = (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
574 IEM_ACCESS_STACK_W, TMPL_MEM_TYPE_ALIGN);
575 *puDst = uValue;
576 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
577
578 /* Commit the RSP change. */
579 Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE "\n",
580 GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
581 pVCpu->cpum.GstCtx.rsp = uNewRsp;
582}
583
584
585/**
586 * Safe/fallback stack pop greg function that longjmps on error.
587 */
588void RT_CONCAT3(iemMemStackPopGReg,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP
589{
590# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
591 pVCpu->iem.s.DataTlb.cTlbSafeReadPath++;
592# endif
593
594 /* Increment the stack pointer. */
595 uint64_t uNewRsp;
596 RTGCPTR const GCPtrTop = iemRegGetRspForPop(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
597
598 /* Read the data. */
599 uint8_t bUnmapInfo;
600 TMPL_MEM_TYPE const *puSrc = (TMPL_MEM_TYPE const *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS,
601 GCPtrTop, IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN);
602 TMPL_MEM_TYPE const uValue = *puSrc;
603 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
604
605 /* Commit the register and RSP values. */
606 Log10(("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " (r%u)\n",
607 GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue, iGReg));
608 pVCpu->cpum.GstCtx.rsp = uNewRsp;
609 if (sizeof(TMPL_MEM_TYPE) != sizeof(uint16_t))
610 pVCpu->cpum.GstCtx.aGRegs[iGReg].u = uValue;
611 else
612 pVCpu->cpum.GstCtx.aGRegs[iGReg].u16 = uValue;
613}
614
# ifdef TMPL_WITH_PUSH_SREG
/**
 * Safe/fallback stack push function for segment registers, longjmps on error.
 *
 * Only the low word of @a uValue is written, see the notes in the body.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   uValue  The segment register value to push.
 */
void RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegSafeJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
# endif

    /* Decrement the stack pointer (prep). */
    uint64_t uNewRsp;
    RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);

    /* Write the data. */
    /* The intel docs talks about zero extending the selector register
       value.  My actual intel CPU here might be zero extending the value
       but it still only writes the lower word... */
    /** @todo Test this on new HW and on AMD and in 64-bit mode.  Also test what
     * happens when crossing an electric page boundrary, is the high word checked
     * for write accessibility or not? Probably it is.  What about segment limits?
     * It appears this behavior is also shared with trap error codes.
     *
     * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
     * ancient hardware when it actually did change. */
    uint8_t bUnmapInfo;
    uint16_t *puDst = (uint16_t *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(uint16_t), X86_SREG_SS, GCPtrTop,
                                               IEM_ACCESS_STACK_W, sizeof(uint16_t) - 1); /** @todo 2 or 4 alignment check for PUSH SS? */
    *puDst = (uint16_t)uValue;
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);

    /* Commit the RSP change. */
    Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " [sreg]\n",
           GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
    pVCpu->cpum.GstCtx.rsp = uNewRsp;
}
# endif /* TMPL_WITH_PUSH_SREG */
652
653# endif /* IEM_WITH_SETJMP */
654
655#endif /* TMPL_MEM_WITH_STACK */
656
/* Clean up the per-instantiation template parameters so the file can be
   included again with a different configuration. */
#undef TMPL_MEM_TYPE
#undef TMPL_MEM_TYPE_ALIGN
#undef TMPL_MEM_FN_SUFF
#undef TMPL_MEM_FMT_TYPE
#undef TMPL_MEM_FMT_DESC
#undef TMPL_WITH_PUSH_SREG
/* NOTE(review): TMPL_MEM_WITH_STACK and TMPL_MEM_BY_REF are deliberately not
   undefined here; presumably the includer manages those -- confirm. */
664
注意: 瀏覽 TracBrowser 來幫助您使用儲存庫瀏覽器

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette