VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMThreadedFunctions.cpp @ 99208

Last change on this file since 99208 was 99024, checked in by vboxsync, 2 years ago

VMM/IEM: scm fix. bugref:10369

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 17.0 KB
 
/* $Id: IEMThreadedFunctions.cpp 99024 2023-03-17 19:53:28Z vboxsync $ */
/** @file
 * IEM - Instruction Decoding and Emulation, Threaded Functions.
 */

/*
 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#ifndef LOG_GROUP /* defined when included by tstIEMCheckMc.cpp */
# define LOG_GROUP LOG_GROUP_IEM
#endif
#define VMCPU_INCL_CPUM_GST_CTX
#define IEM_WITH_OPAQUE_DECODER_STATE
#include <VBox/vmm/iem.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/apic.h>
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/gim.h>
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
# include <VBox/vmm/em.h>
# include <VBox/vmm/hm_svm.h>
#endif
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
# include <VBox/vmm/hmvmxinline.h>
#endif
#include <VBox/vmm/tm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/dbgftrace.h>
#include "IEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/log.h>
#include <VBox/err.h>
#include <VBox/param.h>
#include <VBox/dis.h>
#include <VBox/disopcode.h>
#include <iprt/asm-math.h>
#include <iprt/assert.h>
#include <iprt/string.h>
#include <iprt/x86.h>

#include "IEMInline.h"
#include "IEMMc.h"

#include "IEMThreadedFunctions.h"


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/

/** Variant of IEM_MC_ADVANCE_RIP_AND_FINISH with instruction length as param. */
#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED(a_cbInstr) \
    return iemRegAddToRipAndFinishingClearingRF(pVCpu, a_cbInstr)
#undef IEM_MC_ADVANCE_RIP_AND_FINISH

/** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length as param. */
#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED(a_i8, a_cbInstr) \
    return iemRegRipRelativeJumpS8AndFinishClearingRF(pVCpu, a_cbInstr, (a_i8), pVCpu->iem.s.enmEffOpSize)
#undef IEM_MC_REL_JMP_S8_AND_FINISH

/** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as param. */
#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED(a_i16, a_cbInstr) \
    return iemRegRipRelativeJumpS16AndFinishClearingRF(pVCpu, a_cbInstr, (a_i16))
#undef IEM_MC_REL_JMP_S16_AND_FINISH

/** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as param. */
#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED(a_i32, a_cbInstr) \
    return iemRegRipRelativeJumpS32AndFinishClearingRF(pVCpu, a_cbInstr, (a_i32), pVCpu->iem.s.enmEffOpSize)
#undef IEM_MC_REL_JMP_S32_AND_FINISH
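
/*
 * Illustrative sketch (compiled out): the threaded variants above take the
 * instruction length as an explicit parameter captured at decode time instead
 * of reading it from the decoder state.  Assuming hypothetical inputs, a
 * signed 8-bit relative jump with a 32-bit operand size advances RIP past the
 * instruction, adds the offset and truncates the result to 32 bits:
 */
#if 0 /* example only; plain arithmetic, no VBox state is touched */
static uint64_t iemExampleRelJmpS8(uint64_t uRipCur, uint8_t cbInstr, int8_t i8Offset)
{
    uint64_t const uRipNext = uRipCur + cbInstr;     /* RIP of the following instruction. */
    return (uint32_t)(uRipNext + (int64_t)i8Offset); /* 32-bit operand size wraps at 4G. */
}
#endif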

/** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters. */
# define IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR16(a_GCPtrEff, a_bRm, a_u16Disp) \
    (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr16(pVCpu, a_bRm, a_u16Disp)

/** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters. */
# define IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR32(a_GCPtrEff, a_bRm, a_bSib, a_u32Disp) \
    (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr32(pVCpu, a_bRm, a_bSib, a_u32Disp)

/** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters. */
# define IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR32FLAT(a_GCPtrEff, a_bRm, a_bSib, a_u32Disp) \
    (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr32(pVCpu, a_bRm, a_bSib, a_u32Disp)

/** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters. */
# define IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR64(a_GCPtrEff, a_bRmEx, a_bSib, a_u32Disp, a_cbImm) \
    (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr64(pVCpu, a_bRmEx, a_bSib, a_u32Disp, a_cbImm)

/** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters. */
# define IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR6432(a_GCPtrEff, a_bRmEx, a_bSib, a_u32Disp, a_cbImm) \
    (a_GCPtrEff) = (uint32_t)iemOpHlpCalcRmEffAddrThreadedAddr64(pVCpu, a_bRmEx, a_bSib, a_u32Disp, a_cbImm)
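
/*
 * Illustrative sketch (compiled out): the recompiler captures the raw ModRM,
 * SIB and displacement bytes at decode time and the macros above simply
 * forward them to the helpers below.  A hypothetical expansion of
 * IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR32 for bRm=0x44 (mod=01, rm=100),
 * bSib=0x88 (scale=4, index=ECX, base=EAX) and disp8=0x10 would read:
 */
#if 0 /* example only; the byte values are made up */
    RTGCPTR GCPtrEff;
    GCPtrEff = iemOpHlpCalcRmEffAddrThreadedAddr32(pVCpu, 0x44 /*bRm*/, 0x88 /*bSib*/, 0x10 /*u32Disp*/);
#endif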

/**
 * Calculates the effective address of a ModR/M memory operand, 16-bit
 * addressing variant.
 *
 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR16.
 *
 * @returns The effective address.
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 * @param   bRm                 The ModRM byte.
 * @param   u16Disp             The displacement byte/word, if any.
 */
static RTGCPTR iemOpHlpCalcRmEffAddrThreadedAddr16(PVMCPUCC pVCpu, uint8_t bRm, uint16_t u16Disp) RT_NOEXCEPT
{
    Log5(("iemOpHlpCalcRmEffAddrThreadedAddr16: bRm=%#x\n", bRm));
    Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);

    /* Handle the disp16 form with no registers first. */
    if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
    {
        Log5(("iemOpHlpCalcRmEffAddrThreadedAddr16: EffAddr=%#010RGv\n", (RTGCPTR)u16Disp));
        return u16Disp;
    }

    /* Get the displacement. */
    /** @todo we can eliminate this step by making u16Disp have this value
     *        already! */
    uint16_t u16EffAddr;
    switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
    {
        case 0:  u16EffAddr = 0;                        break;
        case 1:  u16EffAddr = (int16_t)(int8_t)u16Disp; break;
        case 2:  u16EffAddr = u16Disp;                  break;
        default: AssertFailedStmt(u16EffAddr = 0);
    }

    /* Add the base and index registers to the disp. */
    switch (bRm & X86_MODRM_RM_MASK)
    {
        case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
        case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
        case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; break;
        case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; break;
        case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
        case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
        case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; break;
        case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
    }

    Log5(("iemOpHlpCalcRmEffAddrThreadedAddr16: EffAddr=%#010RGv\n", (RTGCPTR)u16EffAddr));
    return u16EffAddr;
}
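
/*
 * Worked example (illustrative only): for "mov ax, [bp+di-2]" the decoder
 * produces bRm=0x43 (mod=01, rm=011) and u16Disp=0xFE, so the helper above
 * computes BP + DI + sign_extend8(0xFE) with 16-bit wrap-around:
 */
#if 0 /* example only; standalone, hypothetical register values */
static uint16_t iemExampleEffAddr16(uint16_t uBp, uint16_t uDi)
{
    return (uint16_t)(uBp + uDi + (uint16_t)(int16_t)(int8_t)0xFE); /* rm=011 selects BP+DI. */
}
#endif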


/**
 * Calculates the effective address of a ModR/M memory operand, 32-bit
 * addressing variant.
 *
 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR32 and
 * IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR32FLAT.
 *
 * @returns The effective address.
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 * @param   bRm                 The ModRM byte.
 * @param   bSib                The SIB byte, if any.
 * @param   u32Disp             The displacement byte/dword, if any.
 */
static RTGCPTR iemOpHlpCalcRmEffAddrThreadedAddr32(PVMCPUCC pVCpu, uint8_t bRm, uint8_t bSib, uint32_t u32Disp) RT_NOEXCEPT
{
    Log5(("iemOpHlpCalcRmEffAddrThreadedAddr32: bRm=%#x\n", bRm));
    Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);

    /* Handle the disp32 form with no registers first. */
    if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
    {
        Log5(("iemOpHlpCalcRmEffAddrThreadedAddr32: EffAddr=%#010RGv\n", (RTGCPTR)u32Disp));
        return u32Disp;
    }

    /* Get the register (or SIB) value. */
    uint32_t u32EffAddr;
    switch (bRm & X86_MODRM_RM_MASK)
    {
        case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
        case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
        case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
        case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
        case 4: /* SIB */
        {
            /* Get the index and scale it. */
            switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
            {
                case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
                case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
                case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
                case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
                case 4: u32EffAddr = 0; /*none */ break;
                case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
                case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
                case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
            }
            u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;

            /* add base */
            switch (bSib & X86_SIB_BASE_MASK)
            {
                case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
                case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
                case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
                case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
                case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; break;
                case 5:
                    if ((bRm & X86_MODRM_MOD_MASK) != 0)
                        u32EffAddr += pVCpu->cpum.GstCtx.ebp;
                    else
                        u32EffAddr += u32Disp;
                    break;
                case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
                case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
            }
            break;
        }
        case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
        case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
        case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
    }

    /* Get and add the displacement. */
    switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
    {
        case 0: break;
        case 1: u32EffAddr += (int8_t)u32Disp; break;
        case 2: u32EffAddr += u32Disp; break;
        default: AssertFailed();
    }

    Log5(("iemOpHlpCalcRmEffAddrThreadedAddr32: EffAddr=%#010RGv\n", (RTGCPTR)u32EffAddr));
    return u32EffAddr;
}
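
/*
 * Worked example (illustrative only): for "mov eax, [eax+ecx*4+10h]" the
 * decoder produces bRm=0x44 (mod=01, rm=100 -> SIB follows), bSib=0x88
 * (scale=10b, index=001 -> ECX, base=000 -> EAX) and u32Disp=0x10, so the
 * helper above computes EAX + (ECX << 2) + sign_extend8(0x10):
 */
#if 0 /* example only; standalone, hypothetical register values */
static uint32_t iemExampleSibEffAddr32(uint32_t uEax, uint32_t uEcx)
{
    return uEax + (uEcx << 2) + (uint32_t)(int32_t)(int8_t)0x10; /* base + index*scale + disp8 */
}
#endif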


/**
 * Calculates the effective address of a ModR/M memory operand.
 *
 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR64.
 *
 * @returns The effective address.
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 * @param   bRmEx               The ModRM byte but with bit 3 set to REX.B and
 *                              bit 4 to REX.X.  The two bits are part of the
 *                              REG sub-field, which isn't needed in this
 *                              function.
 * @param   bSib                The SIB byte, if any.
 * @param   u32Disp             The displacement byte/word/dword, if any.
 * @param   cbInstr             The size of the fully decoded instruction.  Used
 *                              for RIP relative addressing.
 * @todo combine cbInstr and cbImm!
 */
static RTGCPTR iemOpHlpCalcRmEffAddrThreadedAddr64(PVMCPUCC pVCpu, uint8_t bRmEx, uint8_t bSib,
                                                   uint32_t u32Disp, uint8_t cbInstr) RT_NOEXCEPT
{
    Log5(("iemOpHlpCalcRmEffAddrThreadedAddr64: bRmEx=%#x\n", bRmEx));
    Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);

    uint64_t u64EffAddr;

    /* Handle the rip+disp32 form with no registers first. */
    if ((bRmEx & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
    {
        u64EffAddr = (int32_t)u32Disp;
        u64EffAddr += pVCpu->cpum.GstCtx.rip + cbInstr;
    }
    else
    {
        /* Get the register (or SIB) value. */
        switch (bRmEx & (X86_MODRM_RM_MASK | 0x8)) /* bRmEx[bit 3] = REX.B */
        {
            case  0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
            case  1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
            case  2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
            case  3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
            case  5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
            case  6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
            case  7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
            case  8: u64EffAddr = pVCpu->cpum.GstCtx.r8;  break;
            case  9: u64EffAddr = pVCpu->cpum.GstCtx.r9;  break;
            case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
            case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
            case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
            case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
            case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
            /* SIB */
            case 4:
            case 12:
            {
                /* Get the index and scale it. */
                switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | ((bRmEx & 0x10) >> 1)) /* bRmEx[bit 4] = REX.X */
                {
                    case  0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
                    case  1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
                    case  2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
                    case  3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
                    case  4: u64EffAddr = 0; /*none */ break;
                    case  5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
                    case  6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
                    case  7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
                    case  8: u64EffAddr = pVCpu->cpum.GstCtx.r8;  break;
                    case  9: u64EffAddr = pVCpu->cpum.GstCtx.r9;  break;
                    case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
                    case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
                    case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
                    case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
                    case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
                    case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
                }
                u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;

                /* add base */
                switch ((bSib & X86_SIB_BASE_MASK) | (bRmEx & 0x8)) /* bRmEx[bit 3] = REX.B */
                {
                    case  0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
                    case  1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
                    case  2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
                    case  3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
                    case  4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; break;
                    case  6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
                    case  7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
                    case  8: u64EffAddr += pVCpu->cpum.GstCtx.r8;  break;
                    case  9: u64EffAddr += pVCpu->cpum.GstCtx.r9;  break;
                    case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
                    case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
                    case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
                    case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
                    case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
                    /* complicated encodings */
                    case 5:
                        if ((bRmEx & X86_MODRM_MOD_MASK) != 0)
                            u64EffAddr += pVCpu->cpum.GstCtx.rbp;
                        else
                            u64EffAddr += (int32_t)u32Disp;
                        break;
                    case 13:
                        if ((bRmEx & X86_MODRM_MOD_MASK) != 0)
                            u64EffAddr += pVCpu->cpum.GstCtx.r13;
                        else
                            u64EffAddr += (int32_t)u32Disp;
                        break;
                }
                break;
            }
        }

        /* Get and add the displacement. */
        switch ((bRmEx >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
        {
            case 0: break;
            case 1: u64EffAddr += (int8_t)u32Disp; break;
            case 2: u64EffAddr += (int32_t)u32Disp; break;
            default: AssertFailed();
        }
    }

    Log5(("iemOpHlpCalcRmEffAddrThreadedAddr64: EffAddr=%#010RGv\n", u64EffAddr));
    return u64EffAddr;
}
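
/*
 * Worked example (illustrative only): with mod=00 and rm=101 in 64-bit mode
 * the operand is RIP-relative, measured from the end of the instruction.  For
 * a 7-byte instruction at 0x00007ff612340000 with u32Disp=0x1000 the helper
 * above yields 0x00007ff612340007 + 0x1000 = 0x00007ff612341007:
 */
#if 0 /* example only; standalone */
static uint64_t iemExampleRipRelAddr(uint64_t uRip, uint8_t cbInstr, uint32_t u32Disp)
{
    return uRip + cbInstr + (uint64_t)(int64_t)(int32_t)u32Disp; /* disp32 is sign-extended */
}
#endif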



/*
 * The threaded functions.
 */
#include "IEMThreadedFunctions.cpp.h"
