VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMRC/SELMRC.cpp@ 55895

最後變更 在這個檔案從55895是 55895,由 vboxsync 提交於 10 年 前

Added pvUser to the raw-mode context virtual handler callbacks.

  • 屬性 svn:eol-style 設為 native
  • 屬性 svn:keywords 設為 Id Revision
檔案大小: 25.1 KB
 
1/* $Id: SELMRC.cpp 55895 2015-05-17 19:42:38Z vboxsync $ */
2/** @file
3 * SELM - The Selector Manager, Guest Context.
4 */
5
6/*
7 * Copyright (C) 2006-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.alldomusa.eu.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_SELM
22#include <VBox/vmm/selm.h>
23#include <VBox/vmm/mm.h>
24#include <VBox/vmm/em.h>
25#include <VBox/vmm/trpm.h>
26#include "SELMInternal.h"
27#include <VBox/vmm/vm.h>
28#include <VBox/vmm/vmm.h>
29#include <VBox/vmm/pgm.h>
30
31#include <VBox/param.h>
32#include <VBox/err.h>
33#include <VBox/log.h>
34#include <iprt/assert.h>
35#include <iprt/asm.h>
36
37#include "SELMInline.h"
38
39
40/*******************************************************************************
41* Global Variables *
42*******************************************************************************/
#ifdef LOG_ENABLED
/** Segment register names, indexed by segment register number (ES..GS);
 *  only needed by Log statements, hence the LOG_ENABLED guard. */
static char const g_aszSRegNms[X86_SREG_COUNT][4] = { "ES", "CS", "SS", "DS", "FS", "GS" };
#endif
47
48
49#ifdef SELM_TRACK_GUEST_GDT_CHANGES
50/**
51 * Synchronizes one GDT entry (guest -> shadow).
52 *
53 * @returns VBox strict status code (appropriate for trap handling and GC
54 * return).
55 * @retval VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
56 * @retval VINF_SELM_SYNC_GDT
57 * @retval VINF_EM_RESCHEDULE_REM
58 *
59 * @param pVM Pointer to the VM.
60 * @param pVCpu The current virtual CPU.
61 * @param pRegFrame Trap register frame.
62 * @param iGDTEntry The GDT entry to sync.
63 *
64 * @remarks Caller checks that this isn't the LDT entry!
65 */
66static VBOXSTRICTRC selmRCSyncGDTEntry(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, unsigned iGDTEntry)
67{
68 Log2(("GDT %04X LDTR=%04X\n", iGDTEntry, CPUMGetGuestLDTR(pVCpu)));
69
70 /*
71 * Validate the offset.
72 */
73 VBOXGDTR GdtrGuest;
74 CPUMGetGuestGDTR(pVCpu, &GdtrGuest);
75 unsigned offEntry = iGDTEntry * sizeof(X86DESC);
76 if ( iGDTEntry >= SELM_GDT_ELEMENTS
77 || offEntry > GdtrGuest.cbGdt)
78 return VINF_SUCCESS; /* ignore */
79
80 /*
81 * Read the guest descriptor.
82 */
83 X86DESC Desc;
84 int rc = MMGCRamRead(pVM, &Desc, (uint8_t *)(uintptr_t)GdtrGuest.pGdt + offEntry, sizeof(X86DESC));
85 if (RT_FAILURE(rc))
86 {
87 rc = PGMPhysSimpleReadGCPtr(pVCpu, &Desc, (uintptr_t)GdtrGuest.pGdt + offEntry, sizeof(X86DESC));
88 if (RT_FAILURE(rc))
89 {
90 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
91 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3); /* paranoia */
92 return VINF_EM_RESCHEDULE_REM;
93 }
94 }
95
96 /*
97 * Check for conflicts.
98 */
99 RTSEL Sel = iGDTEntry << X86_SEL_SHIFT;
100 Assert( !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS] & ~X86_SEL_MASK_OFF_RPL)
101 && !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS] & ~X86_SEL_MASK_OFF_RPL)
102 && !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64] & ~X86_SEL_MASK_OFF_RPL)
103 && !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS] & ~X86_SEL_MASK_OFF_RPL)
104 && !(pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] & ~X86_SEL_MASK_OFF_RPL));
105 if ( pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS] == Sel
106 || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS] == Sel
107 || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64] == Sel
108 || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS] == Sel
109 || pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] == Sel)
110 {
111 if (Desc.Gen.u1Present)
112 {
113 Log(("selmRCSyncGDTEntry: Sel=%d Desc=%.8Rhxs: detected conflict!!\n", Sel, &Desc));
114 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
115 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
116 return VINF_SELM_SYNC_GDT; /** @todo this status code is ignored, unfortunately. */
117 }
118 Log(("selmRCSyncGDTEntry: Sel=%d Desc=%.8Rhxs: potential conflict (still not present)!\n", Sel, &Desc));
119
120 /* Note: we can't continue below or else we'll change the shadow descriptor!! */
121 /* When the guest makes the selector present, then we'll do a GDT sync. */
122 return VINF_SUCCESS;
123 }
124
125 /*
126 * Convert the guest selector to a shadow selector and update the shadow GDT.
127 */
128 selmGuestToShadowDesc(pVM, &Desc);
129 PX86DESC pShwDescr = &pVM->selm.s.paGdtRC[iGDTEntry];
130 //Log(("O: base=%08X limit=%08X attr=%04X\n", X86DESC_BASE(*pShwDescr)), X86DESC_LIMIT(*pShwDescr), (pShwDescr->au32[1] >> 8) & 0xFFFF ));
131 //Log(("N: base=%08X limit=%08X attr=%04X\n", X86DESC_BASE(Desc)), X86DESC_LIMIT(Desc), (Desc.au32[1] >> 8) & 0xFFFF ));
132 *pShwDescr = Desc;
133
134 /*
135 * Detect and mark stale registers.
136 */
137 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
138 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu); Assert(CPUMCTX2CORE(pCtx) == pRegFrame);
139 PCPUMSELREG paSReg = CPUMCTX_FIRST_SREG(pCtx);
140 for (unsigned iSReg = 0; iSReg <= X86_SREG_COUNT; iSReg++)
141 {
142 if (Sel == (paSReg[iSReg].Sel & X86_SEL_MASK_OFF_RPL))
143 {
144 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &paSReg[iSReg]))
145 {
146 if (selmIsSRegStale32(&paSReg[iSReg], &Desc, iSReg))
147 {
148 Log(("GDT write to selector in %s register %04X (now stale)\n", g_aszSRegNms[iSReg], paSReg[iSReg].Sel));
149 paSReg[iSReg].fFlags |= CPUMSELREG_FLAGS_STALE;
150 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3); /* paranoia */
151 /* rcStrict = VINF_EM_RESCHEDULE_REM; - bad idea if we're in a patch. */
152 rcStrict = VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT;
153 }
154 else if (paSReg[iSReg].fFlags & CPUMSELREG_FLAGS_STALE)
155 {
156 Log(("GDT write to selector in %s register %04X (no longer stale)\n", g_aszSRegNms[iSReg], paSReg[iSReg].Sel));
157 paSReg[iSReg].fFlags &= ~CPUMSELREG_FLAGS_STALE;
158 }
159 else
160 Log(("GDT write to selector in %s register %04X (no important change)\n", g_aszSRegNms[iSReg], paSReg[iSReg].Sel));
161 }
162 else
163 Log(("GDT write to selector in %s register %04X (out of sync)\n", paSReg[iSReg].Sel));
164 }
165 }
166
167 /** @todo Detect stale LDTR as well? */
168
169 return rcStrict;
170}
171
172
173/**
174 * Synchronizes any segment registers refering to the given GDT entry.
175 *
176 * This is called before any changes performed and shadowed, so it's possible to
177 * look in both the shadow and guest descriptor table entries for hidden
178 * register content.
179 *
180 * @param pVM Pointer to the VM.
181 * @param pVCpu The current virtual CPU.
182 * @param pRegFrame Trap register frame.
183 * @param iGDTEntry The GDT entry to sync.
184 */
185static void selmRCSyncGDTSegRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, unsigned iGDTEntry)
186{
187 /*
188 * Validate the offset.
189 */
190 VBOXGDTR GdtrGuest;
191 CPUMGetGuestGDTR(pVCpu, &GdtrGuest);
192 unsigned offEntry = iGDTEntry * sizeof(X86DESC);
193 if ( iGDTEntry >= SELM_GDT_ELEMENTS
194 || offEntry > GdtrGuest.cbGdt)
195 return;
196
197 /*
198 * Sync outdated segment registers using this entry.
199 */
200 PCX86DESC pDesc = &pVM->selm.s.CTX_SUFF(paGdt)[iGDTEntry];
201 uint32_t uCpl = CPUMGetGuestCPL(pVCpu);
202 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu); Assert(CPUMCTX2CORE(pCtx) == pRegFrame);
203 PCPUMSELREG paSReg = CPUMCTX_FIRST_SREG(pCtx);
204 for (unsigned iSReg = 0; iSReg <= X86_SREG_COUNT; iSReg++)
205 {
206 if (iGDTEntry == (paSReg[iSReg].Sel & X86_SEL_MASK_OFF_RPL))
207 {
208 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &paSReg[iSReg]))
209 {
210 if (selmIsShwDescGoodForSReg(&paSReg[iSReg], pDesc, iSReg, uCpl))
211 {
212 selmLoadHiddenSRegFromShadowDesc(&paSReg[iSReg], pDesc);
213 Log(("selmRCSyncGDTSegRegs: Updated %s\n", g_aszSRegNms[iSReg]));
214 }
215 else
216 Log(("selmRCSyncGDTSegRegs: Bad shadow descriptor %#x (for %s): %.8Rhxs \n",
217 iGDTEntry, g_aszSRegNms[iSReg], pDesc));
218 }
219 }
220 }
221
222}
223
224
225/**
226 * \#PF Virtual Handler callback for Guest write access to the Guest's own GDT.
227 *
228 * @returns VBox status code (appropriate for trap handling and GC return).
229 * @param pVM Pointer to the VM.
230 * @param uErrorCode CPU Error code.
231 * @param pRegFrame Trap register frame.
232 * @param pvFault The fault address (cr2).
233 * @param pvRange The base address of the handled virtual range.
234 * @param offRange The offset of the access into this range.
235 * (If it's a EIP range this is the EIP, if not it's pvFault.)
236 */
237DECLEXPORT(int) selmRCGuestGDTWritePfHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
238 RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
239{
240 PVMCPU pVCpu = VMMGetCpu0(pVM);
241 LogFlow(("selmRCGuestGDTWritePfHandler errcode=%x fault=%RGv offRange=%08x\n", (uint32_t)uErrorCode, pvFault, offRange));
242 NOREF(pvRange); NOREF(pvUser);
243
244 /*
245 * Check if any selectors might be affected.
246 */
247 unsigned const iGDTE1 = offRange >> X86_SEL_SHIFT;
248 selmRCSyncGDTSegRegs(pVM, pVCpu, pRegFrame, iGDTE1);
249 if (((offRange + 8) >> X86_SEL_SHIFT) != iGDTE1)
250 selmRCSyncGDTSegRegs(pVM, pVCpu, pRegFrame, iGDTE1 + 1);
251
252 /*
253 * Attempt to emulate the instruction and sync the affected entries.
254 */
255 uint32_t cb;
256 int rc = EMInterpretInstructionEx(pVCpu, pRegFrame, (RTGCPTR)(RTRCUINTPTR)pvFault, &cb);
257 if (RT_SUCCESS(rc) && cb)
258 {
259 /* Check if the LDT was in any way affected. Do not sync the
260 shadow GDT if that's the case or we might have trouble in
261 the world switcher (or so they say). */
262 unsigned const iLdt = CPUMGetGuestLDTR(pVCpu) >> X86_SEL_SHIFT;
263 unsigned const iGDTE2 = (offRange + cb - 1) >> X86_SEL_SHIFT;
264 if ( iGDTE1 == iLdt
265 || iGDTE2 == iLdt)
266 {
267 Log(("LDTR selector change -> fall back to HC!!\n"));
268 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
269 rc = VINF_SELM_SYNC_GDT;
270 /** @todo Implement correct stale LDT handling. */
271 }
272 else
273 {
274 /* Sync the shadow GDT and continue provided the update didn't
275 cause any segment registers to go stale in any way. */
276 int rc2 = selmRCSyncGDTEntry(pVM, pVCpu, pRegFrame, iGDTE1);
277 if (rc2 == VINF_SUCCESS || rc2 == VINF_EM_RESCHEDULE_REM)
278 {
279 if (rc == VINF_SUCCESS)
280 rc = rc2;
281
282 if (iGDTE1 != iGDTE2)
283 {
284 rc2 = selmRCSyncGDTEntry(pVM, pVCpu, pRegFrame, iGDTE2);
285 if (rc == VINF_SUCCESS)
286 rc = rc2;
287 }
288
289 if (rc2 == VINF_SUCCESS || rc2 == VINF_EM_RESCHEDULE_REM)
290 {
291 /* VINF_EM_RESCHEDULE_REM - bad idea if we're in a patch. */
292 if (rc2 == VINF_EM_RESCHEDULE_REM)
293 rc = VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT;
294 STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTHandled);
295 return rc;
296 }
297 }
298
299 /* sync failed, return to ring-3 and resync the GDT. */
300 if (rc == VINF_SUCCESS || RT_FAILURE(rc2))
301 rc = rc2;
302 }
303 }
304 else
305 {
306 Assert(RT_FAILURE(rc));
307 if (rc == VERR_EM_INTERPRETER)
308 rc = VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT;
309 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
310 }
311
312 STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTUnhandled);
313 return rc;
314}
315#endif /* SELM_TRACK_GUEST_GDT_CHANGES */
316
317
318#ifdef SELM_TRACK_GUEST_LDT_CHANGES
319/**
320 * \#PF Virtual Handler callback for Guest write access to the Guest's own LDT.
321 *
322 * @returns VBox status code (appropriate for trap handling and GC return).
323 * @param pVM Pointer to the VM.
324 * @param uErrorCode CPU Error code.
325 * @param pRegFrame Trap register frame.
326 * @param pvFault The fault address (cr2).
327 * @param pvRange The base address of the handled virtual range.
328 * @param offRange The offset of the access into this range.
329 * (If it's a EIP range this is the EIP, if not it's pvFault.)
330 * @param pvUser Unused.
331 */
332DECLEXPORT(int) selmRCGuestLDTWritePfHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
333 RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
334{
335 /** @todo To be implemented. */
336 ////LogCom(("selmRCGuestLDTWriteHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));
337 NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange); NOREF(pvUser);
338
339 VMCPU_FF_SET(VMMGetCpu0(pVM), VMCPU_FF_SELM_SYNC_LDT);
340 STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestLDT);
341 return VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT;
342}
343#endif
344
345
346#ifdef SELM_TRACK_GUEST_TSS_CHANGES
347/**
348 * Read wrapper used by selmRCGuestTSSWriteHandler.
349 * @returns VBox status code (appropriate for trap handling and GC return).
350 * @param pVM Pointer to the VM.
351 * @param pvDst Where to put the bits we read.
352 * @param pvSrc Guest address to read from.
353 * @param cb The number of bytes to read.
354 */
355DECLINLINE(int) selmRCReadTssBits(PVM pVM, void *pvDst, void const *pvSrc, size_t cb)
356{
357 PVMCPU pVCpu = VMMGetCpu0(pVM);
358
359 int rc = MMGCRamRead(pVM, pvDst, (void *)pvSrc, cb);
360 if (RT_SUCCESS(rc))
361 return VINF_SUCCESS;
362
363 /** @todo use different fallback? */
364 rc = PGMPrefetchPage(pVCpu, (uintptr_t)pvSrc);
365 AssertMsg(rc == VINF_SUCCESS, ("PGMPrefetchPage %p failed with %Rrc\n", &pvSrc, rc));
366 if (rc == VINF_SUCCESS)
367 {
368 rc = MMGCRamRead(pVM, pvDst, (void *)pvSrc, cb);
369 AssertMsg(rc == VINF_SUCCESS, ("MMGCRamRead %p failed with %Rrc\n", &pvSrc, rc));
370 }
371 return rc;
372}
373
374/**
375 * \#PF Virtual Handler callback for Guest write access to the Guest's own current TSS.
376 *
377 * @returns VBox status code (appropriate for trap handling and GC return).
378 * @param pVM Pointer to the VM.
379 * @param uErrorCode CPU Error code.
380 * @param pRegFrame Trap register frame.
381 * @param pvFault The fault address (cr2).
382 * @param pvRange The base address of the handled virtual range.
383 * @param offRange The offset of the access into this range.
384 * (If it's a EIP range this is the EIP, if not it's pvFault.)
385 * @param pvUser Unused.
386 */
387DECLEXPORT(int) selmRCGuestTSSWritePfHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
388 RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
389{
390 PVMCPU pVCpu = VMMGetCpu0(pVM);
391 LogFlow(("selmRCGuestTSSWritePfHandler errcode=%x fault=%RGv offRange=%08x\n", (uint32_t)uErrorCode, pvFault, offRange));
392 NOREF(pvRange); NOREF(pvUser);
393
394 /*
395 * Try emulate the access.
396 */
397 uint32_t cb;
398 int rc = EMInterpretInstructionEx(pVCpu, pRegFrame, (RTGCPTR)(RTRCUINTPTR)pvFault, &cb);
399 if ( RT_SUCCESS(rc)
400 && cb)
401 {
402 rc = VINF_SUCCESS;
403
404 /*
405 * If it's on the same page as the esp0 and ss0 fields or actually one of them,
406 * then check if any of these has changed.
407 */
408 PCVBOXTSS pGuestTss = (PVBOXTSS)(uintptr_t)pVM->selm.s.GCPtrGuestTss;
409 if ( PAGE_ADDRESS(&pGuestTss->esp0) == PAGE_ADDRESS(&pGuestTss->padding_ss0)
410 && PAGE_ADDRESS(&pGuestTss->esp0) == PAGE_ADDRESS((uint8_t *)pGuestTss + offRange)
411 && ( pGuestTss->esp0 != pVM->selm.s.Tss.esp1
412 || pGuestTss->ss0 != (pVM->selm.s.Tss.ss1 & ~1)) /* undo raw-r0 */
413 )
414 {
415 Log(("selmRCGuestTSSWritePfHandler: R0 stack: %RTsel:%RGv -> %RTsel:%RGv\n",
416 (RTSEL)(pVM->selm.s.Tss.ss1 & ~1), (RTGCPTR)pVM->selm.s.Tss.esp1, (RTSEL)pGuestTss->ss0, (RTGCPTR)pGuestTss->esp0));
417 pVM->selm.s.Tss.esp1 = pGuestTss->esp0;
418 pVM->selm.s.Tss.ss1 = pGuestTss->ss0 | 1;
419 STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
420 }
421#ifdef VBOX_WITH_RAW_RING1
422 else if ( EMIsRawRing1Enabled(pVM)
423 && PAGE_ADDRESS(&pGuestTss->esp1) == PAGE_ADDRESS(&pGuestTss->padding_ss1)
424 && PAGE_ADDRESS(&pGuestTss->esp1) == PAGE_ADDRESS((uint8_t *)pGuestTss + offRange)
425 && ( pGuestTss->esp1 != pVM->selm.s.Tss.esp2
426 || pGuestTss->ss1 != ((pVM->selm.s.Tss.ss2 & ~2) | 1)) /* undo raw-r1 */
427 )
428 {
429 Log(("selmRCGuestTSSWritePfHandler: R1 stack: %RTsel:%RGv -> %RTsel:%RGv\n",
430 (RTSEL)((pVM->selm.s.Tss.ss2 & ~2) | 1), (RTGCPTR)pVM->selm.s.Tss.esp2, (RTSEL)pGuestTss->ss1, (RTGCPTR)pGuestTss->esp1));
431 pVM->selm.s.Tss.esp2 = pGuestTss->esp1;
432 pVM->selm.s.Tss.ss2 = (pGuestTss->ss1 & ~1) | 2;
433 STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
434 }
435#endif
436 /* Handle misaligned TSS in a safe manner (just in case). */
437 else if ( offRange >= RT_UOFFSETOF(VBOXTSS, esp0)
438 && offRange < RT_UOFFSETOF(VBOXTSS, padding_ss0))
439 {
440 struct
441 {
442 uint32_t esp0;
443 uint16_t ss0;
444 uint16_t padding_ss0;
445 } s;
446 AssertCompileSize(s, 8);
447 rc = selmRCReadTssBits(pVM, &s, &pGuestTss->esp0, sizeof(s));
448 if ( rc == VINF_SUCCESS
449 && ( s.esp0 != pVM->selm.s.Tss.esp1
450 || s.ss0 != (pVM->selm.s.Tss.ss1 & ~1)) /* undo raw-r0 */
451 )
452 {
453 Log(("selmRCGuestTSSWritePfHandler: R0 stack: %RTsel:%RGv -> %RTsel:%RGv [x-page]\n",
454 (RTSEL)(pVM->selm.s.Tss.ss1 & ~1), (RTGCPTR)pVM->selm.s.Tss.esp1, (RTSEL)s.ss0, (RTGCPTR)s.esp0));
455 pVM->selm.s.Tss.esp1 = s.esp0;
456 pVM->selm.s.Tss.ss1 = s.ss0 | 1;
457 STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
458 }
459 }
460
461 /*
462 * If VME is enabled we need to check if the interrupt redirection bitmap
463 * needs updating.
464 */
465 if ( offRange >= RT_UOFFSETOF(VBOXTSS, offIoBitmap)
466 && (CPUMGetGuestCR4(pVCpu) & X86_CR4_VME))
467 {
468 if (offRange - RT_UOFFSETOF(VBOXTSS, offIoBitmap) < sizeof(pGuestTss->offIoBitmap))
469 {
470 uint16_t offIoBitmap = pGuestTss->offIoBitmap;
471 if (offIoBitmap != pVM->selm.s.offGuestIoBitmap)
472 {
473 Log(("TSS offIoBitmap changed: old=%#x new=%#x -> resync in ring-3\n", pVM->selm.s.offGuestIoBitmap, offIoBitmap));
474 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
475 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
476 }
477 else
478 Log(("TSS offIoBitmap: old=%#x new=%#x [unchanged]\n", pVM->selm.s.offGuestIoBitmap, offIoBitmap));
479 }
480 else
481 {
482 /** @todo not sure how the partial case is handled; probably not allowed */
483 uint32_t offIntRedirBitmap = pVM->selm.s.offGuestIoBitmap - sizeof(pVM->selm.s.Tss.IntRedirBitmap);
484 if ( offIntRedirBitmap <= offRange
485 && offIntRedirBitmap + sizeof(pVM->selm.s.Tss.IntRedirBitmap) >= offRange + cb
486 && offIntRedirBitmap + sizeof(pVM->selm.s.Tss.IntRedirBitmap) <= pVM->selm.s.cbGuestTss)
487 {
488 Log(("TSS IntRedirBitmap Changed: offIoBitmap=%x offIntRedirBitmap=%x cbTSS=%x offRange=%x cb=%x\n",
489 pVM->selm.s.offGuestIoBitmap, offIntRedirBitmap, pVM->selm.s.cbGuestTss, offRange, cb));
490
491 /** @todo only update the changed part. */
492 for (uint32_t i = 0; i < sizeof(pVM->selm.s.Tss.IntRedirBitmap) / 8; i++)
493 {
494 rc = selmRCReadTssBits(pVM, &pVM->selm.s.Tss.IntRedirBitmap[i * 8],
495 (uint8_t *)pGuestTss + offIntRedirBitmap + i * 8, 8);
496 if (rc != VINF_SUCCESS)
497 break;
498 }
499 STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSRedir);
500 }
501 }
502 }
503
504 /* Return to ring-3 for a full resync if any of the above fails... (?) */
505 if (rc != VINF_SUCCESS)
506 {
507 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
508 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
509 if (RT_SUCCESS(rc))
510 rc = VINF_SUCCESS;
511 }
512
513 STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandled);
514 }
515 else
516 {
517 AssertMsg(RT_FAILURE(rc), ("cb=%u rc=%#x\n", cb, rc));
518 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
519 STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSUnhandled);
520 if (rc == VERR_EM_INTERPRETER)
521 rc = VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT;
522 }
523 return rc;
524}
525#endif /* SELM_TRACK_GUEST_TSS_CHANGES */
526
527
528#ifdef SELM_TRACK_SHADOW_GDT_CHANGES
529/**
530 * \#PF Virtual Handler callback for Guest write access to the VBox shadow GDT.
531 *
532 * @returns VBox status code (appropriate for trap handling and GC return).
533 * @param pVM Pointer to the VM.
534 * @param uErrorCode CPU Error code.
535 * @param pRegFrame Trap register frame.
536 * @param pvFault The fault address (cr2).
537 * @param pvRange The base address of the handled virtual range.
538 * @param offRange The offset of the access into this range.
539 * (If it's a EIP range this is the EIP, if not it's pvFault.)
540 * @param pvUser Unused.
541 */
542DECLEXPORT(int) selmRCShadowGDTWritePfHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
543 RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
544{
545 LogRel(("FATAL ERROR: selmRCShadowGDTWritePfHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));
546 NOREF(pVM); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange); NOREF(pvUser);
547 return VERR_SELM_SHADOW_GDT_WRITE;
548}
549#endif
550
551
552#ifdef SELM_TRACK_SHADOW_LDT_CHANGES
553/**
554 * \#PF Virtual Handler callback for Guest write access to the VBox shadow LDT.
555 *
556 * @returns VBox status code (appropriate for trap handling and GC return).
557 * @param pVM Pointer to the VM.
558 * @param uErrorCode CPU Error code.
559 * @param pRegFrame Trap register frame.
560 * @param pvFault The fault address (cr2).
561 * @param pvRange The base address of the handled virtual range.
562 * @param offRange The offset of the access into this range.
563 * (If it's a EIP range this is the EIP, if not it's pvFault.)
564 * @param pvUser Unused.
565 */
566DECLEXPORT(int) selmRCShadowLDTWritePfHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
567 RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
568{
569 LogRel(("FATAL ERROR: selmRCShadowLDTWritePfHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));
570 Assert(pvFault - (uintptr_t)pVM->selm.s.pvLdtRC < (unsigned)(65536U + PAGE_SIZE));
571 NOREF(pVM); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange); NOREF(pvUser);
572 return VERR_SELM_SHADOW_LDT_WRITE;
573}
574#endif
575
576
577#ifdef SELM_TRACK_SHADOW_TSS_CHANGES
578/**
579 * \#PF Virtual Handler callback for Guest write access to the VBox shadow TSS.
580 *
581 * @returns VBox status code (appropriate for trap handling and GC return).
582 * @param pVM Pointer to the VM.
583 * @param uErrorCode CPU Error code.
584 * @param pRegFrame Trap register frame.
585 * @param pvFault The fault address (cr2).
586 * @param pvRange The base address of the handled virtual range.
587 * @param offRange The offset of the access into this range.
588 * (If it's a EIP range this is the EIP, if not it's pvFault.)
589 * @param pvUser Unused.
590 */
591DECLEXPORT(int) selmRCShadowTSSWritePfHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
592 RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
593{
594 LogRel(("FATAL ERROR: selmRCShadowTSSWritePfHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));
595 NOREF(pVM); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange); NOREF(pvUser);
596 return VERR_SELM_SHADOW_TSS_WRITE;
597}
598#endif
599
注意: 瀏覽 TracBrowser 來幫助您使用儲存庫瀏覽器

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette