VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllGst.h@93554

Last change on this file since 93554 was 93554, checked in by vboxsync, 3 years ago

VMM: Changed PAGE_SIZE -> GUEST_PAGE_SIZE / HOST_PAGE_SIZE, PAGE_SHIFT -> GUEST_PAGE_SHIFT / HOST_PAGE_SHIFT, and PAGE_OFFSET_MASK -> GUEST_PAGE_OFFSET_MASK / HOST_PAGE_OFFSET_MASK. Also removed most usage of ASMMemIsZeroPage and ASMMemZeroPage since the host and guest page size doesn't need to be the same any more. Some work left to do in the page pool code. bugref:9898
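
To make the renaming concrete, a minimal before/after sketch (the GCPhys/GCPhysPage/cbHostPage names are made up for illustration; only the PAGE_*/GUEST_*/HOST_* macros come from the change itself):

    /* Before: one PAGE_SIZE / PAGE_OFFSET_MASK served both guest and host. */
    RTGCPHYS GCPhysPage = GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;

    /* After: guest and host page geometry are kept separate. */
    RTGCPHYS GCPhysPage = GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;  /* guest-visible page */
    size_t   cbHostPage = HOST_PAGE_SIZE;                              /* host MMU page */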

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 18.5 KB
 
/* $Id: PGMAllGst.h 93554 2022-02-02 22:57:02Z vboxsync $ */
/** @file
 * VBox - Page Manager, Guest Paging Template - All context code.
 */

/*
 * Copyright (C) 2006-2022 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.alldomusa.eu.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
RT_C_DECLS_BEGIN
#if PGM_GST_TYPE == PGM_TYPE_32BIT \
    || PGM_GST_TYPE == PGM_TYPE_PAE \
    || PGM_GST_TYPE == PGM_TYPE_AMD64
DECLINLINE(int) PGM_GST_NAME(Walk)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PGSTPTWALK pGstWalk);
#endif
PGM_GST_DECL(int, GetPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk);
PGM_GST_DECL(int, ModifyPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask);

#ifdef IN_RING3 /* r3 only for now. */
PGM_GST_DECL(int, Enter)(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3);
PGM_GST_DECL(int, Relocate)(PVMCPUCC pVCpu, RTGCPTR offDelta);
PGM_GST_DECL(int, Exit)(PVMCPUCC pVCpu);
#endif
RT_C_DECLS_END


/**
 * Enters the guest mode.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPhysCR3   The physical address from the CR3 register.
 */
PGM_GST_DECL(int, Enter)(PVMCPUCC pVCpu, RTGCPHYS GCPhysCR3)
{
    /*
     * Map and monitor CR3
     */
    uintptr_t idxBth = pVCpu->pgm.s.idxBothModeData;
    AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
    AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
    return g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
}


/**
 * Exits the guest mode.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 */
PGM_GST_DECL(int, Exit)(PVMCPUCC pVCpu)
{
    uintptr_t idxBth = pVCpu->pgm.s.idxBothModeData;
    AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
    AssertReturn(g_aPgmBothModeData[idxBth].pfnUnmapCR3, VERR_PGM_MODE_IPE);
    return g_aPgmBothModeData[idxBth].pfnUnmapCR3(pVCpu);
}


#if PGM_GST_TYPE == PGM_TYPE_32BIT \
    || PGM_GST_TYPE == PGM_TYPE_PAE \
    || PGM_GST_TYPE == PGM_TYPE_AMD64


DECLINLINE(int) PGM_GST_NAME(WalkReturnNotPresent)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, int iLevel)
{
    NOREF(iLevel); NOREF(pVCpu);
    pWalk->fNotPresent = true;
    pWalk->uLevel      = (uint8_t)iLevel;
    return VERR_PAGE_TABLE_NOT_PRESENT;
}

DECLINLINE(int) PGM_GST_NAME(WalkReturnBadPhysAddr)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, int iLevel, int rc)
{
    AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc); NOREF(pVCpu);
    pWalk->fBadPhysAddr = true;
    pWalk->uLevel       = (uint8_t)iLevel;
    return VERR_PAGE_TABLE_NOT_PRESENT;
}

DECLINLINE(int) PGM_GST_NAME(WalkReturnRsvdError)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, int iLevel)
{
    NOREF(pVCpu);
    pWalk->fRsvdError = true;
    pWalk->uLevel     = (uint8_t)iLevel;
    return VERR_PAGE_TABLE_NOT_PRESENT;
}

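/*
 * Note on the uLevel values used by the failure helpers above: 4 = PML4,
 * 3 = PDPT, 2 = PD and 1 = PT.  Level 8 flags failures outside the normal
 * numbering: a linear address above 4 GiB in 32-bit/PAE mode, or a
 * CR3-referenced PD/PDPT that could not be mapped.  During the walk the
 * effective access rights are accumulated in fEffective: the permission
 * bits (P, RW, US, A, ...) are ANDed across the levels while NX is ORed in,
 * and the leaf entry contributes D, G and PAT.
 */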

/**
 * Performs a guest page table walk.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_PAGE_TABLE_NOT_PRESENT on failure.  Check pWalk for details.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   GCPtr       The guest virtual address to walk by.
 * @param   pWalk       The page walk info.
 * @param   pGstWalk    The guest mode specific page walk info.
 */
DECLINLINE(int) PGM_GST_NAME(Walk)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PGSTPTWALK pGstWalk)
{
    int rc;

#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
/** @def PGM_GST_SLAT_WALK
 * Macro to perform guest second-level address translation (EPT or Nested).
 *
 * @param   a_pVCpu         The cross context virtual CPU structure of the calling
 *                          EMT.
 * @param   a_GCPtrNested   The nested-guest linear address that caused the
 *                          second-level translation.
 * @param   a_GCPhysNested  The nested-guest physical address to translate.
 * @param   a_GCPhysOut     Where to store the guest-physical address (result).
 * @param   a_pWalk         Where to store the page walk information when the
 *                          second-level translation fails.
 */
# define PGM_GST_SLAT_WALK(a_pVCpu, a_GCPtrNested, a_GCPhysNested, a_GCPhysOut, a_pWalk) \
    do { \
        if ((a_pVCpu)->pgm.s.enmGuestSlatMode != PGMSLAT_DIRECT) \
        { \
            PGMPTWALK    SlatWalk; \
            PGMPTWALKGST SlatGstWalk; \
            int const rcX = pgmGstSlatWalk(a_pVCpu, a_GCPhysNested, true /* fIsLinearAddrValid */, a_GCPtrNested, &SlatWalk, \
                                           &SlatGstWalk); \
            if (RT_SUCCESS(rcX)) \
                (a_GCPhysOut) = SlatWalk.GCPhys; \
            else \
            { \
                *(a_pWalk) = SlatWalk; \
                return rcX; \
            } \
        } \
    } while (0)
#endif
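
    /* When a nested guest runs with second-level address translation enabled,
       PGM_GST_SLAT_WALK is applied to each guest-physical address the walk
       picks up from the paging entries below before that address is used. */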

    /*
     * Init the walking structures.
     */
    RT_ZERO(*pWalk);
    RT_ZERO(*pGstWalk);
    pWalk->GCPtr = GCPtr;

# if PGM_GST_TYPE == PGM_TYPE_32BIT \
    || PGM_GST_TYPE == PGM_TYPE_PAE
    /*
     * Boundary check for PAE and 32-bit (prevents trouble further down).
     */
    if (RT_UNLIKELY(GCPtr >= _4G))
        return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 8);
# endif

    uint64_t fEffective;
    {
# if PGM_GST_TYPE == PGM_TYPE_AMD64
        /*
         * The PML4 table.
         */
        rc = pgmGstGetLongModePML4PtrEx(pVCpu, &pGstWalk->pPml4);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 4, rc);

        PX86PML4E pPml4e;
        pGstWalk->pPml4e = pPml4e = &pGstWalk->pPml4->a[(GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK];
        X86PML4E  Pml4e;
        pGstWalk->Pml4e.u = Pml4e.u = pPml4e->u;

        if (GST_IS_PGENTRY_PRESENT(pVCpu, Pml4e)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 4);

        if (RT_LIKELY(GST_IS_PML4E_VALID(pVCpu, Pml4e))) { /* likely */ }
        else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 4);

        fEffective = Pml4e.u & (  X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_PWT | X86_PML4E_PCD | X86_PML4E_A
                                | X86_PML4E_NX);
        pWalk->fEffective = fEffective;

        /*
         * The PDPT.
         */
        RTGCPHYS GCPhysPdpt = Pml4e.u & X86_PML4E_PG_MASK;
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
        PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPdpt, GCPhysPdpt, pWalk);
#endif
        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPdpt, &pGstWalk->pPdpt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 3, rc);

# elif PGM_GST_TYPE == PGM_TYPE_PAE
        rc = pgmGstGetPaePDPTPtrEx(pVCpu, &pGstWalk->pPdpt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 8, rc);
#endif
    }
    {
# if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
        PX86PDPE pPdpe;
        pGstWalk->pPdpe = pPdpe = &pGstWalk->pPdpt->a[(GCPtr >> GST_PDPT_SHIFT) & GST_PDPT_MASK];
        X86PDPE  Pdpe;
        pGstWalk->Pdpe.u = Pdpe.u = pPdpe->u;

        if (GST_IS_PGENTRY_PRESENT(pVCpu, Pdpe)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 3);

        if (RT_LIKELY(GST_IS_PDPE_VALID(pVCpu, Pdpe))) { /* likely */ }
        else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 3);

# if PGM_GST_TYPE == PGM_TYPE_AMD64
        fEffective &= (Pdpe.u & (  X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US
                                 | X86_PDPE_PWT | X86_PDPE_PCD | X86_PDPE_A));
        fEffective |= Pdpe.u & X86_PDPE_LM_NX;
# else
        /*
         * NX in the legacy-mode PAE PDPE is reserved.  The valid check above ensures the NX bit is not set.
         * The RW, US, A bits are MBZ in PAE PDPTE entries, but must be 1 the way we compute cumulative (effective) access rights.
         */
        Assert(!(Pdpe.u & X86_PDPE_LM_NX));
        fEffective = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A
                   | (Pdpe.u & (X86_PDPE_PWT | X86_PDPE_PCD));
# endif
        pWalk->fEffective = fEffective;

        /*
         * The PD.
         */
        RTGCPHYS GCPhysPd = Pdpe.u & X86_PDPE_PG_MASK;
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
        PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPd, GCPhysPd, pWalk);
# endif
        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPd, &pGstWalk->pPd);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 2, rc);

# elif PGM_GST_TYPE == PGM_TYPE_32BIT
        rc = pgmGstGet32bitPDPtrEx(pVCpu, &pGstWalk->pPd);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 8, rc);
# endif
    }
    {
        PGSTPDE pPde;
        pGstWalk->pPde = pPde = &pGstWalk->pPd->a[(GCPtr >> GST_PD_SHIFT) & GST_PD_MASK];
        GSTPDE  Pde;
        pGstWalk->Pde.u = Pde.u = pPde->u;
        if (GST_IS_PGENTRY_PRESENT(pVCpu, Pde)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 2);
        if ((Pde.u & X86_PDE_PS) && GST_IS_PSE_ACTIVE(pVCpu))
        {
            if (RT_LIKELY(GST_IS_BIG_PDE_VALID(pVCpu, Pde))) { /* likely */ }
            else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 2);

            /*
             * We're done.
             */
# if PGM_GST_TYPE == PGM_TYPE_32BIT
            fEffective  = Pde.u & (X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PWT | X86_PDE4M_PCD | X86_PDE4M_A);
# else
            fEffective &= Pde.u & (X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PWT | X86_PDE4M_PCD | X86_PDE4M_A);
            fEffective |= Pde.u & X86_PDE2M_PAE_NX;
# endif
            fEffective |= Pde.u & (X86_PDE4M_D | X86_PDE4M_G);
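            /* For large pages the PAT bit lives at bit 12 (X86_PDE4M_PAT); shift it
               down to the regular PTE PAT bit position so fEffective has one layout. */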
            fEffective |= (Pde.u & X86_PDE4M_PAT) >> X86_PDE4M_PAT_SHIFT;
            pWalk->fEffective = fEffective;
            Assert(GST_IS_NX_ACTIVE(pVCpu) || !(fEffective & PGM_PTATTRS_NX_MASK));
            Assert(fEffective & PGM_PTATTRS_R_MASK);

            pWalk->fBigPage   = true;
            pWalk->fSucceeded = true;
            RTGCPHYS GCPhysPde = GST_GET_BIG_PDE_GCPHYS(pVCpu->CTX_SUFF(pVM), Pde)
                               | (GCPtr & GST_BIG_PAGE_OFFSET_MASK);
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
            PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPde, GCPhysPde, pWalk);
# endif
            pWalk->GCPhys = GCPhysPde;
            PGM_A20_APPLY_TO_VAR(pVCpu, pWalk->GCPhys);
            return VINF_SUCCESS;
        }

        if (RT_UNLIKELY(!GST_IS_PDE_VALID(pVCpu, Pde)))
            return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 2);
# if PGM_GST_TYPE == PGM_TYPE_32BIT
        fEffective  = Pde.u & (X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT | X86_PDE_PCD | X86_PDE_A);
# else
        fEffective &= Pde.u & (X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT | X86_PDE_PCD | X86_PDE_A);
        fEffective |= Pde.u & X86_PDE_PAE_NX;
# endif
        pWalk->fEffective = fEffective;

        /*
         * The PT.
         */
        RTGCPHYS GCPhysPt = GST_GET_PDE_GCPHYS(Pde);
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
        PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPt, GCPhysPt, pWalk);
# endif
        rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhysPt, &pGstWalk->pPt);
        if (RT_SUCCESS(rc)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnBadPhysAddr)(pVCpu, pWalk, 1, rc);
    }
    {
        PGSTPTE pPte;
        pGstWalk->pPte = pPte = &pGstWalk->pPt->a[(GCPtr >> GST_PT_SHIFT) & GST_PT_MASK];
        GSTPTE  Pte;
        pGstWalk->Pte.u = Pte.u = pPte->u;

        if (GST_IS_PGENTRY_PRESENT(pVCpu, Pte)) { /* probable */ }
        else return PGM_GST_NAME(WalkReturnNotPresent)(pVCpu, pWalk, 1);

        if (RT_LIKELY(GST_IS_PTE_VALID(pVCpu, Pte))) { /* likely */ }
        else return PGM_GST_NAME(WalkReturnRsvdError)(pVCpu, pWalk, 1);

        /*
         * We're done.
         */
        fEffective &= Pte.u & (X86_PTE_P | X86_PTE_RW | X86_PTE_US | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_A);
        fEffective |= Pte.u & (X86_PTE_D | X86_PTE_PAT | X86_PTE_G);
# if PGM_GST_TYPE != PGM_TYPE_32BIT
        fEffective |= Pte.u & X86_PTE_PAE_NX;
# endif
        pWalk->fEffective = fEffective;
        Assert(GST_IS_NX_ACTIVE(pVCpu) || !(fEffective & PGM_PTATTRS_NX_MASK));
        Assert(fEffective & PGM_PTATTRS_R_MASK);

        pWalk->fSucceeded = true;
        RTGCPHYS GCPhysPte = GST_GET_PTE_GCPHYS(Pte)
                           | (GCPtr & GUEST_PAGE_OFFSET_MASK);
# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
        PGM_GST_SLAT_WALK(pVCpu, GCPtr, GCPhysPte, GCPhysPte, pWalk);
# endif
        pWalk->GCPhys = GCPhysPte;
        return VINF_SUCCESS;
    }
}

#endif /* 32BIT, PAE, AMD64 */

/**
 * Gets effective Guest OS page information.
 *
 * When GCPtr is in a big page, the function will return as if it was a normal
 * 4KB page.  If the need for distinguishing between big and normal pages becomes
 * necessary at a later point, a PGMGstGetPageEx() will be created for that
 * purpose.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Guest Context virtual address of the page.
 * @param   pWalk       Where to store the page walk info.
 */
PGM_GST_DECL(int, GetPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk)
{
#if PGM_GST_TYPE == PGM_TYPE_REAL \
    || PGM_GST_TYPE == PGM_TYPE_PROT
    /*
     * Fake it.
     */
    RT_ZERO(*pWalk);
    pWalk->fSucceeded = true;
    pWalk->GCPtr      = GCPtr;
    pWalk->GCPhys     = GCPtr & PAGE_BASE_GC_MASK;
    pWalk->fEffective = X86_PTE_P | X86_PTE_RW | X86_PTE_US;
    NOREF(pVCpu);
    return VINF_SUCCESS;

#elif PGM_GST_TYPE == PGM_TYPE_32BIT \
    || PGM_GST_TYPE == PGM_TYPE_PAE \
    || PGM_GST_TYPE == PGM_TYPE_AMD64

    GSTPTWALK GstWalk;
    int rc = PGM_GST_NAME(Walk)(pVCpu, GCPtr, pWalk, &GstWalk);
    if (RT_FAILURE(rc))
        return rc;

    Assert(pWalk->fSucceeded);
    Assert(pWalk->GCPtr == GCPtr);

    PGMPTATTRS fFlags;
    if (!pWalk->fBigPage)
        fFlags = (GstWalk.Pte.u & ~(GST_PTE_PG_MASK | X86_PTE_RW | X86_PTE_US))                      /* NX not needed */
               | (pWalk->fEffective & (PGM_PTATTRS_W_MASK | PGM_PTATTRS_US_MASK))
# if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
               | (pWalk->fEffective & PGM_PTATTRS_NX_MASK)
# endif
               ;
    else
    {
        fFlags = (GstWalk.Pde.u & ~(GST_PTE_PG_MASK | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PS))   /* NX not needed */
               | (pWalk->fEffective & (PGM_PTATTRS_W_MASK | PGM_PTATTRS_US_MASK | PGM_PTATTRS_PAT_MASK))
# if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
               | (pWalk->fEffective & PGM_PTATTRS_NX_MASK)
# endif
               ;
    }

    pWalk->GCPhys    &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
    pWalk->fEffective = fFlags;
    return VINF_SUCCESS;

#else
# error "shouldn't be here!"
    /* something else... */
    return VERR_NOT_SUPPORTED;
#endif
}
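
/*
 * Usage sketch (illustrative, not part of this file): callers normally go
 * through the mode dispatcher rather than invoking the template directly.
 * Assuming a wrapper with the signature PGMGstGetPage(pVCpu, GCPtr, &Walk),
 * translating a guest linear address looks roughly like this:
 *
 *     PGMPTWALK Walk;
 *     int rc = PGMGstGetPage(pVCpu, GCPtr, &Walk);
 *     if (RT_SUCCESS(rc))
 *     {
 *         RTGCPHYS GCPhysPage = Walk.GCPhys;   // page-aligned guest-physical address
 *         bool     fWritable  = RT_BOOL(Walk.fEffective & PGM_PTATTRS_W_MASK);
 *     }
 */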


/**
 * Modify page flags for a range of pages in the guest's tables
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPtr       Virtual address of the first page in the range.  Page aligned!
 * @param   cb          Size (in bytes) of the page range to apply the modification to.  Page aligned!
 * @param   fFlags      The OR  mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask       The AND mask - page flags X86_PTE_*.
 */
PGM_GST_DECL(int, ModifyPage)(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
    Assert((cb & GUEST_PAGE_OFFSET_MASK) == 0); RT_NOREF_PV(cb);

#if PGM_GST_TYPE == PGM_TYPE_32BIT \
    || PGM_GST_TYPE == PGM_TYPE_PAE \
    || PGM_GST_TYPE == PGM_TYPE_AMD64
    for (;;)
    {
        PGMPTWALK Walk;
        GSTPTWALK GstWalk;
        int rc = PGM_GST_NAME(Walk)(pVCpu, GCPtr, &Walk, &GstWalk);
        if (RT_FAILURE(rc))
            return rc;

        if (!Walk.fBigPage)
        {
            /*
             * 4KB Page table, process
             *
             * Walk pages till we're done.
             */
            unsigned iPTE = (GCPtr >> GST_PT_SHIFT) & GST_PT_MASK;
            while (iPTE < RT_ELEMENTS(GstWalk.pPt->a))
            {
                GSTPTE Pte = GstWalk.pPt->a[iPTE];
                Pte.u = (Pte.u & (fMask | X86_PTE_PAE_PG_MASK))
                      | (fFlags & ~GST_PTE_PG_MASK);
                GstWalk.pPt->a[iPTE] = Pte;

                /* next page */
                cb -= GUEST_PAGE_SIZE;
                if (!cb)
                    return VINF_SUCCESS;
                GCPtr += GUEST_PAGE_SIZE;
                iPTE++;
            }
        }
        else
        {
            /*
             * 2/4MB Page table
             */
            GSTPDE PdeNew;
# if PGM_GST_TYPE == PGM_TYPE_32BIT
            PdeNew.u = (GstWalk.Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PG_HIGH_MASK | X86_PDE4M_PS))
# else
            PdeNew.u = (GstWalk.Pde.u & (fMask | ((fMask & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT) | GST_PDE_BIG_PG_MASK | X86_PDE4M_PS))
# endif
                     | (fFlags & ~GST_PTE_PG_MASK)
                     | ((fFlags & X86_PTE_PAT) << X86_PDE4M_PAT_SHIFT);
            *GstWalk.pPde = PdeNew;

            /* advance */
            const unsigned cbDone = GST_BIG_PAGE_SIZE - (GCPtr & GST_BIG_PAGE_OFFSET_MASK);
            if (cbDone >= cb)
                return VINF_SUCCESS;
            cb    -= cbDone;
            GCPtr += cbDone;
        }
    }

#else
    /* real / protected mode: ignore. */
    NOREF(pVCpu); NOREF(GCPtr); NOREF(fFlags); NOREF(fMask);
    return VINF_SUCCESS;
#endif
}
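
/*
 * Usage sketch (illustrative, not part of this file): to write-protect a
 * page-aligned range through the mode dispatcher (assumed here to be
 * PGMGstModifyPage() with the same parameter list as ModifyPage above),
 * pass fFlags = 0 and clear X86_PTE_RW in fMask:
 *
 *     int rc = PGMGstModifyPage(pVCpu, GCPtr, cbRange, 0, ~(uint64_t)X86_PTE_RW);
 *
 * Each PTE (or big-page PDE) in the range then has RW cleared while all other
 * flags and the page address are left untouched.
 */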


#ifdef IN_RING3
/**
 * Relocate any GC pointers related to guest mode paging.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   offDelta    The relocation offset.
 */
PGM_GST_DECL(int, Relocate)(PVMCPUCC pVCpu, RTGCPTR offDelta)
{
    RT_NOREF(pVCpu, offDelta);
    return VINF_SUCCESS;
}
#endif