VirtualBox

source: vbox/trunk/src/VBox/VMM/include/PGMInline.h@ 38955

Last change on this file since 38955 was 38955, checked in by vboxsync, 13 years ago

pgmR3PhysChunkMap: Make sure we don't unmap the chunk we just added.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 49.8 KB
 
1/* $Id: PGMInline.h 38955 2011-10-06 12:23:39Z vboxsync $ */
2/** @file
3 * PGM - Inlined functions.
4 */
5
6/*
7 * Copyright (C) 2006-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18#ifndef ___PGMInline_h
19#define ___PGMInline_h
20
21#include <VBox/cdefs.h>
22#include <VBox/types.h>
23#include <VBox/err.h>
24#include <VBox/vmm/stam.h>
25#include <VBox/param.h>
26#include <VBox/vmm/vmm.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/pdmcritsect.h>
29#include <VBox/vmm/pdmapi.h>
30#include <VBox/dis.h>
31#include <VBox/vmm/dbgf.h>
32#include <VBox/log.h>
33#include <VBox/vmm/gmm.h>
34#include <VBox/vmm/hwaccm.h>
35#include <iprt/asm.h>
36#include <iprt/assert.h>
37#include <iprt/avl.h>
38#include <iprt/critsect.h>
39#include <iprt/sha.h>
40
41
42
43/** @addtogroup grp_pgm_int Internals
44 * @internal
45 * @{
46 */
47
48/**
49 * Gets the PGMRAMRANGE structure for a guest page.
50 *
51 * @returns Pointer to the RAM range on success.
52 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
53 *
54 * @param pVM The VM handle.
55 * @param GCPhys The GC physical address.
56 */
57DECLINLINE(PPGMRAMRANGE) pgmPhysGetRange(PVM pVM, RTGCPHYS GCPhys)
58{
59 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
60 if (!pRam || GCPhys - pRam->GCPhys >= pRam->cb)
61 pRam = pgmPhysGetRangeSlow(pVM, GCPhys);
62 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits));
63 return pRam;
64}
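The lookup helpers in this file share one pattern: hash the physical address into a small direct-mapped TLB, validate the cached range with a single unsigned subtraction (which also catches addresses below the range base), and fall back to a slow walk on a miss. Below is a minimal standalone sketch of that pattern; the names and types (Range, tlbIndex, lookupSlow) are simplified illustrations, not the real PGM structures, and the hash is an assumption rather than the real PGM_RAMRANGE_TLB_IDX.

#include <stdint.h>
#include <stddef.h>

/* Hypothetical simplified stand-ins for PGMRAMRANGE and the per-VM range TLB. */
typedef struct Range { uint64_t base, size; } Range;

#define TLB_ENTRIES 8
static Range  g_aRanges[] = { { 0x00000000, 0x000A0000 }, { 0x00100000, 0x3FF00000 } };
static Range *g_apTlb[TLB_ENTRIES];

/* Direct-mapped slot selection; the real index is derived from high address bits. */
static size_t tlbIndex(uint64_t phys) { return (size_t)((phys >> 20) % TLB_ENTRIES); }

/* Slow-path stand-in for the AVL/array walk; refreshes the TLB slot on success. */
static Range *lookupSlow(uint64_t phys)
{
    for (size_t i = 0; i < sizeof(g_aRanges) / sizeof(g_aRanges[0]); i++)
        if (phys - g_aRanges[i].base < g_aRanges[i].size)
            return g_apTlb[tlbIndex(phys)] = &g_aRanges[i];
    return NULL;
}

/* Fast path mirroring pgmPhysGetRange: try the TLB slot, validate, else go slow. */
Range *lookupRange(uint64_t phys)
{
    Range *pRange = g_apTlb[tlbIndex(phys)];
    if (!pRange || phys - pRange->base >= pRange->size)
        return lookupSlow(phys);
    return pRange;
}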
65
66
67/**
68 * Gets the PGMRAMRANGE structure for a guest page; if the page is unassigned,
69 * gets the RAM range above it.
70 *
71 * @returns Pointer to the RAM range on success.
72 * @returns NULL if the address is located after the last range.
73 *
74 * @param pVM The VM handle.
75 * @param GCPhys The GC physical address.
76 */
77DECLINLINE(PPGMRAMRANGE) pgmPhysGetRangeAtOrAbove(PVM pVM, RTGCPHYS GCPhys)
78{
79 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
80 if ( !pRam
81 || (GCPhys - pRam->GCPhys) >= pRam->cb)
82 return pgmPhysGetRangeAtOrAboveSlow(pVM, GCPhys);
83 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits));
84 return pRam;
85}
86
87
88
89/**
90 * Gets the PGMPAGE structure for a guest page.
91 *
92 * @returns Pointer to the page on success.
93 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
94 *
95 * @param pVM The VM handle.
96 * @param GCPhys The GC physical address.
97 */
98DECLINLINE(PPGMPAGE) pgmPhysGetPage(PVM pVM, RTGCPHYS GCPhys)
99{
100 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
101 RTGCPHYS off;
102 if ( !pRam
103 || (off = GCPhys - pRam->GCPhys) >= pRam->cb)
104 return pgmPhysGetPageSlow(pVM, GCPhys);
105 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits));
106 return &pRam->aPages[off >> PAGE_SHIFT];
107}
108
109
110/**
111 * Gets the PGMPAGE structure for a guest page.
112 *
113 * Old Phys code: Will make sure the page is present.
114 *
115 * @returns VBox status code.
116 * @retval VINF_SUCCESS and a valid *ppPage on success.
117 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
118 *
119 * @param pVM The VM handle.
120 * @param GCPhys The GC physical address.
121 * @param ppPage Where to store the page pointer on success.
122 */
123DECLINLINE(int) pgmPhysGetPageEx(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
124{
125 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
126 RTGCPHYS off;
127 if ( !pRam
128 || (off = GCPhys - pRam->GCPhys) >= pRam->cb)
129 return pgmPhysGetPageExSlow(pVM, GCPhys, ppPage);
130 *ppPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
131 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits));
132 return VINF_SUCCESS;
133}
134
135
136
137
138/**
139 * Gets the PGMPAGE structure for a guest page.
140 *
141 * Old Phys code: Will make sure the page is present.
142 *
143 * @returns VBox status code.
144 * @retval VINF_SUCCESS and a valid *ppPage on success.
145 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid.
146 *
147 * @param pVM The VM handle.
148 * @param GCPhys The GC physical address.
149 * @param ppPage Where to store the page pointer on success.
150 * @param ppRamHint Where to read and store the ram list hint.
151 * The caller initializes this to NULL before the call.
152 */
153DECLINLINE(int) pgmPhysGetPageWithHintEx(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRamHint)
154{
155 RTGCPHYS off;
156 PPGMRAMRANGE pRam = *ppRamHint;
157 if ( !pRam
158 || RT_UNLIKELY((off = GCPhys - pRam->GCPhys) >= pRam->cb))
159 {
160 pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
161 if ( !pRam
162 || (off = GCPhys - pRam->GCPhys) >= pRam->cb)
163 return pgmPhysGetPageAndRangeExSlow(pVM, GCPhys, ppPage, ppRamHint);
164
165 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits));
166 *ppRamHint = pRam;
167 }
168 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
169 return VINF_SUCCESS;
170}
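A sketch of the intended calling pattern for the hint variant above: as the doc comment says, the caller NULL-initializes the hint once and then reuses it across a run of nearby lookups, so only the first lookup (or a RAM-range crossing) pays for the TLB or the slow path. The wrapper name countAllocatedPages is hypothetical; the PGM calls and page macros are the ones from this file and the internal PGM headers, and it assumes the usual PGM lock is held by the caller.

/* Hypothetical caller: count allocated pages in a physically contiguous region. */
static int countAllocatedPages(PVM pVM, RTGCPHYS GCPhysFirst, uint32_t cPages, uint32_t *pcAllocated)
{
    PPGMRAMRANGE pRamHint   = NULL;     /* must start out NULL, see pgmPhysGetPageWithHintEx */
    uint32_t     cAllocated = 0;
    for (uint32_t iPage = 0; iPage < cPages; iPage++)
    {
        PPGMPAGE pPage;
        int rc = pgmPhysGetPageWithHintEx(pVM, GCPhysFirst + ((RTGCPHYS)iPage << PAGE_SHIFT),
                                          &pPage, &pRamHint);
        if (RT_FAILURE(rc))
            return rc;                  /* typically VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS */
        if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED)
            cAllocated++;
    }
    *pcAllocated = cAllocated;
    return VINF_SUCCESS;
}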
171
172
173/**
174 * Gets the PGMPAGE structure for a guest page together with the PGMRAMRANGE.
175 *
176 * @returns Pointer to the page on success.
177 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition.
178 *
179 * @param pVM The VM handle.
180 * @param GCPhys The GC physical address.
181 * @param ppPage Where to store the pointer to the PGMPAGE structure.
182 * @param ppRam Where to store the pointer to the PGMRAMRANGE structure.
183 */
184DECLINLINE(int) pgmPhysGetPageAndRangeEx(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
185{
186 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
187 RTGCPHYS off;
188 if ( !pRam
189 || (off = GCPhys - pRam->GCPhys) >= pRam->cb)
190 return pgmPhysGetPageAndRangeExSlow(pVM, GCPhys, ppPage, ppRam);
191
192 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits));
193 *ppRam = pRam;
194 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
195 return VINF_SUCCESS;
196}
197
198
199/**
200 * Convert GC Phys to HC Phys.
201 *
202 * @returns VBox status.
203 * @param pVM The VM handle.
204 * @param GCPhys The GC physical address.
205 * @param pHCPhys Where to store the corresponding HC physical address.
206 *
207 * @deprecated Doesn't deal with zero, shared or write monitored pages.
208 * Avoid when writing new code!
209 */
210DECLINLINE(int) pgmRamGCPhys2HCPhys(PVM pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
211{
212 PPGMPAGE pPage;
213 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
214 if (RT_FAILURE(rc))
215 return rc;
216 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
217 return VINF_SUCCESS;
218}
219
220#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) || defined(IN_RC)
221
222/**
223 * Inlined version of the ring-0 version of the host page mapping code
224 * that optimizes access to pages already in the set.
225 *
226 * @returns VINF_SUCCESS. Will bail out to ring-3 on failure.
227 * @param pVCpu The current CPU.
228 * @param HCPhys The physical address of the page.
229 * @param ppv Where to store the mapping address.
230 */
231DECLINLINE(int) pgmRZDynMapHCPageInlined(PVMCPU pVCpu, RTHCPHYS HCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
232{
233 PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
234
235 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInl, a);
236 Assert(!(HCPhys & PAGE_OFFSET_MASK));
237 Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
238
239 unsigned iHash = PGMMAPSET_HASH(HCPhys);
240 unsigned iEntry = pSet->aiHashTable[iHash];
241 if ( iEntry < pSet->cEntries
242 && pSet->aEntries[iEntry].HCPhys == HCPhys
243 && pSet->aEntries[iEntry].cInlinedRefs < UINT16_MAX - 1)
244 {
245 pSet->aEntries[iEntry].cInlinedRefs++;
246 *ppv = pSet->aEntries[iEntry].pvPage;
247 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInlHits);
248 }
249 else
250 {
251 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInlMisses);
252 pgmRZDynMapHCPageCommon(pSet, HCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
253 }
254
255 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInl, a);
256 return VINF_SUCCESS;
257}
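The fast path above is a tiny single-probe hash: one candidate slot per hash value, a bounds check on the stored index (so stale hash slots are harmless), and an inline reference count bumped on a hit. A standalone sketch of the same idea follows; MapSet, Entry, mapSlow and the fixed sizes are simplified assumptions rather than the real PGMMAPSET, and the slow path omits the set-full/flush handling the real code has.

#include <stdint.h>

#define SET_MAX      32
#define HASH_BUCKETS 64

typedef struct Entry  { uint64_t physKey; void *pvPage; uint16_t cRefs; } Entry;
typedef struct MapSet { uint32_t cEntries; uint8_t aiHash[HASH_BUCKETS]; Entry aEntries[SET_MAX]; } MapSet;

static unsigned setHash(uint64_t physKey) { return (unsigned)((physKey >> 12) % HASH_BUCKETS); }

/* Slow-path stand-in: "map" the page, append an entry, publish it in the hash table.
   Assumes cEntries < SET_MAX; the real code handles overflow by flushing the set. */
static void *mapSlow(MapSet *pSet, uint64_t physKey)
{
    void    *pv = (void *)(uintptr_t)physKey;     /* placeholder for a real mapping */
    unsigned i  = pSet->cEntries++;
    pSet->aEntries[i].physKey = physKey;
    pSet->aEntries[i].pvPage  = pv;
    pSet->aEntries[i].cRefs   = 1;
    pSet->aiHash[setHash(physKey)] = (uint8_t)i;
    return pv;
}

/* Fast path mirroring pgmRZDynMapHCPageInlined: hashed slot, validity check, ref bump. */
void *mapPage(MapSet *pSet, uint64_t physKey)
{
    unsigned iEntry = pSet->aiHash[setHash(physKey)];
    if (   iEntry < pSet->cEntries
        && pSet->aEntries[iEntry].physKey == physKey
        && pSet->aEntries[iEntry].cRefs < UINT16_MAX - 1)
    {
        pSet->aEntries[iEntry].cRefs++;
        return pSet->aEntries[iEntry].pvPage;
    }
    return mapSlow(pSet, physKey);
}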
258
259
260/**
261 * Inlined version of the guest page mapping code that optimizes access to pages
262 * already in the set.
263 *
264 * @returns VBox status code, see pgmRZDynMapGCPageCommon for details.
265 * @param pVM The VM handle.
266 * @param pVCpu The current CPU.
267 * @param GCPhys The guest physical address of the page.
268 * @param ppv Where to store the mapping address.
269 */
270DECLINLINE(int) pgmRZDynMapGCPageV2Inlined(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
271{
272 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInl, a);
273 AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("%RGp\n", GCPhys));
274
275 /*
276 * Get the ram range.
277 */
278 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
279 RTGCPHYS off;
280 if ( !pRam
281 || (off = GCPhys - pRam->GCPhys) >= pRam->cb
282 /** @todo || page state stuff */
283 )
284 {
285 /* This case is not counted into StatRZDynMapGCPageInl. */
286 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamMisses);
287 return pgmRZDynMapGCPageCommon(pVM, pVCpu, GCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
288 }
289
290 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]);
291 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamHits);
292
293 /*
294 * pgmRZDynMapHCPageInlined without stats.
295 */
296 PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
297 Assert(!(HCPhys & PAGE_OFFSET_MASK));
298 Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
299
300 unsigned iHash = PGMMAPSET_HASH(HCPhys);
301 unsigned iEntry = pSet->aiHashTable[iHash];
302 if ( iEntry < pSet->cEntries
303 && pSet->aEntries[iEntry].HCPhys == HCPhys
304 && pSet->aEntries[iEntry].cInlinedRefs < UINT16_MAX - 1)
305 {
306 pSet->aEntries[iEntry].cInlinedRefs++;
307 *ppv = pSet->aEntries[iEntry].pvPage;
308 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlHits);
309 }
310 else
311 {
312 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlMisses);
313 pgmRZDynMapHCPageCommon(pSet, HCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
314 }
315
316 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInl, a);
317 return VINF_SUCCESS;
318}
319
320
321/**
322 * Inlined version of the ring-0 version of guest page mapping that optimizes
323 * access to pages already in the set.
324 *
325 * @returns VBox status code, see pgmRZDynMapGCPageCommon for details.
326 * @param pVCpu The current CPU.
327 * @param GCPhys The guest physical address of the page.
328 * @param ppv Where to store the mapping address.
329 */
330DECLINLINE(int) pgmRZDynMapGCPageInlined(PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
331{
332 return pgmRZDynMapGCPageV2Inlined(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
333}
334
335
336/**
337 * Inlined version of the ring-0 version of the guest byte mapping code
338 * that optimizes access to pages already in the set.
339 *
340 * @returns VBox status code, see pgmRZDynMapGCPageCommon for details.
341 * @param pVCpu The current CPU.
342 * @param GCPhys The guest physical address of the page.
343 * @param ppv Where to store the mapping address. The offset is
344 * preserved.
345 */
346DECLINLINE(int) pgmRZDynMapGCPageOffInlined(PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
347{
348 STAM_PROFILE_START(&pVCpu->pgm.s.StatRZDynMapGCPageInl, a);
349
350 /*
351 * Get the ram range.
352 */
353 PVM pVM = pVCpu->CTX_SUFF(pVM);
354 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
355 RTGCPHYS off;
356 if ( !pRam
357 || (off = GCPhys - pRam->GCPhys) >= pRam->cb
358 /** @todo || page state stuff */
359 )
360 {
361 /* This case is not counted into StatRZDynMapGCPageInl. */
362 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamMisses);
363 return pgmRZDynMapGCPageCommon(pVM, pVCpu, GCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
364 }
365
366 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]);
367 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamHits);
368
369 /*
370 * pgmRZDynMapHCPageInlined without stats.
371 */
372 PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
373 Assert(!(HCPhys & PAGE_OFFSET_MASK));
374 Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
375
376 unsigned iHash = PGMMAPSET_HASH(HCPhys);
377 unsigned iEntry = pSet->aiHashTable[iHash];
378 if ( iEntry < pSet->cEntries
379 && pSet->aEntries[iEntry].HCPhys == HCPhys
380 && pSet->aEntries[iEntry].cInlinedRefs < UINT16_MAX - 1)
381 {
382 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlHits);
383 pSet->aEntries[iEntry].cInlinedRefs++;
384 *ppv = (void *)((uintptr_t)pSet->aEntries[iEntry].pvPage | (PAGE_OFFSET_MASK & (uintptr_t)GCPhys));
385 }
386 else
387 {
388 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlMisses);
389 pgmRZDynMapHCPageCommon(pSet, HCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
390 *ppv = (void *)((uintptr_t)*ppv | (PAGE_OFFSET_MASK & (uintptr_t)GCPhys));
391 }
392
393 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInl, a);
394 return VINF_SUCCESS;
395}
396
397#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 || IN_RC */
398#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
399
400/**
401 * Maps the page into current context (RC and maybe R0).
402 *
403 * @returns pointer to the mapping.
404 * @param pVM Pointer to the PGM instance data.
405 * @param pPage The page.
406 */
407DECLINLINE(void *) pgmPoolMapPageInlined(PVM pVM, PPGMPOOLPAGE pPage RTLOG_COMMA_SRC_POS_DECL)
408{
409 if (pPage->idx >= PGMPOOL_IDX_FIRST)
410 {
411 Assert(pPage->idx < pVM->pgm.s.CTX_SUFF(pPool)->cCurPages);
412 void *pv;
413 pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), pPage->Core.Key, &pv RTLOG_COMMA_SRC_POS_ARGS);
414 return pv;
415 }
416 AssertFatalMsgFailed(("pgmPoolMapPageInlined invalid page index %x\n", pPage->idx));
417}
418
419/**
420 * Maps the page into current context (RC and maybe R0).
421 *
422 * @returns pointer to the mapping.
423 * @param pVM Pointer to the PGM instance data.
424 * @param pVCpu The current CPU.
425 * @param pPage The page.
426 */
427DECLINLINE(void *) pgmPoolMapPageV2Inlined(PVM pVM, PVMCPU pVCpu, PPGMPOOLPAGE pPage RTLOG_COMMA_SRC_POS_DECL)
428{
429 if (pPage->idx >= PGMPOOL_IDX_FIRST)
430 {
431 Assert(pPage->idx < pVM->pgm.s.CTX_SUFF(pPool)->cCurPages);
432 void *pv;
433 Assert(pVCpu == VMMGetCpu(pVM));
434 pgmRZDynMapHCPageInlined(pVCpu, pPage->Core.Key, &pv RTLOG_COMMA_SRC_POS_ARGS);
435 return pv;
436 }
437 AssertFatalMsgFailed(("pgmPoolMapPageV2Inlined invalid page index %x\n", pPage->idx));
438}
439
440#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 || IN_RC */
441#ifndef IN_RC
442
443/**
444 * Queries the Physical TLB entry for a physical guest page,
445 * attempting to load the TLB entry if necessary.
446 *
447 * @returns VBox status code.
448 * @retval VINF_SUCCESS on success
449 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
450 *
451 * @param pVM The VM handle.
452 * @param GCPhys The address of the guest page.
453 * @param ppTlbe Where to store the pointer to the TLB entry.
454 */
455DECLINLINE(int) pgmPhysPageQueryTlbe(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
456{
457 int rc;
458 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
459 if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
460 {
461 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbHits));
462 rc = VINF_SUCCESS;
463 }
464 else
465 rc = pgmPhysPageLoadIntoTlb(pVM, GCPhys);
466 *ppTlbe = pTlbe;
467 return rc;
468}
469
470
471/**
472 * Queries the Physical TLB entry for a physical guest page,
473 * attempting to load the TLB entry if necessary.
474 *
475 * @returns VBox status code.
476 * @retval VINF_SUCCESS on success
477 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
478 *
479 * @param pVM The VM handle.
480 * @param pPage Pointer to the PGMPAGE structure corresponding to
481 * GCPhys.
482 * @param GCPhys The address of the guest page.
483 * @param ppTlbe Where to store the pointer to the TLB entry.
484 */
485DECLINLINE(int) pgmPhysPageQueryTlbeWithPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)
486{
487 int rc;
488 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
489 if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK))
490 {
491 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbHits));
492 rc = VINF_SUCCESS;
493 AssertPtr(pTlbe->pv);
494 Assert(!pTlbe->pMap || RT_VALID_PTR(pTlbe->pMap->pv));
495 }
496 else
497 rc = pgmPhysPageLoadIntoTlbWithPage(pVM, pPage, GCPhys);
498 *ppTlbe = pTlbe;
499 return rc;
500}
501
502#endif /* !IN_RC */
503
504
505/**
506 * Enables write monitoring for an allocated page.
507 *
508 * The caller is responsible for updating the shadow page tables.
509 *
510 * @param pVM The VM handle.
511 * @param pPage The page to write monitor.
512 * @param GCPhysPage The address of the page.
513 */
514DECLINLINE(void) pgmPhysPageWriteMonitor(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage)
515{
516 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
517 PGM_LOCK_ASSERT_OWNER(pVM);
518
519 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_WRITE_MONITORED);
520 pVM->pgm.s.cMonitoredPages++;
521
522 /* Large pages must be disabled. */
523 if (PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE)
524 {
525 PPGMPAGE pFirstPage = pgmPhysGetPage(pVM, GCPhysPage & X86_PDE2M_PAE_PG_MASK);
526 AssertFatal(pFirstPage);
527 if (PGM_PAGE_GET_PDE_TYPE(pFirstPage) == PGM_PAGE_PDE_TYPE_PDE)
528 {
529 PGM_PAGE_SET_PDE_TYPE(pVM, pFirstPage, PGM_PAGE_PDE_TYPE_PDE_DISABLED);
530 pVM->pgm.s.cLargePagesDisabled++;
531 }
532 else
533 Assert(PGM_PAGE_GET_PDE_TYPE(pFirstPage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
534 }
535}
536
537
538/**
539 * Checks if the no-execute (NX) feature is active (EFER.NXE=1).
540 *
541 * Only used when the guest is in PAE or long mode. This is inlined so that we
542 * can perform consistency checks in debug builds.
543 *
544 * @returns true if it is, false if it isn't.
545 * @param pVCpu The current CPU.
546 */
547DECL_FORCE_INLINE(bool) pgmGstIsNoExecuteActive(PVMCPU pVCpu)
548{
549 Assert(pVCpu->pgm.s.fNoExecuteEnabled == CPUMIsGuestNXEnabled(pVCpu));
550 Assert(CPUMIsGuestInPAEMode(pVCpu) || CPUMIsGuestInLongMode(pVCpu));
551 return pVCpu->pgm.s.fNoExecuteEnabled;
552}
553
554
555/**
556 * Checks if the page size extension (PSE) is currently enabled (CR4.PSE=1).
557 *
558 * Only used when the guest is in paged 32-bit mode. This is inlined so that
559 * we can perform consistency checks in debug builds.
560 *
561 * @returns true if it is, false if it isn't.
562 * @param pVCpu The current CPU.
563 */
564DECL_FORCE_INLINE(bool) pgmGst32BitIsPageSizeExtActive(PVMCPU pVCpu)
565{
566 Assert(pVCpu->pgm.s.fGst32BitPageSizeExtension == CPUMIsGuestPageSizeExtEnabled(pVCpu));
567 Assert(!CPUMIsGuestInPAEMode(pVCpu));
568 Assert(!CPUMIsGuestInLongMode(pVCpu));
569 return pVCpu->pgm.s.fGst32BitPageSizeExtension;
570}
571
572
573/**
574 * Calculates the guest physical address of the large (4 MB) page in 32-bit paging mode.
575 * Takes PSE-36 into account.
576 *
577 * @returns guest physical address
578 * @param pVM The VM handle.
579 * @param Pde Guest Pde
580 */
581DECLINLINE(RTGCPHYS) pgmGstGet4MBPhysPage(PVM pVM, X86PDE Pde)
582{
583 RTGCPHYS GCPhys = Pde.u & X86_PDE4M_PG_MASK;
584 GCPhys |= (RTGCPHYS)Pde.b.u8PageNoHigh << 32;
585
586 return GCPhys & pVM->pgm.s.GCPhys4MBPSEMask;
587}
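As a worked example of the PSE-36 assembly above: physical bits 31:22 come straight from the PDE, physical bits 39:32 (or fewer, depending on the CPU) come from the PDE's bits 20:13 (the u8PageNoHigh field), and the result is clipped by GCPhys4MBPSEMask to the width the virtual CPU reports. The small self-contained sketch below redoes that bit juggling on a sample raw PDE value; the helper name and the 40-bit mask are assumptions for illustration.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: rebuild a 4 MB page's physical base from a raw PSE-36 PDE. */
static uint64_t pse36PhysBase(uint32_t uPde, uint64_t fPhysMask)
{
    uint64_t GCPhys = uPde & UINT32_C(0xFFC00000);            /* PDE bits 31:22 -> phys 31:22 */
    GCPhys |= (uint64_t)((uPde >> 13) & 0xFF) << 32;          /* PDE bits 20:13 -> phys 39:32 */
    return GCPhys & fPhysMask;                                /* clip like GCPhys4MBPSEMask   */
}

int main(void)
{
    /* Present + writable + PS (0x83), high page-number byte 0x01, low page number 0x100. */
    uint32_t uPde = UINT32_C(0x40002083);
    printf("%#llx\n", (unsigned long long)pse36PhysBase(uPde, UINT64_C(0xFFFFFFFFFF)));  /* 0x140000000 */
    return 0;
}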
588
589
590/**
591 * Gets the address of the guest page directory (32-bit paging).
592 *
593 * @returns VBox status code.
594 * @param pVCpu The current CPU.
595 * @param ppPd Where to return the mapping. This is always set.
596 */
597DECLINLINE(int) pgmGstGet32bitPDPtrEx(PVMCPU pVCpu, PX86PD *ppPd)
598{
599#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
600 int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPd RTLOG_COMMA_SRC_POS);
601 if (RT_FAILURE(rc))
602 {
603 *ppPd = NULL;
604 return rc;
605 }
606#else
607 *ppPd = pVCpu->pgm.s.CTX_SUFF(pGst32BitPd);
608 if (RT_UNLIKELY(!*ppPd))
609 return pgmGstLazyMap32BitPD(pVCpu, ppPd);
610#endif
611 return VINF_SUCCESS;
612}
613
614
615/**
616 * Gets the address of the guest page directory (32-bit paging).
617 *
618 * @returns Pointer to the page directory in question.
619 * @param pVCpu The current CPU.
620 */
621DECLINLINE(PX86PD) pgmGstGet32bitPDPtr(PVMCPU pVCpu)
622{
623#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
624 PX86PD pGuestPD = NULL;
625 int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPD RTLOG_COMMA_SRC_POS);
626 if (RT_FAILURE(rc))
627 {
628 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
629 return NULL;
630 }
631#else
632 PX86PD pGuestPD = pVCpu->pgm.s.CTX_SUFF(pGst32BitPd);
633 if (RT_UNLIKELY(!pGuestPD))
634 {
635 int rc = pgmGstLazyMap32BitPD(pVCpu, &pGuestPD);
636 if (RT_FAILURE(rc))
637 return NULL;
638 }
639#endif
640 return pGuestPD;
641}
642
643
644/**
645 * Gets the guest page directory pointer table.
646 *
647 * @returns VBox status code.
648 * @param pVCpu The current CPU.
649 * @param ppPdpt Where to return the mapping. This is always set.
650 */
651DECLINLINE(int) pgmGstGetPaePDPTPtrEx(PVMCPU pVCpu, PX86PDPT *ppPdpt)
652{
653#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
654 int rc = pgmRZDynMapGCPageOffInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPdpt RTLOG_COMMA_SRC_POS);
655 if (RT_FAILURE(rc))
656 {
657 *ppPdpt = NULL;
658 return rc;
659 }
660#else
661 *ppPdpt = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
662 if (RT_UNLIKELY(!*ppPdpt))
663 return pgmGstLazyMapPaePDPT(pVCpu, ppPdpt);
664#endif
665 return VINF_SUCCESS;
666}
667
668/**
669 * Gets the guest page directory pointer table.
670 *
671 * @returns Pointer to the page directory pointer table in question.
672 * @returns NULL if it is not present or on an invalid page.
673 * @param pVCpu The current CPU.
674 */
675DECLINLINE(PX86PDPT) pgmGstGetPaePDPTPtr(PVMCPU pVCpu)
676{
677 PX86PDPT pGuestPdpt;
678 int rc = pgmGstGetPaePDPTPtrEx(pVCpu, &pGuestPdpt);
679 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
680 return pGuestPdpt;
681}
682
683
684/**
685 * Gets the guest page directory pointer table entry for the specified address.
686 *
687 * @returns Pointer to the page directory pointer table entry in question.
688 * @returns NULL if the page directory pointer table is not present or on an invalid page.
689 * @param pVCpu The current CPU.
690 * @param GCPtr The address.
691 */
692DECLINLINE(PX86PDPE) pgmGstGetPaePDPEPtr(PVMCPU pVCpu, RTGCPTR GCPtr)
693{
694 AssertGCPtr32(GCPtr);
695
696#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
697 PX86PDPT pGuestPDPT = NULL;
698 int rc = pgmRZDynMapGCPageOffInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPDPT RTLOG_COMMA_SRC_POS);
699 AssertRCReturn(rc, NULL);
700#else
701 PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
702 if (RT_UNLIKELY(!pGuestPDPT))
703 {
704 int rc = pgmGstLazyMapPaePDPT(pVCpu, &pGuestPDPT);
705 if (RT_FAILURE(rc))
706 return NULL;
707 }
708#endif
709 return &pGuestPDPT->a[(GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE];
710}
711
712
713/**
714 * Gets the page directory entry for the specified address.
715 *
716 * @returns The page directory entry in question.
717 * @returns A non-present entry if the page directory is not present or on an invalid page.
718 * @param pVCpu The handle of the virtual CPU.
719 * @param GCPtr The address.
720 */
721DECLINLINE(X86PDEPAE) pgmGstGetPaePDE(PVMCPU pVCpu, RTGCPTR GCPtr)
722{
723 AssertGCPtr32(GCPtr);
724 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pVCpu);
725 if (RT_LIKELY(pGuestPDPT))
726 {
727 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
728 if ( pGuestPDPT->a[iPdpt].n.u1Present
729 && !(pGuestPDPT->a[iPdpt].u & pVCpu->pgm.s.fGstPaeMbzPdpeMask) )
730 {
731 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
732#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
733 PX86PDPAE pGuestPD = NULL;
734 int rc = pgmRZDynMapGCPageInlined(pVCpu,
735 pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK,
736 (void **)&pGuestPD
737 RTLOG_COMMA_SRC_POS);
738 if (RT_SUCCESS(rc))
739 return pGuestPD->a[iPD];
740 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
741#else
742 PX86PDPAE pGuestPD = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
743 if ( !pGuestPD
744 || (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt])
745 pgmGstLazyMapPaePD(pVCpu, iPdpt, &pGuestPD);
746 if (pGuestPD)
747 return pGuestPD->a[iPD];
748#endif
749 }
750 }
751
752 X86PDEPAE ZeroPde = {0};
753 return ZeroPde;
754}
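The index arithmetic used above decomposes a 32-bit linear address under PAE as: bits 31:30 pick one of the four PDPT entries (X86_PDPT_SHIFT is 30 and X86_PDPT_MASK_PAE is 0x3), bits 29:21 pick one of 512 PDEs, and bits 20:12 would pick the PTE. A minimal sketch with the constants written out numerically instead of via the x86.h macros:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t GCPtr = UINT32_C(0xC0801234);          /* sample 32-bit linear address */
    unsigned iPdpt = (GCPtr >> 30) & 0x3;           /* PAE PDPT: 4 entries           */
    unsigned iPd   = (GCPtr >> 21) & 0x1FF;         /* PAE PD:   512 entries         */
    unsigned iPt   = (GCPtr >> 12) & 0x1FF;         /* PAE PT:   512 entries         */
    printf("iPdpt=%u iPd=%u iPt=%u off=%#x\n", iPdpt, iPd, iPt, (unsigned)(GCPtr & 0xFFF));
    return 0;                                       /* prints iPdpt=3 iPd=4 iPt=1 off=0x234 */
}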
755
756
757/**
758 * Gets the page directory pointer table entry for the specified address
759 * and returns the index into the page directory
760 *
761 * @returns Pointer to the page directory in question.
762 * @returns NULL if the page directory is not present or on an invalid page.
763 * @param pVCpu The current CPU.
764 * @param GCPtr The address.
765 * @param piPD Receives the index into the returned page directory
766 * @param pPdpe Receives the page directory pointer entry. Optional.
767 */
768DECLINLINE(PX86PDPAE) pgmGstGetPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, unsigned *piPD, PX86PDPE pPdpe)
769{
770 AssertGCPtr32(GCPtr);
771
772 /* The PDPE. */
773 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(pVCpu);
774 if (RT_UNLIKELY(!pGuestPDPT))
775 return NULL;
776 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
777 if (pPdpe)
778 *pPdpe = pGuestPDPT->a[iPdpt];
779 if (!pGuestPDPT->a[iPdpt].n.u1Present)
780 return NULL;
781 if (RT_UNLIKELY(pVCpu->pgm.s.fGstPaeMbzPdpeMask & pGuestPDPT->a[iPdpt].u))
782 return NULL;
783
784 /* The PDE. */
785#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
786 PX86PDPAE pGuestPD = NULL;
787 int rc = pgmRZDynMapGCPageInlined(pVCpu,
788 pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK,
789 (void **)&pGuestPD
790 RTLOG_COMMA_SRC_POS);
791 if (RT_FAILURE(rc))
792 {
793 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
794 return NULL;
795 }
796#else
797 PX86PDPAE pGuestPD = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
798 if ( !pGuestPD
799 || (pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK) != pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt])
800 pgmGstLazyMapPaePD(pVCpu, iPdpt, &pGuestPD);
801#endif
802
803 *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
804 return pGuestPD;
805}
806
807#ifndef IN_RC
808
809/**
810 * Gets the page map level-4 pointer for the guest.
811 *
812 * @returns VBox status code.
813 * @param pVCpu The current CPU.
814 * @param ppPml4 Where to return the mapping. Always set.
815 */
816DECLINLINE(int) pgmGstGetLongModePML4PtrEx(PVMCPU pVCpu, PX86PML4 *ppPml4)
817{
818#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
819 int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPml4 RTLOG_COMMA_SRC_POS);
820 if (RT_FAILURE(rc))
821 {
822 *ppPml4 = NULL;
823 return rc;
824 }
825#else
826 *ppPml4 = pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4);
827 if (RT_UNLIKELY(!*ppPml4))
828 return pgmGstLazyMapPml4(pVCpu, ppPml4);
829#endif
830 return VINF_SUCCESS;
831}
832
833
834/**
835 * Gets the page map level-4 pointer for the guest.
836 *
837 * @returns Pointer to the PML4 page.
838 * @param pVCpu The current CPU.
839 */
840DECLINLINE(PX86PML4) pgmGstGetLongModePML4Ptr(PVMCPU pVCpu)
841{
842 PX86PML4 pGuestPml4;
843 int rc = pgmGstGetLongModePML4PtrEx(pVCpu, &pGuestPml4);
844 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc)); NOREF(rc);
845 return pGuestPml4;
846}
847
848
849/**
850 * Gets the pointer to a page map level-4 entry.
851 *
852 * @returns Pointer to the PML4 entry.
853 * @param pVCpu The current CPU.
854 * @param iPml4 The index.
855 * @remarks Only used by AssertCR3.
856 */
857DECLINLINE(PX86PML4E) pgmGstGetLongModePML4EPtr(PVMCPU pVCpu, unsigned int iPml4)
858{
859#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
860 PX86PML4 pGuestPml4;
861 int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPml4 RTLOG_COMMA_SRC_POS);
862 AssertRCReturn(rc, NULL);
863#else
864 PX86PML4 pGuestPml4 = pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4);
865 if (RT_UNLIKELY(!pGuestPml4))
866 {
867 int rc = pgmGstLazyMapPml4(pVCpu, &pGuestPml4);
868 AssertRCReturn(rc, NULL);
869 }
870#endif
871 return &pGuestPml4->a[iPml4];
872}
873
874
875/**
876 * Gets the page directory entry for the specified address.
877 *
878 * @returns The page directory entry in question.
879 * @returns A non-present entry if the page directory is not present or on an invalid page.
880 * @param pVCpu The current CPU.
881 * @param GCPtr The address.
882 */
883DECLINLINE(X86PDEPAE) pgmGstGetLongModePDE(PVMCPU pVCpu, RTGCPTR64 GCPtr)
884{
885 /*
886 * Note! To keep things simple, ASSUME invalid physical addresses will
887 * cause X86_TRAP_PF_RSVD. This isn't a problem until we start
888 * supporting 52-bit wide physical guest addresses.
889 */
890 PCX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pVCpu);
891 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
892 if ( RT_LIKELY(pGuestPml4)
893 && pGuestPml4->a[iPml4].n.u1Present
894 && !(pGuestPml4->a[iPml4].u & pVCpu->pgm.s.fGstAmd64MbzPml4eMask) )
895 {
896 PCX86PDPT pPdptTemp;
897 int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, pGuestPml4->a[iPml4].u & X86_PML4E_PG_MASK, &pPdptTemp);
898 if (RT_SUCCESS(rc))
899 {
900 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
901 if ( pPdptTemp->a[iPdpt].n.u1Present
902 && !(pPdptTemp->a[iPdpt].u & pVCpu->pgm.s.fGstAmd64MbzPdpeMask) )
903 {
904 PCX86PDPAE pPD;
905 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
906 if (RT_SUCCESS(rc))
907 {
908 const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
909 return pPD->a[iPD];
910 }
911 }
912 }
913 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
914 }
915
916 X86PDEPAE ZeroPde = {0};
917 return ZeroPde;
918}
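The long-mode walk above peels one 9-bit index off the linear address per level: bits 47:39 (PML4), 38:30 (PDPT), 29:21 (PD) and, for 4 KB pages, 20:12 (PT). A quick sketch of just that index extraction for a sample canonical address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t GCPtr = UINT64_C(0x00007F12345678AB);  /* sample canonical address */
    unsigned iPml4 = (unsigned)((GCPtr >> 39) & 0x1FF);
    unsigned iPdpt = (unsigned)((GCPtr >> 30) & 0x1FF);
    unsigned iPd   = (unsigned)((GCPtr >> 21) & 0x1FF);
    unsigned iPt   = (unsigned)((GCPtr >> 12) & 0x1FF);
    printf("iPml4=%u iPdpt=%u iPd=%u iPt=%u\n", iPml4, iPdpt, iPd, iPt);
    return 0;                                       /* prints iPml4=254 iPdpt=72 iPd=418 iPt=359 */
}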
919
920
921/**
922 * Gets the GUEST page directory pointer for the specified address.
923 *
924 * @returns The page directory in question.
925 * @returns NULL if the page directory is not present or on an invalid page.
926 * @param pVCpu The current CPU.
927 * @param GCPtr The address.
928 * @param ppPml4e Page Map Level-4 Entry (out)
929 * @param pPdpe Page directory pointer table entry (out)
930 * @param piPD Receives the index into the returned page directory
931 */
932DECLINLINE(PX86PDPAE) pgmGstGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPE pPdpe, unsigned *piPD)
933{
934 /* The PML4E. */
935 PX86PML4 pGuestPml4 = pgmGstGetLongModePML4Ptr(pVCpu);
936 if (RT_UNLIKELY(!pGuestPml4))
937 return NULL;
938 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
939 PCX86PML4E pPml4e = *ppPml4e = &pGuestPml4->a[iPml4];
940 if (!pPml4e->n.u1Present)
941 return NULL;
942 if (RT_UNLIKELY(pPml4e->u & pVCpu->pgm.s.fGstAmd64MbzPml4eMask))
943 return NULL;
944
945 /* The PDPE. */
946 PCX86PDPT pPdptTemp;
947 int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, pPml4e->u & X86_PML4E_PG_MASK, &pPdptTemp);
948 if (RT_FAILURE(rc))
949 {
950 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
951 return NULL;
952 }
953 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
954 *pPdpe = pPdptTemp->a[iPdpt];
955 if (!pPdpe->n.u1Present)
956 return NULL;
957 if (RT_UNLIKELY(pPdpe->u & pVCpu->pgm.s.fGstAmd64MbzPdpeMask))
958 return NULL;
959
960 /* The PDE. */
961 PX86PDPAE pPD;
962 rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, pPdptTemp->a[iPdpt].u & X86_PDPE_PG_MASK, &pPD);
963 if (RT_FAILURE(rc))
964 {
965 AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
966 return NULL;
967 }
968
969 *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
970 return pPD;
971}
972
973#endif /* !IN_RC */
974
975/**
976 * Gets the shadow page directory, 32-bit.
977 *
978 * @returns Pointer to the shadow 32-bit PD.
979 * @param pVCpu The current CPU.
980 */
981DECLINLINE(PX86PD) pgmShwGet32BitPDPtr(PVMCPU pVCpu)
982{
983 return (PX86PD)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
984}
985
986
987/**
988 * Gets the shadow page directory entry for the specified address, 32-bit.
989 *
990 * @returns Shadow 32-bit PDE.
991 * @param pVCpu The current CPU.
992 * @param GCPtr The address.
993 */
994DECLINLINE(X86PDE) pgmShwGet32BitPDE(PVMCPU pVCpu, RTGCPTR GCPtr)
995{
996 const unsigned iPd = (GCPtr >> X86_PD_SHIFT) & X86_PD_MASK;
997
998 PX86PD pShwPde = pgmShwGet32BitPDPtr(pVCpu);
999 if (!pShwPde)
1000 {
1001 X86PDE ZeroPde = {0};
1002 return ZeroPde;
1003 }
1004 return pShwPde->a[iPd];
1005}
1006
1007
1008/**
1009 * Gets the pointer to the shadow page directory entry for the specified
1010 * address, 32-bit.
1011 *
1012 * @returns Pointer to the shadow 32-bit PDE.
1013 * @param pVCpu The current CPU.
1014 * @param GCPtr The address.
1015 */
1016DECLINLINE(PX86PDE) pgmShwGet32BitPDEPtr(PVMCPU pVCpu, RTGCPTR GCPtr)
1017{
1018 const unsigned iPd = (GCPtr >> X86_PD_SHIFT) & X86_PD_MASK;
1019
1020 PX86PD pPde = pgmShwGet32BitPDPtr(pVCpu);
1021 AssertReturn(pPde, NULL);
1022 return &pPde->a[iPd];
1023}
1024
1025
1026/**
1027 * Gets the shadow page directory pointer table, PAE.
1028 *
1029 * @returns Pointer to the shadow PAE PDPT.
1030 * @param pVCpu The current CPU.
1031 */
1032DECLINLINE(PX86PDPT) pgmShwGetPaePDPTPtr(PVMCPU pVCpu)
1033{
1034 return (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1035}
1036
1037
1038/**
1039 * Gets the shadow page directory for the specified address, PAE.
1040 *
1041 * @returns Pointer to the shadow PD.
1042 * @param pVCpu The current CPU.
1043 * @param GCPtr The address.
1044 */
1045DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr)
1046{
1047 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1048 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1049
1050 if (!pPdpt->a[iPdpt].n.u1Present)
1051 return NULL;
1052
1053 /* Fetch the pgm pool shadow descriptor. */
1054 PVM pVM = pVCpu->CTX_SUFF(pVM);
1055 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), pPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
1056 AssertReturn(pShwPde, NULL);
1057
1058 return (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPde);
1059}
1060
1061
1062/**
1063 * Gets the shadow page directory for the specified address, PAE.
1064 *
1065 * @returns Pointer to the shadow PD.
1066 * @param pVCpu The current CPU.
1067 * @param GCPtr The address.
1068 */
1069DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PVMCPU pVCpu, PX86PDPT pPdpt, RTGCPTR GCPtr)
1070{
1071 const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1072
1073 if (!pPdpt->a[iPdpt].n.u1Present)
1074 return NULL;
1075
1076 /* Fetch the pgm pool shadow descriptor. */
1077 PVM pVM = pVCpu->CTX_SUFF(pVM);
1078 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), pPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
1079 AssertReturn(pShwPde, NULL);
1080
1081 return (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPde);
1082}
1083
1084
1085/**
1086 * Gets the shadow page directory entry, PAE.
1087 *
1088 * @returns PDE.
1089 * @param pVCpu The current CPU.
1090 * @param GCPtr The address.
1091 */
1092DECLINLINE(X86PDEPAE) pgmShwGetPaePDE(PVMCPU pVCpu, RTGCPTR GCPtr)
1093{
1094 const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
1095
1096 PX86PDPAE pShwPde = pgmShwGetPaePDPtr(pVCpu, GCPtr);
1097 if (!pShwPde)
1098 {
1099 X86PDEPAE ZeroPde = {0};
1100 return ZeroPde;
1101 }
1102 return pShwPde->a[iPd];
1103}
1104
1105
1106/**
1107 * Gets the pointer to the shadow page directory entry for an address, PAE.
1108 *
1109 * @returns Pointer to the PDE.
1110 * @param pVCpu The current CPU.
1111 * @param GCPtr The address.
1112 * @remarks Only used by AssertCR3.
1113 */
1114DECLINLINE(PX86PDEPAE) pgmShwGetPaePDEPtr(PVMCPU pVCpu, RTGCPTR GCPtr)
1115{
1116 const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
1117
1118 PX86PDPAE pPde = pgmShwGetPaePDPtr(pVCpu, GCPtr);
1119 AssertReturn(pPde, NULL);
1120 return &pPde->a[iPd];
1121}
1122
1123#ifndef IN_RC
1124
1125/**
1126 * Gets the shadow page map level-4 pointer.
1127 *
1128 * @returns Pointer to the shadow PML4.
1129 * @param pVCpu The current CPU.
1130 */
1131DECLINLINE(PX86PML4) pgmShwGetLongModePML4Ptr(PVMCPU pVCpu)
1132{
1133 return (PX86PML4)PGMPOOL_PAGE_2_PTR_V2(pVCpu->CTX_SUFF(pVM), pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1134}
1135
1136
1137/**
1138 * Gets the shadow page map level-4 entry for the specified address.
1139 *
1140 * @returns The entry.
1141 * @param pVCpu The current CPU.
1142 * @param GCPtr The address.
1143 */
1144DECLINLINE(X86PML4E) pgmShwGetLongModePML4E(PVMCPU pVCpu, RTGCPTR GCPtr)
1145{
1146 const unsigned iPml4 = ((RTGCUINTPTR64)GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1147 PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pVCpu);
1148
1149 if (!pShwPml4)
1150 {
1151 X86PML4E ZeroPml4e = {0};
1152 return ZeroPml4e;
1153 }
1154 return pShwPml4->a[iPml4];
1155}
1156
1157
1158/**
1159 * Gets the pointer to the specified shadow page map level-4 entry.
1160 *
1161 * @returns Pointer to the entry.
1162 * @param pVCpu The current CPU.
1163 * @param iPml4 The PML4 index.
1164 */
1165DECLINLINE(PX86PML4E) pgmShwGetLongModePML4EPtr(PVMCPU pVCpu, unsigned int iPml4)
1166{
1167 PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pVCpu);
1168 if (!pShwPml4)
1169 return NULL;
1170 return &pShwPml4->a[iPml4];
1171}
1172
1173#endif /* !IN_RC */
1174
1175
1176/**
1177 * Cached physical handler lookup.
1178 *
1179 * @returns Physical handler covering @a GCPhys.
1180 * @param pVM The VM handle.
1181 * @param GCPhys The lookup address.
1182 */
1183DECLINLINE(PPGMPHYSHANDLER) pgmHandlerPhysicalLookup(PVM pVM, RTGCPHYS GCPhys)
1184{
1185 PPGMPHYSHANDLER pHandler = pVM->pgm.s.CTX_SUFF(pLastPhysHandler);
1186 if ( pHandler
1187 && GCPhys >= pHandler->Core.Key
1188 && GCPhys < pHandler->Core.KeyLast)
1189 {
1190 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysHandlerLookupHits));
1191 return pHandler;
1192 }
1193
1194 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysHandlerLookupMisses));
1195 pHandler = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1196 if (pHandler)
1197 pVM->pgm.s.CTX_SUFF(pLastPhysHandler) = pHandler;
1198 return pHandler;
1199}
1200
1201
1202/**
1203 * Gets the page state for a physical handler.
1204 *
1205 * @returns The physical handler page state.
1206 * @param pCur The physical handler in question.
1207 */
1208DECLINLINE(unsigned) pgmHandlerPhysicalCalcState(PPGMPHYSHANDLER pCur)
1209{
1210 switch (pCur->enmType)
1211 {
1212 case PGMPHYSHANDLERTYPE_PHYSICAL_WRITE:
1213 return PGM_PAGE_HNDL_PHYS_STATE_WRITE;
1214
1215 case PGMPHYSHANDLERTYPE_MMIO:
1216 case PGMPHYSHANDLERTYPE_PHYSICAL_ALL:
1217 return PGM_PAGE_HNDL_PHYS_STATE_ALL;
1218
1219 default:
1220 AssertFatalMsgFailed(("Invalid type %d\n", pCur->enmType));
1221 }
1222}
1223
1224
1225/**
1226 * Gets the page state for a virtual handler.
1227 *
1228 * @returns The virtual handler page state.
1229 * @param pCur The virtual handler in question.
1230 * @remarks This should never be used on a hypervisor access handler.
1231 */
1232DECLINLINE(unsigned) pgmHandlerVirtualCalcState(PPGMVIRTHANDLER pCur)
1233{
1234 switch (pCur->enmType)
1235 {
1236 case PGMVIRTHANDLERTYPE_WRITE:
1237 return PGM_PAGE_HNDL_VIRT_STATE_WRITE;
1238 case PGMVIRTHANDLERTYPE_ALL:
1239 return PGM_PAGE_HNDL_VIRT_STATE_ALL;
1240 default:
1241 AssertFatalMsgFailed(("Invalid type %d\n", pCur->enmType));
1242 }
1243}
1244
1245
1246/**
1247 * Clears one physical page of a virtual handler.
1248 *
1249 * @param pVM The VM handle.
1250 * @param pCur Virtual handler structure.
1251 * @param iPage Physical page index.
1252 *
1253 * @remark Only used when PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL is being set, so no
1254 * need to care about other handlers in the same page.
1255 */
1256DECLINLINE(void) pgmHandlerVirtualClearPage(PVM pVM, PPGMVIRTHANDLER pCur, unsigned iPage)
1257{
1258 const PPGMPHYS2VIRTHANDLER pPhys2Virt = &pCur->aPhysToVirt[iPage];
1259
1260 /*
1261 * Remove the node from the tree (it's supposed to be in the tree if we get here!).
1262 */
1263#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1264 AssertReleaseMsg(pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_IN_TREE,
1265 ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
1266 pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias));
1267#endif
1268 if (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_IS_HEAD)
1269 {
1270 /* We're the head of the alias chain. */
1271 PPGMPHYS2VIRTHANDLER pRemove = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key); NOREF(pRemove);
1272#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1273 AssertReleaseMsg(pRemove != NULL,
1274 ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
1275 pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias));
1276 AssertReleaseMsg(pRemove == pPhys2Virt,
1277 ("wanted: pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n"
1278 " got: pRemove=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
1279 pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias,
1280 pRemove, pRemove->Core.Key, pRemove->Core.KeyLast, pRemove->offVirtHandler, pRemove->offNextAlias));
1281#endif
1282 if (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK)
1283 {
1284 /* Insert the next list in the alias chain into the tree. */
1285 PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
1286#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1287 AssertReleaseMsg(pNext->offNextAlias & PGMPHYS2VIRTHANDLER_IN_TREE,
1288 ("pNext=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32}\n",
1289 pNext, pNext->Core.Key, pNext->Core.KeyLast, pNext->offVirtHandler, pNext->offNextAlias));
1290#endif
1291 pNext->offNextAlias |= PGMPHYS2VIRTHANDLER_IS_HEAD;
1292 bool fRc = RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, &pNext->Core);
1293 AssertRelease(fRc);
1294 }
1295 }
1296 else
1297 {
1298 /* Locate the previous node in the alias chain. */
1299 PPGMPHYS2VIRTHANDLER pPrev = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key);
1300#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1301 AssertReleaseMsg(pPrev != pPhys2Virt,
1302 ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} pPrev=%p\n",
1303 pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias, pPrev));
1304#endif
1305 for (;;)
1306 {
1307 PPGMPHYS2VIRTHANDLER pNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPrev + (pPrev->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
1308 if (pNext == pPhys2Virt)
1309 {
1310 /* unlink. */
1311 LogFlow(("pgmHandlerVirtualClearPage: removed %p:{.offNextAlias=%#RX32} from alias chain. prev %p:{.offNextAlias=%#RX32} [%RGp-%RGp]\n",
1312 pPhys2Virt, pPhys2Virt->offNextAlias, pPrev, pPrev->offNextAlias, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast));
1313 if (!(pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK))
1314 pPrev->offNextAlias &= ~PGMPHYS2VIRTHANDLER_OFF_MASK;
1315 else
1316 {
1317 PPGMPHYS2VIRTHANDLER pNewNext = (PPGMPHYS2VIRTHANDLER)((intptr_t)pPhys2Virt + (pPhys2Virt->offNextAlias & PGMPHYS2VIRTHANDLER_OFF_MASK));
1318 pPrev->offNextAlias = ((intptr_t)pNewNext - (intptr_t)pPrev)
1319 | (pPrev->offNextAlias & ~PGMPHYS2VIRTHANDLER_OFF_MASK);
1320 }
1321 break;
1322 }
1323
1324 /* next */
1325 if (pNext == pPrev)
1326 {
1327#ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL
1328 AssertReleaseMsg(pNext != pPrev,
1329 ("pPhys2Virt=%p:{.Core.Key=%RGp, .Core.KeyLast=%RGp, .offVirtHandler=%#RX32, .offNextAlias=%#RX32} pPrev=%p\n",
1330 pPhys2Virt, pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offVirtHandler, pPhys2Virt->offNextAlias, pPrev));
1331#endif
1332 break;
1333 }
1334 pPrev = pNext;
1335 }
1336 }
1337 Log2(("PHYS2VIRT: Removing %RGp-%RGp %#RX32 %s\n",
1338 pPhys2Virt->Core.Key, pPhys2Virt->Core.KeyLast, pPhys2Virt->offNextAlias, R3STRING(pCur->pszDesc)));
1339 pPhys2Virt->offNextAlias = 0;
1340 pPhys2Virt->Core.KeyLast = NIL_RTGCPHYS; /* require reinsert */
1341
1342 /*
1343 * Clear the ram flags for this page.
1344 */
1345 PPGMPAGE pPage = pgmPhysGetPage(pVM, pPhys2Virt->Core.Key);
1346 AssertReturnVoid(pPage);
1347 PGM_PAGE_SET_HNDL_VIRT_STATE(pPage, PGM_PAGE_HNDL_VIRT_STATE_NONE);
1348}
1349
1350
1351/**
1352 * Internal worker for finding an 'in-use' shadow page given by its index.
1353 *
1354 * @returns Pointer to the shadow page structure.
1355 * @param pPool The pool.
1356 * @param idx The pool page index.
1357 */
1358DECLINLINE(PPGMPOOLPAGE) pgmPoolGetPageByIdx(PPGMPOOL pPool, unsigned idx)
1359{
1360 AssertFatalMsg(idx >= PGMPOOL_IDX_FIRST && idx < pPool->cCurPages, ("idx=%d\n", idx));
1361 return &pPool->aPages[idx];
1362}
1363
1364
1365/**
1366 * Clear references to guest physical memory.
1367 *
1368 * @param pPool The pool.
1369 * @param pPoolPage The pool page.
1370 * @param pPhysPage The physical guest page tracking structure.
1371 * @param iPte Shadow PTE index
1372 */
1373DECLINLINE(void) pgmTrackDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPoolPage, PPGMPAGE pPhysPage, uint16_t iPte)
1374{
1375 /*
1376 * Just deal with the simple case here.
1377 */
1378# ifdef VBOX_STRICT
1379 PVM pVM = pPool->CTX_SUFF(pVM); NOREF(pVM);
1380# endif
1381# ifdef LOG_ENABLED
1382 const unsigned uOrg = PGM_PAGE_GET_TRACKING(pPhysPage);
1383# endif
1384 const unsigned cRefs = PGM_PAGE_GET_TD_CREFS(pPhysPage);
1385 if (cRefs == 1)
1386 {
1387 Assert(pPoolPage->idx == PGM_PAGE_GET_TD_IDX(pPhysPage));
1388 Assert(iPte == PGM_PAGE_GET_PTE_INDEX(pPhysPage));
1389 /* Invalidate the tracking data. */
1390 PGM_PAGE_SET_TRACKING(pVM, pPhysPage, 0);
1391 }
1392 else
1393 pgmPoolTrackPhysExtDerefGCPhys(pPool, pPoolPage, pPhysPage, iPte);
1394 Log2(("pgmTrackDerefGCPhys: %x -> %x pPhysPage=%R[pgmpage]\n", uOrg, PGM_PAGE_GET_TRACKING(pPhysPage), pPhysPage ));
1395}
1396
1397
1398/**
1399 * Moves the page to the head of the age list.
1400 *
1401 * This is done when the cached page is used in one way or another.
1402 *
1403 * @param pPool The pool.
1404 * @param pPage The cached page.
1405 */
1406DECLINLINE(void) pgmPoolCacheUsed(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
1407{
1408 PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM));
1409
1410 /*
1411 * Move to the head of the age list.
1412 */
1413 if (pPage->iAgePrev != NIL_PGMPOOL_IDX)
1414 {
1415 /* unlink */
1416 pPool->aPages[pPage->iAgePrev].iAgeNext = pPage->iAgeNext;
1417 if (pPage->iAgeNext != NIL_PGMPOOL_IDX)
1418 pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->iAgePrev;
1419 else
1420 pPool->iAgeTail = pPage->iAgePrev;
1421
1422 /* insert at head */
1423 pPage->iAgePrev = NIL_PGMPOOL_IDX;
1424 pPage->iAgeNext = pPool->iAgeHead;
1425 Assert(pPage->iAgeNext != NIL_PGMPOOL_IDX); /* we would've already been head then */
1426 pPool->iAgeHead = pPage->idx;
1427 pPool->aPages[pPage->iAgeNext].iAgePrev = pPage->idx;
1428 }
1429}
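The age list above is an intrusive doubly linked list kept as indices into the page array, with NIL_PGMPOOL_IDX as the end marker; "using" a page unlinks it and reinserts it at the head, so the tail is always the best eviction candidate. A standalone sketch of the same move-to-head operation on a simplified index-linked list (Node, AgeList and NIL_IDX are illustrative names, not the PGM pool types):

#include <stdint.h>
#include <stdio.h>

#define NIL_IDX UINT16_MAX

typedef struct Node    { uint16_t iPrev, iNext; } Node;
typedef struct AgeList { uint16_t iHead, iTail; Node aNodes[4]; } AgeList;

/* Move node i to the head of the list, mirroring pgmPoolCacheUsed. */
static void ageListUse(AgeList *pList, uint16_t i)
{
    if (pList->aNodes[i].iPrev == NIL_IDX)
        return;                                                   /* already the head */
    /* unlink */
    pList->aNodes[pList->aNodes[i].iPrev].iNext = pList->aNodes[i].iNext;
    if (pList->aNodes[i].iNext != NIL_IDX)
        pList->aNodes[pList->aNodes[i].iNext].iPrev = pList->aNodes[i].iPrev;
    else
        pList->iTail = pList->aNodes[i].iPrev;
    /* insert at head */
    pList->aNodes[i].iPrev = NIL_IDX;
    pList->aNodes[i].iNext = pList->iHead;
    pList->aNodes[pList->iHead].iPrev = i;
    pList->iHead = i;
}

int main(void)
{
    /* Build the list 0 <-> 1 <-> 2, then "use" node 2. */
    AgeList l = { 0, 2, { { NIL_IDX, 1 }, { 0, 2 }, { 1, NIL_IDX }, { NIL_IDX, NIL_IDX } } };
    ageListUse(&l, 2);
    printf("head=%u tail=%u\n", (unsigned)l.iHead, (unsigned)l.iTail); /* prints head=2 tail=1 */
    return 0;
}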
1430
1431/**
1432 * Locks a page to prevent flushing (important for cr3 root pages or shadow pae pd pages).
1433 *
1434 * @param pPool The pool.
1435 * @param pPage The pool page.
1436 */
1437DECLINLINE(void) pgmPoolLockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
1438{
1439 PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM));
1440 ASMAtomicIncU32(&pPage->cLocked);
1441}
1442
1443
1444/**
1445 * Unlocks a page to allow flushing again.
1446 *
1447 * @param pPool The pool.
1448 * @param pPage The pool page.
1449 */
1450DECLINLINE(void) pgmPoolUnlockPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
1451{
1452 PGM_LOCK_ASSERT_OWNER(pPool->CTX_SUFF(pVM));
1453 Assert(pPage->cLocked);
1454 ASMAtomicDecU32(&pPage->cLocked);
1455}
1456
1457
1458/**
1459 * Checks if the page is locked (e.g. the active CR3 or one of the four PDs of a PAE PDPT).
1460 *
1461 * @returns true if the page is locked, false otherwise.
1462 * @param pPage The pool page.
1463 */
1464DECLINLINE(bool) pgmPoolIsPageLocked(PPGMPOOLPAGE pPage)
1465{
1466 if (pPage->cLocked)
1467 {
1468 LogFlow(("pgmPoolIsPageLocked found root page %d\n", pPage->enmKind));
1469 if (pPage->cModifications)
1470 pPage->cModifications = 1; /* reset counter (can't use 0, or else it will be reinserted in the modified list) */
1471 return true;
1472 }
1473 return false;
1474}
1475
1476
1477/**
1478 * Tells if mappings are to be put into the shadow page table or not.
1479 *
1480 * @returns boolean result
1481 * @param pVM VM handle.
1482 */
1483DECL_FORCE_INLINE(bool) pgmMapAreMappingsEnabled(PVM pVM)
1484{
1485#ifdef PGM_WITHOUT_MAPPINGS
1486 /* There are no mappings in VT-x and AMD-V mode. */
1487 Assert(pVM->pgm.s.fMappingsDisabled);
1488 return false;
1489#else
1490 return !pVM->pgm.s.fMappingsDisabled;
1491#endif
1492}
1493
1494
1495/**
1496 * Checks if the mappings are floating and enabled.
1497 *
1498 * @returns true / false.
1499 * @param pVM The VM handle.
1500 */
1501DECL_FORCE_INLINE(bool) pgmMapAreMappingsFloating(PVM pVM)
1502{
1503#ifdef PGM_WITHOUT_MAPPINGS
1504 /* There are no mappings in VT-x and AMD-V mode. */
1505 Assert(pVM->pgm.s.fMappingsDisabled);
1506 return false;
1507#else
1508 return !pVM->pgm.s.fMappingsDisabled
1509 && !pVM->pgm.s.fMappingsFixed;
1510#endif
1511}
1512
1513/** @} */
1514
1515#endif
1516