VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PGMPool.cpp@ 93554

Last change on this file since 93554 was 93554, checked in by vboxsync, 3 years ago

VMM: Changed PAGE_SIZE -> GUEST_PAGE_SIZE / HOST_PAGE_SIZE, PAGE_SHIFT -> GUEST_PAGE_SHIFT / HOST_PAGE_SHIFT, and PAGE_OFFSET_MASK -> GUEST_PAGE_OFFSET_MASK / HOST_PAGE_OFFSET_MASK. Also removed most usage of ASMMemIsZeroPage and ASMMemZeroPage since the host and guest page size doesn't need to be the same any more. Some work left to do in the page pool code. bugref:9898

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 54.1 KB
 
1/* $Id: PGMPool.cpp 93554 2022-02-02 22:57:02Z vboxsync $ */
2/** @file
3 * PGM Shadow Page Pool.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.alldomusa.eu.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_pgm_pool PGM Shadow Page Pool
19 *
20 * Motivations:
21 * -# Relationship between shadow page tables and physical guest pages. This
22 * should allow us to skip most of the global flushes now following access
23 * handler changes. The main expense is flushing shadow pages.
24 * -# Limit the pool size if necessary (default is kind of limitless).
25 * -# Allocate shadow pages from RC. We use to only do this in SyncCR3.
26 * -# Required for 64-bit guests.
27 * -# Combining the PD cache and page pool in order to simplify caching.
28 *
29 *
30 * @section sec_pgm_pool_outline Design Outline
31 *
32 * The shadow page pool tracks pages used for shadowing paging structures (i.e.
33 * page tables, page directory, page directory pointer table and page map
34 * level-4). Each page in the pool has an unique identifier. This identifier is
35 * used to link a guest physical page to a shadow PT. The identifier is a
36 * non-zero value and has a relativly low max value - say 14 bits. This makes it
37 * possible to fit it into the upper bits of the of the aHCPhys entries in the
38 * ram range.
39 *
40 * By restricting host physical memory to the first 48 bits (which is the
41 * announced physical memory range of the K8L chip (scheduled for 2008)), we
42 * can safely use the upper 16 bits for shadow page ID and reference counting.
43 *
44 * Update: The 48 bit assumption will be lifted with the new physical memory
45 * management (PGMPAGE), so we won't have any trouble when someone stuffs 2TB
46 * into a box in some years.
47 *
48 * Now, it's possible for a page to be aliased, i.e. mapped by more than one PT
49 * or PD. This is solved by creating a list of physical cross reference extents
50 * when ever this happens. Each node in the list (extent) is can contain 3 page
51 * pool indexes. The list it self is chained using indexes into the paPhysExt
52 * array.
53 *
54 *
55 * @section sec_pgm_pool_life Life Cycle of a Shadow Page
56 *
57 * -# The SyncPT function requests a page from the pool.
58 * The request includes the kind of page it is (PT/PD, PAE/legacy), the
59 * address of the page it's shadowing, and more.
60 * -# The pool responds to the request by allocating a new page.
61 * When the cache is enabled, it will first check if it's in the cache.
62 * Should the pool be exhausted, one of two things can be done:
63 * -# Flush the whole pool and current CR3.
64 * -# Use the cache to find a page which can be flushed (~age).
65 * -# The SyncPT function will sync one or more pages and insert it into the
66 * shadow PD.
67 * -# The SyncPage function may sync more pages on a later \#PFs.
68 * -# The page is freed / flushed in SyncCR3 (perhaps) and some other cases.
69 * When caching is enabled, the page isn't flush but remains in the cache.
70 *
71 *
72 * @section sec_pgm_pool_monitoring Monitoring
73 *
74 * We always monitor GUEST_PAGE_SIZE chunks of memory. When we've got multiple
75 * shadow pages for the same GUEST_PAGE_SIZE of guest memory (PAE and mixed
76 * PD/PT) the pages sharing the monitor get linked using the
77 * iMonitoredNext/Prev. The head page is the pvUser to the access handlers.
78 *
79 *
80 * @section sec_pgm_pool_impl Implementation
81 *
82 * The pool will take pages from the MM page pool. The tracking data
83 * (attributes, bitmaps and so on) are allocated from the hypervisor heap. The
84 * pool content can be accessed both by using the page id and the physical
85 * address (HC). The former is managed by means of an array, the latter by an
86 * offset based AVL tree.
87 *
88 * Flushing of a pool page means that we iterate the content (we know what kind
89 * it is) and updates the link information in the ram range.
90 *
91 * ...
92 */
93
94
95/*********************************************************************************************************************************
96* Header Files *
97*********************************************************************************************************************************/
98#define LOG_GROUP LOG_GROUP_PGM_POOL
99#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
100#include <VBox/vmm/pgm.h>
101#include <VBox/vmm/mm.h>
102#include "PGMInternal.h"
103#include <VBox/vmm/vm.h>
104#include <VBox/vmm/uvm.h>
105#include "PGMInline.h"
106
107#include <VBox/log.h>
108#include <VBox/err.h>
109#include <iprt/asm.h>
110#include <iprt/string.h>
111#include <VBox/dbg.h>
112
113
114/*********************************************************************************************************************************
115* Internal Functions *
116*********************************************************************************************************************************/
#ifdef VBOX_WITH_DEBUGGER
/** @callback_method_impl{FNDBGCCMD, The 'pgmpoolcheck' command.}
 * Forward declaration; the implementation is further down in this file. */
static FNDBGCCMD pgmR3PoolCmdCheck;
#endif

#ifdef VBOX_WITH_DEBUGGER
/** Command descriptors registered with the debugger console (see pgmR3PoolInit). */
static const DBGCCMD g_aCmds[] =
{
    /* pszCmd,        cArgsMin, cArgsMax, paArgDescs, cArgDescs, fFlags, pfnHandler,        pszSyntax, pszDescription */
    { "pgmpoolcheck", 0,        0,        NULL,       0,         0,      pgmR3PoolCmdCheck, "",        "Check the pgm pool pages." },
};
#endif
129
130/**
131 * Initializes the pool
132 *
133 * @returns VBox status code.
134 * @param pVM The cross context VM structure.
135 */
136int pgmR3PoolInit(PVM pVM)
137{
138 int rc;
139
140 AssertCompile(NIL_PGMPOOL_IDX == 0);
141 /* pPage->cLocked is an unsigned byte. */
142 AssertCompile(VMM_MAX_CPU_COUNT <= 255);
143
144 /*
145 * Query Pool config.
146 */
147 PCFGMNODE pCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "/PGM/Pool");
148
149 /* Default pgm pool size is 1024 pages (4MB). */
150 uint16_t cMaxPages = 1024;
151
152 /* Adjust it up relative to the RAM size, using the nested paging formula. */
153 uint64_t cbRam;
154 rc = CFGMR3QueryU64Def(CFGMR3GetRoot(pVM), "RamSize", &cbRam, 0); AssertRCReturn(rc, rc);
155 /** @todo guest x86 specific */
156 uint64_t u64MaxPages = (cbRam >> 9)
157 + (cbRam >> 18)
158 + (cbRam >> 27)
159 + 32 * GUEST_PAGE_SIZE;
160 u64MaxPages >>= GUEST_PAGE_SHIFT;
161 if (u64MaxPages > PGMPOOL_IDX_LAST)
162 cMaxPages = PGMPOOL_IDX_LAST;
163 else
164 cMaxPages = (uint16_t)u64MaxPages;
165
166 /** @cfgm{/PGM/Pool/MaxPages, uint16_t, \#pages, 16, 0x3fff, F(ram-size)}
167 * The max size of the shadow page pool in pages. The pool will grow dynamically
168 * up to this limit.
169 */
170 rc = CFGMR3QueryU16Def(pCfg, "MaxPages", &cMaxPages, cMaxPages);
171 AssertLogRelRCReturn(rc, rc);
172 AssertLogRelMsgReturn(cMaxPages <= PGMPOOL_IDX_LAST && cMaxPages >= RT_ALIGN(PGMPOOL_IDX_FIRST, 16),
173 ("cMaxPages=%u (%#x)\n", cMaxPages, cMaxPages), VERR_INVALID_PARAMETER);
174 AssertCompile(RT_IS_POWER_OF_TWO(PGMPOOL_CFG_MAX_GROW));
175 if (cMaxPages < PGMPOOL_IDX_LAST)
176 cMaxPages = RT_ALIGN(cMaxPages, PGMPOOL_CFG_MAX_GROW / 2);
177 if (cMaxPages > PGMPOOL_IDX_LAST)
178 cMaxPages = PGMPOOL_IDX_LAST;
179 LogRel(("PGM: PGMPool: cMaxPages=%u (u64MaxPages=%llu)\n", cMaxPages, u64MaxPages));
180
181 /** @todo
182 * We need to be much more careful with our allocation strategy here.
183 * For nested paging we don't need pool user info nor extents at all, but
184 * we can't check for nested paging here (too early during init to get a
185 * confirmation it can be used). The default for large memory configs is a
186 * bit large for shadow paging, so I've restricted the extent maximum to 8k
187 * (8k * 16 = 128k of hyper heap).
188 *
189 * Also when large page support is enabled, we typically don't need so much,
190 * although that depends on the availability of 2 MB chunks on the host.
191 */
192
193 /** @cfgm{/PGM/Pool/MaxUsers, uint16_t, \#users, MaxUsers, 32K, MaxPages*2}
194 * The max number of shadow page user tracking records. Each shadow page has
195 * zero of other shadow pages (or CR3s) that references it, or uses it if you
196 * like. The structures describing these relationships are allocated from a
197 * fixed sized pool. This configuration variable defines the pool size.
198 */
199 uint16_t cMaxUsers;
200 rc = CFGMR3QueryU16Def(pCfg, "MaxUsers", &cMaxUsers, cMaxPages * 2);
201 AssertLogRelRCReturn(rc, rc);
202 AssertLogRelMsgReturn(cMaxUsers >= cMaxPages && cMaxPages <= _32K,
203 ("cMaxUsers=%u (%#x)\n", cMaxUsers, cMaxUsers), VERR_INVALID_PARAMETER);
204
205 /** @cfgm{/PGM/Pool/MaxPhysExts, uint16_t, \#extents, 16, MaxPages * 2, MIN(MaxPages*2\,8192)}
206 * The max number of extents for tracking aliased guest pages.
207 */
208 uint16_t cMaxPhysExts;
209 rc = CFGMR3QueryU16Def(pCfg, "MaxPhysExts", &cMaxPhysExts,
210 RT_MIN(cMaxPages * 2, 8192 /* 8Ki max as this eat too much hyper heap */));
211 AssertLogRelRCReturn(rc, rc);
212 AssertLogRelMsgReturn(cMaxPhysExts >= 16 && cMaxPhysExts <= PGMPOOL_IDX_LAST,
213 ("cMaxPhysExts=%u (%#x)\n", cMaxPhysExts, cMaxPhysExts), VERR_INVALID_PARAMETER);
214
215 /** @cfgm{/PGM/Pool/ChacheEnabled, bool, true}
216 * Enables or disabling caching of shadow pages. Caching means that we will try
217 * reuse shadow pages instead of recreating them everything SyncCR3, SyncPT or
218 * SyncPage requests one. When reusing a shadow page, we can save time
219 * reconstructing it and it's children.
220 */
221 bool fCacheEnabled;
222 rc = CFGMR3QueryBoolDef(pCfg, "CacheEnabled", &fCacheEnabled, true);
223 AssertLogRelRCReturn(rc, rc);
224
225 LogRel(("PGM: pgmR3PoolInit: cMaxPages=%#RX16 cMaxUsers=%#RX16 cMaxPhysExts=%#RX16 fCacheEnable=%RTbool\n",
226 cMaxPages, cMaxUsers, cMaxPhysExts, fCacheEnabled));
227
228 /*
229 * Allocate the data structures.
230 */
231 uint32_t cb = RT_UOFFSETOF_DYN(PGMPOOL, aPages[cMaxPages]);
232 cb += cMaxUsers * sizeof(PGMPOOLUSER);
233 cb += cMaxPhysExts * sizeof(PGMPOOLPHYSEXT);
234 PPGMPOOL pPool;
235 rc = MMR3HyperAllocOnceNoRel(pVM, cb, 0, MM_TAG_PGM_POOL, (void **)&pPool);
236 if (RT_FAILURE(rc))
237 return rc;
238 pVM->pgm.s.pPoolR3 = pPool;
239 pVM->pgm.s.pPoolR0 = MMHyperR3ToR0(pVM, pPool);
240
241 /*
242 * Initialize it.
243 */
244 pPool->pVMR3 = pVM;
245 pPool->pVMR0 = pVM->pVMR0ForCall;
246 pPool->cMaxPages = cMaxPages;
247 pPool->cCurPages = PGMPOOL_IDX_FIRST;
248 pPool->iUserFreeHead = 0;
249 pPool->cMaxUsers = cMaxUsers;
250 PPGMPOOLUSER paUsers = (PPGMPOOLUSER)&pPool->aPages[pPool->cMaxPages];
251 pPool->paUsersR3 = paUsers;
252 pPool->paUsersR0 = MMHyperR3ToR0(pVM, paUsers);
253 for (unsigned i = 0; i < cMaxUsers; i++)
254 {
255 paUsers[i].iNext = i + 1;
256 paUsers[i].iUser = NIL_PGMPOOL_IDX;
257 paUsers[i].iUserTable = 0xfffffffe;
258 }
259 paUsers[cMaxUsers - 1].iNext = NIL_PGMPOOL_USER_INDEX;
260 pPool->iPhysExtFreeHead = 0;
261 pPool->cMaxPhysExts = cMaxPhysExts;
262 PPGMPOOLPHYSEXT paPhysExts = (PPGMPOOLPHYSEXT)&paUsers[cMaxUsers];
263 pPool->paPhysExtsR3 = paPhysExts;
264 pPool->paPhysExtsR0 = MMHyperR3ToR0(pVM, paPhysExts);
265 for (unsigned i = 0; i < cMaxPhysExts; i++)
266 {
267 paPhysExts[i].iNext = i + 1;
268 paPhysExts[i].aidx[0] = NIL_PGMPOOL_IDX;
269 paPhysExts[i].apte[0] = NIL_PGMPOOL_PHYSEXT_IDX_PTE;
270 paPhysExts[i].aidx[1] = NIL_PGMPOOL_IDX;
271 paPhysExts[i].apte[1] = NIL_PGMPOOL_PHYSEXT_IDX_PTE;
272 paPhysExts[i].aidx[2] = NIL_PGMPOOL_IDX;
273 paPhysExts[i].apte[2] = NIL_PGMPOOL_PHYSEXT_IDX_PTE;
274 }
275 paPhysExts[cMaxPhysExts - 1].iNext = NIL_PGMPOOL_PHYSEXT_INDEX;
276 for (unsigned i = 0; i < RT_ELEMENTS(pPool->aiHash); i++)
277 pPool->aiHash[i] = NIL_PGMPOOL_IDX;
278 pPool->iAgeHead = NIL_PGMPOOL_IDX;
279 pPool->iAgeTail = NIL_PGMPOOL_IDX;
280 pPool->fCacheEnabled = fCacheEnabled;
281
282 pPool->hAccessHandlerType = NIL_PGMPHYSHANDLERTYPE;
283 rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_WRITE, true /*fKeepPgmLock*/,
284 pgmPoolAccessHandler,
285 NULL, "pgmPoolAccessHandler", "pgmRZPoolAccessPfHandler",
286 NULL, "pgmPoolAccessHandler", "pgmRZPoolAccessPfHandler",
287 "Guest Paging Access Handler",
288 &pPool->hAccessHandlerType);
289 AssertLogRelRCReturn(rc, rc);
290
291 pPool->HCPhysTree = 0;
292
293 /*
294 * The NIL entry.
295 */
296 Assert(NIL_PGMPOOL_IDX == 0);
297 pPool->aPages[NIL_PGMPOOL_IDX].enmKind = PGMPOOLKIND_INVALID;
298 pPool->aPages[NIL_PGMPOOL_IDX].idx = NIL_PGMPOOL_IDX;
299 pPool->aPages[NIL_PGMPOOL_IDX].Core.Key = NIL_RTHCPHYS;
300 pPool->aPages[NIL_PGMPOOL_IDX].GCPhys = NIL_RTGCPHYS;
301 pPool->aPages[NIL_PGMPOOL_IDX].iNext = NIL_PGMPOOL_IDX;
302 /* pPool->aPages[NIL_PGMPOOL_IDX].cLocked = INT32_MAX; - test this out... */
303 pPool->aPages[NIL_PGMPOOL_IDX].pvPageR3 = 0;
304 pPool->aPages[NIL_PGMPOOL_IDX].iUserHead = NIL_PGMPOOL_USER_INDEX;
305 pPool->aPages[NIL_PGMPOOL_IDX].iModifiedNext = NIL_PGMPOOL_IDX;
306 pPool->aPages[NIL_PGMPOOL_IDX].iModifiedPrev = NIL_PGMPOOL_IDX;
307 pPool->aPages[NIL_PGMPOOL_IDX].iMonitoredNext = NIL_PGMPOOL_IDX;
308 pPool->aPages[NIL_PGMPOOL_IDX].iMonitoredPrev = NIL_PGMPOOL_IDX;
309 pPool->aPages[NIL_PGMPOOL_IDX].iAgeNext = NIL_PGMPOOL_IDX;
310 pPool->aPages[NIL_PGMPOOL_IDX].iAgePrev = NIL_PGMPOOL_IDX;
311
312 Assert(pPool->aPages[NIL_PGMPOOL_IDX].idx == NIL_PGMPOOL_IDX);
313 Assert(pPool->aPages[NIL_PGMPOOL_IDX].GCPhys == NIL_RTGCPHYS);
314 Assert(!pPool->aPages[NIL_PGMPOOL_IDX].fSeenNonGlobal);
315 Assert(!pPool->aPages[NIL_PGMPOOL_IDX].fMonitored);
316 Assert(!pPool->aPages[NIL_PGMPOOL_IDX].fCached);
317 Assert(!pPool->aPages[NIL_PGMPOOL_IDX].fZeroed);
318 Assert(!pPool->aPages[NIL_PGMPOOL_IDX].fReusedFlushPending);
319
320 /*
321 * Register statistics.
322 */
323 STAM_REL_REG(pVM, &pPool->StatGrow, STAMTYPE_PROFILE, "/PGM/Pool/Grow", STAMUNIT_TICKS_PER_CALL, "Profiling PGMR0PoolGrow");
324#ifdef VBOX_WITH_STATISTICS
325 STAM_REG(pVM, &pPool->cCurPages, STAMTYPE_U16, "/PGM/Pool/cCurPages", STAMUNIT_PAGES, "Current pool size.");
326 STAM_REG(pVM, &pPool->cMaxPages, STAMTYPE_U16, "/PGM/Pool/cMaxPages", STAMUNIT_PAGES, "Max pool size.");
327 STAM_REG(pVM, &pPool->cUsedPages, STAMTYPE_U16, "/PGM/Pool/cUsedPages", STAMUNIT_PAGES, "The number of pages currently in use.");
328 STAM_REG(pVM, &pPool->cUsedPagesHigh, STAMTYPE_U16_RESET, "/PGM/Pool/cUsedPagesHigh", STAMUNIT_PAGES, "The high watermark for cUsedPages.");
329 STAM_REG(pVM, &pPool->StatAlloc, STAMTYPE_PROFILE_ADV, "/PGM/Pool/Alloc", STAMUNIT_TICKS_PER_CALL, "Profiling of pgmPoolAlloc.");
330 STAM_REG(pVM, &pPool->StatClearAll, STAMTYPE_PROFILE, "/PGM/Pool/ClearAll", STAMUNIT_TICKS_PER_CALL, "Profiling of pgmR3PoolClearAll.");
331 STAM_REG(pVM, &pPool->StatR3Reset, STAMTYPE_PROFILE, "/PGM/Pool/R3Reset", STAMUNIT_TICKS_PER_CALL, "Profiling of pgmR3PoolReset.");
332 STAM_REG(pVM, &pPool->StatFlushPage, STAMTYPE_PROFILE, "/PGM/Pool/FlushPage", STAMUNIT_TICKS_PER_CALL, "Profiling of pgmPoolFlushPage.");
333 STAM_REG(pVM, &pPool->StatFree, STAMTYPE_PROFILE, "/PGM/Pool/Free", STAMUNIT_TICKS_PER_CALL, "Profiling of pgmPoolFree.");
334 STAM_REG(pVM, &pPool->StatForceFlushPage, STAMTYPE_COUNTER, "/PGM/Pool/FlushForce", STAMUNIT_OCCURENCES, "Counting explicit flushes by PGMPoolFlushPage().");
335 STAM_REG(pVM, &pPool->StatForceFlushDirtyPage, STAMTYPE_COUNTER, "/PGM/Pool/FlushForceDirty", STAMUNIT_OCCURENCES, "Counting explicit flushes of dirty pages by PGMPoolFlushPage().");
336 STAM_REG(pVM, &pPool->StatForceFlushReused, STAMTYPE_COUNTER, "/PGM/Pool/FlushReused", STAMUNIT_OCCURENCES, "Counting flushes for reused pages.");
337 STAM_REG(pVM, &pPool->StatZeroPage, STAMTYPE_PROFILE, "/PGM/Pool/ZeroPage", STAMUNIT_TICKS_PER_CALL, "Profiling time spent zeroing pages. Overlaps with Alloc.");
338 STAM_REG(pVM, &pPool->cMaxUsers, STAMTYPE_U16, "/PGM/Pool/Track/cMaxUsers", STAMUNIT_COUNT, "Max user tracking records.");
339 STAM_REG(pVM, &pPool->cPresent, STAMTYPE_U32, "/PGM/Pool/Track/cPresent", STAMUNIT_COUNT, "Number of present page table entries.");
340 STAM_REG(pVM, &pPool->StatTrackDeref, STAMTYPE_PROFILE, "/PGM/Pool/Track/Deref", STAMUNIT_TICKS_PER_CALL, "Profiling of pgmPoolTrackDeref.");
341 STAM_REG(pVM, &pPool->StatTrackFlushGCPhysPT, STAMTYPE_PROFILE, "/PGM/Pool/Track/FlushGCPhysPT", STAMUNIT_TICKS_PER_CALL, "Profiling of pgmPoolTrackFlushGCPhysPT.");
342 STAM_REG(pVM, &pPool->StatTrackFlushGCPhysPTs, STAMTYPE_PROFILE, "/PGM/Pool/Track/FlushGCPhysPTs", STAMUNIT_TICKS_PER_CALL, "Profiling of pgmPoolTrackFlushGCPhysPTs.");
343 STAM_REG(pVM, &pPool->StatTrackFlushGCPhysPTsSlow, STAMTYPE_PROFILE, "/PGM/Pool/Track/FlushGCPhysPTsSlow", STAMUNIT_TICKS_PER_CALL, "Profiling of pgmPoolTrackFlushGCPhysPTsSlow.");
344 STAM_REG(pVM, &pPool->StatTrackFlushEntry, STAMTYPE_COUNTER, "/PGM/Pool/Track/Entry/Flush", STAMUNIT_COUNT, "Nr of flushed entries.");
345 STAM_REG(pVM, &pPool->StatTrackFlushEntryKeep, STAMTYPE_COUNTER, "/PGM/Pool/Track/Entry/Update", STAMUNIT_COUNT, "Nr of updated entries.");
346 STAM_REG(pVM, &pPool->StatTrackFreeUpOneUser, STAMTYPE_COUNTER, "/PGM/Pool/Track/FreeUpOneUser", STAMUNIT_TICKS_PER_CALL, "The number of times we were out of user tracking records.");
347 STAM_REG(pVM, &pPool->StatTrackDerefGCPhys, STAMTYPE_PROFILE, "/PGM/Pool/Track/DrefGCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling deref activity related tracking GC physical pages.");
348 STAM_REG(pVM, &pPool->StatTrackLinearRamSearches, STAMTYPE_COUNTER, "/PGM/Pool/Track/LinearRamSearches", STAMUNIT_OCCURENCES, "The number of times we had to do linear ram searches.");
349 STAM_REG(pVM, &pPool->StamTrackPhysExtAllocFailures,STAMTYPE_COUNTER, "/PGM/Pool/Track/PhysExtAllocFailures", STAMUNIT_OCCURENCES, "The number of failing pgmPoolTrackPhysExtAlloc calls.");
350
351 STAM_REG(pVM, &pPool->StatMonitorPfRZ, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/#PF", STAMUNIT_TICKS_PER_CALL, "Profiling the RC/R0 #PF access handler.");
352 STAM_REG(pVM, &pPool->StatMonitorPfRZEmulateInstr, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/#PF/EmulateInstr", STAMUNIT_OCCURENCES, "Times we've failed interpreting the instruction.");
353 STAM_REG(pVM, &pPool->StatMonitorPfRZFlushPage, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/#PF/FlushPage", STAMUNIT_TICKS_PER_CALL, "Profiling the pgmPoolFlushPage calls made from the RC/R0 access handler.");
354 STAM_REG(pVM, &pPool->StatMonitorPfRZFlushReinit, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/#PF/FlushReinit", STAMUNIT_OCCURENCES, "Times we've detected a page table reinit.");
355 STAM_REG(pVM, &pPool->StatMonitorPfRZFlushModOverflow,STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/#PF/FlushOverflow", STAMUNIT_OCCURENCES, "Counting flushes for pages that are modified too often.");
356 STAM_REG(pVM, &pPool->StatMonitorPfRZFork, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/#PF/Fork", STAMUNIT_OCCURENCES, "Times we've detected fork().");
357 STAM_REG(pVM, &pPool->StatMonitorPfRZHandled, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/#PF/Handled", STAMUNIT_TICKS_PER_CALL, "Profiling the RC/R0 #PF access we've handled (except REP STOSD).");
358 STAM_REG(pVM, &pPool->StatMonitorPfRZIntrFailPatch1, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/#PF/IntrFailPatch1", STAMUNIT_OCCURENCES, "Times we've failed interpreting a patch code instruction.");
359 STAM_REG(pVM, &pPool->StatMonitorPfRZIntrFailPatch2, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/#PF/IntrFailPatch2", STAMUNIT_OCCURENCES, "Times we've failed interpreting a patch code instruction during flushing.");
360 STAM_REG(pVM, &pPool->StatMonitorPfRZRepPrefix, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/#PF/RepPrefix", STAMUNIT_OCCURENCES, "The number of times we've seen rep prefixes we can't handle.");
361 STAM_REG(pVM, &pPool->StatMonitorPfRZRepStosd, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/#PF/RepStosd", STAMUNIT_TICKS_PER_CALL, "Profiling the REP STOSD cases we've handled.");
362
363 STAM_REG(pVM, &pPool->StatMonitorRZ, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM", STAMUNIT_TICKS_PER_CALL, "Profiling the regular access handler.");
364 STAM_REG(pVM, &pPool->StatMonitorRZFlushPage, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/RZ/IEM/FlushPage", STAMUNIT_TICKS_PER_CALL, "Profiling the pgmPoolFlushPage calls made from the regular access handler.");
365 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[0], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Size01", STAMUNIT_OCCURENCES, "Number of 1 byte accesses.");
366 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[1], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Size02", STAMUNIT_OCCURENCES, "Number of 2 byte accesses.");
367 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[2], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Size03", STAMUNIT_OCCURENCES, "Number of 3 byte accesses.");
368 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[3], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Size04", STAMUNIT_OCCURENCES, "Number of 4 byte accesses.");
369 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[4], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Size05", STAMUNIT_OCCURENCES, "Number of 5 byte accesses.");
370 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[5], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Size06", STAMUNIT_OCCURENCES, "Number of 6 byte accesses.");
371 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[6], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Size07", STAMUNIT_OCCURENCES, "Number of 7 byte accesses.");
372 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[7], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Size08", STAMUNIT_OCCURENCES, "Number of 8 byte accesses.");
373 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[8], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Size09", STAMUNIT_OCCURENCES, "Number of 9 byte accesses.");
374 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[9], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Size0a", STAMUNIT_OCCURENCES, "Number of 10 byte accesses.");
375 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[10], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Size0b", STAMUNIT_OCCURENCES, "Number of 11 byte accesses.");
376 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[11], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Size0c", STAMUNIT_OCCURENCES, "Number of 12 byte accesses.");
377 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[12], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Size0d", STAMUNIT_OCCURENCES, "Number of 13 byte accesses.");
378 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[13], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Size0e", STAMUNIT_OCCURENCES, "Number of 14 byte accesses.");
379 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[14], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Size0f", STAMUNIT_OCCURENCES, "Number of 15 byte accesses.");
380 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[15], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Size10", STAMUNIT_OCCURENCES, "Number of 16 byte accesses.");
381 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[16], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Size11-2f", STAMUNIT_OCCURENCES, "Number of 17-31 byte accesses.");
382 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[17], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Size20-3f", STAMUNIT_OCCURENCES, "Number of 32-63 byte accesses.");
383 STAM_REG(pVM, &pPool->aStatMonitorRZSizes[18], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Size40+", STAMUNIT_OCCURENCES, "Number of 64+ byte accesses.");
384 STAM_REG(pVM, &pPool->aStatMonitorRZMisaligned[0], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Misaligned1", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 1.");
385 STAM_REG(pVM, &pPool->aStatMonitorRZMisaligned[1], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Misaligned2", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 2.");
386 STAM_REG(pVM, &pPool->aStatMonitorRZMisaligned[2], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Misaligned3", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 3.");
387 STAM_REG(pVM, &pPool->aStatMonitorRZMisaligned[3], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Misaligned4", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 4.");
388 STAM_REG(pVM, &pPool->aStatMonitorRZMisaligned[4], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Misaligned5", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 5.");
389 STAM_REG(pVM, &pPool->aStatMonitorRZMisaligned[5], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Misaligned6", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 6.");
390 STAM_REG(pVM, &pPool->aStatMonitorRZMisaligned[6], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/IEM/Misaligned7", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 7.");
391
392 STAM_REG(pVM, &pPool->StatMonitorRZFaultPT, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/Fault/PT", STAMUNIT_OCCURENCES, "Nr of handled PT faults.");
393 STAM_REG(pVM, &pPool->StatMonitorRZFaultPD, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/Fault/PD", STAMUNIT_OCCURENCES, "Nr of handled PD faults.");
394 STAM_REG(pVM, &pPool->StatMonitorRZFaultPDPT, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/Fault/PDPT", STAMUNIT_OCCURENCES, "Nr of handled PDPT faults.");
395 STAM_REG(pVM, &pPool->StatMonitorRZFaultPML4, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/RZ/Fault/PML4", STAMUNIT_OCCURENCES, "Nr of handled PML4 faults.");
396
397 STAM_REG(pVM, &pPool->StatMonitorR3, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3", STAMUNIT_TICKS_PER_CALL, "Profiling the R3 access handler.");
398 STAM_REG(pVM, &pPool->StatMonitorR3FlushPage, STAMTYPE_PROFILE, "/PGM/Pool/Monitor/R3/FlushPage", STAMUNIT_TICKS_PER_CALL, "Profiling the pgmPoolFlushPage calls made from the R3 access handler.");
399 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[0], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Size01", STAMUNIT_OCCURENCES, "Number of 1 byte accesses (R3).");
400 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[1], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Size02", STAMUNIT_OCCURENCES, "Number of 2 byte accesses (R3).");
401 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[2], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Size03", STAMUNIT_OCCURENCES, "Number of 3 byte accesses (R3).");
402 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[3], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Size04", STAMUNIT_OCCURENCES, "Number of 4 byte accesses (R3).");
403 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[4], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Size05", STAMUNIT_OCCURENCES, "Number of 5 byte accesses (R3).");
404 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[5], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Size06", STAMUNIT_OCCURENCES, "Number of 6 byte accesses (R3).");
405 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[6], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Size07", STAMUNIT_OCCURENCES, "Number of 7 byte accesses (R3).");
406 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[7], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Size08", STAMUNIT_OCCURENCES, "Number of 8 byte accesses (R3).");
407 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[8], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Size09", STAMUNIT_OCCURENCES, "Number of 9 byte accesses (R3).");
408 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[9], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Size0a", STAMUNIT_OCCURENCES, "Number of 10 byte accesses (R3).");
409 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[10], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Size0b", STAMUNIT_OCCURENCES, "Number of 11 byte accesses (R3).");
410 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[11], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Size0c", STAMUNIT_OCCURENCES, "Number of 12 byte accesses (R3).");
411 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[12], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Size0d", STAMUNIT_OCCURENCES, "Number of 13 byte accesses (R3).");
412 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[13], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Size0e", STAMUNIT_OCCURENCES, "Number of 14 byte accesses (R3).");
413 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[14], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Size0f", STAMUNIT_OCCURENCES, "Number of 15 byte accesses (R3).");
414 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[15], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Size10", STAMUNIT_OCCURENCES, "Number of 16 byte accesses (R3).");
415 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[16], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Size11-2f", STAMUNIT_OCCURENCES, "Number of 17-31 byte accesses.");
416 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[17], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Size20-3f", STAMUNIT_OCCURENCES, "Number of 32-63 byte accesses.");
417 STAM_REG(pVM, &pPool->aStatMonitorR3Sizes[18], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Size40+", STAMUNIT_OCCURENCES, "Number of 64+ byte accesses.");
418 STAM_REG(pVM, &pPool->aStatMonitorR3Misaligned[0], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Misaligned1", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 1 in R3.");
419 STAM_REG(pVM, &pPool->aStatMonitorR3Misaligned[1], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Misaligned2", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 2 in R3.");
420 STAM_REG(pVM, &pPool->aStatMonitorR3Misaligned[2], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Misaligned3", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 3 in R3.");
421 STAM_REG(pVM, &pPool->aStatMonitorR3Misaligned[3], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Misaligned4", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 4 in R3.");
422 STAM_REG(pVM, &pPool->aStatMonitorR3Misaligned[4], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Misaligned5", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 5 in R3.");
423 STAM_REG(pVM, &pPool->aStatMonitorR3Misaligned[5], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Misaligned6", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 6 in R3.");
424 STAM_REG(pVM, &pPool->aStatMonitorR3Misaligned[6], STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Misaligned7", STAMUNIT_OCCURENCES, "Number of misaligned access with offset 7 in R3.");
425
426 STAM_REG(pVM, &pPool->StatMonitorR3FaultPT, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Fault/PT", STAMUNIT_OCCURENCES, "Nr of handled PT faults.");
427 STAM_REG(pVM, &pPool->StatMonitorR3FaultPD, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Fault/PD", STAMUNIT_OCCURENCES, "Nr of handled PD faults.");
428 STAM_REG(pVM, &pPool->StatMonitorR3FaultPDPT, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Fault/PDPT", STAMUNIT_OCCURENCES, "Nr of handled PDPT faults.");
429 STAM_REG(pVM, &pPool->StatMonitorR3FaultPML4, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/R3/Fault/PML4", STAMUNIT_OCCURENCES, "Nr of handled PML4 faults.");
430
431 STAM_REG(pVM, &pPool->cModifiedPages, STAMTYPE_U16, "/PGM/Pool/Monitor/cModifiedPages", STAMUNIT_PAGES, "The current cModifiedPages value.");
432 STAM_REG(pVM, &pPool->cModifiedPagesHigh, STAMTYPE_U16_RESET, "/PGM/Pool/Monitor/cModifiedPagesHigh", STAMUNIT_PAGES, "The high watermark for cModifiedPages.");
433 STAM_REG(pVM, &pPool->StatResetDirtyPages, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/Dirty/Resets", STAMUNIT_OCCURENCES, "Times we've called pgmPoolResetDirtyPages (and there were dirty page).");
434 STAM_REG(pVM, &pPool->StatDirtyPage, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/Dirty/Pages", STAMUNIT_OCCURENCES, "Times we've called pgmPoolAddDirtyPage.");
435 STAM_REG(pVM, &pPool->StatDirtyPageDupFlush, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/Dirty/FlushDup", STAMUNIT_OCCURENCES, "Times we've had to flush duplicates for dirty page management.");
436 STAM_REG(pVM, &pPool->StatDirtyPageOverFlowFlush, STAMTYPE_COUNTER, "/PGM/Pool/Monitor/Dirty/FlushOverflow",STAMUNIT_OCCURENCES, "Times we've had to flush because of overflow.");
437 STAM_REG(pVM, &pPool->StatCacheHits, STAMTYPE_COUNTER, "/PGM/Pool/Cache/Hits", STAMUNIT_OCCURENCES, "The number of pgmPoolAlloc calls satisfied by the cache.");
438 STAM_REG(pVM, &pPool->StatCacheMisses, STAMTYPE_COUNTER, "/PGM/Pool/Cache/Misses", STAMUNIT_OCCURENCES, "The number of pgmPoolAlloc calls not statisfied by the cache.");
439 STAM_REG(pVM, &pPool->StatCacheKindMismatches, STAMTYPE_COUNTER, "/PGM/Pool/Cache/KindMismatches", STAMUNIT_OCCURENCES, "The number of shadow page kind mismatches. (Better be low, preferably 0!)");
440 STAM_REG(pVM, &pPool->StatCacheFreeUpOne, STAMTYPE_COUNTER, "/PGM/Pool/Cache/FreeUpOne", STAMUNIT_OCCURENCES, "The number of times the cache was asked to free up a page.");
441 STAM_REG(pVM, &pPool->StatCacheCacheable, STAMTYPE_COUNTER, "/PGM/Pool/Cache/Cacheable", STAMUNIT_OCCURENCES, "The number of cacheable allocations.");
442 STAM_REG(pVM, &pPool->StatCacheUncacheable, STAMTYPE_COUNTER, "/PGM/Pool/Cache/Uncacheable", STAMUNIT_OCCURENCES, "The number of uncacheable allocations.");
443#endif /* VBOX_WITH_STATISTICS */
444
445#ifdef VBOX_WITH_DEBUGGER
446 /*
447 * Debugger commands.
448 */
449 static bool s_fRegisteredCmds = false;
450 if (!s_fRegisteredCmds)
451 {
452 rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
453 if (RT_SUCCESS(rc))
454 s_fRegisteredCmds = true;
455 }
456#endif
457
458 return VINF_SUCCESS;
459}
460
461
462/**
463 * Relocate the page pool data.
464 *
465 * @param pVM The cross context VM structure.
466 */
467void pgmR3PoolRelocate(PVM pVM)
468{
469 RT_NOREF(pVM);
470}
471
472
473/**
474 * Grows the shadow page pool.
475 *
476 * I.e. adds more pages to it, assuming that hasn't reached cMaxPages yet.
477 *
478 * @returns VBox status code.
479 * @param pVM The cross context VM structure.
480 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
481 */
482VMMR3_INT_DECL(int) PGMR3PoolGrow(PVM pVM, PVMCPU pVCpu)
483{
484 /* This used to do a lot of stuff, but it has moved to ring-0 (PGMR0PoolGrow). */
485 AssertReturn(pVM->pgm.s.pPoolR3->cCurPages < pVM->pgm.s.pPoolR3->cMaxPages, VERR_PGM_POOL_MAXED_OUT_ALREADY);
486 int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_PGM_POOL_GROW, 0, NULL);
487 if (rc == VINF_SUCCESS)
488 return rc;
489 LogRel(("PGMR3PoolGrow: rc=%Rrc cCurPages=%#x cMaxPages=%#x\n",
490 rc, pVM->pgm.s.pPoolR3->cCurPages, pVM->pgm.s.pPoolR3->cMaxPages));
491 if (pVM->pgm.s.pPoolR3->cCurPages > 128 && RT_FAILURE_NP(rc))
492 return -rc;
493 return rc;
494}
495
496
497/**
498 * Rendezvous callback used by pgmR3PoolClearAll that clears all shadow pages
499 * and all modification counters.
500 *
501 * This is only called on one of the EMTs while the other ones are waiting for
502 * it to complete this function.
503 *
504 * @returns VINF_SUCCESS (VBox strict status code).
505 * @param pVM The cross context VM structure.
506 * @param pVCpu The cross context virtual CPU structure of the calling EMT. Unused.
507 * @param fpvFlushRemTlb When not NULL, we'll flush the REM TLB as well.
508 * (This is the pvUser, so it has to be void *.)
509 *
510 */
511DECLCALLBACK(VBOXSTRICTRC) pgmR3PoolClearAllRendezvous(PVM pVM, PVMCPU pVCpu, void *fpvFlushRemTlb)
512{
513 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
514 STAM_PROFILE_START(&pPool->StatClearAll, c);
515 NOREF(pVCpu);
516
517 PGM_LOCK_VOID(pVM);
518 Log(("pgmR3PoolClearAllRendezvous: cUsedPages=%d fpvFlushRemTlb=%RTbool\n", pPool->cUsedPages, !!fpvFlushRemTlb));
519
520 /*
521 * Iterate all the pages until we've encountered all that are in use.
522 * This is a simple but not quite optimal solution.
523 */
524 unsigned cModifiedPages = 0; NOREF(cModifiedPages);
525 unsigned cLeft = pPool->cUsedPages;
526 uint32_t iPage = pPool->cCurPages;
527 while (--iPage >= PGMPOOL_IDX_FIRST)
528 {
529 PPGMPOOLPAGE pPage = &pPool->aPages[iPage];
530 if (pPage->GCPhys != NIL_RTGCPHYS)
531 {
532 switch (pPage->enmKind)
533 {
534 /*
535 * We only care about shadow page tables that reference physical memory
536 */
537#ifdef PGM_WITH_LARGE_PAGES
538 case PGMPOOLKIND_PAE_PD_PHYS: /* Large pages reference 2 MB of physical memory, so we must clear them. */
539 if (pPage->cPresent)
540 {
541 PX86PDPAE pShwPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pPool->CTX_SUFF(pVM), pVCpu, pPage);
542 for (unsigned i = 0; i < RT_ELEMENTS(pShwPD->a); i++)
543 {
544 //Assert((pShwPD->a[i].u & UINT64_C(0xfff0000000000f80)) == 0); - bogus, includes X86_PDE_PS.
545 if ((pShwPD->a[i].u & (X86_PDE_P | X86_PDE_PS)) == (X86_PDE_P | X86_PDE_PS))
546 {
547 pShwPD->a[i].u = 0;
548 Assert(pPage->cPresent);
549 pPage->cPresent--;
550 }
551 }
552 if (pPage->cPresent == 0)
553 pPage->iFirstPresent = NIL_PGMPOOL_PRESENT_INDEX;
554 }
555 goto default_case;
556
557 case PGMPOOLKIND_EPT_PD_FOR_PHYS: /* Large pages reference 2 MB of physical memory, so we must clear them. */
558 if (pPage->cPresent)
559 {
560 PEPTPD pShwPD = (PEPTPD)PGMPOOL_PAGE_2_PTR_V2(pPool->CTX_SUFF(pVM), pVCpu, pPage);
561 for (unsigned i = 0; i < RT_ELEMENTS(pShwPD->a); i++)
562 {
563 if ((pShwPD->a[i].u & (EPT_E_READ | EPT_E_LEAF)) == (EPT_E_READ | EPT_E_LEAF))
564 {
565 pShwPD->a[i].u = 0;
566 Assert(pPage->cPresent);
567 pPage->cPresent--;
568 }
569 }
570 if (pPage->cPresent == 0)
571 pPage->iFirstPresent = NIL_PGMPOOL_PRESENT_INDEX;
572 }
573 goto default_case;
574#endif /* PGM_WITH_LARGE_PAGES */
575
576 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
577 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
578 case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
579 case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
580 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
581 case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
582 case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
583 case PGMPOOLKIND_PAE_PT_FOR_PHYS:
584 case PGMPOOLKIND_EPT_PT_FOR_PHYS:
585 {
586 if (pPage->cPresent)
587 {
588 void *pvShw = PGMPOOL_PAGE_2_PTR_V2(pPool->CTX_SUFF(pVM), pVCpu, pPage);
589 STAM_PROFILE_START(&pPool->StatZeroPage, z);
590#if 0
591 /* Useful check for leaking references; *very* expensive though. */
592 switch (pPage->enmKind)
593 {
594 case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
595 case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
596 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
597 case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
598 case PGMPOOLKIND_PAE_PT_FOR_PHYS:
599 {
600 bool fFoundFirst = false;
601 PPGMSHWPTPAE pPT = (PPGMSHWPTPAE)pvShw;
602 for (unsigned ptIndex = 0; ptIndex < RT_ELEMENTS(pPT->a); ptIndex++)
603 {
604 if (pPT->a[ptIndex].u)
605 {
606 if (!fFoundFirst)
607 {
608 AssertFatalMsg(pPage->iFirstPresent <= ptIndex, ("ptIndex = %d first present = %d\n", ptIndex, pPage->iFirstPresent));
609 if (pPage->iFirstPresent != ptIndex)
610 Log(("ptIndex = %d first present = %d\n", ptIndex, pPage->iFirstPresent));
611 fFoundFirst = true;
612 }
613 if (PGMSHWPTEPAE_IS_P(pPT->a[ptIndex]))
614 {
615 pgmPoolTracDerefGCPhysHint(pPool, pPage, PGMSHWPTEPAE_GET_HCPHYS(pPT->a[ptIndex]), NIL_RTGCPHYS);
616 if (pPage->iFirstPresent == ptIndex)
617 pPage->iFirstPresent = NIL_PGMPOOL_PRESENT_INDEX;
618 }
619 }
620 }
621 AssertFatalMsg(pPage->cPresent == 0, ("cPresent = %d pPage = %RGv\n", pPage->cPresent, pPage->GCPhys));
622 break;
623 }
624 default:
625 break;
626 }
627#endif
628 ASMMemZeroPage(pvShw);
629 STAM_PROFILE_STOP(&pPool->StatZeroPage, z);
630 pPage->cPresent = 0;
631 pPage->iFirstPresent = NIL_PGMPOOL_PRESENT_INDEX;
632 }
633 }
634 RT_FALL_THRU();
635 default:
636#ifdef PGM_WITH_LARGE_PAGES
637 default_case:
638#endif
639 Assert(!pPage->cModifications || ++cModifiedPages);
640 Assert(pPage->iModifiedNext == NIL_PGMPOOL_IDX || pPage->cModifications);
641 Assert(pPage->iModifiedPrev == NIL_PGMPOOL_IDX || pPage->cModifications);
642 pPage->iModifiedNext = NIL_PGMPOOL_IDX;
643 pPage->iModifiedPrev = NIL_PGMPOOL_IDX;
644 pPage->cModifications = 0;
645 break;
646
647 }
648 if (!--cLeft)
649 break;
650 }
651 }
652
653#ifndef DEBUG_michael
654 AssertMsg(cModifiedPages == pPool->cModifiedPages, ("%d != %d\n", cModifiedPages, pPool->cModifiedPages));
655#endif
656 pPool->iModifiedHead = NIL_PGMPOOL_IDX;
657 pPool->cModifiedPages = 0;
658
659 /*
660 * Clear all the GCPhys links and rebuild the phys ext free list.
661 */
662 for (PPGMRAMRANGE pRam = pPool->CTX_SUFF(pVM)->pgm.s.CTX_SUFF(pRamRangesX);
663 pRam;
664 pRam = pRam->CTX_SUFF(pNext))
665 {
666 iPage = pRam->cb >> GUEST_PAGE_SHIFT;
667 while (iPage-- > 0)
668 PGM_PAGE_SET_TRACKING(pVM, &pRam->aPages[iPage], 0);
669 }
670
671 pPool->iPhysExtFreeHead = 0;
672 PPGMPOOLPHYSEXT paPhysExts = pPool->CTX_SUFF(paPhysExts);
673 const unsigned cMaxPhysExts = pPool->cMaxPhysExts;
674 for (unsigned i = 0; i < cMaxPhysExts; i++)
675 {
676 paPhysExts[i].iNext = i + 1;
677 paPhysExts[i].aidx[0] = NIL_PGMPOOL_IDX;
678 paPhysExts[i].apte[0] = NIL_PGMPOOL_PHYSEXT_IDX_PTE;
679 paPhysExts[i].aidx[1] = NIL_PGMPOOL_IDX;
680 paPhysExts[i].apte[1] = NIL_PGMPOOL_PHYSEXT_IDX_PTE;
681 paPhysExts[i].aidx[2] = NIL_PGMPOOL_IDX;
682 paPhysExts[i].apte[2] = NIL_PGMPOOL_PHYSEXT_IDX_PTE;
683 }
684 paPhysExts[cMaxPhysExts - 1].iNext = NIL_PGMPOOL_PHYSEXT_INDEX;
685
686
687#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
688 /* Reset all dirty pages to reactivate the page monitoring. */
689 /* Note: we must do this *after* clearing all page references and shadow page tables as there might be stale references to
690 * recently removed MMIO ranges around that might otherwise end up asserting in pgmPoolTracDerefGCPhysHint
691 */
692 for (unsigned i = 0; i < RT_ELEMENTS(pPool->aDirtyPages); i++)
693 {
694 unsigned idxPage = pPool->aidxDirtyPages[i];
695 if (idxPage == NIL_PGMPOOL_IDX)
696 continue;
697
698 PPGMPOOLPAGE pPage = &pPool->aPages[idxPage];
699 Assert(pPage->idx == idxPage);
700 Assert(pPage->iMonitoredNext == NIL_PGMPOOL_IDX && pPage->iMonitoredPrev == NIL_PGMPOOL_IDX);
701
702 AssertMsg(pPage->fDirty, ("Page %RGp (slot=%d) not marked dirty!", pPage->GCPhys, i));
703
704 Log(("Reactivate dirty page %RGp\n", pPage->GCPhys));
705
706 /* First write protect the page again to catch all write accesses. (before checking for changes -> SMP) */
707 int rc = PGMHandlerPhysicalReset(pVM, pPage->GCPhys & PAGE_BASE_GC_MASK);
708 AssertRCSuccess(rc);
709 pPage->fDirty = false;
710
711 pPool->aidxDirtyPages[i] = NIL_PGMPOOL_IDX;
712 }
713
714 /* Clear all dirty pages. */
715 pPool->idxFreeDirtyPage = 0;
716 pPool->cDirtyPages = 0;
717#endif
718
719 /* Clear the PGM_SYNC_CLEAR_PGM_POOL flag on all VCPUs to prevent redundant flushes. */
720 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
721 pVM->apCpusR3[idCpu]->pgm.s.fSyncFlags &= ~PGM_SYNC_CLEAR_PGM_POOL;
722
723 /* Flush job finished. */
724 VM_FF_CLEAR(pVM, VM_FF_PGM_POOL_FLUSH_PENDING);
725 pPool->cPresent = 0;
726 PGM_UNLOCK(pVM);
727
728 PGM_INVL_ALL_VCPU_TLBS(pVM);
729
730 if (fpvFlushRemTlb)
731 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
732 CPUMSetChangedFlags(pVM->apCpusR3[idCpu], CPUM_CHANGED_GLOBAL_TLB_FLUSH);
733
734 STAM_PROFILE_STOP(&pPool->StatClearAll, c);
735 return VINF_SUCCESS;
736}
737
738
739/**
740 * Clears the shadow page pool.
741 *
742 * @param pVM The cross context VM structure.
743 * @param fFlushRemTlb When set, the REM TLB is scheduled for flushing as
744 * well.
745 */
746void pgmR3PoolClearAll(PVM pVM, bool fFlushRemTlb)
747{
748 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3PoolClearAllRendezvous, &fFlushRemTlb);
749 AssertRC(rc);
750}
751
752
753/**
754 * Protect all pgm pool page table entries to monitor writes
755 *
756 * @param pVM The cross context VM structure.
757 *
758 * @remarks ASSUMES the caller will flush all TLBs!!
759 */
760void pgmR3PoolWriteProtectPages(PVM pVM)
761{
762 PGM_LOCK_ASSERT_OWNER(pVM);
763 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
764 unsigned cLeft = pPool->cUsedPages;
765 unsigned iPage = pPool->cCurPages;
766 while (--iPage >= PGMPOOL_IDX_FIRST)
767 {
768 PPGMPOOLPAGE pPage = &pPool->aPages[iPage];
769 if ( pPage->GCPhys != NIL_RTGCPHYS
770 && pPage->cPresent)
771 {
772 union
773 {
774 void *pv;
775 PX86PT pPT;
776 PPGMSHWPTPAE pPTPae;
777 PEPTPT pPTEpt;
778 } uShw;
779 uShw.pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
780
781 switch (pPage->enmKind)
782 {
783 /*
784 * We only care about shadow page tables.
785 */
786 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
787 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
788 case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
789 for (unsigned iShw = 0; iShw < RT_ELEMENTS(uShw.pPT->a); iShw++)
790 if (uShw.pPT->a[iShw].u & X86_PTE_P)
791 uShw.pPT->a[iShw].u = ~(X86PGUINT)X86_PTE_RW;
792 break;
793
794 case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
795 case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
796 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
797 case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
798 case PGMPOOLKIND_PAE_PT_FOR_PHYS:
799 for (unsigned iShw = 0; iShw < RT_ELEMENTS(uShw.pPTPae->a); iShw++)
800 if (PGMSHWPTEPAE_IS_P(uShw.pPTPae->a[iShw]))
801 PGMSHWPTEPAE_SET_RO(uShw.pPTPae->a[iShw]);
802 break;
803
804 case PGMPOOLKIND_EPT_PT_FOR_PHYS:
805 for (unsigned iShw = 0; iShw < RT_ELEMENTS(uShw.pPTEpt->a); iShw++)
806 if (uShw.pPTEpt->a[iShw].u & EPT_E_READ)
807 uShw.pPTEpt->a[iShw].u &= ~(X86PGPAEUINT)EPT_E_WRITE;
808 break;
809
810 default:
811 break;
812 }
813 if (!--cLeft)
814 break;
815 }
816 }
817}
818
#ifdef VBOX_WITH_DEBUGGER
/**
 * @callback_method_impl{FNDBGCCMD, The '.pgmpoolcheck' command.}
 *
 * Consistency checker for the shadow page pool: for every PAE-on-PAE shadow
 * page table it (1) compares each present shadow PTE against the guest PTE it
 * shadows (physical address and R/W bit), and (2) verifies no other shadow
 * page table maps this page table itself writably.  Errors are printed via
 * the DBGC helper; returns failure if any were found.
 */
static DECLCALLBACK(int) pgmR3PoolCmdCheck(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
{
    DBGC_CMDHLP_REQ_UVM_RET(pCmdHlp, pCmd, pUVM);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    DBGC_CMDHLP_ASSERT_PARSER_RET(pCmdHlp, pCmd, -1, cArgs == 0); /* takes no arguments */
    uint32_t cErrors = 0;
    NOREF(paArgs);

    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
    for (unsigned i = 0; i < pPool->cCurPages; i++)
    {
        PPGMPOOLPAGE pPage = &pPool->aPages[i];
        bool fFirstMsg = true; /* print the "Check pool page" header only once per page */

        /** @todo cover other paging modes too. */
        if (pPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT)
        {
            PPGMSHWPTPAE pShwPT = (PPGMSHWPTPAE)PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
            /* Scope the guest page-table mapping so the lock is released promptly. */
            {
                PX86PTPAE pGstPT;
                PGMPAGEMAPLOCK LockPage;
                int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, pPage->GCPhys, (const void **)&pGstPT, &LockPage); AssertReleaseRC(rc);

                /* Check if any PTEs are out of sync. */
                for (unsigned j = 0; j < RT_ELEMENTS(pShwPT->a); j++)
                {
                    if (PGMSHWPTEPAE_IS_P(pShwPT->a[j]))
                    {
                        RTHCPHYS HCPhys = NIL_RTHCPHYS;
                        rc = PGMPhysGCPhys2HCPhys(pPool->CTX_SUFF(pVM), pGstPT->a[j].u & X86_PTE_PAE_PG_MASK, &HCPhys);
                        if (   rc != VINF_SUCCESS
                            || PGMSHWPTEPAE_GET_HCPHYS(pShwPT->a[j]) != HCPhys)
                        {
                            /* Shadow PTE points at a different host page than the guest PTE resolves to. */
                            if (fFirstMsg)
                            {
                                DBGCCmdHlpPrintf(pCmdHlp, "Check pool page %RGp\n", pPage->GCPhys);
                                fFirstMsg = false;
                            }
                            DBGCCmdHlpPrintf(pCmdHlp, "Mismatch HCPhys: rc=%Rrc idx=%d guest %RX64 shw=%RX64 vs %RHp\n", rc, j, pGstPT->a[j].u, PGMSHWPTEPAE_GET_LOG(pShwPT->a[j]), HCPhys);
                            cErrors++;
                        }
                        else if (   PGMSHWPTEPAE_IS_RW(pShwPT->a[j])
                                 && !(pGstPT->a[j].u & X86_PTE_RW))
                        {
                            /* Shadow grants write access where the guest PTE is read-only. */
                            if (fFirstMsg)
                            {
                                DBGCCmdHlpPrintf(pCmdHlp, "Check pool page %RGp\n", pPage->GCPhys);
                                fFirstMsg = false;
                            }
                            DBGCCmdHlpPrintf(pCmdHlp, "Mismatch r/w gst/shw: idx=%d guest %RX64 shw=%RX64 vs %RHp\n", j, pGstPT->a[j].u, PGMSHWPTEPAE_GET_LOG(pShwPT->a[j]), HCPhys);
                            cErrors++;
                        }
                    }
                }
                PGMPhysReleasePageMappingLock(pVM, &LockPage);
            }

            /* Make sure this page table can't be written to from any shadow mapping. */
            RTHCPHYS HCPhysPT = NIL_RTHCPHYS;
            int rc = PGMPhysGCPhys2HCPhys(pPool->CTX_SUFF(pVM), pPage->GCPhys, &HCPhysPT);
            AssertMsgRC(rc, ("PGMPhysGCPhys2HCPhys failed with rc=%d for %RGp\n", rc, pPage->GCPhys));
            if (rc == VINF_SUCCESS)
            {
                /* O(pages^2 * entries) scan - acceptable for a debugger command. */
                for (unsigned j = 0; j < pPool->cCurPages; j++)
                {
                    PPGMPOOLPAGE pTempPage = &pPool->aPages[j];

                    if (pTempPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT)
                    {
                        PPGMSHWPTPAE pShwPT2 = (PPGMSHWPTPAE)PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pTempPage);

                        for (unsigned k = 0; k < RT_ELEMENTS(pShwPT->a); k++)
                        {
                            if (    PGMSHWPTEPAE_IS_P_RW(pShwPT2->a[k])
# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
                                &&  !pPage->fDirty /* dirty pages are intentionally writable while monitored */
# endif
                                &&  PGMSHWPTEPAE_GET_HCPHYS(pShwPT2->a[k]) == HCPhysPT)
                            {
                                if (fFirstMsg)
                                {
                                    DBGCCmdHlpPrintf(pCmdHlp, "Check pool page %RGp\n", pPage->GCPhys);
                                    fFirstMsg = false;
                                }
                                /* NOTE(review): message prints pShwPT->a[k] alongside pShwPT2->a[k];
                                 * presumably intended for comparison, but verify the first operand
                                 * shouldn't also be pShwPT2. */
                                DBGCCmdHlpPrintf(pCmdHlp, "Mismatch: r/w: GCPhys=%RGp idx=%d shw %RX64 %RX64\n", pTempPage->GCPhys, k, PGMSHWPTEPAE_GET_LOG(pShwPT->a[k]), PGMSHWPTEPAE_GET_LOG(pShwPT2->a[k]));
                                cErrors++;
                            }
                        }
                    }
                }
            }
        }
    }
    if (cErrors > 0)
        return DBGCCmdHlpFail(pCmdHlp, pCmd, "Found %#x errors", cErrors);
    return VINF_SUCCESS;
}
#endif /* VBOX_WITH_DEBUGGER */
注意: 瀏覽 TracBrowser 來幫助您使用儲存庫瀏覽器

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette