VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp@ 80281

Last change on this file since 80281 was 80281, checked in by vboxsync, 6 years ago

VMM,++: Refactoring code to use VMMC & VMMCPUCC. bugref:9217

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 206.6 KB
 
1/* $Id: PGMPhys.cpp 80281 2019-08-15 07:29:37Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define VBOX_BUGREF_9217_PART_I
23#define LOG_GROUP LOG_GROUP_PGM_PHYS
24#include <VBox/vmm/pgm.h>
25#include <VBox/vmm/iem.h>
26#include <VBox/vmm/iom.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/nem.h>
29#include <VBox/vmm/stam.h>
30#ifdef VBOX_WITH_REM
31# include <VBox/vmm/rem.h>
32#endif
33#include <VBox/vmm/pdmdev.h>
34#include "PGMInternal.h"
35#include <VBox/vmm/vmcc.h>
36
37#include "PGMInline.h"
38
39#include <VBox/sup.h>
40#include <VBox/param.h>
41#include <VBox/err.h>
42#include <VBox/log.h>
43#include <iprt/assert.h>
44#include <iprt/alloc.h>
45#include <iprt/asm.h>
46#ifdef VBOX_STRICT
47# include <iprt/crc.h>
48#endif
49#include <iprt/thread.h>
50#include <iprt/string.h>
51#include <iprt/system.h>
52
53
54/*********************************************************************************************************************************
55* Defined Constants And Macros *
56*********************************************************************************************************************************/
57/** The number of pages to free in one batch. */
58#define PGMPHYS_FREE_PAGE_BATCH_SIZE 128
59
60
61/*
62 * PGMR3PhysReadU8-64
63 * PGMR3PhysWriteU8-64
64 */
65#define PGMPHYSFN_READNAME PGMR3PhysReadU8
66#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU8
67#define PGMPHYS_DATASIZE 1
68#define PGMPHYS_DATATYPE uint8_t
69#include "PGMPhysRWTmpl.h"
70
71#define PGMPHYSFN_READNAME PGMR3PhysReadU16
72#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU16
73#define PGMPHYS_DATASIZE 2
74#define PGMPHYS_DATATYPE uint16_t
75#include "PGMPhysRWTmpl.h"
76
77#define PGMPHYSFN_READNAME PGMR3PhysReadU32
78#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU32
79#define PGMPHYS_DATASIZE 4
80#define PGMPHYS_DATATYPE uint32_t
81#include "PGMPhysRWTmpl.h"
82
83#define PGMPHYSFN_READNAME PGMR3PhysReadU64
84#define PGMPHYSFN_WRITENAME PGMR3PhysWriteU64
85#define PGMPHYS_DATASIZE 8
86#define PGMPHYS_DATATYPE uint64_t
87#include "PGMPhysRWTmpl.h"
88
89
90/**
91 * EMT worker for PGMR3PhysReadExternal.
92 */
93static DECLCALLBACK(int) pgmR3PhysReadExternalEMT(PVM pVM, PRTGCPHYS pGCPhys, void *pvBuf, size_t cbRead,
94 PGMACCESSORIGIN enmOrigin)
95{
96 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, *pGCPhys, pvBuf, cbRead, enmOrigin);
97 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
98 return VINF_SUCCESS;
99}
100
101
102/**
103 * Read from physical memory, external users.
104 *
105 * @returns VBox status code.
106 * @retval VINF_SUCCESS.
107 *
108 * @param pVM The cross context VM structure.
109 * @param GCPhys Physical address to read from.
110 * @param pvBuf Where to read into.
111 * @param cbRead How many bytes to read.
112 * @param enmOrigin Who is calling.
113 *
114 * @thread Any but EMTs.
115 */
116VMMR3DECL(int) PGMR3PhysReadExternal(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead, PGMACCESSORIGIN enmOrigin)
117{
118 VM_ASSERT_OTHER_THREAD(pVM);
119
120 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
121 LogFlow(("PGMR3PhysReadExternal: %RGp %d\n", GCPhys, cbRead));
122
123 pgmLock(pVM);
124
125 /*
126 * Copy loop on ram ranges.
127 */
128 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
129 for (;;)
130 {
131 /* Inside range or not? */
132 if (pRam && GCPhys >= pRam->GCPhys)
133 {
134 /*
135 * Must work our way thru this page by page.
136 */
137 RTGCPHYS off = GCPhys - pRam->GCPhys;
138 while (off < pRam->cb)
139 {
140 unsigned iPage = off >> PAGE_SHIFT;
141 PPGMPAGE pPage = &pRam->aPages[iPage];
142
143 /*
144 * If the page has an ALL access handler, we'll have to
145 * delegate the job to EMT.
146 */
147 if ( PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)
148 || PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
149 {
150 pgmUnlock(pVM);
151
152 return VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)pgmR3PhysReadExternalEMT, 5,
153 pVM, &GCPhys, pvBuf, cbRead, enmOrigin);
154 }
155 Assert(!PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage));
156
157 /*
158 * Simple stuff, go ahead.
159 */
160 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
161 if (cb > cbRead)
162 cb = cbRead;
163 PGMPAGEMAPLOCK PgMpLck;
164 const void *pvSrc;
165 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc, &PgMpLck);
166 if (RT_SUCCESS(rc))
167 {
168 memcpy(pvBuf, pvSrc, cb);
169 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
170 }
171 else
172 {
173 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
174 pRam->GCPhys + off, pPage, rc));
175 memset(pvBuf, 0xff, cb);
176 }
177
178 /* next page */
179 if (cb >= cbRead)
180 {
181 pgmUnlock(pVM);
182 return VINF_SUCCESS;
183 }
184 cbRead -= cb;
185 off += cb;
186 GCPhys += cb;
187 pvBuf = (char *)pvBuf + cb;
188 } /* walk pages in ram range. */
189 }
190 else
191 {
192 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
193
194 /*
195 * Unassigned address space.
196 */
197 size_t cb = pRam ? pRam->GCPhys - GCPhys : ~(size_t)0;
198 if (cb >= cbRead)
199 {
200 memset(pvBuf, 0xff, cbRead);
201 break;
202 }
203 memset(pvBuf, 0xff, cb);
204
205 cbRead -= cb;
206 pvBuf = (char *)pvBuf + cb;
207 GCPhys += cb;
208 }
209
210 /* Advance range if necessary. */
211 while (pRam && GCPhys > pRam->GCPhysLast)
212 pRam = pRam->CTX_SUFF(pNext);
213 } /* Ram range walk */
214
215 pgmUnlock(pVM);
216
217 return VINF_SUCCESS;
218}
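
/*
 * Usage sketch: how a non-EMT thread (e.g. a device worker thread) might read
 * a small guest physical buffer through PGMR3PhysReadExternal.  The helper
 * name, the buffer size and the PGMACCESSORIGIN_DEVICE origin are assumptions
 * made for illustration only.
 */
#if 0
static int exampleReadGuestBuffer(PVM pVM, RTGCPHYS GCPhysSrc)
{
    uint8_t abBuf[64];
    /* Pages with ALL access handlers or special MMIO aliases are transparently
       delegated to an EMT by the API, so this is safe from any non-EMT thread. */
    int rc = PGMR3PhysReadExternal(pVM, GCPhysSrc, abBuf, sizeof(abBuf), PGMACCESSORIGIN_DEVICE);
    if (RT_SUCCESS(rc))
    {
        /* consume abBuf here */
    }
    return rc;
}
#endif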
219
220
221/**
222 * EMT worker for PGMR3PhysWriteExternal.
223 */
224static DECLCALLBACK(int) pgmR3PhysWriteExternalEMT(PVM pVM, PRTGCPHYS pGCPhys, const void *pvBuf, size_t cbWrite,
225 PGMACCESSORIGIN enmOrigin)
226{
227 /** @todo VERR_EM_NO_MEMORY */
228 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM, *pGCPhys, pvBuf, cbWrite, enmOrigin);
229 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
230 return VINF_SUCCESS;
231}
232
233
234/**
235 * Write to physical memory, external users.
236 *
237 * @returns VBox status code.
238 * @retval VINF_SUCCESS.
239 * @retval VERR_EM_NO_MEMORY.
240 *
241 * @param pVM The cross context VM structure.
242 * @param GCPhys Physical address to write to.
243 * @param pvBuf What to write.
244 * @param cbWrite How many bytes to write.
245 * @param enmOrigin Who is calling.
246 *
247 * @thread Any but EMTs.
248 */
249VMMDECL(int) PGMR3PhysWriteExternal(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite, PGMACCESSORIGIN enmOrigin)
250{
251 VM_ASSERT_OTHER_THREAD(pVM);
252
253 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites,
254 ("Calling PGMR3PhysWriteExternal after pgmR3Save()! GCPhys=%RGp cbWrite=%#x enmOrigin=%d\n",
255 GCPhys, cbWrite, enmOrigin));
256 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
257 LogFlow(("PGMR3PhysWriteExternal: %RGp %d\n", GCPhys, cbWrite));
258
259 pgmLock(pVM);
260
261 /*
262 * Copy loop on ram ranges, stop when we hit something difficult.
263 */
264 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
265 for (;;)
266 {
267 /* Inside range or not? */
268 if (pRam && GCPhys >= pRam->GCPhys)
269 {
270 /*
271 * Must work our way thru this page by page.
272 */
273 RTGCPTR off = GCPhys - pRam->GCPhys;
274 while (off < pRam->cb)
275 {
276 RTGCPTR iPage = off >> PAGE_SHIFT;
277 PPGMPAGE pPage = &pRam->aPages[iPage];
278
279 /*
280 * If the page is problematic, we have to do the work on the EMT.
281 *
282 * Allocating writable pages and access handlers are
283 * problematic, write monitored pages are simple and can be
284 * dealt with here.
285 */
286 if ( PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
287 || PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
288 || PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
289 {
290 if ( PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED
291 && !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
292 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, GCPhys);
293 else
294 {
295 pgmUnlock(pVM);
296
297 return VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)pgmR3PhysWriteExternalEMT, 5,
298 pVM, &GCPhys, pvBuf, cbWrite, enmOrigin);
299 }
300 }
301 Assert(!PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage));
302
303 /*
304 * Simple stuff, go ahead.
305 */
306 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
307 if (cb > cbWrite)
308 cb = cbWrite;
309 PGMPAGEMAPLOCK PgMpLck;
310 void *pvDst;
311 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst, &PgMpLck);
312 if (RT_SUCCESS(rc))
313 {
314 memcpy(pvDst, pvBuf, cb);
315 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
316 }
317 else
318 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
319 pRam->GCPhys + off, pPage, rc));
320
321 /* next page */
322 if (cb >= cbWrite)
323 {
324 pgmUnlock(pVM);
325 return VINF_SUCCESS;
326 }
327
328 cbWrite -= cb;
329 off += cb;
330 GCPhys += cb;
331 pvBuf = (const char *)pvBuf + cb;
332 } /* walk pages in ram range */
333 }
334 else
335 {
336 /*
337 * Unassigned address space, skip it.
338 */
339 if (!pRam)
340 break;
341 size_t cb = pRam->GCPhys - GCPhys;
342 if (cb >= cbWrite)
343 break;
344 cbWrite -= cb;
345 pvBuf = (const char *)pvBuf + cb;
346 GCPhys += cb;
347 }
348
349 /* Advance range if necessary. */
350 while (pRam && GCPhys > pRam->GCPhysLast)
351 pRam = pRam->CTX_SUFF(pNext);
352 } /* Ram range walk */
353
354 pgmUnlock(pVM);
355 return VINF_SUCCESS;
356}
357
358
359/**
360 * VMR3ReqCall worker for PGMR3PhysGCPhys2CCPtrExternal to make pages writable.
361 *
362 * @returns see PGMR3PhysGCPhys2CCPtrExternal
363 * @param pVM The cross context VM structure.
364 * @param pGCPhys Pointer to the guest physical address.
365 * @param ppv Where to store the mapping address.
366 * @param pLock Where to store the lock.
367 */
368static DECLCALLBACK(int) pgmR3PhysGCPhys2CCPtrDelegated(PVM pVM, PRTGCPHYS pGCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
369{
370 /*
371 * Just hand it to PGMPhysGCPhys2CCPtr and check that it's not a page with
372 * an access handler after it succeeds.
373 */
374 int rc = pgmLock(pVM);
375 AssertRCReturn(rc, rc);
376
377 rc = PGMPhysGCPhys2CCPtr(pVM, *pGCPhys, ppv, pLock);
378 if (RT_SUCCESS(rc))
379 {
380 PPGMPAGEMAPTLBE pTlbe;
381 int rc2 = pgmPhysPageQueryTlbe(pVM, *pGCPhys, &pTlbe);
382 AssertFatalRC(rc2);
383 PPGMPAGE pPage = pTlbe->pPage;
384 if (PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
385 {
386 PGMPhysReleasePageMappingLock(pVM, pLock);
387 rc = VERR_PGM_PHYS_PAGE_RESERVED;
388 }
389 else if ( PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
390#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
391 || pgmPoolIsDirtyPage(pVM, *pGCPhys)
392#endif
393 )
394 {
395 /* We *must* flush any corresponding pgm pool page here, otherwise we'll
396 * not be informed about writes and keep bogus gst->shw mappings around.
397 */
398 pgmPoolFlushPageByGCPhys(pVM, *pGCPhys);
399 Assert(!PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage));
400 /** @todo r=bird: return VERR_PGM_PHYS_PAGE_RESERVED here if it still has
401 * active handlers, see the PGMR3PhysGCPhys2CCPtrExternal docs. */
402 }
403 }
404
405 pgmUnlock(pVM);
406 return rc;
407}
408
409
410/**
411 * Requests the mapping of a guest page into ring-3, external threads.
412 *
413 * When you're done with the page, call PGMPhysReleasePageMappingLock() ASAP to
414 * release it.
415 *
416 * This API will assume your intention is to write to the page, and will
417 * therefore replace shared and zero pages. If you do not intend to modify the
418 * page, use the PGMR3PhysGCPhys2CCPtrReadOnlyExternal() API.
419 *
420 * @returns VBox status code.
421 * @retval VINF_SUCCESS on success.
422 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical
423 * backing or if the page has any active access handlers. The caller
424 * must fall back on using PGMR3PhysWriteExternal.
425 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
426 *
427 * @param pVM The cross context VM structure.
428 * @param GCPhys The guest physical address of the page that should be mapped.
429 * @param ppv Where to store the address corresponding to GCPhys.
430 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
431 *
432 * @remark Avoid calling this API from within critical sections (other than the
433 * PGM one) because of the deadlock risk when we have to delegate the
434 * task to an EMT.
435 * @thread Any.
436 */
437VMMR3DECL(int) PGMR3PhysGCPhys2CCPtrExternal(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
438{
439 AssertPtr(ppv);
440 AssertPtr(pLock);
441
442 Assert(VM_IS_EMT(pVM) || !PGMIsLockOwner(pVM));
443
444 int rc = pgmLock(pVM);
445 AssertRCReturn(rc, rc);
446
447 /*
448 * Query the Physical TLB entry for the page (may fail).
449 */
450 PPGMPAGEMAPTLBE pTlbe;
451 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
452 if (RT_SUCCESS(rc))
453 {
454 PPGMPAGE pPage = pTlbe->pPage;
455 if (PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
456 rc = VERR_PGM_PHYS_PAGE_RESERVED;
457 else
458 {
459 /*
460 * If the page is shared, the zero page, or being write monitored
461 * it must be converted to a page that's writable if possible.
462 * We can only deal with write monitored pages here, the rest have
463 * to be on an EMT.
464 */
465 if ( PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
466 || PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
467#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
468 || pgmPoolIsDirtyPage(pVM, GCPhys)
469#endif
470 )
471 {
472 if ( PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED
473 && !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
474#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
475 && !pgmPoolIsDirtyPage(pVM, GCPhys) /** @todo we're very likely doing this twice. */
476#endif
477 )
478 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, GCPhys);
479 else
480 {
481 pgmUnlock(pVM);
482
483 return VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)pgmR3PhysGCPhys2CCPtrDelegated, 4,
484 pVM, &GCPhys, ppv, pLock);
485 }
486 }
487
488 /*
489 * Now, just perform the locking and calculate the return address.
490 */
491 PPGMPAGEMAP pMap = pTlbe->pMap;
492 if (pMap)
493 pMap->cRefs++;
494
495 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
496 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
497 {
498 if (cLocks == 0)
499 pVM->pgm.s.cWriteLockedPages++;
500 PGM_PAGE_INC_WRITE_LOCKS(pPage);
501 }
502 else if (cLocks != PGM_PAGE_GET_WRITE_LOCKS(pPage))
503 {
504 PGM_PAGE_INC_WRITE_LOCKS(pPage);
505 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent write locked state!\n", GCPhys, pPage));
506 if (pMap)
507 pMap->cRefs++; /* Extra ref to prevent it from going away. */
508 }
509
510 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
511 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
512 pLock->pvMap = pMap;
513 }
514 }
515
516 pgmUnlock(pVM);
517 return rc;
518}
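
/*
 * Usage sketch: mapping a guest page for writing with
 * PGMR3PhysGCPhys2CCPtrExternal, releasing the lock ASAP, and using the
 * documented PGMR3PhysWriteExternal fallback when the page has no regular
 * backing.  The helper name and the origin value are assumptions; the dword
 * is assumed not to cross a page boundary.
 */
#if 0
static int exampleWriteGuestU32(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value)
{
    void          *pv;
    PGMPAGEMAPLOCK Lock;
    int rc = PGMR3PhysGCPhys2CCPtrExternal(pVM, GCPhys, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        *(uint32_t *)pv = u32Value;     /* pv already includes the page offset of GCPhys. */
        PGMPhysReleasePageMappingLock(pVM, &Lock);
    }
    else if (rc == VERR_PGM_PHYS_PAGE_RESERVED)
        rc = PGMR3PhysWriteExternal(pVM, GCPhys, &u32Value, sizeof(u32Value), PGMACCESSORIGIN_DEVICE);
    return rc;
}
#endif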
519
520
521/**
522 * Requests the mapping of a guest page into ring-3, external threads.
523 *
524 * When you're done with the page, call PGMPhysReleasePageMappingLock() ASAP to
525 * release it.
526 *
527 * @returns VBox status code.
528 * @retval VINF_SUCCESS on success.
529 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical
530 * backing or if the page has an active ALL access handler. The caller
531 * must fall back on using PGMPhysRead.
532 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
533 *
534 * @param pVM The cross context VM structure.
535 * @param GCPhys The guest physical address of the page that should be mapped.
536 * @param ppv Where to store the address corresponding to GCPhys.
537 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
538 *
539 * @remark Avoid calling this API from within critical sections (other than
540 * the PGM one) because of the deadlock risk.
541 * @thread Any.
542 */
543VMMR3DECL(int) PGMR3PhysGCPhys2CCPtrReadOnlyExternal(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
544{
545 int rc = pgmLock(pVM);
546 AssertRCReturn(rc, rc);
547
548 /*
549 * Query the Physical TLB entry for the page (may fail).
550 */
551 PPGMPAGEMAPTLBE pTlbe;
552 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
553 if (RT_SUCCESS(rc))
554 {
555 PPGMPAGE pPage = pTlbe->pPage;
556#if 1
557 /* MMIO pages don't have any readable backing. */
558 if (PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
559 rc = VERR_PGM_PHYS_PAGE_RESERVED;
560#else
561 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
562 rc = VERR_PGM_PHYS_PAGE_RESERVED;
563#endif
564 else
565 {
566 /*
567 * Now, just perform the locking and calculate the return address.
568 */
569 PPGMPAGEMAP pMap = pTlbe->pMap;
570 if (pMap)
571 pMap->cRefs++;
572
573 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
574 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
575 {
576 if (cLocks == 0)
577 pVM->pgm.s.cReadLockedPages++;
578 PGM_PAGE_INC_READ_LOCKS(pPage);
579 }
580 else if (cLocks != PGM_PAGE_GET_READ_LOCKS(pPage))
581 {
582 PGM_PAGE_INC_READ_LOCKS(pPage);
583 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent readonly locked state!\n", GCPhys, pPage));
584 if (pMap)
585 pMap->cRefs++; /* Extra ref to prevent it from going away. */
586 }
587
588 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
589 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
590 pLock->pvMap = pMap;
591 }
592 }
593
594 pgmUnlock(pVM);
595 return rc;
596}
597
598
599/**
600 * Requests the mapping of multiple guest pages into ring-3, external threads.
601 *
602 * When you're done with the pages, call PGMPhysBulkReleasePageMappingLock()
603 * ASAP to release them.
604 *
605 * This API will assume your intention is to write to the pages, and will
606 * therefore replace shared and zero pages. If you do not intend to modify the
607 * pages, use the PGMR3PhysBulkGCPhys2CCPtrReadOnlyExternal() API.
608 *
609 * @returns VBox status code.
610 * @retval VINF_SUCCESS on success.
611 * @retval VERR_PGM_PHYS_PAGE_RESERVED if any of the pages has no physical
612 * backing or if any of the pages has any active access
613 * handlers. The caller must fall back on using PGMR3PhysWriteExternal.
614 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if @a paGCPhysPages contains
615 * an invalid physical address.
616 *
617 * @param pVM The cross context VM structure.
618 * @param cPages Number of pages to lock.
619 * @param paGCPhysPages The guest physical address of the pages that
620 * should be mapped (@a cPages entries).
621 * @param papvPages Where to store the ring-3 mapping addresses
622 * corresponding to @a paGCPhysPages.
623 * @param paLocks Where to store the locking information that
624 * pfnPhysBulkReleasePageMappingLock needs (@a cPages
625 * in length).
626 *
627 * @remark Avoid calling this API from within critical sections (other than the
628 * PGM one) because of the deadlock risk when we have to delegate the
629 * task to an EMT.
630 * @thread Any.
631 */
632VMMR3DECL(int) PGMR3PhysBulkGCPhys2CCPtrExternal(PVM pVM, uint32_t cPages, PCRTGCPHYS paGCPhysPages,
633 void **papvPages, PPGMPAGEMAPLOCK paLocks)
634{
635 Assert(cPages > 0);
636 AssertPtr(papvPages);
637 AssertPtr(paLocks);
638
639 Assert(VM_IS_EMT(pVM) || !PGMIsLockOwner(pVM));
640
641 int rc = pgmLock(pVM);
642 AssertRCReturn(rc, rc);
643
644 /*
645 * Lock the pages one by one.
646 * The loop body is similar to PGMR3PhysGCPhys2CCPtrExternal.
647 */
648 int32_t cNextYield = 128;
649 uint32_t iPage;
650 for (iPage = 0; iPage < cPages; iPage++)
651 {
652 if (--cNextYield > 0)
653 { /* likely */ }
654 else
655 {
656 pgmUnlock(pVM);
657 ASMNopPause();
658 pgmLock(pVM);
659 cNextYield = 128;
660 }
661
662 /*
663 * Query the Physical TLB entry for the page (may fail).
664 */
665 PPGMPAGEMAPTLBE pTlbe;
666 rc = pgmPhysPageQueryTlbe(pVM, paGCPhysPages[iPage], &pTlbe);
667 if (RT_SUCCESS(rc))
668 { }
669 else
670 break;
671 PPGMPAGE pPage = pTlbe->pPage;
672
673 /*
674 * No MMIO or active access handlers.
675 */
676 if ( !PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)
677 && !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
678 { }
679 else
680 {
681 rc = VERR_PGM_PHYS_PAGE_RESERVED;
682 break;
683 }
684
685 /*
686 * The page must be in the allocated state and not be a dirty pool page.
687 * We can handle converting a write monitored page to an allocated one, but
688 * anything more complicated must be delegated to an EMT.
689 */
690 bool fDelegateToEmt = false;
691 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED)
692#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
693 fDelegateToEmt = pgmPoolIsDirtyPage(pVM, paGCPhysPages[iPage]);
694#else
695 fDelegateToEmt = false;
696#endif
697 else if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
698 {
699#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
700 if (!pgmPoolIsDirtyPage(pVM, paGCPhysPages[iPage]))
701 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, paGCPhysPages[iPage]);
702 else
703 fDelegateToEmt = true;
704#endif
705 }
706 else
707 fDelegateToEmt = true;
708 if (!fDelegateToEmt)
709 { }
710 else
711 {
712 /* We could do this delegation in bulk, but considered too much work vs gain. */
713 pgmUnlock(pVM);
714 rc = VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)pgmR3PhysGCPhys2CCPtrDelegated, 4,
715 pVM, &paGCPhysPages[iPage], &papvPages[iPage], &paLocks[iPage]);
716 pgmLock(pVM);
717 if (RT_FAILURE(rc))
718 break;
719 cNextYield = 128;
720 }
721
722 /*
723 * Now, just perform the locking and address calculation.
724 */
725 PPGMPAGEMAP pMap = pTlbe->pMap;
726 if (pMap)
727 pMap->cRefs++;
728
729 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
730 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
731 {
732 if (cLocks == 0)
733 pVM->pgm.s.cWriteLockedPages++;
734 PGM_PAGE_INC_WRITE_LOCKS(pPage);
735 }
736 else if (cLocks != PGM_PAGE_GET_WRITE_LOCKS(pPage))
737 {
738 PGM_PAGE_INC_WRITE_LOCKS(pPage);
739 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent write locked state!\n", paGCPhysPages[iPage], pPage));
740 if (pMap)
741 pMap->cRefs++; /* Extra ref to prevent it from going away. */
742 }
743
744 papvPages[iPage] = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(paGCPhysPages[iPage] & PAGE_OFFSET_MASK));
745 paLocks[iPage].uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
746 paLocks[iPage].pvMap = pMap;
747 }
748
749 pgmUnlock(pVM);
750
751 /*
752 * On failure we must unlock any pages we managed to get already.
753 */
754 if (RT_FAILURE(rc) && iPage > 0)
755 PGMPhysBulkReleasePageMappingLocks(pVM, iPage, paLocks);
756
757 return rc;
758}
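
/*
 * Usage sketch: locking a small batch of pages for writing with
 * PGMR3PhysBulkGCPhys2CCPtrExternal and releasing them again.  The batch size
 * of four and the zero-filling are assumptions for illustration; on failure
 * the API has already released whatever it managed to lock.
 */
#if 0
static int exampleZeroFourPages(PVM pVM, PCRTGCPHYS paGCPhysPages /* 4 page-aligned addresses */)
{
    void          *apvPages[4];
    PGMPAGEMAPLOCK aLocks[4];
    int rc = PGMR3PhysBulkGCPhys2CCPtrExternal(pVM, 4, paGCPhysPages, apvPages, aLocks);
    if (RT_SUCCESS(rc))
    {
        for (uint32_t i = 0; i < 4; i++)
            memset(apvPages[i], 0, PAGE_SIZE);
        PGMPhysBulkReleasePageMappingLocks(pVM, 4, aLocks);
    }
    return rc;
}
#endif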
759
760
761/**
762 * Requests the mapping of multiple guest pages into ring-3, for reading only,
763 * external threads.
764 *
765 * When you're done with the pages, call PGMPhysReleasePageMappingLock() ASAP
766 * to release them.
767 *
768 * @returns VBox status code.
769 * @retval VINF_SUCCESS on success.
770 * @retval VERR_PGM_PHYS_PAGE_RESERVED if any of the pages has no physical
771 * backing or if any of the pages has an active ALL access
772 * handler. The caller must fall back on using PGMR3PhysReadExternal.
773 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if @a paGCPhysPages contains
774 * an invalid physical address.
775 *
776 * @param pVM The cross context VM structure.
777 * @param cPages Number of pages to lock.
778 * @param paGCPhysPages The guest physical address of the pages that
779 * should be mapped (@a cPages entries).
780 * @param papvPages Where to store the ring-3 mapping addresses
781 * corresponding to @a paGCPhysPages.
782 * @param paLocks Where to store the lock information that
783 * pfnPhysReleasePageMappingLock needs (@a cPages
784 * in length).
785 *
786 * @remark Avoid calling this API from within critical sections (other than
787 * the PGM one) because of the deadlock risk.
788 * @thread Any.
789 */
790VMMR3DECL(int) PGMR3PhysBulkGCPhys2CCPtrReadOnlyExternal(PVM pVM, uint32_t cPages, PCRTGCPHYS paGCPhysPages,
791 void const **papvPages, PPGMPAGEMAPLOCK paLocks)
792{
793 Assert(cPages > 0);
794 AssertPtr(papvPages);
795 AssertPtr(paLocks);
796
797 Assert(VM_IS_EMT(pVM) || !PGMIsLockOwner(pVM));
798
799 int rc = pgmLock(pVM);
800 AssertRCReturn(rc, rc);
801
802 /*
803 * Lock the pages one by one.
804 * The loop body is similar to PGMR3PhysGCPhys2CCPtrReadOnlyExternal.
805 */
806 int32_t cNextYield = 256;
807 uint32_t iPage;
808 for (iPage = 0; iPage < cPages; iPage++)
809 {
810 if (--cNextYield > 0)
811 { /* likely */ }
812 else
813 {
814 pgmUnlock(pVM);
815 ASMNopPause();
816 pgmLock(pVM);
817 cNextYield = 256;
818 }
819
820 /*
821 * Query the Physical TLB entry for the page (may fail).
822 */
823 PPGMPAGEMAPTLBE pTlbe;
824 rc = pgmPhysPageQueryTlbe(pVM, paGCPhysPages[iPage], &pTlbe);
825 if (RT_SUCCESS(rc))
826 { }
827 else
828 break;
829 PPGMPAGE pPage = pTlbe->pPage;
830
831 /*
832 * No MMIO or active all access handlers, everything else can be accessed.
833 */
834 if ( !PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)
835 && !PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
836 { }
837 else
838 {
839 rc = VERR_PGM_PHYS_PAGE_RESERVED;
840 break;
841 }
842
843 /*
844 * Now, just perform the locking and address calculation.
845 */
846 PPGMPAGEMAP pMap = pTlbe->pMap;
847 if (pMap)
848 pMap->cRefs++;
849
850 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
851 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
852 {
853 if (cLocks == 0)
854 pVM->pgm.s.cReadLockedPages++;
855 PGM_PAGE_INC_READ_LOCKS(pPage);
856 }
857 else if (cLocks != PGM_PAGE_GET_READ_LOCKS(pPage))
858 {
859 PGM_PAGE_INC_READ_LOCKS(pPage);
860 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent readonly locked state!\n", paGCPhysPages[iPage], pPage));
861 if (pMap)
862 pMap->cRefs++; /* Extra ref to prevent it from going away. */
863 }
864
865 papvPages[iPage] = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(paGCPhysPages[iPage] & PAGE_OFFSET_MASK));
866 paLocks[iPage].uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
867 paLocks[iPage].pvMap = pMap;
868 }
869
870 pgmUnlock(pVM);
871
872 /*
873 * On failure we must unlock any pages we managed to get already.
874 */
875 if (RT_FAILURE(rc) && iPage > 0)
876 PGMPhysBulkReleasePageMappingLocks(pVM, iPage, paLocks);
877
878 return rc;
879}
880
881
882#define MAKE_LEAF(a_pNode) \
883 do { \
884 (a_pNode)->pLeftR3 = NIL_RTR3PTR; \
885 (a_pNode)->pRightR3 = NIL_RTR3PTR; \
886 (a_pNode)->pLeftR0 = NIL_RTR0PTR; \
887 (a_pNode)->pRightR0 = NIL_RTR0PTR; \
888 } while (0)
889
890#define INSERT_LEFT(a_pParent, a_pNode) \
891 do { \
892 (a_pParent)->pLeftR3 = (a_pNode); \
893 (a_pParent)->pLeftR0 = (a_pNode)->pSelfR0; \
894 } while (0)
895#define INSERT_RIGHT(a_pParent, a_pNode) \
896 do { \
897 (a_pParent)->pRightR3 = (a_pNode); \
898 (a_pParent)->pRightR0 = (a_pNode)->pSelfR0; \
899 } while (0)
900
901
902/**
903 * Recursive tree builder.
904 *
905 * @param ppRam Pointer to the iterator variable.
906 * @param iDepth The current depth. Inserts a leaf node if 0.
907 */
908static PPGMRAMRANGE pgmR3PhysRebuildRamRangeSearchTreesRecursively(PPGMRAMRANGE *ppRam, int iDepth)
909{
910 PPGMRAMRANGE pRam;
911 if (iDepth <= 0)
912 {
913 /*
914 * Leaf node.
915 */
916 pRam = *ppRam;
917 if (pRam)
918 {
919 *ppRam = pRam->pNextR3;
920 MAKE_LEAF(pRam);
921 }
922 }
923 else
924 {
925
926 /*
927 * Intermediate node.
928 */
929 PPGMRAMRANGE pLeft = pgmR3PhysRebuildRamRangeSearchTreesRecursively(ppRam, iDepth - 1);
930
931 pRam = *ppRam;
932 if (!pRam)
933 return pLeft;
934 *ppRam = pRam->pNextR3;
935 MAKE_LEAF(pRam);
936 INSERT_LEFT(pRam, pLeft);
937
938 PPGMRAMRANGE pRight = pgmR3PhysRebuildRamRangeSearchTreesRecursively(ppRam, iDepth - 1);
939 if (pRight)
940 INSERT_RIGHT(pRam, pRight);
941 }
942 return pRam;
943}
944
945
946/**
947 * Rebuilds the RAM range search trees.
948 *
949 * @param pVM The cross context VM structure.
950 */
951static void pgmR3PhysRebuildRamRangeSearchTrees(PVM pVM)
952{
953
954 /*
955 * Create the reasonably balanced tree in a sequential fashion.
956 * For simplicity (laziness) we use standard recursion here.
957 */
958 int iDepth = 0;
959 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3;
960 PPGMRAMRANGE pRoot = pgmR3PhysRebuildRamRangeSearchTreesRecursively(&pRam, 0);
961 while (pRam)
962 {
963 PPGMRAMRANGE pLeft = pRoot;
964
965 pRoot = pRam;
966 pRam = pRam->pNextR3;
967 MAKE_LEAF(pRoot);
968 INSERT_LEFT(pRoot, pLeft);
969
970 PPGMRAMRANGE pRight = pgmR3PhysRebuildRamRangeSearchTreesRecursively(&pRam, iDepth);
971 if (pRight)
972 INSERT_RIGHT(pRoot, pRight);
973 /** @todo else: rotate the tree. */
974
975 iDepth++;
976 }
977
978 pVM->pgm.s.pRamRangeTreeR3 = pRoot;
979 pVM->pgm.s.pRamRangeTreeR0 = pRoot ? pRoot->pSelfR0 : NIL_RTR0PTR;
980
981#ifdef VBOX_STRICT
982 /*
983 * Verify that the above code works.
984 */
985 unsigned cRanges = 0;
986 for (pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3)
987 cRanges++;
988 Assert(cRanges > 0);
989
990 unsigned cMaxDepth = ASMBitLastSetU32(cRanges);
991 if ((1U << cMaxDepth) < cRanges)
992 cMaxDepth++;
993
994 for (pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3)
995 {
996 unsigned cDepth = 0;
997 PPGMRAMRANGE pRam2 = pVM->pgm.s.pRamRangeTreeR3;
998 for (;;)
999 {
1000 if (pRam == pRam2)
1001 break;
1002 Assert(pRam2);
1003 if (pRam->GCPhys < pRam2->GCPhys)
1004 pRam2 = pRam2->pLeftR3;
1005 else
1006 pRam2 = pRam2->pRightR3;
1007 }
1008 AssertMsg(cDepth <= cMaxDepth, ("cDepth=%d cMaxDepth=%d\n", cDepth, cMaxDepth));
1009 }
1010#endif /* VBOX_STRICT */
1011}
1012
1013#undef MAKE_LEAF
1014#undef INSERT_LEFT
1015#undef INSERT_RIGHT
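
/*
 * Worked example: with six RAM ranges A..F (ascending by GCPhys) on the list,
 * pgmR3PhysRebuildRamRangeSearchTrees produces
 *
 *              D
 *            /   \
 *           B     F
 *          / \   /
 *         A   C E
 *
 * Each iteration of the while loop takes the next range as the new root, hangs
 * the tree built so far on its left and a freshly built subtree of the same
 * depth on its right, roughly doubling the covered range count each time.
 */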
1016
1017/**
1018 * Relinks the RAM ranges using the pSelfRC and pSelfR0 pointers.
1019 *
1020 * Called when anything was relocated.
1021 *
1022 * @param pVM The cross context VM structure.
1023 */
1024void pgmR3PhysRelinkRamRanges(PVM pVM)
1025{
1026 PPGMRAMRANGE pCur;
1027
1028#ifdef VBOX_STRICT
1029 for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3)
1030 {
1031 Assert((pCur->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pCur->pSelfR0 == MMHyperCCToR0(pVM, pCur));
1032 Assert((pCur->GCPhys & PAGE_OFFSET_MASK) == 0);
1033 Assert((pCur->GCPhysLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1034 Assert((pCur->cb & PAGE_OFFSET_MASK) == 0);
1035 Assert(pCur->cb == pCur->GCPhysLast - pCur->GCPhys + 1);
1036 for (PPGMRAMRANGE pCur2 = pVM->pgm.s.pRamRangesXR3; pCur2; pCur2 = pCur2->pNextR3)
1037 Assert( pCur2 == pCur
1038 || strcmp(pCur2->pszDesc, pCur->pszDesc)); /** @todo fix MMIO ranges!! */
1039 }
1040#endif
1041
1042 pCur = pVM->pgm.s.pRamRangesXR3;
1043 if (pCur)
1044 {
1045 pVM->pgm.s.pRamRangesXR0 = pCur->pSelfR0;
1046
1047 for (; pCur->pNextR3; pCur = pCur->pNextR3)
1048 pCur->pNextR0 = pCur->pNextR3->pSelfR0;
1049
1050 Assert(pCur->pNextR0 == NIL_RTR0PTR);
1051 }
1052 else
1053 {
1054 Assert(pVM->pgm.s.pRamRangesXR0 == NIL_RTR0PTR);
1055 }
1056 ASMAtomicIncU32(&pVM->pgm.s.idRamRangesGen);
1057
1058 pgmR3PhysRebuildRamRangeSearchTrees(pVM);
1059}
1060
1061
1062/**
1063 * Links a new RAM range into the list.
1064 *
1065 * @param pVM The cross context VM structure.
1066 * @param pNew Pointer to the new list entry.
1067 * @param pPrev Pointer to the previous list entry. If NULL, insert as head.
1068 */
1069static void pgmR3PhysLinkRamRange(PVM pVM, PPGMRAMRANGE pNew, PPGMRAMRANGE pPrev)
1070{
1071 AssertMsg(pNew->pszDesc, ("%RGp-%RGp\n", pNew->GCPhys, pNew->GCPhysLast));
1072 Assert((pNew->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pNew->pSelfR0 == MMHyperCCToR0(pVM, pNew));
1073
1074 pgmLock(pVM);
1075
1076 PPGMRAMRANGE pRam = pPrev ? pPrev->pNextR3 : pVM->pgm.s.pRamRangesXR3;
1077 pNew->pNextR3 = pRam;
1078 pNew->pNextR0 = pRam ? pRam->pSelfR0 : NIL_RTR0PTR;
1079
1080 if (pPrev)
1081 {
1082 pPrev->pNextR3 = pNew;
1083 pPrev->pNextR0 = pNew->pSelfR0;
1084 }
1085 else
1086 {
1087 pVM->pgm.s.pRamRangesXR3 = pNew;
1088 pVM->pgm.s.pRamRangesXR0 = pNew->pSelfR0;
1089 }
1090 ASMAtomicIncU32(&pVM->pgm.s.idRamRangesGen);
1091
1092 pgmR3PhysRebuildRamRangeSearchTrees(pVM);
1093 pgmUnlock(pVM);
1094}
1095
1096
1097/**
1098 * Unlink an existing RAM range from the list.
1099 *
1100 * @param pVM The cross context VM structure.
1101 * @param pRam Pointer to the RAM range to unlink.
1102 * @param pPrev Pointer to the previous list entry. If NULL, the range is the list head.
1103 */
1104static void pgmR3PhysUnlinkRamRange2(PVM pVM, PPGMRAMRANGE pRam, PPGMRAMRANGE pPrev)
1105{
1106 Assert(pPrev ? pPrev->pNextR3 == pRam : pVM->pgm.s.pRamRangesXR3 == pRam);
1107 Assert((pRam->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pRam->pSelfR0 == MMHyperCCToR0(pVM, pRam));
1108
1109 pgmLock(pVM);
1110
1111 PPGMRAMRANGE pNext = pRam->pNextR3;
1112 if (pPrev)
1113 {
1114 pPrev->pNextR3 = pNext;
1115 pPrev->pNextR0 = pNext ? pNext->pSelfR0 : NIL_RTR0PTR;
1116 }
1117 else
1118 {
1119 Assert(pVM->pgm.s.pRamRangesXR3 == pRam);
1120 pVM->pgm.s.pRamRangesXR3 = pNext;
1121 pVM->pgm.s.pRamRangesXR0 = pNext ? pNext->pSelfR0 : NIL_RTR0PTR;
1122 }
1123 ASMAtomicIncU32(&pVM->pgm.s.idRamRangesGen);
1124
1125 pgmR3PhysRebuildRamRangeSearchTrees(pVM);
1126 pgmUnlock(pVM);
1127}
1128
1129
1130/**
1131 * Unlink an existing RAM range from the list.
1132 *
1133 * @param pVM The cross context VM structure.
1134 * @param pRam Pointer to the RAM range to unlink.
1135 */
1136static void pgmR3PhysUnlinkRamRange(PVM pVM, PPGMRAMRANGE pRam)
1137{
1138 pgmLock(pVM);
1139
1140 /* find prev. */
1141 PPGMRAMRANGE pPrev = NULL;
1142 PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesXR3;
1143 while (pCur != pRam)
1144 {
1145 pPrev = pCur;
1146 pCur = pCur->pNextR3;
1147 }
1148 AssertFatal(pCur);
1149
1150 pgmR3PhysUnlinkRamRange2(pVM, pRam, pPrev);
1151 pgmUnlock(pVM);
1152}
1153
1154
1155/**
1156 * Frees a range of pages, replacing them with ZERO pages of the specified type.
1157 *
1158 * @returns VBox status code.
1159 * @param pVM The cross context VM structure.
1160 * @param pRam The RAM range in which the pages reside.
1161 * @param GCPhys The address of the first page.
1162 * @param GCPhysLast The address of the last page.
1163 * @param enmType The page type to replace them with.
1164 */
1165static int pgmR3PhysFreePageRange(PVM pVM, PPGMRAMRANGE pRam, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast, PGMPAGETYPE enmType)
1166{
1167 PGM_LOCK_ASSERT_OWNER(pVM);
1168 uint32_t cPendingPages = 0;
1169 PGMMFREEPAGESREQ pReq;
1170 int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
1171 AssertLogRelRCReturn(rc, rc);
1172
1173 /* Iterate the pages. */
1174 PPGMPAGE pPageDst = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
1175 uint32_t cPagesLeft = ((GCPhysLast - GCPhys) >> PAGE_SHIFT) + 1;
1176 while (cPagesLeft-- > 0)
1177 {
1178 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPageDst, GCPhys, enmType);
1179 AssertLogRelRCReturn(rc, rc); /* We're done for if this goes wrong. */
1180
1181 PGM_PAGE_SET_TYPE(pVM, pPageDst, enmType);
1182
1183 GCPhys += PAGE_SIZE;
1184 pPageDst++;
1185 }
1186
1187 if (cPendingPages)
1188 {
1189 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
1190 AssertLogRelRCReturn(rc, rc);
1191 }
1192 GMMR3FreePagesCleanup(pReq);
1193
1194 return rc;
1195}
1196
1197#if HC_ARCH_BITS == 64 && (defined(RT_OS_WINDOWS) || defined(RT_OS_SOLARIS) || defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD))
1198
1199/**
1200 * Rendezvous callback used by PGMR3PhysChangeMemBalloon that changes the memory balloon size.
1201 *
1202 * This is only called on one of the EMTs while the other ones are waiting for
1203 * it to complete this function.
1204 *
1205 * @returns VINF_SUCCESS (VBox strict status code).
1206 * @param pVM The cross context VM structure.
1207 * @param pVCpu The cross context virtual CPU structure of the calling EMT. Unused.
1208 * @param pvUser User parameter
1209 */
1210static DECLCALLBACK(VBOXSTRICTRC) pgmR3PhysChangeMemBalloonRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
1211{
1212 uintptr_t *paUser = (uintptr_t *)pvUser;
1213 bool fInflate = !!paUser[0];
1214 unsigned cPages = paUser[1];
1215 RTGCPHYS *paPhysPage = (RTGCPHYS *)paUser[2];
1216 uint32_t cPendingPages = 0;
1217 PGMMFREEPAGESREQ pReq;
1218 int rc;
1219
1220 Log(("pgmR3PhysChangeMemBalloonRendezvous: %s %x pages\n", (fInflate) ? "inflate" : "deflate", cPages));
1221 pgmLock(pVM);
1222
1223 if (fInflate)
1224 {
1225 /* Flush the PGM pool cache as we might have stale references to pages that we just freed. */
1226 pgmR3PoolClearAllRendezvous(pVM, pVCpu, NULL);
1227
1228 /* Replace pages with ZERO pages. */
1229 rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
1230 if (RT_FAILURE(rc))
1231 {
1232 pgmUnlock(pVM);
1233 AssertLogRelRC(rc);
1234 return rc;
1235 }
1236
1237 /* Iterate the pages. */
1238 for (unsigned i = 0; i < cPages; i++)
1239 {
1240 PPGMPAGE pPage = pgmPhysGetPage(pVM, paPhysPage[i]);
1241 if ( pPage == NULL
1242 || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM)
1243 {
1244 Log(("pgmR3PhysChangeMemBalloonRendezvous: invalid physical page %RGp pPage->u3Type=%d\n", paPhysPage[i], pPage ? PGM_PAGE_GET_TYPE(pPage) : 0));
1245 break;
1246 }
1247
1248 LogFlow(("balloon page: %RGp\n", paPhysPage[i]));
1249
1250 /* Flush the shadow PT if this page was previously used as a guest page table. */
1251 pgmPoolFlushPageByGCPhys(pVM, paPhysPage[i]);
1252
1253 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, paPhysPage[i], (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage));
1254 if (RT_FAILURE(rc))
1255 {
1256 pgmUnlock(pVM);
1257 AssertLogRelRC(rc);
1258 return rc;
1259 }
1260 Assert(PGM_PAGE_IS_ZERO(pPage));
1261 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_BALLOONED);
1262 }
1263
1264 if (cPendingPages)
1265 {
1266 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
1267 if (RT_FAILURE(rc))
1268 {
1269 pgmUnlock(pVM);
1270 AssertLogRelRC(rc);
1271 return rc;
1272 }
1273 }
1274 GMMR3FreePagesCleanup(pReq);
1275 }
1276 else
1277 {
1278 /* Iterate the pages. */
1279 for (unsigned i = 0; i < cPages; i++)
1280 {
1281 PPGMPAGE pPage = pgmPhysGetPage(pVM, paPhysPage[i]);
1282 AssertBreak(pPage && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM);
1283
1284 LogFlow(("Free ballooned page: %RGp\n", paPhysPage[i]));
1285
1286 Assert(PGM_PAGE_IS_BALLOONED(pPage));
1287
1288 /* Change back to zero page. (NEM does not need to be informed.) */
1289 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
1290 }
1291
1292 /* Note that we currently do not map any ballooned pages in our shadow page tables, so no need to flush the pgm pool. */
1293 }
1294
1295 /* Notify GMM about the balloon change. */
1296 rc = GMMR3BalloonedPages(pVM, (fInflate) ? GMMBALLOONACTION_INFLATE : GMMBALLOONACTION_DEFLATE, cPages);
1297 if (RT_SUCCESS(rc))
1298 {
1299 if (!fInflate)
1300 {
1301 Assert(pVM->pgm.s.cBalloonedPages >= cPages);
1302 pVM->pgm.s.cBalloonedPages -= cPages;
1303 }
1304 else
1305 pVM->pgm.s.cBalloonedPages += cPages;
1306 }
1307
1308 pgmUnlock(pVM);
1309
1310 /* Flush the recompiler's TLB as well. */
1311 for (VMCPUID i = 0; i < pVM->cCpus; i++)
1312 CPUMSetChangedFlags(pVM->apCpusR3[i], CPUM_CHANGED_GLOBAL_TLB_FLUSH);
1313
1314 AssertLogRelRC(rc);
1315 return rc;
1316}
1317
1318
1319/**
1320 * Inflates or deflates the memory balloon; worker for PGMR3PhysChangeMemBalloon in the SMP case.
1321 *
1322 * @returns VBox status code.
1323 * @param pVM The cross context VM structure.
1324 * @param fInflate Inflate or deflate memory balloon
1325 * @param cPages Number of pages to free
1326 * @param paPhysPage Array of guest physical addresses
1327 */
1328static DECLCALLBACK(void) pgmR3PhysChangeMemBalloonHelper(PVM pVM, bool fInflate, unsigned cPages, RTGCPHYS *paPhysPage)
1329{
1330 uintptr_t paUser[3];
1331
1332 paUser[0] = fInflate;
1333 paUser[1] = cPages;
1334 paUser[2] = (uintptr_t)paPhysPage;
1335 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3PhysChangeMemBalloonRendezvous, (void *)paUser);
1336 AssertRC(rc);
1337
1338 /* Made a copy in PGMR3PhysChangeMemBalloon; free it here. */
1339 RTMemFree(paPhysPage);
1340}
1341
1342#endif /* 64-bit host && (Windows || Solaris || Linux || FreeBSD) */
1343
1344/**
1345 * Inflate or deflate a memory balloon
1346 *
1347 * @returns VBox status code.
1348 * @param pVM The cross context VM structure.
1349 * @param fInflate Inflate or deflate memory balloon
1350 * @param cPages Number of pages to free
1351 * @param paPhysPage Array of guest physical addresses
1352 */
1353VMMR3DECL(int) PGMR3PhysChangeMemBalloon(PVM pVM, bool fInflate, unsigned cPages, RTGCPHYS *paPhysPage)
1354{
1355 /* This must match GMMR0Init; currently we only support memory ballooning on all 64-bit hosts except Mac OS X */
1356#if HC_ARCH_BITS == 64 && (defined(RT_OS_WINDOWS) || defined(RT_OS_SOLARIS) || defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD))
1357 int rc;
1358
1359 /* Older additions (ancient non-functioning balloon code) pass wrong physical addresses. */
1360 AssertReturn(!(paPhysPage[0] & 0xfff), VERR_INVALID_PARAMETER);
1361
1362 /* We own the IOM lock here and could cause a deadlock by waiting for another VCPU that is blocking on the IOM lock.
1363 * In the SMP case we post a request packet to postpone the job.
1364 */
1365 if (pVM->cCpus > 1)
1366 {
1367 unsigned cbPhysPage = cPages * sizeof(paPhysPage[0]);
1368 RTGCPHYS *paPhysPageCopy = (RTGCPHYS *)RTMemAlloc(cbPhysPage);
1369 AssertReturn(paPhysPageCopy, VERR_NO_MEMORY);
1370
1371 memcpy(paPhysPageCopy, paPhysPage, cbPhysPage);
1372
1373 rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE, (PFNRT)pgmR3PhysChangeMemBalloonHelper, 4, pVM, fInflate, cPages, paPhysPageCopy);
1374 AssertRC(rc);
1375 }
1376 else
1377 {
1378 uintptr_t paUser[3];
1379
1380 paUser[0] = fInflate;
1381 paUser[1] = cPages;
1382 paUser[2] = (uintptr_t)paPhysPage;
1383 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3PhysChangeMemBalloonRendezvous, (void *)paUser);
1384 AssertRC(rc);
1385 }
1386 return rc;
1387
1388#else
1389 NOREF(pVM); NOREF(fInflate); NOREF(cPages); NOREF(paPhysPage);
1390 return VERR_NOT_IMPLEMENTED;
1391#endif
1392}
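
/*
 * Usage sketch: inflating the balloon with pages the guest has handed back.
 * The helper name is an assumption; each array entry must be a page-aligned
 * guest physical address, and the call only succeeds on the 64-bit hosts
 * listed above (elsewhere it returns VERR_NOT_IMPLEMENTED).
 */
#if 0
static int exampleInflateBalloon(PVM pVM, unsigned cPages, RTGCPHYS *paPhysPage)
{
    return PGMR3PhysChangeMemBalloon(pVM, true /*fInflate*/, cPages, paPhysPage);
}
#endif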
1393
1394
1395/**
1396 * Rendezvous callback used by PGMR3PhysWriteProtectRAM that write protects all
1397 * physical RAM.
1398 *
1399 * This is only called on one of the EMTs while the other ones are waiting for
1400 * it to complete this function.
1401 *
1402 * @returns VINF_SUCCESS (VBox strict status code).
1403 * @param pVM The cross context VM structure.
1404 * @param pVCpu The cross context virtual CPU structure of the calling EMT. Unused.
1405 * @param pvUser User parameter, unused.
1406 */
1407static DECLCALLBACK(VBOXSTRICTRC) pgmR3PhysWriteProtectRAMRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
1408{
1409 int rc = VINF_SUCCESS;
1410 NOREF(pvUser); NOREF(pVCpu);
1411
1412 pgmLock(pVM);
1413#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
1414 pgmPoolResetDirtyPages(pVM);
1415#endif
1416
1417 /** @todo pointless to write protect the physical page pointed to by RSP. */
1418
1419 for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX);
1420 pRam;
1421 pRam = pRam->CTX_SUFF(pNext))
1422 {
1423 uint32_t cPages = pRam->cb >> PAGE_SHIFT;
1424 for (uint32_t iPage = 0; iPage < cPages; iPage++)
1425 {
1426 PPGMPAGE pPage = &pRam->aPages[iPage];
1427 PGMPAGETYPE enmPageType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
1428
1429 if ( RT_LIKELY(enmPageType == PGMPAGETYPE_RAM)
1430 || enmPageType == PGMPAGETYPE_MMIO2)
1431 {
1432 /*
1433 * A RAM page.
1434 */
1435 switch (PGM_PAGE_GET_STATE(pPage))
1436 {
1437 case PGM_PAGE_STATE_ALLOCATED:
1438 /** @todo Optimize this: Don't always re-enable write
1439 * monitoring if the page is known to be very busy. */
1440 if (PGM_PAGE_IS_WRITTEN_TO(pPage))
1441 PGM_PAGE_CLEAR_WRITTEN_TO(pVM, pPage);
1442
1443 pgmPhysPageWriteMonitor(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
1444 break;
1445
1446 case PGM_PAGE_STATE_SHARED:
1447 AssertFailed();
1448 break;
1449
1450 case PGM_PAGE_STATE_WRITE_MONITORED: /* nothing to change. */
1451 default:
1452 break;
1453 }
1454 }
1455 }
1456 }
1457 pgmR3PoolWriteProtectPages(pVM);
1458 PGM_INVL_ALL_VCPU_TLBS(pVM);
1459 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1460 CPUMSetChangedFlags(pVM->apCpusR3[idCpu], CPUM_CHANGED_GLOBAL_TLB_FLUSH);
1461
1462 pgmUnlock(pVM);
1463 return rc;
1464}
1465
1466/**
1467 * Protect all physical RAM to monitor writes
1468 *
1469 * @returns VBox status code.
1470 * @param pVM The cross context VM structure.
1471 */
1472VMMR3DECL(int) PGMR3PhysWriteProtectRAM(PVM pVM)
1473{
1474 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
1475
1476 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3PhysWriteProtectRAMRendezvous, NULL);
1477 AssertRC(rc);
1478 return rc;
1479}
1480
1481
1482/**
1483 * Gets the number of ram ranges.
1484 *
1485 * @returns Number of ram ranges. Returns UINT32_MAX if @a pVM is invalid.
1486 * @param pVM The cross context VM structure.
1487 */
1488VMMR3DECL(uint32_t) PGMR3PhysGetRamRangeCount(PVM pVM)
1489{
1490 VM_ASSERT_VALID_EXT_RETURN(pVM, UINT32_MAX);
1491
1492 pgmLock(pVM);
1493 uint32_t cRamRanges = 0;
1494 for (PPGMRAMRANGE pCur = pVM->pgm.s.CTX_SUFF(pRamRangesX); pCur; pCur = pCur->CTX_SUFF(pNext))
1495 cRamRanges++;
1496 pgmUnlock(pVM);
1497 return cRamRanges;
1498}
1499
1500
1501/**
1502 * Get information about a range.
1503 *
1504 * @returns VINF_SUCCESS or VERR_OUT_OF_RANGE.
1505 * @param pVM The cross context VM structure.
1506 * @param iRange The ordinal of the range.
1507 * @param pGCPhysStart Where to return the start of the range. Optional.
1508 * @param pGCPhysLast Where to return the address of the last byte in the
1509 * range. Optional.
1510 * @param ppszDesc Where to return the range description. Optional.
1511 * @param pfIsMmio Where to indicate that this is a pure MMIO range.
1512 * Optional.
1513 */
1514VMMR3DECL(int) PGMR3PhysGetRange(PVM pVM, uint32_t iRange, PRTGCPHYS pGCPhysStart, PRTGCPHYS pGCPhysLast,
1515 const char **ppszDesc, bool *pfIsMmio)
1516{
1517 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1518
1519 pgmLock(pVM);
1520 uint32_t iCurRange = 0;
1521 for (PPGMRAMRANGE pCur = pVM->pgm.s.CTX_SUFF(pRamRangesX); pCur; pCur = pCur->CTX_SUFF(pNext), iCurRange++)
1522 if (iCurRange == iRange)
1523 {
1524 if (pGCPhysStart)
1525 *pGCPhysStart = pCur->GCPhys;
1526 if (pGCPhysLast)
1527 *pGCPhysLast = pCur->GCPhysLast;
1528 if (ppszDesc)
1529 *ppszDesc = pCur->pszDesc;
1530 if (pfIsMmio)
1531 *pfIsMmio = !!(pCur->fFlags & PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO);
1532
1533 pgmUnlock(pVM);
1534 return VINF_SUCCESS;
1535 }
1536 pgmUnlock(pVM);
1537 return VERR_OUT_OF_RANGE;
1538}
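
/*
 * Usage sketch: enumerating the registered RAM ranges with the two
 * informational APIs above.  The helper name and the LogRel format are
 * assumptions for illustration.
 */
#if 0
static void exampleLogRamRanges(PVM pVM)
{
    uint32_t const cRanges = PGMR3PhysGetRamRangeCount(pVM);
    for (uint32_t iRange = 0; iRange < cRanges; iRange++)
    {
        RTGCPHYS    GCPhysStart = 0;
        RTGCPHYS    GCPhysLast  = 0;
        const char *pszDesc     = NULL;
        bool        fIsMmio     = false;
        if (RT_SUCCESS(PGMR3PhysGetRange(pVM, iRange, &GCPhysStart, &GCPhysLast, &pszDesc, &fIsMmio)))
            LogRel(("#%u: %RGp-%RGp %s%s\n", iRange, GCPhysStart, GCPhysLast,
                    pszDesc, fIsMmio ? " (MMIO)" : ""));
    }
}
#endif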
1539
1540
1541/**
1542 * Query the amount of free memory inside VMMR0
1543 *
1544 * @returns VBox status code.
1545 * @param pUVM The user mode VM handle.
1546 * @param pcbAllocMem Where to return the amount of memory allocated
1547 * by VMs.
1548 * @param pcbFreeMem Where to return the amount of memory that is
1549 * allocated from the host but not currently used
1550 * by any VMs.
1551 * @param pcbBallonedMem Where to return the sum of memory that is
1552 * currently ballooned by the VMs.
1553 * @param pcbSharedMem Where to return the amount of memory that is
1554 * currently shared.
1555 */
1556VMMR3DECL(int) PGMR3QueryGlobalMemoryStats(PUVM pUVM, uint64_t *pcbAllocMem, uint64_t *pcbFreeMem,
1557 uint64_t *pcbBallonedMem, uint64_t *pcbSharedMem)
1558{
1559 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1560 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
1561
1562 uint64_t cAllocPages = 0;
1563 uint64_t cFreePages = 0;
1564 uint64_t cBalloonPages = 0;
1565 uint64_t cSharedPages = 0;
1566 int rc = GMMR3QueryHypervisorMemoryStats(pUVM->pVM, &cAllocPages, &cFreePages, &cBalloonPages, &cSharedPages);
1567 AssertRCReturn(rc, rc);
1568
1569 if (pcbAllocMem)
1570 *pcbAllocMem = cAllocPages * _4K;
1571
1572 if (pcbFreeMem)
1573 *pcbFreeMem = cFreePages * _4K;
1574
1575 if (pcbBallonedMem)
1576 *pcbBallonedMem = cBalloonPages * _4K;
1577
1578 if (pcbSharedMem)
1579 *pcbSharedMem = cSharedPages * _4K;
1580
1581 Log(("PGMR3QueryVMMMemoryStats: all=%llx free=%llx ballooned=%llx shared=%llx\n",
1582 cAllocPages, cFreePages, cBalloonPages, cSharedPages));
1583 return VINF_SUCCESS;
1584}
1585
1586
1587/**
1588 * Query memory stats for the VM.
1589 *
1590 * @returns VBox status code.
1591 * @param pUVM The user mode VM handle.
1592 * @param pcbTotalMem Where to return total amount memory the VM may
1593 * possibly use.
1594 * @param pcbPrivateMem Where to return the amount of private memory
1595 * currently allocated.
1596 * @param pcbSharedMem Where to return the amount of actually shared
1597 * memory currently used by the VM.
1598 * @param pcbZeroMem Where to return the amount of memory backed by
1599 * zero pages.
1600 *
1601 * @remarks The total mem is normally larger than the sum of the three
1602 * components. There are two reasons for this: first, the amount of
1603 * shared memory is what we're sure is shared instead of what could
1604 * possibly be shared with someone. Secondly, the total may
1605 * include some pure MMIO pages that don't go into any of the three
1606 * sub-counts.
1607 *
1608 * @todo Why do we return reused shared pages instead of anything that could
1609 * potentially be shared? Doesn't this mean the first VM gets a much
1610 * lower number of shared pages?
1611 */
1612VMMR3DECL(int) PGMR3QueryMemoryStats(PUVM pUVM, uint64_t *pcbTotalMem, uint64_t *pcbPrivateMem,
1613 uint64_t *pcbSharedMem, uint64_t *pcbZeroMem)
1614{
1615 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1616 PVM pVM = pUVM->pVM;
1617 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1618
1619 if (pcbTotalMem)
1620 *pcbTotalMem = (uint64_t)pVM->pgm.s.cAllPages * PAGE_SIZE;
1621
1622 if (pcbPrivateMem)
1623 *pcbPrivateMem = (uint64_t)pVM->pgm.s.cPrivatePages * PAGE_SIZE;
1624
1625 if (pcbSharedMem)
1626 *pcbSharedMem = (uint64_t)pVM->pgm.s.cReusedSharedPages * PAGE_SIZE;
1627
1628 if (pcbZeroMem)
1629 *pcbZeroMem = (uint64_t)pVM->pgm.s.cZeroPages * PAGE_SIZE;
1630
1631 Log(("PGMR3QueryMemoryStats: all=%x private=%x reused=%x zero=%x\n", pVM->pgm.s.cAllPages, pVM->pgm.s.cPrivatePages, pVM->pgm.s.cReusedSharedPages, pVM->pgm.s.cZeroPages));
1632 return VINF_SUCCESS;
1633}
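
/*
 * Usage sketch: logging the per-VM memory statistics.  The helper name and the
 * LogRel format are assumptions; any of the output parameters may be NULL if
 * the caller is not interested in them.
 */
#if 0
static void exampleLogMemoryStats(PUVM pUVM)
{
    uint64_t cbTotal = 0, cbPrivate = 0, cbShared = 0, cbZero = 0;
    int rc = PGMR3QueryMemoryStats(pUVM, &cbTotal, &cbPrivate, &cbShared, &cbZero);
    if (RT_SUCCESS(rc))
        LogRel(("Mem: total=%RU64 private=%RU64 shared=%RU64 zero=%RU64 bytes\n",
                cbTotal, cbPrivate, cbShared, cbZero));
}
#endif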
1634
1635
1636/**
1637 * PGMR3PhysRegisterRam worker that initializes and links a RAM range.
1638 *
1639 * @param pVM The cross context VM structure.
1640 * @param pNew The new RAM range.
1641 * @param GCPhys The address of the RAM range.
1642 * @param GCPhysLast The last address of the RAM range.
1643 * @param RCPtrNew The RC address if the range is floating. NIL_RTRCPTR
1644 * if in HMA.
1645 * @param R0PtrNew Ditto for R0.
1646 * @param pszDesc The description.
1647 * @param pPrev The previous RAM range (for linking).
1648 */
1649static void pgmR3PhysInitAndLinkRamRange(PVM pVM, PPGMRAMRANGE pNew, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast,
1650 RTRCPTR RCPtrNew, RTR0PTR R0PtrNew, const char *pszDesc, PPGMRAMRANGE pPrev)
1651{
1652 /*
1653 * Initialize the range.
1654 */
1655 pNew->pSelfR0 = R0PtrNew != NIL_RTR0PTR ? R0PtrNew : MMHyperCCToR0(pVM, pNew);
1656 pNew->GCPhys = GCPhys;
1657 pNew->GCPhysLast = GCPhysLast;
1658 pNew->cb = GCPhysLast - GCPhys + 1;
1659 pNew->pszDesc = pszDesc;
1660 pNew->fFlags = RCPtrNew != NIL_RTRCPTR ? PGM_RAM_RANGE_FLAGS_FLOATING : 0;
1661 pNew->pvR3 = NULL;
1662 pNew->paLSPages = NULL;
1663
1664 uint32_t const cPages = pNew->cb >> PAGE_SHIFT;
1665 RTGCPHYS iPage = cPages;
1666 while (iPage-- > 0)
1667 PGM_PAGE_INIT_ZERO(&pNew->aPages[iPage], pVM, PGMPAGETYPE_RAM);
1668
1669 /* Update the page count stats. */
1670 pVM->pgm.s.cZeroPages += cPages;
1671 pVM->pgm.s.cAllPages += cPages;
1672
1673 /*
1674 * Link it.
1675 */
1676 pgmR3PhysLinkRamRange(pVM, pNew, pPrev);
1677}
1678
1679
1680#ifndef PGM_WITHOUT_MAPPINGS
1681/**
1682 * @callback_method_impl{FNPGMRELOCATE, Relocate a floating RAM range.}
1683 * @sa pgmR3PhysMMIO2ExRangeRelocate
1684 */
1685static DECLCALLBACK(bool) pgmR3PhysRamRangeRelocate(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew,
1686 PGMRELOCATECALL enmMode, void *pvUser)
1687{
1688 PPGMRAMRANGE pRam = (PPGMRAMRANGE)pvUser;
1689 Assert(pRam->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING);
1690 Assert(pRam->pSelfRC == GCPtrOld + PAGE_SIZE); RT_NOREF_PV(GCPtrOld);
1691
1692 switch (enmMode)
1693 {
1694 case PGMRELOCATECALL_SUGGEST:
1695 return true;
1696
1697 case PGMRELOCATECALL_RELOCATE:
1698 {
1699 /*
1700 * Update myself, then relink all the ranges and flush the RC TLB.
1701 */
1702 pgmLock(pVM);
1703
1704 pRam->pSelfRC = (RTRCPTR)(GCPtrNew + PAGE_SIZE);
1705
1706 pgmR3PhysRelinkRamRanges(pVM);
1707 for (unsigned i = 0; i < PGM_RAMRANGE_TLB_ENTRIES; i++)
1708 pVM->pgm.s.apRamRangesTlbRC[i] = NIL_RTRCPTR;
1709
1710 pgmUnlock(pVM);
1711 return true;
1712 }
1713
1714 default:
1715 AssertFailedReturn(false);
1716 }
1717}
1718#endif /* !PGM_WITHOUT_MAPPINGS */
1719
1720
1721/**
1722 * PGMR3PhysRegisterRam worker that registers a high chunk.
1723 *
1724 * @returns VBox status code.
1725 * @param pVM The cross context VM structure.
1726 * @param GCPhys The address of the RAM.
1727 * @param cRamPages The number of RAM pages to register.
1728 * @param cbChunk The size of the PGMRAMRANGE guest mapping.
1729 * @param iChunk The chunk number.
1730 * @param pszDesc The RAM range description.
1731 * @param ppPrev Previous RAM range pointer. In/Out.
1732 */
1733static int pgmR3PhysRegisterHighRamChunk(PVM pVM, RTGCPHYS GCPhys, uint32_t cRamPages,
1734 uint32_t cbChunk, uint32_t iChunk, const char *pszDesc,
1735 PPGMRAMRANGE *ppPrev)
1736{
1737 const char *pszDescChunk = iChunk == 0
1738 ? pszDesc
1739 : MMR3HeapAPrintf(pVM, MM_TAG_PGM_PHYS, "%s (#%u)", pszDesc, iChunk + 1);
1740 AssertReturn(pszDescChunk, VERR_NO_MEMORY);
1741
1742 /*
1743 * Allocate memory for the new chunk.
1744 */
1745 size_t const cChunkPages = RT_ALIGN_Z(RT_UOFFSETOF_DYN(PGMRAMRANGE, aPages[cRamPages]), PAGE_SIZE) >> PAGE_SHIFT;
1746 PSUPPAGE paChunkPages = (PSUPPAGE)RTMemTmpAllocZ(sizeof(SUPPAGE) * cChunkPages);
1747 AssertReturn(paChunkPages, VERR_NO_TMP_MEMORY);
1748 RTR0PTR R0PtrChunk = NIL_RTR0PTR;
1749 void *pvChunk = NULL;
1750 int rc = SUPR3PageAllocEx(cChunkPages, 0 /*fFlags*/, &pvChunk, &R0PtrChunk, paChunkPages);
1751 if (RT_SUCCESS(rc))
1752 {
1753 Assert(R0PtrChunk != NIL_RTR0PTR);
1754 memset(pvChunk, 0, cChunkPages << PAGE_SHIFT);
1755
1756 PPGMRAMRANGE pNew = (PPGMRAMRANGE)pvChunk;
1757
1758 /*
1759 * Create a mapping and map the pages into it.
1760 * We push these in below the HMA.
1761 */
1762 RTGCPTR GCPtrChunkMap = pVM->pgm.s.GCPtrPrevRamRangeMapping - cbChunk;
1763#ifndef PGM_WITHOUT_MAPPINGS
1764 rc = PGMR3MapPT(pVM, GCPtrChunkMap, cbChunk, 0 /*fFlags*/, pgmR3PhysRamRangeRelocate, pNew, pszDescChunk);
1765 if (RT_SUCCESS(rc))
1766#endif /* !PGM_WITHOUT_MAPPINGS */
1767 {
1768 pVM->pgm.s.GCPtrPrevRamRangeMapping = GCPtrChunkMap;
1769
1770 RTGCPTR const GCPtrChunk = GCPtrChunkMap + PAGE_SIZE;
1771#ifndef PGM_WITHOUT_MAPPINGS
1772 RTGCPTR GCPtrPage = GCPtrChunk;
1773 for (uint32_t iPage = 0; iPage < cChunkPages && RT_SUCCESS(rc); iPage++, GCPtrPage += PAGE_SIZE)
1774 rc = PGMMap(pVM, GCPtrPage, paChunkPages[iPage].Phys, PAGE_SIZE, 0);
1775 if (RT_SUCCESS(rc))
1776#endif /* !PGM_WITHOUT_MAPPINGS */
1777 {
1778 /*
1779 * Ok, init and link the range.
1780 */
1781 pgmR3PhysInitAndLinkRamRange(pVM, pNew, GCPhys, GCPhys + ((RTGCPHYS)cRamPages << PAGE_SHIFT) - 1,
1782 (RTRCPTR)GCPtrChunk, R0PtrChunk, pszDescChunk, *ppPrev);
1783 *ppPrev = pNew;
1784 }
1785 }
1786
1787 if (RT_FAILURE(rc))
1788 SUPR3PageFreeEx(pvChunk, cChunkPages);
1789 }
1790
1791 RTMemTmpFree(paChunkPages);
1792 return rc;
1793}
1794
1795
1796/**
1797 * Sets up a RAM range.
1798 *
1799 * This will check for conflicting registrations, make a resource
1800 * reservation for the memory (with GMM), and set up the per-page
1801 * tracking structures (PGMPAGE).
1802 *
1803 * @returns VBox status code.
1804 * @param pVM The cross context VM structure.
1805 * @param GCPhys The physical address of the RAM.
1806 * @param cb The size of the RAM.
1807 * @param pszDesc The description - not copied, so, don't free or change it.
1808 */
1809VMMR3DECL(int) PGMR3PhysRegisterRam(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, const char *pszDesc)
1810{
1811 /*
1812 * Validate input.
1813 */
1814 Log(("PGMR3PhysRegisterRam: GCPhys=%RGp cb=%RGp pszDesc=%s\n", GCPhys, cb, pszDesc));
1815 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
1816 AssertReturn(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
1817 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
1818 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
1819 AssertMsgReturn(GCPhysLast > GCPhys, ("The range wraps! GCPhys=%RGp cb=%RGp\n", GCPhys, cb), VERR_INVALID_PARAMETER);
1820 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
1821 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
1822
1823 pgmLock(pVM);
1824
1825 /*
1826 * Find range location and check for conflicts.
1827 * (The PGM lock is already held here; see the pgmLock call above.)
1828 */
1829 PPGMRAMRANGE pPrev = NULL;
1830 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3;
1831 while (pRam && GCPhysLast >= pRam->GCPhys)
1832 {
1833 if ( GCPhysLast >= pRam->GCPhys
1834 && GCPhys <= pRam->GCPhysLast)
1835 AssertLogRelMsgFailedReturn(("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
1836 GCPhys, GCPhysLast, pszDesc,
1837 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
1838 VERR_PGM_RAM_CONFLICT);
1839
1840 /* next */
1841 pPrev = pRam;
1842 pRam = pRam->pNextR3;
1843 }
1844
1845 /*
1846 * Register it with GMM (the API will complain otherwise).
1847 */
1848 const RTGCPHYS cPages = cb >> PAGE_SHIFT;
1849 int rc = MMR3IncreaseBaseReservation(pVM, cPages);
1850 if (RT_FAILURE(rc))
1851 {
1852 pgmUnlock(pVM);
1853 return rc;
1854 }
1855
1856 if ( GCPhys >= _4G
1857 && cPages > 256)
1858 {
1859 /*
1860 * The PGMRAMRANGE structures for the high memory can get very big.
1861 * In order to avoid SUPR3PageAllocEx allocation failures due to the
1862 * allocation size limit there and also to avoid being unable to find
1863 * guest mapping space for them, we split this memory up into 4MB chunks
1864 * in (potential) raw-mode configs and 16MB chunks in forced AMD-V/VT-x
1865 * mode.
1866 *
1867 * The first and last page of each mapping are guard pages and marked
1868 * not-present. So, we've got 4186112 and 16769024 bytes available for
1869 * the PGMRAMRANGE structure.
1870 *
1871 * Note! The sizes used here will influence the saved state.
1872 */
1873 uint32_t cbChunk = 16U*_1M;
1874 uint32_t cPagesPerChunk = 1048048; /* max ~1048059 */
1875 AssertCompile(sizeof(PGMRAMRANGE) + sizeof(PGMPAGE) * 1048048 < 16U*_1M - PAGE_SIZE * 2);
1876 AssertRelease(RT_UOFFSETOF_DYN(PGMRAMRANGE, aPages[cPagesPerChunk]) + PAGE_SIZE * 2 <= cbChunk);
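 /* Worked out (illustrative, assuming the 16 byte PGMPAGE layout the
    compile time assertion above depends on): a 16 MB chunk minus the two
    guard pages leaves 16769024 bytes; 1048048 pages take
    1048048 * 16 = 16768768 bytes, which leaves 256 bytes of headroom for
    the PGMRAMRANGE header fields. */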
1877
1878 RTGCPHYS cPagesLeft = cPages;
1879 RTGCPHYS GCPhysChunk = GCPhys;
1880 uint32_t iChunk = 0;
1881 while (cPagesLeft > 0)
1882 {
1883 uint32_t cPagesInChunk = cPagesLeft;
1884 if (cPagesInChunk > cPagesPerChunk)
1885 cPagesInChunk = cPagesPerChunk;
1886
1887 rc = pgmR3PhysRegisterHighRamChunk(pVM, GCPhysChunk, cPagesInChunk, cbChunk, iChunk, pszDesc, &pPrev);
1888 AssertRCReturn(rc, rc);
1889
1890 /* advance */
1891 GCPhysChunk += (RTGCPHYS)cPagesInChunk << PAGE_SHIFT;
1892 cPagesLeft -= cPagesInChunk;
1893 iChunk++;
1894 }
1895 }
1896 else
1897 {
1898 /*
1899 * Allocate, initialize and link the new RAM range.
1900 */
1901 const size_t cbRamRange = RT_UOFFSETOF_DYN(PGMRAMRANGE, aPages[cPages]);
1902 PPGMRAMRANGE pNew;
1903 rc = MMR3HyperAllocOnceNoRel(pVM, cbRamRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew);
1904 AssertLogRelMsgRCReturn(rc, ("cbRamRange=%zu\n", cbRamRange), rc);
1905
1906 pgmR3PhysInitAndLinkRamRange(pVM, pNew, GCPhys, GCPhysLast, NIL_RTRCPTR, NIL_RTR0PTR, pszDesc, pPrev);
1907 }
1908 pgmPhysInvalidatePageMapTLB(pVM);
1909
1910 /*
1911 * Notify NEM while holding the lock (experimental) and REM without (like always).
1912 */
1913 rc = NEMR3NotifyPhysRamRegister(pVM, GCPhys, cb);
1914 pgmUnlock(pVM);
1915#ifdef VBOX_WITH_REM
1916 REMR3NotifyPhysRamRegister(pVM, GCPhys, cb, REM_NOTIFY_PHYS_RAM_FLAGS_RAM);
1917#endif
1918 return rc;
1919}
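
/*
 * Usage sketch for PGMR3PhysRegisterRam (illustrative only; the addresses,
 * sizes and descriptions below are hypothetical and not taken from the VMM
 * init code).  Must be called on the EMT with page aligned GCPhys and cb.
 */
#if 0
    int rc = PGMR3PhysRegisterRam(pVM, 0 /*GCPhys*/, 640 * _1K, "Base RAM below 640K");
    if (RT_SUCCESS(rc))
        rc = PGMR3PhysRegisterRam(pVM, _1M, 255 * _1M, "RAM above 1MB");
    AssertLogRelRCReturn(rc, rc);
#endif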
1920
1921
1922/**
1923 * Worker called by PGMR3InitFinalize if we're configured to pre-allocate RAM.
1924 *
1925 * We do this late in the init process so that all the ROM and MMIO ranges have
1926 * been registered already and we don't go wasting memory on them.
1927 *
1928 * @returns VBox status code.
1929 *
1930 * @param pVM The cross context VM structure.
1931 */
1932int pgmR3PhysRamPreAllocate(PVM pVM)
1933{
1934 Assert(pVM->pgm.s.fRamPreAlloc);
1935 Log(("pgmR3PhysRamPreAllocate: enter\n"));
1936
1937 /*
1938 * Walk the RAM ranges and allocate all RAM pages, halt at
1939 * the first allocation error.
1940 */
1941 uint64_t cPages = 0;
1942 uint64_t NanoTS = RTTimeNanoTS();
1943 pgmLock(pVM);
1944 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3)
1945 {
1946 PPGMPAGE pPage = &pRam->aPages[0];
1947 RTGCPHYS GCPhys = pRam->GCPhys;
1948 uint32_t cLeft = pRam->cb >> PAGE_SHIFT;
1949 while (cLeft-- > 0)
1950 {
1951 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
1952 {
1953 switch (PGM_PAGE_GET_STATE(pPage))
1954 {
1955 case PGM_PAGE_STATE_ZERO:
1956 {
1957 int rc = pgmPhysAllocPage(pVM, pPage, GCPhys);
1958 if (RT_FAILURE(rc))
1959 {
1960 LogRel(("PGM: RAM Pre-allocation failed at %RGp (in %s) with rc=%Rrc\n", GCPhys, pRam->pszDesc, rc));
1961 pgmUnlock(pVM);
1962 return rc;
1963 }
1964 cPages++;
1965 break;
1966 }
1967
1968 case PGM_PAGE_STATE_BALLOONED:
1969 case PGM_PAGE_STATE_ALLOCATED:
1970 case PGM_PAGE_STATE_WRITE_MONITORED:
1971 case PGM_PAGE_STATE_SHARED:
1972 /* nothing to do here. */
1973 break;
1974 }
1975 }
1976
1977 /* next */
1978 pPage++;
1979 GCPhys += PAGE_SIZE;
1980 }
1981 }
1982 pgmUnlock(pVM);
1983 NanoTS = RTTimeNanoTS() - NanoTS;
1984
1985 LogRel(("PGM: Pre-allocated %llu pages in %llu ms\n", cPages, NanoTS / 1000000));
1986 Log(("pgmR3PhysRamPreAllocate: returns VINF_SUCCESS\n"));
1987 return VINF_SUCCESS;
1988}
1989
1990
1991/**
1992 * Checks shared page checksums.
1993 *
1994 * @param pVM The cross context VM structure.
1995 */
1996void pgmR3PhysAssertSharedPageChecksums(PVM pVM)
1997{
1998#ifdef VBOX_STRICT
1999 pgmLock(pVM);
2000
2001 if (pVM->pgm.s.cSharedPages > 0)
2002 {
2003 /*
2004 * Walk the ram ranges.
2005 */
2006 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3)
2007 {
2008 uint32_t iPage = pRam->cb >> PAGE_SHIFT;
2009 AssertMsg(((RTGCPHYS)iPage << PAGE_SHIFT) == pRam->cb, ("%RGp %RGp\n", (RTGCPHYS)iPage << PAGE_SHIFT, pRam->cb));
2010
2011 while (iPage-- > 0)
2012 {
2013 PPGMPAGE pPage = &pRam->aPages[iPage];
2014 if (PGM_PAGE_IS_SHARED(pPage))
2015 {
2016 uint32_t u32Checksum = pPage->s.u2Unused0/* | ((uint32_t)pPage->s.u2Unused1 << 8)*/;
2017 if (!u32Checksum)
2018 {
2019 RTGCPHYS GCPhysPage = pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
2020 void const *pvPage;
2021 int rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhysPage, &pvPage);
2022 if (RT_SUCCESS(rc))
2023 {
2024 uint32_t u32Checksum2 = RTCrc32(pvPage, PAGE_SIZE);
2025# if 0
2026 AssertMsg((u32Checksum2 & /*UINT32_C(0x00000303)*/ 0x3) == u32Checksum, ("GCPhysPage=%RGp\n", GCPhysPage));
2027# else
2028 if ((u32Checksum2 & /*UINT32_C(0x00000303)*/ 0x3) == u32Checksum)
2029 LogFlow(("shpg %#x @ %RGp %#x [OK]\n", PGM_PAGE_GET_PAGEID(pPage), GCPhysPage, u32Checksum2));
2030 else
2031 AssertMsgFailed(("shpg %#x @ %RGp %#x\n", PGM_PAGE_GET_PAGEID(pPage), GCPhysPage, u32Checksum2));
2032# endif
2033 }
2034 else
2035 AssertRC(rc);
2036 }
2037 }
2038
2039 } /* for each page */
2040
2041 } /* for each ram range */
2042 }
2043
2044 pgmUnlock(pVM);
2045#endif /* VBOX_STRICT */
2046 NOREF(pVM);
2047}
2048
2049
2050/**
2051 * Resets the physical memory state.
2052 *
2053 * ASSUMES that the caller owns the PGM lock.
2054 *
2055 * @returns VBox status code.
2056 * @param pVM The cross context VM structure.
2057 */
2058int pgmR3PhysRamReset(PVM pVM)
2059{
2060 PGM_LOCK_ASSERT_OWNER(pVM);
2061
2062 /* Reset the memory balloon. */
2063 int rc = GMMR3BalloonedPages(pVM, GMMBALLOONACTION_RESET, 0);
2064 AssertRC(rc);
2065
2066#ifdef VBOX_WITH_PAGE_SHARING
2067 /* Clear all registered shared modules. */
2068 pgmR3PhysAssertSharedPageChecksums(pVM);
2069 rc = GMMR3ResetSharedModules(pVM);
2070 AssertRC(rc);
2071#endif
2072 /* Reset counters. */
2073 pVM->pgm.s.cReusedSharedPages = 0;
2074 pVM->pgm.s.cBalloonedPages = 0;
2075
2076 return VINF_SUCCESS;
2077}
2078
2079
2080/**
2081 * Resets (zeros) the RAM after all devices and components have been reset.
2082 *
2083 * ASSUMES that the caller owns the PGM lock.
2084 *
2085 * @returns VBox status code.
2086 * @param pVM The cross context VM structure.
2087 */
2088int pgmR3PhysRamZeroAll(PVM pVM)
2089{
2090 PGM_LOCK_ASSERT_OWNER(pVM);
2091
2092 /*
2093 * We batch up pages that should be freed instead of calling GMM for
2094 * each and every one of them.
2095 */
2096 uint32_t cPendingPages = 0;
2097 PGMMFREEPAGESREQ pReq;
2098 int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
2099 AssertLogRelRCReturn(rc, rc);
2100
2101 /*
2102 * Walk the ram ranges.
2103 */
2104 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3)
2105 {
2106 uint32_t iPage = pRam->cb >> PAGE_SHIFT;
2107 AssertMsg(((RTGCPHYS)iPage << PAGE_SHIFT) == pRam->cb, ("%RGp %RGp\n", (RTGCPHYS)iPage << PAGE_SHIFT, pRam->cb));
2108
2109 if ( !pVM->pgm.s.fRamPreAlloc
2110 && pVM->pgm.s.fZeroRamPagesOnReset)
2111 {
2112 /* Replace all RAM pages by ZERO pages. */
2113 while (iPage-- > 0)
2114 {
2115 PPGMPAGE pPage = &pRam->aPages[iPage];
2116 switch (PGM_PAGE_GET_TYPE(pPage))
2117 {
2118 case PGMPAGETYPE_RAM:
2119 /* Do not replace pages part of a 2 MB contiguous range
2120 with zero pages, but zero them instead. */
2121 if ( PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE
2122 || PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED)
2123 {
2124 void *pvPage;
2125 rc = pgmPhysPageMap(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pvPage);
2126 AssertLogRelRCReturn(rc, rc);
2127 ASMMemZeroPage(pvPage);
2128 }
2129 else if (PGM_PAGE_IS_BALLOONED(pPage))
2130 {
2131 /* Turn into a zero page; the balloon status is lost when the VM reboots. */
2132 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
2133 }
2134 else if (!PGM_PAGE_IS_ZERO(pPage))
2135 {
2136 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT),
2137 PGMPAGETYPE_RAM);
2138 AssertLogRelRCReturn(rc, rc);
2139 }
2140 break;
2141
2142 case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
2143 case PGMPAGETYPE_SPECIAL_ALIAS_MMIO: /** @todo perhaps leave the special page alone? I don't think VT-x copes with this code. */
2144 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT),
2145 true /*fDoAccounting*/);
2146 break;
2147
2148 case PGMPAGETYPE_MMIO2:
2149 case PGMPAGETYPE_ROM_SHADOW: /* handled by pgmR3PhysRomReset. */
2150 case PGMPAGETYPE_ROM:
2151 case PGMPAGETYPE_MMIO:
2152 break;
2153 default:
2154 AssertFailed();
2155 }
2156 } /* for each page */
2157 }
2158 else
2159 {
2160 /* Zero the memory. */
2161 while (iPage-- > 0)
2162 {
2163 PPGMPAGE pPage = &pRam->aPages[iPage];
2164 switch (PGM_PAGE_GET_TYPE(pPage))
2165 {
2166 case PGMPAGETYPE_RAM:
2167 switch (PGM_PAGE_GET_STATE(pPage))
2168 {
2169 case PGM_PAGE_STATE_ZERO:
2170 break;
2171
2172 case PGM_PAGE_STATE_BALLOONED:
2173 /* Turn into a zero page; the balloon status is lost when the VM reboots. */
2174 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
2175 break;
2176
2177 case PGM_PAGE_STATE_SHARED:
2178 case PGM_PAGE_STATE_WRITE_MONITORED:
2179 rc = pgmPhysPageMakeWritable(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
2180 AssertLogRelRCReturn(rc, rc);
2181 RT_FALL_THRU();
2182
2183 case PGM_PAGE_STATE_ALLOCATED:
2184 if (pVM->pgm.s.fZeroRamPagesOnReset)
2185 {
2186 void *pvPage;
2187 rc = pgmPhysPageMap(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pvPage);
2188 AssertLogRelRCReturn(rc, rc);
2189 ASMMemZeroPage(pvPage);
2190 }
2191 break;
2192 }
2193 break;
2194
2195 case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
2196 case PGMPAGETYPE_SPECIAL_ALIAS_MMIO: /** @todo perhaps leave the special page alone? I don't think VT-x copes with this code. */
2197 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT),
2198 true /*fDoAccounting*/);
2199 break;
2200
2201 case PGMPAGETYPE_MMIO2:
2202 case PGMPAGETYPE_ROM_SHADOW:
2203 case PGMPAGETYPE_ROM:
2204 case PGMPAGETYPE_MMIO:
2205 break;
2206 default:
2207 AssertFailed();
2208
2209 }
2210 } /* for each page */
2211 }
2212
2213 }
2214
2215 /*
2216 * Finish off any pages pending freeing.
2217 */
2218 if (cPendingPages)
2219 {
2220 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
2221 AssertLogRelRCReturn(rc, rc);
2222 }
2223 GMMR3FreePagesCleanup(pReq);
2224 return VINF_SUCCESS;
2225}
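
/*
 * The batched freeing protocol used above, reduced to a skeleton (illustrative;
 * the real page loops live in pgmR3PhysRamZeroAll and pgmR3PhysRamTerm).  The
 * worker queues pages with pgmPhysFreePage, which is expected to flush the
 * request whenever cPendingPages reaches PGMPHYS_FREE_PAGE_BATCH_SIZE, so only
 * the final partial batch needs an explicit GMMR3FreePagesPerform call.
 */
#if 0
    uint32_t         cPendingPages = 0;
    PGMMFREEPAGESREQ pReq;
    int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
    AssertLogRelRCReturn(rc, rc);

    /* ... walk the pages, calling pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, GCPhys, enmType) ... */

    if (cPendingPages)
    {
        rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
        AssertLogRelRCReturn(rc, rc);
    }
    GMMR3FreePagesCleanup(pReq);
#endif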
2226
2227
2228/**
2229 * Frees all RAM during VM termination.
2230 *
2231 * ASSUMES that the caller owns the PGM lock.
2232 *
2233 * @returns VBox status code.
2234 * @param pVM The cross context VM structure.
2235 */
2236int pgmR3PhysRamTerm(PVM pVM)
2237{
2238 PGM_LOCK_ASSERT_OWNER(pVM);
2239
2240 /* Reset the memory balloon. */
2241 int rc = GMMR3BalloonedPages(pVM, GMMBALLOONACTION_RESET, 0);
2242 AssertRC(rc);
2243
2244#ifdef VBOX_WITH_PAGE_SHARING
2245 /*
2246 * Clear all registered shared modules.
2247 */
2248 pgmR3PhysAssertSharedPageChecksums(pVM);
2249 rc = GMMR3ResetSharedModules(pVM);
2250 AssertRC(rc);
2251
2252 /*
2253 * Flush the handy page updates to make sure no shared pages are hiding
2254 * in there. (Not unlikely if the VM shuts down, apparently.)
2255 */
2256 rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_FLUSH_HANDY_PAGES, 0, NULL);
2257#endif
2258
2259 /*
2260 * We batch up pages that should be freed instead of calling GMM for
2261 * each and every one of them.
2262 */
2263 uint32_t cPendingPages = 0;
2264 PGMMFREEPAGESREQ pReq;
2265 rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
2266 AssertLogRelRCReturn(rc, rc);
2267
2268 /*
2269 * Walk the ram ranges.
2270 */
2271 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3)
2272 {
2273 uint32_t iPage = pRam->cb >> PAGE_SHIFT;
2274 AssertMsg(((RTGCPHYS)iPage << PAGE_SHIFT) == pRam->cb, ("%RGp %RGp\n", (RTGCPHYS)iPage << PAGE_SHIFT, pRam->cb));
2275
2276 while (iPage-- > 0)
2277 {
2278 PPGMPAGE pPage = &pRam->aPages[iPage];
2279 switch (PGM_PAGE_GET_TYPE(pPage))
2280 {
2281 case PGMPAGETYPE_RAM:
2282 /* Free all shared pages. Private pages are automatically freed during GMM VM cleanup. */
2283 /** @todo change this to explicitly free private pages here. */
2284 if (PGM_PAGE_IS_SHARED(pPage))
2285 {
2286 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT),
2287 PGMPAGETYPE_RAM);
2288 AssertLogRelRCReturn(rc, rc);
2289 }
2290 break;
2291
2292 case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
2293 case PGMPAGETYPE_SPECIAL_ALIAS_MMIO:
2294 case PGMPAGETYPE_MMIO2:
2295 case PGMPAGETYPE_ROM_SHADOW: /* handled by pgmR3PhysRomReset. */
2296 case PGMPAGETYPE_ROM:
2297 case PGMPAGETYPE_MMIO:
2298 break;
2299 default:
2300 AssertFailed();
2301 }
2302 } /* for each page */
2303 }
2304
2305 /*
2306 * Finish off any pages pending freeing.
2307 */
2308 if (cPendingPages)
2309 {
2310 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
2311 AssertLogRelRCReturn(rc, rc);
2312 }
2313 GMMR3FreePagesCleanup(pReq);
2314 return VINF_SUCCESS;
2315}
2316
2317
2318/**
2319 * This is the interface IOM is using to register an MMIO region.
2320 *
2321 * It will check for conflicts and ensure that a RAM range structure
2322 * is present before calling the PGMR3HandlerPhysicalRegister API to
2323 * register the callbacks.
2324 *
2325 * @returns VBox status code.
2326 *
2327 * @param pVM The cross context VM structure.
2328 * @param GCPhys The start of the MMIO region.
2329 * @param cb The size of the MMIO region.
2330 * @param hType The physical access handler type registration.
2331 * @param pvUserR3 The user argument for R3.
2332 * @param pvUserR0 The user argument for R0.
2333 * @param pvUserRC The user argument for RC.
2334 * @param pszDesc The description of the MMIO region.
2335 */
2336VMMR3DECL(int) PGMR3PhysMMIORegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, PGMPHYSHANDLERTYPE hType,
2337 RTR3PTR pvUserR3, RTR0PTR pvUserR0, RTRCPTR pvUserRC, const char *pszDesc)
2338{
2339 /*
2340 * Assert on some assumptions.
2341 */
2342 VM_ASSERT_EMT(pVM);
2343 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2344 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2345 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
2346 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
2347 Assert(((PPGMPHYSHANDLERTYPEINT)MMHyperHeapOffsetToPtr(pVM, hType))->enmKind == PGMPHYSHANDLERKIND_MMIO);
2348
2349 int rc = pgmLock(pVM);
2350 AssertRCReturn(rc, rc);
2351
2352 /*
2353 * Make sure there's a RAM range structure for the region.
2354 */
2355 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
2356 bool fRamExists = false;
2357 PPGMRAMRANGE pRamPrev = NULL;
2358 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3;
2359 while (pRam && GCPhysLast >= pRam->GCPhys)
2360 {
2361 if ( GCPhysLast >= pRam->GCPhys
2362 && GCPhys <= pRam->GCPhysLast)
2363 {
2364 /* Simplification: all within the same range. */
2365 AssertLogRelMsgReturnStmt( GCPhys >= pRam->GCPhys
2366 && GCPhysLast <= pRam->GCPhysLast,
2367 ("%RGp-%RGp (MMIO/%s) falls partly outside %RGp-%RGp (%s)\n",
2368 GCPhys, GCPhysLast, pszDesc,
2369 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
2370 pgmUnlock(pVM),
2371 VERR_PGM_RAM_CONFLICT);
2372
2373 /* Check that it's all RAM or MMIO pages. */
2374 PCPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
2375 uint32_t cLeft = cb >> PAGE_SHIFT;
2376 while (cLeft-- > 0)
2377 {
2378 AssertLogRelMsgReturnStmt( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
2379 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO,
2380 ("%RGp-%RGp (MMIO/%s): %RGp is not a RAM or MMIO page - type=%d desc=%s\n",
2381 GCPhys, GCPhysLast, pszDesc, pRam->GCPhys, PGM_PAGE_GET_TYPE(pPage), pRam->pszDesc),
2382 pgmUnlock(pVM),
2383 VERR_PGM_RAM_CONFLICT);
2384 pPage++;
2385 }
2386
2387 /* Looks good. */
2388 fRamExists = true;
2389 break;
2390 }
2391
2392 /* next */
2393 pRamPrev = pRam;
2394 pRam = pRam->pNextR3;
2395 }
2396 PPGMRAMRANGE pNew;
2397 if (fRamExists)
2398 {
2399 pNew = NULL;
2400
2401 /*
2402 * Make all the pages in the range MMIO/ZERO pages, freeing any
2403 * RAM pages currently mapped here. This might not be 100% correct
2404 * for PCI memory, but we're doing the same thing for MMIO2 pages.
2405 */
2406 rc = pgmR3PhysFreePageRange(pVM, pRam, GCPhys, GCPhysLast, PGMPAGETYPE_MMIO);
2407 AssertRCReturnStmt(rc, pgmUnlock(pVM), rc);
2408
2409 /* Force a PGM pool flush as guest ram references have been changed. */
2410 /** @todo not entirely SMP safe; assuming for now the guest takes
2411 * care of this internally (not touch mapped mmio while changing the
2412 * mapping). */
2413 PVMCPU pVCpu = VMMGetCpu(pVM);
2414 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
2415 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2416 }
2417 else
2418 {
2419
2420 /*
2421 * No RAM range, insert an ad hoc one.
2422 *
2423 * Note that we don't have to tell REM about this range because
2424 * PGMHandlerPhysicalRegisterEx will do that for us.
2425 */
2426 Log(("PGMR3PhysMMIORegister: Adding ad hoc MMIO range for %RGp-%RGp %s\n", GCPhys, GCPhysLast, pszDesc));
2427
2428 const uint32_t cPages = cb >> PAGE_SHIFT;
2429 const size_t cbRamRange = RT_UOFFSETOF_DYN(PGMRAMRANGE, aPages[cPages]);
2430 rc = MMHyperAlloc(pVM, RT_UOFFSETOF_DYN(PGMRAMRANGE, aPages[cPages]), 16, MM_TAG_PGM_PHYS, (void **)&pNew);
2431 AssertLogRelMsgRCReturnStmt(rc, ("cbRamRange=%zu\n", cbRamRange), pgmUnlock(pVM), rc);
2432
2433 /* Initialize the range. */
2434 pNew->pSelfR0 = MMHyperCCToR0(pVM, pNew);
2435 pNew->GCPhys = GCPhys;
2436 pNew->GCPhysLast = GCPhysLast;
2437 pNew->cb = cb;
2438 pNew->pszDesc = pszDesc;
2439 pNew->fFlags = PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO;
2440 pNew->pvR3 = NULL;
2441 pNew->paLSPages = NULL;
2442
2443 uint32_t iPage = cPages;
2444 while (iPage-- > 0)
2445 PGM_PAGE_INIT_ZERO(&pNew->aPages[iPage], pVM, PGMPAGETYPE_MMIO);
2446 Assert(PGM_PAGE_GET_TYPE(&pNew->aPages[0]) == PGMPAGETYPE_MMIO);
2447
2448 /* update the page count stats. */
2449 pVM->pgm.s.cPureMmioPages += cPages;
2450 pVM->pgm.s.cAllPages += cPages;
2451
2452 /* link it */
2453 pgmR3PhysLinkRamRange(pVM, pNew, pRamPrev);
2454 }
2455
2456 /*
2457 * Register the access handler.
2458 */
2459 rc = PGMHandlerPhysicalRegister(pVM, GCPhys, GCPhysLast, hType, pvUserR3, pvUserR0, pvUserRC, pszDesc);
2460 if ( RT_FAILURE(rc)
2461 && !fRamExists)
2462 {
2463 pVM->pgm.s.cPureMmioPages -= cb >> PAGE_SHIFT;
2464 pVM->pgm.s.cAllPages -= cb >> PAGE_SHIFT;
2465
2466 /* remove the ad hoc range. */
2467 pgmR3PhysUnlinkRamRange2(pVM, pNew, pRamPrev);
2468 pNew->cb = pNew->GCPhys = pNew->GCPhysLast = NIL_RTGCPHYS;
2469 MMHyperFree(pVM, pNew);
2470 }
2471 pgmPhysInvalidatePageMapTLB(pVM);
2472
2473 pgmUnlock(pVM);
2474 return rc;
2475}
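
/*
 * Usage sketch for PGMR3PhysMMIORegister (illustrative; the guest physical
 * address and the hMmioHandlerType handle are hypothetical).  The handler type
 * is assumed to have been created elsewhere with kind PGMPHYSHANDLERKIND_MMIO,
 * as the assertion at the top of the function requires.
 */
#if 0
    int rc = PGMR3PhysMMIORegister(pVM, UINT32_C(0xe0000000) /*GCPhys*/, 16 * _4K /*cb*/,
                                   hMmioHandlerType, pDevIns /*pvUserR3*/,
                                   NIL_RTR0PTR /*pvUserR0*/, NIL_RTRCPTR /*pvUserRC*/,
                                   "Example device MMIO");
    AssertRCReturn(rc, rc);
#endif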
2476
2477
2478/**
2479 * This is the interface IOM is using to deregister an MMIO region.
2480 *
2481 * It will take care of calling PGMHandlerPhysicalDeregister and clean up
2482 * any ad hoc PGMRAMRANGE left behind.
2483 *
2484 * @returns VBox status code.
2485 * @param pVM The cross context VM structure.
2486 * @param GCPhys The start of the MMIO region.
2487 * @param cb The size of the MMIO region.
2488 */
2489VMMR3DECL(int) PGMR3PhysMMIODeregister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb)
2490{
2491 VM_ASSERT_EMT(pVM);
2492
2493 int rc = pgmLock(pVM);
2494 AssertRCReturn(rc, rc);
2495
2496 /*
2497 * First deregister the handler, then check if we should remove the ram range.
2498 */
2499 rc = PGMHandlerPhysicalDeregister(pVM, GCPhys);
2500 if (RT_SUCCESS(rc))
2501 {
2502 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
2503 PPGMRAMRANGE pRamPrev = NULL;
2504 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3;
2505 while (pRam && GCPhysLast >= pRam->GCPhys)
2506 {
2507 /** @todo We're being a bit too careful here. rewrite. */
2508 if ( GCPhysLast == pRam->GCPhysLast
2509 && GCPhys == pRam->GCPhys)
2510 {
2511 Assert(pRam->cb == cb);
2512
2513 /*
2514 * See if all the pages are dead MMIO pages.
2515 */
2516 uint32_t const cPages = cb >> PAGE_SHIFT;
2517 bool fAllMMIO = true;
2518 uint32_t iPage = 0;
2519 uint32_t cLeft = cPages;
2520 while (cLeft-- > 0)
2521 {
2522 PPGMPAGE pPage = &pRam->aPages[iPage];
2523 if ( !PGM_PAGE_IS_MMIO_OR_ALIAS(pPage)
2524 /*|| not-out-of-action later */)
2525 {
2526 fAllMMIO = false;
2527 AssertMsgFailed(("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), pPage));
2528 break;
2529 }
2530 Assert( PGM_PAGE_IS_ZERO(pPage)
2531 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
2532 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO);
2533 pPage++;
2534 }
2535 if (fAllMMIO)
2536 {
2537 /*
2538 * Ad-hoc range, unlink and free it.
2539 */
2540 Log(("PGMR3PhysMMIODeregister: Freeing ad hoc MMIO range for %RGp-%RGp %s\n",
2541 GCPhys, GCPhysLast, pRam->pszDesc));
2542
2543 pVM->pgm.s.cAllPages -= cPages;
2544 pVM->pgm.s.cPureMmioPages -= cPages;
2545
2546 pgmR3PhysUnlinkRamRange2(pVM, pRam, pRamPrev);
2547 pRam->cb = pRam->GCPhys = pRam->GCPhysLast = NIL_RTGCPHYS;
2548 MMHyperFree(pVM, pRam);
2549 break;
2550 }
2551 }
2552
2553 /*
2554 * Range match? It will all be within one range (see PGMAllHandler.cpp).
2555 */
2556 if ( GCPhysLast >= pRam->GCPhys
2557 && GCPhys <= pRam->GCPhysLast)
2558 {
2559 Assert(GCPhys >= pRam->GCPhys);
2560 Assert(GCPhysLast <= pRam->GCPhysLast);
2561
2562 /*
2563 * Turn the pages back into RAM pages.
2564 */
2565 uint32_t iPage = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
2566 uint32_t cLeft = cb >> PAGE_SHIFT;
2567 while (cLeft--)
2568 {
2569 PPGMPAGE pPage = &pRam->aPages[iPage];
2570 AssertMsg( (PGM_PAGE_IS_MMIO(pPage) && PGM_PAGE_IS_ZERO(pPage))
2571 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
2572 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO,
2573 ("%RGp %R[pgmpage]\n", pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), pPage));
2574 if (PGM_PAGE_IS_MMIO_OR_ALIAS(pPage))
2575 PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_RAM);
2576 }
2577 break;
2578 }
2579
2580 /* next */
2581 pRamPrev = pRam;
2582 pRam = pRam->pNextR3;
2583 }
2584 }
2585
2586 /* Force a PGM pool flush as guest ram references have been changed. */
2587 /** @todo Not entirely SMP safe; assuming for now the guest takes care of
2588 * this internally (not touch mapped mmio while changing the mapping). */
2589 PVMCPU pVCpu = VMMGetCpu(pVM);
2590 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
2591 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2592
2593 pgmPhysInvalidatePageMapTLB(pVM);
2594 pgmPhysInvalidRamRangeTlbs(pVM);
2595 pgmUnlock(pVM);
2596 return rc;
2597}
2598
2599
2600/**
2601 * Locate a MMIO2 range.
2602 *
2603 * @returns Pointer to the MMIO2 range.
2604 * @param pVM The cross context VM structure.
2605 * @param pDevIns The device instance owning the region.
2606 * @param iSubDev The sub-device number.
2607 * @param iRegion The region.
2608 */
2609DECLINLINE(PPGMREGMMIORANGE) pgmR3PhysMMIOExFind(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev, uint32_t iRegion)
2610{
2611 /*
2612 * Search the list. There shouldn't be many entries.
2613 */
2614 /** @todo Optimize this lookup! There may now be many entries and it'll
2615 * become really slow when doing MMR3HyperMapMMIO2 and similar. */
2616 for (PPGMREGMMIORANGE pCur = pVM->pgm.s.pRegMmioRangesR3; pCur; pCur = pCur->pNextR3)
2617 if ( pCur->pDevInsR3 == pDevIns
2618 && pCur->iRegion == iRegion
2619 && pCur->iSubDev == iSubDev)
2620 return pCur;
2621 return NULL;
2622}
2623
2624
2625#ifndef PGM_WITHOUT_MAPPINGS
2626/**
2627 * @callback_method_impl{FNPGMRELOCATE, Relocate a floating MMIO/MMIO2 range.}
2628 * @sa pgmR3PhysRamRangeRelocate
2629 */
2630static DECLCALLBACK(bool) pgmR3PhysMMIOExRangeRelocate(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew,
2631 PGMRELOCATECALL enmMode, void *pvUser)
2632{
2633 PPGMREGMMIORANGE pMmio = (PPGMREGMMIORANGE)pvUser;
2634 Assert(pMmio->RamRange.fFlags & PGM_RAM_RANGE_FLAGS_FLOATING);
2635 Assert(pMmio->RamRange.pSelfRC == GCPtrOld + PAGE_SIZE + RT_UOFFSETOF(PGMREGMMIORANGE, RamRange)); RT_NOREF_PV(GCPtrOld);
2636
2637 switch (enmMode)
2638 {
2639 case PGMRELOCATECALL_SUGGEST:
2640 return true;
2641
2642 case PGMRELOCATECALL_RELOCATE:
2643 {
2644 /*
2645 * Update myself, then relink all the ranges and flush the RC TLB.
2646 */
2647 pgmLock(pVM);
2648
2649 pMmio->RamRange.pSelfRC = (RTRCPTR)(GCPtrNew + PAGE_SIZE + RT_UOFFSETOF(PGMREGMMIORANGE, RamRange));
2650
2651 pgmR3PhysRelinkRamRanges(pVM);
2652 for (unsigned i = 0; i < PGM_RAMRANGE_TLB_ENTRIES; i++)
2653 pVM->pgm.s.apRamRangesTlbRC[i] = NIL_RTRCPTR;
2654
2655 pgmUnlock(pVM);
2656 return true;
2657 }
2658
2659 default:
2660 AssertFailedReturn(false);
2661 }
2662}
2663#endif /* !PGM_WITHOUT_MAPPINGS */
2664
2665
2666/**
2667 * Calculates the number of chunks
2668 *
2669 * @returns Number of registration chunks needed.
2670 * @param pVM The cross context VM structure.
2671 * @param cb The size of the MMIO/MMIO2 range.
2672 * @param pcPagesPerChunk Where to return the number of pages tracked by each
2673 * chunk. Optional.
2674 * @param pcbChunk Where to return the guest mapping size for a chunk.
2675 */
2676static uint16_t pgmR3PhysMMIOExCalcChunkCount(PVM pVM, RTGCPHYS cb, uint32_t *pcPagesPerChunk, uint32_t *pcbChunk)
2677{
2678 RT_NOREF_PV(pVM); /* without raw mode */
2679
2680 /*
2681 * This is the same calculation as PGMR3PhysRegisterRam does, except we'll be
2682 * needing a few extra bytes for the PGMREGMMIORANGE structure.
2683 *
2684 * Note! In addition, we've got a 24-bit sub-page range for MMIO2 ranges, leaving
2685 * us with an absolute maximum of 16777215 pages per chunk (close to 64 GB).
2686 */
2687 uint32_t cbChunk = 16U*_1M;
2688 uint32_t cPagesPerChunk = 1048048; /* max ~1048059 */
2689 AssertCompile(sizeof(PGMREGMMIORANGE) + sizeof(PGMPAGE) * 1048048 < 16U*_1M - PAGE_SIZE * 2);
2690 AssertRelease(cPagesPerChunk <= PGM_MMIO2_MAX_PAGE_COUNT); /* See above note. */
2691 AssertRelease(RT_UOFFSETOF_DYN(PGMREGMMIORANGE, RamRange.aPages[cPagesPerChunk]) + PAGE_SIZE * 2 <= cbChunk);
2692 if (pcbChunk)
2693 *pcbChunk = cbChunk;
2694 if (pcPagesPerChunk)
2695 *pcPagesPerChunk = cPagesPerChunk;
2696
2697 /* Calc the number of chunks we need. */
2698 RTGCPHYS const cPages = cb >> X86_PAGE_SHIFT;
2699 uint16_t cChunks = (uint16_t)((cPages + cPagesPerChunk - 1) / cPagesPerChunk);
2700 AssertRelease((RTGCPHYS)cChunks * cPagesPerChunk >= cPages);
2701 return cChunks;
2702}
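
/*
 * Worked example (illustrative, hypothetical region size): a 32 GB MMIO2
 * region is 8388608 pages.  With 1048048 pages tracked per 16 MB chunk that
 * is ceil(8388608 / 1048048) = 9 registration chunks, the last of which only
 * tracks the remaining 4224 pages.
 */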
2703
2704
2705/**
2706 * Worker for PGMR3PhysMMIOExPreRegister & PGMR3PhysMMIO2Register that allocates
2707 * the PGMREGMMIORANGE structures and does basic initialization.
2708 *
2709 * Caller must set type specific members and initialize the PGMPAGE structures.
2710 *
2711 * @returns VBox status code.
2712 * @param pVM The cross context VM structure.
2713 * @param pDevIns The device instance owning the region.
2714 * @param iSubDev The sub-device number (internal PCI config number).
2715 * @param iRegion The region number. If the MMIO2 memory is a PCI
2716 * I/O region this number has to be the number of that
2717 * region. Otherwise it can be any number save
2718 * UINT8_MAX.
2719 * @param cb The size of the region. Must be page aligned.
2720 * @param pszDesc The description.
2721 * @param ppHeadRet Where to return the pointer to the first
2722 * registration chunk.
2723 *
2724 * @thread EMT
2725 */
2726static int pgmR3PhysMMIOExCreate(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev, uint32_t iRegion, RTGCPHYS cb,
2727 const char *pszDesc, PPGMREGMMIORANGE *ppHeadRet)
2728{
2729 /*
2730 * Figure out how many chunks we need and of which size.
2731 */
2732 uint32_t cPagesPerChunk;
2733 uint16_t cChunks = pgmR3PhysMMIOExCalcChunkCount(pVM, cb, &cPagesPerChunk, NULL);
2734 AssertReturn(cChunks, VERR_PGM_PHYS_MMIO_EX_IPE);
2735
2736 /*
2737 * Allocate the chunks.
2738 */
2739 PPGMREGMMIORANGE *ppNext = ppHeadRet;
2740 *ppNext = NULL;
2741
2742 int rc = VINF_SUCCESS;
2743 uint32_t cPagesLeft = cb >> X86_PAGE_SHIFT;
2744 for (uint16_t iChunk = 0; iChunk < cChunks && RT_SUCCESS(rc); iChunk++)
2745 {
2746 /*
2747 * We currently do a single RAM range for the whole thing. This will
2748 * probably have to change once someone needs really large MMIO regions,
2749 * as we will be running into SUPR3PageAllocEx limitations and such.
2750 */
2751 const uint32_t cPagesTrackedByChunk = RT_MIN(cPagesLeft, cPagesPerChunk);
2752 const size_t cbRange = RT_UOFFSETOF_DYN(PGMREGMMIORANGE, RamRange.aPages[cPagesTrackedByChunk]);
2753 PPGMREGMMIORANGE pNew = NULL;
2754 if ( iChunk + 1 < cChunks
2755 || cbRange >= _1M)
2756 {
2757 /*
2758 * Allocate memory for the registration structure.
2759 */
2760 size_t const cChunkPages = RT_ALIGN_Z(cbRange, PAGE_SIZE) >> PAGE_SHIFT;
2761 size_t const cbChunk = (1 + cChunkPages + 1) << PAGE_SHIFT;
2762 AssertLogRelBreakStmt(cbChunk == (uint32_t)cbChunk, rc = VERR_OUT_OF_RANGE);
2763 PSUPPAGE paChunkPages = (PSUPPAGE)RTMemTmpAllocZ(sizeof(SUPPAGE) * cChunkPages);
2764 AssertBreakStmt(paChunkPages, rc = VERR_NO_TMP_MEMORY);
2765 RTR0PTR R0PtrChunk = NIL_RTR0PTR;
2766 void *pvChunk = NULL;
2767 rc = SUPR3PageAllocEx(cChunkPages, 0 /*fFlags*/, &pvChunk, &R0PtrChunk, paChunkPages);
2768 AssertLogRelMsgRCBreakStmt(rc, ("rc=%Rrc, cChunkPages=%#zx\n", rc, cChunkPages), RTMemTmpFree(paChunkPages));
2769
2770 Assert(R0PtrChunk != NIL_RTR0PTR);
2771 memset(pvChunk, 0, cChunkPages << PAGE_SHIFT);
2772
2773 pNew = (PPGMREGMMIORANGE)pvChunk;
2774 pNew->RamRange.fFlags = PGM_RAM_RANGE_FLAGS_FLOATING;
2775 pNew->RamRange.pSelfR0 = R0PtrChunk + RT_UOFFSETOF(PGMREGMMIORANGE, RamRange);
2776
2777 RTMemTmpFree(paChunkPages);
2778 }
2779 /*
2780 * Not so big, do a one time hyper allocation.
2781 */
2782 else
2783 {
2784 rc = MMR3HyperAllocOnceNoRel(pVM, cbRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew);
2785 AssertLogRelMsgRCBreak(rc, ("cbRange=%zu\n", cbRange));
2786
2787 /*
2788 * Initialize allocation specific items.
2789 */
2790 //pNew->RamRange.fFlags = 0;
2791 pNew->RamRange.pSelfR0 = MMHyperCCToR0(pVM, &pNew->RamRange);
2792 }
2793
2794 /*
2795 * Initialize the registration structure (caller does specific bits).
2796 */
2797 pNew->pDevInsR3 = pDevIns;
2798 //pNew->pvR3 = NULL;
2799 //pNew->pNext = NULL;
2800 //pNew->fFlags = 0;
2801 if (iChunk == 0)
2802 pNew->fFlags |= PGMREGMMIORANGE_F_FIRST_CHUNK;
2803 if (iChunk + 1 == cChunks)
2804 pNew->fFlags |= PGMREGMMIORANGE_F_LAST_CHUNK;
2805 pNew->iSubDev = iSubDev;
2806 pNew->iRegion = iRegion;
2807 pNew->idSavedState = UINT8_MAX;
2808 pNew->idMmio2 = UINT8_MAX;
2809 //pNew->pPhysHandlerR3 = NULL;
2810 //pNew->paLSPages = NULL;
2811 pNew->RamRange.GCPhys = NIL_RTGCPHYS;
2812 pNew->RamRange.GCPhysLast = NIL_RTGCPHYS;
2813 pNew->RamRange.pszDesc = pszDesc;
2814 pNew->RamRange.cb = pNew->cbReal = (RTGCPHYS)cPagesTrackedByChunk << X86_PAGE_SHIFT;
2815 pNew->RamRange.fFlags |= PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO_EX;
2816 //pNew->RamRange.pvR3 = NULL;
2817 //pNew->RamRange.paLSPages = NULL;
2818
2819 *ppNext = pNew;
2820 ASMCompilerBarrier();
2821 cPagesLeft -= cPagesTrackedByChunk;
2822 ppNext = &pNew->pNextR3;
2823 }
2824 Assert(cPagesLeft == 0);
2825
2826 if (RT_SUCCESS(rc))
2827 {
2828 Assert((*ppHeadRet)->fFlags & PGMREGMMIORANGE_F_FIRST_CHUNK);
2829 return VINF_SUCCESS;
2830 }
2831
2832 /*
2833 * Free floating ranges.
2834 */
2835 while (*ppHeadRet)
2836 {
2837 PPGMREGMMIORANGE pFree = *ppHeadRet;
2838 *ppHeadRet = pFree->pNextR3;
2839
2840 if (pFree->RamRange.fFlags & PGM_RAM_RANGE_FLAGS_FLOATING)
2841 {
2842 const size_t cbRange = RT_UOFFSETOF_DYN(PGMREGMMIORANGE, RamRange.aPages[pFree->RamRange.cb >> X86_PAGE_SHIFT]);
2843 size_t const cChunkPages = RT_ALIGN_Z(cbRange, PAGE_SIZE) >> PAGE_SHIFT;
2844 SUPR3PageFreeEx(pFree, cChunkPages);
2845 }
2846 }
2847
2848 return rc;
2849}
2850
2851
2852/**
2853 * Common worker PGMR3PhysMMIOExPreRegister & PGMR3PhysMMIO2Register that links
2854 * a complete registration entry into the lists and lookup tables.
2855 *
2856 * @param pVM The cross context VM structure.
2857 * @param pNew The new MMIO / MMIO2 registration to link.
2858 */
2859static void pgmR3PhysMMIOExLink(PVM pVM, PPGMREGMMIORANGE pNew)
2860{
2861 /*
2862 * Link it into the list (order doesn't matter, so insert it at the head).
2863 *
2864 * Note! The range we're linking may consist of multiple chunks, so we have to
2865 * find the last one.
2866 */
2867 PPGMREGMMIORANGE pLast = pNew;
2868 for (pLast = pNew; ; pLast = pLast->pNextR3)
2869 {
2870 if (pLast->fFlags & PGMREGMMIORANGE_F_LAST_CHUNK)
2871 break;
2872 Assert(pLast->pNextR3);
2873 Assert(pLast->pNextR3->pDevInsR3 == pNew->pDevInsR3);
2874 Assert(pLast->pNextR3->iSubDev == pNew->iSubDev);
2875 Assert(pLast->pNextR3->iRegion == pNew->iRegion);
2876 Assert((pLast->pNextR3->fFlags & PGMREGMMIORANGE_F_MMIO2) == (pNew->fFlags & PGMREGMMIORANGE_F_MMIO2));
2877 Assert(pLast->pNextR3->idMmio2 == (pLast->fFlags & PGMREGMMIORANGE_F_MMIO2 ? pNew->idMmio2 + 1 : UINT8_MAX));
2878 }
2879
2880 pgmLock(pVM);
2881
2882 /* Link in the chain of ranges at the head of the list. */
2883 pLast->pNextR3 = pVM->pgm.s.pRegMmioRangesR3;
2884 pVM->pgm.s.pRegMmioRangesR3 = pNew;
2885
2886 /* If MMIO2, insert the MMIO2 range/page IDs. */
2887 uint8_t idMmio2 = pNew->idMmio2;
2888 if (idMmio2 != UINT8_MAX)
2889 {
2890 for (;;)
2891 {
2892 Assert(pNew->fFlags & PGMREGMMIORANGE_F_MMIO2);
2893 Assert(pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] == NULL);
2894 Assert(pVM->pgm.s.apMmio2RangesR0[idMmio2 - 1] == NIL_RTR0PTR);
2895 pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] = pNew;
2896 pVM->pgm.s.apMmio2RangesR0[idMmio2 - 1] = pNew->RamRange.pSelfR0 - RT_UOFFSETOF(PGMREGMMIORANGE, RamRange);
2897 if (pNew->fFlags & PGMREGMMIORANGE_F_LAST_CHUNK)
2898 break;
2899 pNew = pNew->pNextR3;
2900 }
2901 }
2902 else
2903 Assert(!(pNew->fFlags & PGMREGMMIORANGE_F_MMIO2));
2904
2905 pgmPhysInvalidatePageMapTLB(pVM);
2906 pgmUnlock(pVM);
2907}
2908
2909
2910/**
2911 * Allocate and pre-register an MMIO region.
2912 *
2913 * This is currently the way to deal with large MMIO regions. It may in the
2914 * future be extended to be the way we deal with all MMIO regions, but that
2915 * means we'll have to do something about the simple list based approach we take
2916 * to tracking the registrations.
2917 *
2918 * @returns VBox status code.
2919 * @retval VINF_SUCCESS on success.
2921 * @retval VERR_ALREADY_EXISTS if the region already exists.
2922 *
2923 * @param pVM The cross context VM structure.
2924 * @param pDevIns The device instance owning the region.
2925 * @param iSubDev The sub-device number.
2926 * @param iRegion The region number. If the MMIO2 memory is a PCI
2927 * I/O region this number has to be the number of that
2928 * region. Otherwise it can be any number save
2929 * UINT8_MAX.
2930 * @param cbRegion The size of the region. Must be page aligned.
2931 * @param hType The physical handler callback type.
2932 * @param pvUserR3 User parameter for ring-3 context callbacks.
2933 * @param pvUserR0 User parameter for ring-0 context callbacks.
2934 * @param pvUserRC User parameter for raw-mode context callbacks.
2935 * @param pszDesc The description.
2936 *
2937 * @thread EMT
2938 *
2939 * @sa PGMR3PhysMMIORegister, PGMR3PhysMMIO2Register,
2940 * PGMR3PhysMMIOExMap, PGMR3PhysMMIOExUnmap, PGMR3PhysMMIOExDeregister.
2941 */
2942VMMR3DECL(int) PGMR3PhysMMIOExPreRegister(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev, uint32_t iRegion, RTGCPHYS cbRegion,
2943 PGMPHYSHANDLERTYPE hType, RTR3PTR pvUserR3, RTR0PTR pvUserR0, RTRCPTR pvUserRC,
2944 const char *pszDesc)
2945{
2946 /*
2947 * Validate input.
2948 */
2949 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
2950 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
2951 AssertReturn(iSubDev <= UINT8_MAX, VERR_INVALID_PARAMETER);
2952 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
2953 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
2954 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
2955 AssertReturn(pgmR3PhysMMIOExFind(pVM, pDevIns, iSubDev, iRegion) == NULL, VERR_ALREADY_EXISTS);
2956 AssertReturn(!(cbRegion & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2957 AssertReturn(cbRegion, VERR_INVALID_PARAMETER);
2958
2959 const uint32_t cPages = cbRegion >> PAGE_SHIFT;
2960 AssertLogRelReturn(((RTGCPHYS)cPages << PAGE_SHIFT) == cbRegion, VERR_INVALID_PARAMETER);
2961 AssertLogRelReturn(cPages <= (MM_MMIO_64_MAX >> X86_PAGE_SHIFT), VERR_OUT_OF_RANGE);
2962
2963 /*
2964 * For the 2nd+ instance, mangle the description string so it's unique.
2965 */
2966 if (pDevIns->iInstance > 0) /** @todo Move to PDMDevHlp.cpp and use a real string cache. */
2967 {
2968 pszDesc = MMR3HeapAPrintf(pVM, MM_TAG_PGM_PHYS, "%s [%u]", pszDesc, pDevIns->iInstance);
2969 if (!pszDesc)
2970 return VERR_NO_MEMORY;
2971 }
2972
2973 /*
2974 * Register the MMIO callbacks.
2975 */
2976 PPGMPHYSHANDLER pPhysHandler;
2977 int rc = pgmHandlerPhysicalExCreate(pVM, hType, pvUserR3, pvUserR0, pvUserRC, pszDesc, &pPhysHandler);
2978 if (RT_SUCCESS(rc))
2979 {
2980 /*
2981 * Create the registered MMIO range record for it.
2982 */
2983 PPGMREGMMIORANGE pNew;
2984 rc = pgmR3PhysMMIOExCreate(pVM, pDevIns, iSubDev, iRegion, cbRegion, pszDesc, &pNew);
2985 if (RT_SUCCESS(rc))
2986 {
2987 Assert(!(pNew->fFlags & PGMREGMMIORANGE_F_MMIO2));
2988
2989 /*
2990 * Initialize the page structures and set up physical handlers (one for each chunk).
2991 */
2992 for (PPGMREGMMIORANGE pCur = pNew; pCur != NULL && RT_SUCCESS(rc); pCur = pCur->pNextR3)
2993 {
2994 if (pCur == pNew)
2995 pCur->pPhysHandlerR3 = pPhysHandler;
2996 else
2997 rc = pgmHandlerPhysicalExDup(pVM, pPhysHandler, &pCur->pPhysHandlerR3);
2998
2999 uint32_t iPage = pCur->RamRange.cb >> X86_PAGE_SHIFT;
3000 while (iPage-- > 0)
3001 PGM_PAGE_INIT_ZERO(&pCur->RamRange.aPages[iPage], pVM, PGMPAGETYPE_MMIO);
3002 }
3003 if (RT_SUCCESS(rc))
3004 {
3005 /*
3006 * Update the page count stats, link the registration and we're done.
3007 */
3008 pVM->pgm.s.cAllPages += cPages;
3009 pVM->pgm.s.cPureMmioPages += cPages;
3010
3011 pgmR3PhysMMIOExLink(pVM, pNew);
3012 return VINF_SUCCESS;
3013 }
3014
3015 /*
3016 * Clean up in case we're out of memory for extra access handlers.
3017 */
3018 while (pNew != NULL)
3019 {
3020 PPGMREGMMIORANGE pFree = pNew;
3021 pNew = pFree->pNextR3;
3022
3023 if (pFree->pPhysHandlerR3)
3024 {
3025 pgmHandlerPhysicalExDestroy(pVM, pFree->pPhysHandlerR3);
3026 pFree->pPhysHandlerR3 = NULL;
3027 }
3028
3029 if (pFree->RamRange.fFlags & PGM_RAM_RANGE_FLAGS_FLOATING)
3030 {
3031 const size_t cbRange = RT_UOFFSETOF_DYN(PGMREGMMIORANGE, RamRange.aPages[pFree->RamRange.cb >> X86_PAGE_SHIFT]);
3032 size_t const cChunkPages = RT_ALIGN_Z(cbRange, PAGE_SIZE) >> PAGE_SHIFT;
3033 SUPR3PageFreeEx(pFree, cChunkPages);
3034 }
3035 }
3036 }
3037 else
3038 pgmHandlerPhysicalExDestroy(pVM, pPhysHandler);
3039 }
3040 return rc;
3041}
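
/*
 * Usage sketch for PGMR3PhysMMIOExPreRegister (illustrative; the device,
 * region number, size and handler type handle are hypothetical).  The region
 * only becomes visible to the guest once PGMR3PhysMMIOExMap is called, e.g.
 * when the guest programs the corresponding PCI BAR.
 */
#if 0
    int rc = PGMR3PhysMMIOExPreRegister(pVM, pDevIns, 0 /*iSubDev*/, 2 /*iRegion*/, 512 * _1M /*cbRegion*/,
                                        hMmioHandlerType, pDevIns /*pvUserR3*/,
                                        NIL_RTR0PTR /*pvUserR0*/, NIL_RTRCPTR /*pvUserRC*/,
                                        "Example large MMIO window");
    AssertRCReturn(rc, rc);
#endif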
3042
3043
3044/**
3045 * Allocate and register an MMIO2 region.
3046 *
3047 * As mentioned elsewhere, MMIO2 is just RAM spelled differently. It's RAM
3048 * associated with a device. It is also non-shared memory with a permanent
3049 * ring-3 mapping and page backing (presently).
3050 *
3051 * An MMIO2 range may overlap with base memory if a lot of RAM is configured for
3052 * the VM, in which case we'll drop the base memory pages. Presently we will
3053 * make no attempt to preserve anything that happens to be present in the base
3054 * memory that is replaced, this is of course incorrect but it's too much
3055 * effort.
3056 *
3057 * @returns VBox status code.
3058 * @retval VINF_SUCCESS on success, *ppv pointing to the R3 mapping of the
3059 * memory.
3060 * @retval VERR_ALREADY_EXISTS if the region already exists.
3061 *
3062 * @param pVM The cross context VM structure.
3063 * @param pDevIns The device instance owning the region.
3064 * @param iSubDev The sub-device number.
3065 * @param iRegion The region number. If the MMIO2 memory is a PCI
3066 * I/O region this number has to be the number of that
3067 * region. Otherwise it can be any number save
3068 * UINT8_MAX.
3069 * @param cb The size of the region. Must be page aligned.
3070 * @param fFlags Reserved for future use, must be zero.
3071 * @param ppv Where to store the pointer to the ring-3 mapping of
3072 * the memory.
3073 * @param pszDesc The description.
3074 * @thread EMT
3075 */
3076VMMR3DECL(int) PGMR3PhysMMIO2Register(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev, uint32_t iRegion, RTGCPHYS cb,
3077 uint32_t fFlags, void **ppv, const char *pszDesc)
3078{
3079 /*
3080 * Validate input.
3081 */
3082 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
3083 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
3084 AssertReturn(iSubDev <= UINT8_MAX, VERR_INVALID_PARAMETER);
3085 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
3086 AssertPtrReturn(ppv, VERR_INVALID_POINTER);
3087 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
3088 AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
3089 AssertReturn(pgmR3PhysMMIOExFind(pVM, pDevIns, iSubDev, iRegion) == NULL, VERR_ALREADY_EXISTS);
3090 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3091 AssertReturn(cb, VERR_INVALID_PARAMETER);
3092 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
3093
3094 const uint32_t cPages = cb >> PAGE_SHIFT;
3095 AssertLogRelReturn(((RTGCPHYS)cPages << PAGE_SHIFT) == cb, VERR_INVALID_PARAMETER);
3096 AssertLogRelReturn(cPages <= (MM_MMIO_64_MAX >> X86_PAGE_SHIFT), VERR_OUT_OF_RANGE);
3097
3098 /*
3099 * For the 2nd+ instance, mangle the description string so it's unique.
3100 */
3101 if (pDevIns->iInstance > 0) /** @todo Move to PDMDevHlp.cpp and use a real string cache. */
3102 {
3103 pszDesc = MMR3HeapAPrintf(pVM, MM_TAG_PGM_PHYS, "%s [%u]", pszDesc, pDevIns->iInstance);
3104 if (!pszDesc)
3105 return VERR_NO_MEMORY;
3106 }
3107
3108 /*
3109 * Allocate an MMIO2 range ID (not freed on failure).
3110 *
3111 * The zero ID is not used as it could be confused with NIL_GMM_PAGEID, so
3112 * the IDs go from 1 thru PGM_MMIO2_MAX_RANGES.
3113 */
3114 unsigned cChunks = pgmR3PhysMMIOExCalcChunkCount(pVM, cb, NULL, NULL);
3115 pgmLock(pVM);
3116 uint8_t idMmio2 = pVM->pgm.s.cMmio2Regions + 1;
3117 unsigned cNewMmio2Regions = pVM->pgm.s.cMmio2Regions + cChunks;
3118 if (cNewMmio2Regions > PGM_MMIO2_MAX_RANGES)
3119 {
3120 pgmUnlock(pVM);
3121 AssertLogRelFailedReturn(VERR_PGM_TOO_MANY_MMIO2_RANGES);
3122 }
3123 pVM->pgm.s.cMmio2Regions = cNewMmio2Regions;
3124 pgmUnlock(pVM);
3125
3126 /*
3127 * Try reserve and allocate the backing memory first as this is what is
3128 * most likely to fail.
3129 */
3130 int rc = MMR3AdjustFixedReservation(pVM, cPages, pszDesc);
3131 if (RT_SUCCESS(rc))
3132 {
3133 PSUPPAGE paPages = (PSUPPAGE)RTMemTmpAlloc(cPages * sizeof(SUPPAGE));
3134 if (RT_SUCCESS(rc = paPages ? VINF_SUCCESS : VERR_NO_TMP_MEMORY))
3135 {
3136 void *pvPages;
3137 rc = SUPR3PageAllocEx(cPages, 0 /*fFlags*/, &pvPages, NULL /*pR0Ptr*/, paPages);
3138 if (RT_SUCCESS(rc))
3139 {
3140 memset(pvPages, 0, cPages * PAGE_SIZE);
3141
3142 /*
3143 * Create the registered MMIO range record for it.
3144 */
3145 PPGMREGMMIORANGE pNew;
3146 rc = pgmR3PhysMMIOExCreate(pVM, pDevIns, iSubDev, iRegion, cb, pszDesc, &pNew);
3147 if (RT_SUCCESS(rc))
3148 {
3149 uint32_t iSrcPage = 0;
3150 uint8_t *pbCurPages = (uint8_t *)pvPages;
3151 for (PPGMREGMMIORANGE pCur = pNew; pCur; pCur = pCur->pNextR3)
3152 {
3153 pCur->pvR3 = pbCurPages;
3154 pCur->RamRange.pvR3 = pbCurPages;
3155 pCur->idMmio2 = idMmio2;
3156 pCur->fFlags |= PGMREGMMIORANGE_F_MMIO2;
3157
3158 uint32_t iDstPage = pCur->RamRange.cb >> X86_PAGE_SHIFT;
3159 while (iDstPage-- > 0)
3160 {
3161 PGM_PAGE_INIT(&pNew->RamRange.aPages[iDstPage],
3162 paPages[iDstPage + iSrcPage].Phys,
3163 PGM_MMIO2_PAGEID_MAKE(idMmio2, iDstPage),
3164 PGMPAGETYPE_MMIO2, PGM_PAGE_STATE_ALLOCATED);
3165 }
3166
3167 /* advance. */
3168 iSrcPage += pCur->RamRange.cb >> X86_PAGE_SHIFT;
3169 pbCurPages += pCur->RamRange.cb;
3170 idMmio2++;
3171 }
3172
3173 RTMemTmpFree(paPages);
3174
3175 /*
3176 * Update the page count stats, link the registration and we're done.
3177 */
3178 pVM->pgm.s.cAllPages += cPages;
3179 pVM->pgm.s.cPrivatePages += cPages;
3180
3181 pgmR3PhysMMIOExLink(pVM, pNew);
3182
3183 *ppv = pvPages;
3184 return VINF_SUCCESS;
3185 }
3186
3187 SUPR3PageFreeEx(pvPages, cPages);
3188 }
3189 }
3190 RTMemTmpFree(paPages);
3191 MMR3AdjustFixedReservation(pVM, -(int32_t)cPages, pszDesc);
3192 }
3193 if (pDevIns->iInstance > 0)
3194 MMR3HeapFree((void *)pszDesc);
3195 return rc;
3196}
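
/*
 * Usage sketch for PGMR3PhysMMIO2Register (illustrative; the sizes and names
 * are hypothetical, and devices normally reach this through the PDM device
 * helper layer rather than calling PGM directly).
 */
#if 0
    void *pvVRam = NULL;
    int rc = PGMR3PhysMMIO2Register(pVM, pDevIns, 0 /*iSubDev*/, 0 /*iRegion*/, 16 * _1M /*cb*/,
                                    0 /*fFlags*/, &pvVRam, "Example VRAM");
    AssertRCReturn(rc, rc);
    /* pvVRam is the permanent ring-3 mapping of the zero initialized backing
       pages; the region is mapped into the guest later via PGMR3PhysMMIOExMap. */
#endif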
3197
3198
3199/**
3200 * Deregisters and frees an MMIO2 region or a pre-registered MMIO region.
3201 *
3202 * Any physical (and virtual) access handlers registered for the region must
3203 * be deregistered before calling this function.
3204 *
3205 * @returns VBox status code.
3206 * @param pVM The cross context VM structure.
3207 * @param pDevIns The device instance owning the region.
3208 * @param iSubDev The sub-device number. Pass UINT32_MAX for wildcard
3209 * matching.
3210 * @param iRegion The region. Pass UINT32_MAX for wildcard matching.
3211 */
3212VMMR3DECL(int) PGMR3PhysMMIOExDeregister(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev, uint32_t iRegion)
3213{
3214 /*
3215 * Validate input.
3216 */
3217 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
3218 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
3219 AssertReturn(iSubDev <= UINT8_MAX || iSubDev == UINT32_MAX, VERR_INVALID_PARAMETER);
3220 AssertReturn(iRegion <= UINT8_MAX || iRegion == UINT32_MAX, VERR_INVALID_PARAMETER);
3221
3222 /*
3223 * The loop here scanning all registrations will make sure that multi-chunk ranges
3224 * get properly deregistered, though its original purpose was the wildcard iRegion.
3225 */
3226 pgmLock(pVM);
3227 int rc = VINF_SUCCESS;
3228 unsigned cFound = 0;
3229 PPGMREGMMIORANGE pPrev = NULL;
3230 PPGMREGMMIORANGE pCur = pVM->pgm.s.pRegMmioRangesR3;
3231 while (pCur)
3232 {
3233 if ( pCur->pDevInsR3 == pDevIns
3234 && ( iRegion == UINT32_MAX
3235 || pCur->iRegion == iRegion)
3236 && ( iSubDev == UINT32_MAX
3237 || pCur->iSubDev == iSubDev) )
3238 {
3239 cFound++;
3240
3241 /*
3242 * Unmap it if it's mapped.
3243 */
3244 if (pCur->fFlags & PGMREGMMIORANGE_F_MAPPED)
3245 {
3246 int rc2 = PGMR3PhysMMIOExUnmap(pVM, pCur->pDevInsR3, pCur->iSubDev, pCur->iRegion, pCur->RamRange.GCPhys);
3247 AssertRC(rc2);
3248 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
3249 rc = rc2;
3250 }
3251
3252 /*
3253 * Must tell IOM about MMIO (first one only).
3254 */
3255 if ((pCur->fFlags & (PGMREGMMIORANGE_F_MMIO2 | PGMREGMMIORANGE_F_FIRST_CHUNK)) == PGMREGMMIORANGE_F_FIRST_CHUNK)
3256 IOMR3MmioExNotifyDeregistered(pVM, pCur->pPhysHandlerR3->pvUserR3);
3257
3258 /*
3259 * Unlink it
3260 */
3261 PPGMREGMMIORANGE pNext = pCur->pNextR3;
3262 if (pPrev)
3263 pPrev->pNextR3 = pNext;
3264 else
3265 pVM->pgm.s.pRegMmioRangesR3 = pNext;
3266 pCur->pNextR3 = NULL;
3267
3268 uint8_t idMmio2 = pCur->idMmio2;
3269 if (idMmio2 != UINT8_MAX)
3270 {
3271 Assert(pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] == pCur);
3272 pVM->pgm.s.apMmio2RangesR3[idMmio2 - 1] = NULL;
3273 pVM->pgm.s.apMmio2RangesR0[idMmio2 - 1] = NIL_RTR0PTR;
3274 }
3275
3276 /*
3277 * Free the memory.
3278 */
3279 uint32_t const cPages = pCur->cbReal >> PAGE_SHIFT;
3280 if (pCur->fFlags & PGMREGMMIORANGE_F_MMIO2)
3281 {
3282 int rc2 = SUPR3PageFreeEx(pCur->pvR3, cPages);
3283 AssertRC(rc2);
3284 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
3285 rc = rc2;
3286
3287 rc2 = MMR3AdjustFixedReservation(pVM, -(int32_t)cPages, pCur->RamRange.pszDesc);
3288 AssertRC(rc2);
3289 if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
3290 rc = rc2;
3291 }
3292
3293 /* we're leaking hyper memory here if done at runtime. */
3294#ifdef VBOX_STRICT
3295 VMSTATE const enmState = VMR3GetState(pVM);
3296 AssertMsg( enmState == VMSTATE_POWERING_OFF
3297 || enmState == VMSTATE_POWERING_OFF_LS
3298 || enmState == VMSTATE_OFF
3299 || enmState == VMSTATE_OFF_LS
3300 || enmState == VMSTATE_DESTROYING
3301 || enmState == VMSTATE_TERMINATED
3302 || enmState == VMSTATE_CREATING
3303 , ("%s\n", VMR3GetStateName(enmState)));
3304#endif
3305
3306 const bool fIsMmio2 = RT_BOOL(pCur->fFlags & PGMREGMMIORANGE_F_MMIO2);
3307 if (pCur->RamRange.fFlags & PGM_RAM_RANGE_FLAGS_FLOATING)
3308 {
3309 const size_t cbRange = RT_UOFFSETOF_DYN(PGMREGMMIORANGE, RamRange.aPages[cPages]);
3310 size_t const cChunkPages = RT_ALIGN_Z(cbRange, PAGE_SIZE) >> PAGE_SHIFT;
3311 SUPR3PageFreeEx(pCur, cChunkPages);
3312 }
3313 /*else
3314 {
3315 rc = MMHyperFree(pVM, pCur); - does not work, see the alloc call.
3316 AssertRCReturn(rc, rc);
3317 } */
3318
3319
3320 /* update page count stats */
3321 pVM->pgm.s.cAllPages -= cPages;
3322 if (fIsMmio2)
3323 pVM->pgm.s.cPrivatePages -= cPages;
3324 else
3325 pVM->pgm.s.cPureMmioPages -= cPages;
3326
3327 /* next */
3328 pCur = pNext;
3329 }
3330 else
3331 {
3332 pPrev = pCur;
3333 pCur = pCur->pNextR3;
3334 }
3335 }
3336 pgmPhysInvalidatePageMapTLB(pVM);
3337 pgmUnlock(pVM);
3338 return !cFound && iRegion != UINT32_MAX && iSubDev != UINT32_MAX ? VERR_NOT_FOUND : rc;
3339}
3340
3341
3342/**
3343 * Maps a MMIO2 region or a pre-registered MMIO region.
3344 *
3345 * This is done when a guest / the bios / state loading changes the
3346 * PCI config. The replacing of base memory has the same restrictions
3347 * as during registration, of course.
3348 *
3349 * @returns VBox status code.
3350 *
3351 * @param pVM The cross context VM structure.
3352 * @param pDevIns The device instance owning the region.
3353 * @param iSubDev The sub-device number of the registered region.
3354 * @param iRegion The index of the registered region.
3355 * @param GCPhys The guest-physical address to be remapped.
3356 */
3357VMMR3DECL(int) PGMR3PhysMMIOExMap(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev, uint32_t iRegion, RTGCPHYS GCPhys)
3358{
3359 /*
3360 * Validate input.
3361 *
3362 * Note! It's safe to walk the MMIO/MMIO2 list since registrations only
3363 * happens during VM construction.
3364 */
3365 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
3366 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
3367 AssertReturn(iSubDev <= UINT8_MAX, VERR_INVALID_PARAMETER);
3368 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
3369 AssertReturn(GCPhys != NIL_RTGCPHYS, VERR_INVALID_PARAMETER);
3370 AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER);
3371 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3372
3373 PPGMREGMMIORANGE pFirstMmio = pgmR3PhysMMIOExFind(pVM, pDevIns, iSubDev, iRegion);
3374 AssertReturn(pFirstMmio, VERR_NOT_FOUND);
3375 Assert(pFirstMmio->fFlags & PGMREGMMIORANGE_F_FIRST_CHUNK);
3376
3377 PPGMREGMMIORANGE pLastMmio = pFirstMmio;
3378 RTGCPHYS cbRange = 0;
3379 for (;;)
3380 {
3381 AssertReturn(!(pLastMmio->fFlags & PGMREGMMIORANGE_F_MAPPED), VERR_WRONG_ORDER);
3382 Assert(pLastMmio->RamRange.GCPhys == NIL_RTGCPHYS);
3383 Assert(pLastMmio->RamRange.GCPhysLast == NIL_RTGCPHYS);
3384 Assert(pLastMmio->pDevInsR3 == pFirstMmio->pDevInsR3);
3385 Assert(pLastMmio->iSubDev == pFirstMmio->iSubDev);
3386 Assert(pLastMmio->iRegion == pFirstMmio->iRegion);
3387 cbRange += pLastMmio->RamRange.cb;
3388 if (pLastMmio->fFlags & PGMREGMMIORANGE_F_LAST_CHUNK)
3389 break;
3390 pLastMmio = pLastMmio->pNextR3;
3391 }
3392
3393 RTGCPHYS GCPhysLast = GCPhys + cbRange - 1;
3394 AssertLogRelReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
3395
3396 /*
3397 * Find our location in the ram range list, checking for restriction
3398 * we don't bother implementing yet (partially overlapping, multiple
3399 * ram ranges).
3400 */
3401 pgmLock(pVM);
3402
3403 AssertReturnStmt(!(pFirstMmio->fFlags & PGMREGMMIORANGE_F_MAPPED), pgmUnlock(pVM), VERR_WRONG_ORDER);
3404
3405 bool fRamExists = false;
3406 PPGMRAMRANGE pRamPrev = NULL;
3407 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3;
3408 while (pRam && GCPhysLast >= pRam->GCPhys)
3409 {
3410 if ( GCPhys <= pRam->GCPhysLast
3411 && GCPhysLast >= pRam->GCPhys)
3412 {
3413 /* Completely within? */
3414 AssertLogRelMsgReturnStmt( GCPhys >= pRam->GCPhys
3415 && GCPhysLast <= pRam->GCPhysLast,
3416 ("%RGp-%RGp (MMIOEx/%s) falls partly outside %RGp-%RGp (%s)\n",
3417 GCPhys, GCPhysLast, pFirstMmio->RamRange.pszDesc,
3418 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
3419 pgmUnlock(pVM),
3420 VERR_PGM_RAM_CONFLICT);
3421
3422 /* Check that all the pages are RAM pages. */
3423 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
3424 uint32_t cPagesLeft = cbRange >> PAGE_SHIFT;
3425 while (cPagesLeft-- > 0)
3426 {
3427 AssertLogRelMsgReturnStmt(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
3428 ("%RGp isn't a RAM page (%d) - mapping %RGp-%RGp (MMIO2/%s).\n",
3429 GCPhys, PGM_PAGE_GET_TYPE(pPage), GCPhys, GCPhysLast, pFirstMmio->RamRange.pszDesc),
3430 pgmUnlock(pVM),
3431 VERR_PGM_RAM_CONFLICT);
3432 pPage++;
3433 }
3434
3435 /* There can only be one MMIO/MMIO2 chunk matching here! */
3436 AssertLogRelMsgReturnStmt(pFirstMmio->fFlags & PGMREGMMIORANGE_F_LAST_CHUNK,
3437 ("%RGp-%RGp (MMIOEx/%s, flags %#X) consists of multiple chunks whereas the RAM somehow doesn't!\n",
3438 GCPhys, GCPhysLast, pFirstMmio->RamRange.pszDesc, pFirstMmio->fFlags),
3439 pgmUnlock(pVM),
3440 VERR_PGM_PHYS_MMIO_EX_IPE);
3441
3442 fRamExists = true;
3443 break;
3444 }
3445
3446 /* next */
3447 pRamPrev = pRam;
3448 pRam = pRam->pNextR3;
3449 }
3450 Log(("PGMR3PhysMMIOExMap: %RGp-%RGp fRamExists=%RTbool %s\n", GCPhys, GCPhysLast, fRamExists, pFirstMmio->RamRange.pszDesc));
3451
3452
3453 /*
3454 * Make the changes.
3455 */
3456 RTGCPHYS GCPhysCur = GCPhys;
3457 for (PPGMREGMMIORANGE pCurMmio = pFirstMmio; ; pCurMmio = pCurMmio->pNextR3)
3458 {
3459 pCurMmio->RamRange.GCPhys = GCPhysCur;
3460 pCurMmio->RamRange.GCPhysLast = GCPhysCur + pCurMmio->RamRange.cb - 1;
3461 if (pCurMmio->fFlags & PGMREGMMIORANGE_F_LAST_CHUNK)
3462 {
3463 Assert(pCurMmio->RamRange.GCPhysLast == GCPhysLast);
3464 break;
3465 }
3466 GCPhysCur += pCurMmio->RamRange.cb;
3467 }
3468
3469 if (fRamExists)
3470 {
3471 /*
3472 * Make all the pages in the range MMIO/ZERO pages, freeing any
3473 * RAM pages currently mapped here. This might not be 100% correct
3474 * for PCI memory, but we're doing the same thing for MMIO2 pages.
3475 *
3476 * We replace these MMIO/ZERO pages with real pages in the MMIO2 case.
3477 */
3478 Assert(pFirstMmio->fFlags & PGMREGMMIORANGE_F_LAST_CHUNK); /* Only one chunk */
3479
3480 int rc = pgmR3PhysFreePageRange(pVM, pRam, GCPhys, GCPhysLast, PGMPAGETYPE_MMIO);
3481 AssertRCReturnStmt(rc, pgmUnlock(pVM), rc);
3482
3483 if (pFirstMmio->fFlags & PGMREGMMIORANGE_F_MMIO2)
3484 {
3485 /* replace the pages, freeing all present RAM pages. */
3486 PPGMPAGE pPageSrc = &pFirstMmio->RamRange.aPages[0];
3487 PPGMPAGE pPageDst = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
3488 uint32_t cPagesLeft = pFirstMmio->RamRange.cb >> PAGE_SHIFT;
3489 while (cPagesLeft-- > 0)
3490 {
3491 Assert(PGM_PAGE_IS_MMIO(pPageDst));
3492
3493 RTHCPHYS const HCPhys = PGM_PAGE_GET_HCPHYS(pPageSrc);
3494 uint32_t const idPage = PGM_PAGE_GET_PAGEID(pPageSrc);
3495 PGM_PAGE_SET_PAGEID(pVM, pPageDst, idPage);
3496 PGM_PAGE_SET_HCPHYS(pVM, pPageDst, HCPhys);
3497 PGM_PAGE_SET_TYPE(pVM, pPageDst, PGMPAGETYPE_MMIO2);
3498 PGM_PAGE_SET_STATE(pVM, pPageDst, PGM_PAGE_STATE_ALLOCATED);
3499 PGM_PAGE_SET_PDE_TYPE(pVM, pPageDst, PGM_PAGE_PDE_TYPE_DONTCARE);
3500 PGM_PAGE_SET_PTE_INDEX(pVM, pPageDst, 0);
3501 PGM_PAGE_SET_TRACKING(pVM, pPageDst, 0);
3502 /* (We tell NEM at the end of the function.) */
3503
3504 pVM->pgm.s.cZeroPages--;
3505 GCPhys += PAGE_SIZE;
3506 pPageSrc++;
3507 pPageDst++;
3508 }
3509 }
3510
3511 /* Flush physical page map TLB. */
3512 pgmPhysInvalidatePageMapTLB(pVM);
3513
3514 /* Force a PGM pool flush as guest ram references have been changed. */
3515 /** @todo not entirely SMP safe; assuming for now the guest takes care of
3516 * this internally (not touching mapped MMIO while changing the mapping). */
3517 PVMCPU pVCpu = VMMGetCpu(pVM);
3518 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
3519 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3520 }
3521 else
3522 {
3523 /*
3524 * No RAM range, insert the ones prepared during registration.
3525 */
3526 for (PPGMREGMMIORANGE pCurMmio = pFirstMmio; ; pCurMmio = pCurMmio->pNextR3)
3527 {
3528 /* Clear the tracking data of pages we're going to reactivate. */
3529 PPGMPAGE pPageSrc = &pCurMmio->RamRange.aPages[0];
3530 uint32_t cPagesLeft = pCurMmio->RamRange.cb >> PAGE_SHIFT;
3531 while (cPagesLeft-- > 0)
3532 {
3533 PGM_PAGE_SET_TRACKING(pVM, pPageSrc, 0);
3534 PGM_PAGE_SET_PTE_INDEX(pVM, pPageSrc, 0);
3535 pPageSrc++;
3536 }
3537
3538 /* link in the ram range */
3539 pgmR3PhysLinkRamRange(pVM, &pCurMmio->RamRange, pRamPrev);
3540
3541 if (pCurMmio->fFlags & PGMREGMMIORANGE_F_LAST_CHUNK)
3542 {
3543 Assert(pCurMmio->RamRange.GCPhysLast == GCPhysLast);
3544 break;
3545 }
3546 pRamPrev = &pCurMmio->RamRange;
3547 }
3548 }
3549
3550 /*
3551 * Register the access handler if plain MMIO.
3552 *
3553 * We must register access handlers for each range since the access handler
3554 * code refuses to deal with multiple ranges (whereas we can have them).
3555 */
3556 if (!(pFirstMmio->fFlags & PGMREGMMIORANGE_F_MMIO2))
3557 {
3558 int rc = VINF_SUCCESS;
3559 for (PPGMREGMMIORANGE pCurMmio = pFirstMmio; ; pCurMmio = pCurMmio->pNextR3)
3560 {
3561 Assert(!(pCurMmio->fFlags & PGMREGMMIORANGE_F_MAPPED));
3562 rc = pgmHandlerPhysicalExRegister(pVM, pCurMmio->pPhysHandlerR3, pCurMmio->RamRange.GCPhys,
3563 pCurMmio->RamRange.GCPhysLast);
3564 if (RT_FAILURE(rc))
3565 break;
3566 pCurMmio->fFlags |= PGMREGMMIORANGE_F_MAPPED; /* Use this to mark that the handler is registered. */
3567 if (pCurMmio->fFlags & PGMREGMMIORANGE_F_LAST_CHUNK)
3568 {
3569 rc = IOMR3MmioExNotifyMapped(pVM, pFirstMmio->pPhysHandlerR3->pvUserR3, GCPhys);
3570 break;
3571 }
3572 }
3573 if (RT_FAILURE(rc))
3574 {
3575 /* Almost impossible, but try to clean up properly and get out of here. */
3576 for (PPGMREGMMIORANGE pCurMmio = pFirstMmio; ; pCurMmio = pCurMmio->pNextR3)
3577 {
3578 if (pCurMmio->fFlags & PGMREGMMIORANGE_F_MAPPED)
3579 {
3580 pCurMmio->fFlags &= ~PGMREGMMIORANGE_F_MAPPED;
3581 pgmHandlerPhysicalExDeregister(pVM, pCurMmio->pPhysHandlerR3, fRamExists);
3582 }
3583
3584 if (!fRamExists)
3585 pgmR3PhysUnlinkRamRange(pVM, &pCurMmio->RamRange);
3586 else
3587 {
3588 Assert(pCurMmio->fFlags & PGMREGMMIORANGE_F_LAST_CHUNK); /* Only one chunk */
3589
3590 uint32_t cPagesLeft = pCurMmio->RamRange.cb >> PAGE_SHIFT;
3591 PPGMPAGE pPageDst = &pRam->aPages[(pCurMmio->RamRange.GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
3592 while (cPagesLeft-- > 0)
3593 {
3594 PGM_PAGE_INIT_ZERO(pPageDst, pVM, PGMPAGETYPE_RAM);
3595 pPageDst++;
3596 }
3597 }
3598
3599 pCurMmio->RamRange.GCPhys = NIL_RTGCPHYS;
3600 pCurMmio->RamRange.GCPhysLast = NIL_RTGCPHYS;
3601 if (pCurMmio->fFlags & PGMREGMMIORANGE_F_LAST_CHUNK)
3602 break;
3603 }
3604
3605 pgmUnlock(pVM);
3606 return rc;
3607 }
3608 }
3609
3610 /*
3611 * We're good, set the flags and invalidate the mapping TLB.
3612 */
3613 for (PPGMREGMMIORANGE pCurMmio = pFirstMmio; ; pCurMmio = pCurMmio->pNextR3)
3614 {
3615 pCurMmio->fFlags |= PGMREGMMIORANGE_F_MAPPED;
3616 if (fRamExists)
3617 pCurMmio->fFlags |= PGMREGMMIORANGE_F_OVERLAPPING;
3618 else
3619 pCurMmio->fFlags &= ~PGMREGMMIORANGE_F_OVERLAPPING;
3620 if (pCurMmio->fFlags & PGMREGMMIORANGE_F_LAST_CHUNK)
3621 break;
3622 }
3623 pgmPhysInvalidatePageMapTLB(pVM);
3624
3625 /*
3626 * Notify NEM while holding the lock (experimental) and REM without (like always).
3627 */
3628 uint32_t const fNemNotify = (pFirstMmio->fFlags & PGMREGMMIORANGE_F_MMIO2 ? NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2 : 0)
3629 | (pFirstMmio->fFlags & PGMREGMMIORANGE_F_OVERLAPPING ? NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE : 0);
3630 int rc = NEMR3NotifyPhysMmioExMap(pVM, GCPhys, cbRange, fNemNotify, pFirstMmio->pvR3);
3631
3632 pgmUnlock(pVM);
3633
3634#ifdef VBOX_WITH_REM
3635 if (!fRamExists && (pFirstMmio->fFlags & PGMREGMMIORANGE_F_MMIO2)) /** @todo this doesn't look right. */
3636 REMR3NotifyPhysRamRegister(pVM, GCPhys, cbRange, REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2);
3637#endif
3638 return rc;
3639}
3640
3641
3642/**
3643 * Unmaps an MMIO2 or a pre-registered MMIO region.
3644 *
3645 * This is done when the guest, the BIOS or state loading changes the
3646 * PCI config. Replacing base memory has the same restrictions
3647 * as during registration, of course.
3648 */
3649VMMR3DECL(int) PGMR3PhysMMIOExUnmap(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev, uint32_t iRegion, RTGCPHYS GCPhys)
3650{
3651 /*
3652 * Validate input
3653 */
3654 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
3655 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
3656 AssertReturn(iSubDev <= UINT8_MAX, VERR_INVALID_PARAMETER);
3657 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
3658 AssertReturn(GCPhys != NIL_RTGCPHYS, VERR_INVALID_PARAMETER);
3659 AssertReturn(GCPhys != 0, VERR_INVALID_PARAMETER);
3660 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
3661
3662 PPGMREGMMIORANGE pFirstMmio = pgmR3PhysMMIOExFind(pVM, pDevIns, iSubDev, iRegion);
3663 AssertReturn(pFirstMmio, VERR_NOT_FOUND);
3664 Assert(pFirstMmio->fFlags & PGMREGMMIORANGE_F_FIRST_CHUNK);
3665
3666 PPGMREGMMIORANGE pLastMmio = pFirstMmio;
3667 RTGCPHYS cbRange = 0;
3668 for (;;)
3669 {
3670 AssertReturn(pLastMmio->fFlags & PGMREGMMIORANGE_F_MAPPED, VERR_WRONG_ORDER);
3671 AssertReturn(pLastMmio->RamRange.GCPhys == GCPhys + cbRange, VERR_INVALID_PARAMETER);
3672 Assert(pLastMmio->pDevInsR3 == pFirstMmio->pDevInsR3);
3673 Assert(pLastMmio->iSubDev == pFirstMmio->iSubDev);
3674 Assert(pLastMmio->iRegion == pFirstMmio->iRegion);
3675 cbRange += pLastMmio->RamRange.cb;
3676 if (pLastMmio->fFlags & PGMREGMMIORANGE_F_LAST_CHUNK)
3677 break;
3678 pLastMmio = pLastMmio->pNextR3;
3679 }
3680
3681 Log(("PGMR3PhysMMIOExUnmap: %RGp-%RGp %s\n",
3682 pFirstMmio->RamRange.GCPhys, pLastMmio->RamRange.GCPhysLast, pFirstMmio->RamRange.pszDesc));
3683
3684 int rc = pgmLock(pVM);
3685 AssertRCReturn(rc, rc);
3686 uint16_t const fOldFlags = pFirstMmio->fFlags;
3687 AssertReturnStmt(fOldFlags & PGMREGMMIORANGE_F_MAPPED, pgmUnlock(pVM), VERR_WRONG_ORDER);
3688
3689 /*
3690 * If plain MMIO, we must deregister the handlers first.
3691 */
3692 if (!(fOldFlags & PGMREGMMIORANGE_F_MMIO2))
3693 {
3694 PPGMREGMMIORANGE pCurMmio = pFirstMmio;
3695 rc = pgmHandlerPhysicalExDeregister(pVM, pFirstMmio->pPhysHandlerR3, RT_BOOL(fOldFlags & PGMREGMMIORANGE_F_OVERLAPPING));
3696 AssertRCReturnStmt(rc, pgmUnlock(pVM), rc);
3697 while (!(pCurMmio->fFlags & PGMREGMMIORANGE_F_LAST_CHUNK))
3698 {
3699 pCurMmio = pCurMmio->pNextR3;
3700 rc = pgmHandlerPhysicalExDeregister(pVM, pCurMmio->pPhysHandlerR3, RT_BOOL(fOldFlags & PGMREGMMIORANGE_F_OVERLAPPING));
3701 AssertRCReturnStmt(rc, pgmUnlock(pVM), VERR_PGM_PHYS_MMIO_EX_IPE);
3702 }
3703
3704 IOMR3MmioExNotifyUnmapped(pVM, pFirstMmio->pPhysHandlerR3->pvUserR3, GCPhys);
3705 }
3706
3707 /*
3708 * Unmap it.
3709 */
3710 RTGCPHYS const GCPhysRangeNotify = pFirstMmio->RamRange.GCPhys;
3711 if (fOldFlags & PGMREGMMIORANGE_F_OVERLAPPING)
3712 {
3713 /*
3714 * We've replaced RAM, replace with zero pages.
3715 *
3716 * Note! This is where we might differ a little from a real system, because
3717 * a real system is likely to just show the RAM pages as they were before the
3718 * MMIO/MMIO2 region was mapped here.
3719 */
3720 /* Only one chunk allowed when overlapping! */
3721 Assert(fOldFlags & PGMREGMMIORANGE_F_LAST_CHUNK);
3722
3723 /* Restore the RAM pages we've replaced. */
3724 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3;
3725 while (pRam->GCPhys > pFirstMmio->RamRange.GCPhysLast)
3726 pRam = pRam->pNextR3;
3727
3728 uint32_t cPagesLeft = pFirstMmio->RamRange.cb >> PAGE_SHIFT;
3729 if (fOldFlags & PGMREGMMIORANGE_F_MMIO2)
3730 pVM->pgm.s.cZeroPages += cPagesLeft;
3731
3732 PPGMPAGE pPageDst = &pRam->aPages[(pFirstMmio->RamRange.GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
3733 while (cPagesLeft-- > 0)
3734 {
3735 PGM_PAGE_INIT_ZERO(pPageDst, pVM, PGMPAGETYPE_RAM);
3736 pPageDst++;
3737 }
3738
3739 /* Flush physical page map TLB. */
3740 pgmPhysInvalidatePageMapTLB(pVM);
3741
3742 /* Update range state. */
3743 pFirstMmio->RamRange.GCPhys = NIL_RTGCPHYS;
3744 pFirstMmio->RamRange.GCPhysLast = NIL_RTGCPHYS;
3745 pFirstMmio->fFlags &= ~(PGMREGMMIORANGE_F_OVERLAPPING | PGMREGMMIORANGE_F_MAPPED);
3746 }
3747 else
3748 {
3749 /*
3750 * Unlink the chunks related to the MMIO/MMIO2 region.
3751 */
3752 for (PPGMREGMMIORANGE pCurMmio = pFirstMmio; ; pCurMmio = pCurMmio->pNextR3)
3753 {
3754 pgmR3PhysUnlinkRamRange(pVM, &pCurMmio->RamRange);
3755 pCurMmio->RamRange.GCPhys = NIL_RTGCPHYS;
3756 pCurMmio->RamRange.GCPhysLast = NIL_RTGCPHYS;
3757 pCurMmio->fFlags &= ~(PGMREGMMIORANGE_F_OVERLAPPING | PGMREGMMIORANGE_F_MAPPED);
3758 if (pCurMmio->fFlags & PGMREGMMIORANGE_F_LAST_CHUNK)
3759 break;
3760 }
3761 }
3762
3763 /* Force a PGM pool flush as guest ram references have been changed. */
3764 /** @todo not entirely SMP safe; assuming for now the guest takes care
3765 * of this internally (not touching mapped MMIO while changing the
3766 * mapping). */
3767 PVMCPU pVCpu = VMMGetCpu(pVM);
3768 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
3769 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3770
3771 pgmPhysInvalidatePageMapTLB(pVM);
3772 pgmPhysInvalidRamRangeTlbs(pVM);
3773
3774 /*
3775 * Notify NEM while holding the lock (experimental) and REM without (like always).
3776 */
3777 uint32_t const fNemFlags = (fOldFlags & PGMREGMMIORANGE_F_MMIO2 ? NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2 : 0)
3778 | (fOldFlags & PGMREGMMIORANGE_F_OVERLAPPING ? NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE : 0);
3779 rc = NEMR3NotifyPhysMmioExUnmap(pVM, GCPhysRangeNotify, cbRange, fNemFlags);
3780 pgmUnlock(pVM);
3781#ifdef VBOX_WITH_REM
3782 if ((fOldFlags & (PGMREGMMIORANGE_F_OVERLAPPING | PGMREGMMIORANGE_F_MMIO2)) == PGMREGMMIORANGE_F_MMIO2)
3783 REMR3NotifyPhysRamDeregister(pVM, GCPhysRangeNotify, cbRange);
3784#endif
3785 return rc;
3786}
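//
// A minimal usage sketch (illustrative only, not taken from any real device code): a
// PCI device whose BAR gets relocated would unmap the old guest-physical address with
// PGMR3PhysMMIOExUnmap before mapping the region at the new one.  The wrapper function,
// the sub-device/region indexes and GCPhysOld are assumptions invented for the example;
// real devices normally go through the PDM device helpers rather than calling PGM directly.
//
//     static int exampleUnmapOldBar(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhysOld)
//     {
//         /* Region 0 of sub-device 0 was previously mapped at GCPhysOld. */
//         int rc = PGMR3PhysMMIOExUnmap(pVM, pDevIns, 0 /*iSubDev*/, 0 /*iRegion*/, GCPhysOld);
//         AssertLogRelRC(rc);
//         return rc;
//     }
//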
3787
3788
3789/**
3790 * Reduces the mapping size of an MMIO2 or pre-registered MMIO region.
3791 *
3792 * This is mainly for dealing with old saved states after changing the default
3793 * size of a mapping region. See PGMDevHlpMMIOExReduce and
3794 * PDMPCIDEV::pfnRegionLoadChangeHookR3.
3795 *
3796 * The region must not currently be mapped when making this call. The VM state
3797 * must be saved-state restore or VM construction.
3798 *
3799 * @returns VBox status code.
3800 * @param pVM The cross context VM structure.
3801 * @param pDevIns The device instance owning the region.
3802 * @param iSubDev The sub-device number of the registered region.
3803 * @param iRegion The index of the registered region.
3804 * @param cbRegion The new mapping size.
3805 */
3806VMMR3_INT_DECL(int) PGMR3PhysMMIOExReduce(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev, uint32_t iRegion, RTGCPHYS cbRegion)
3807{
3808 /*
3809 * Validate input
3810 */
3811 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
3812 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
3813 AssertReturn(iSubDev <= UINT8_MAX, VERR_INVALID_PARAMETER);
3814 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
3815 AssertReturn(cbRegion >= X86_PAGE_SIZE, VERR_INVALID_PARAMETER);
3816 AssertReturn(!(cbRegion & X86_PAGE_OFFSET_MASK), VERR_UNSUPPORTED_ALIGNMENT);
3817 VMSTATE enmVmState = VMR3GetState(pVM);
3818 AssertLogRelMsgReturn( enmVmState == VMSTATE_CREATING
3819 || enmVmState == VMSTATE_LOADING,
3820 ("enmVmState=%d (%s)\n", enmVmState, VMR3GetStateName(enmVmState)),
3821 VERR_VM_INVALID_VM_STATE);
3822
3823 int rc = pgmLock(pVM);
3824 AssertRCReturn(rc, rc);
3825
3826 PPGMREGMMIORANGE pFirstMmio = pgmR3PhysMMIOExFind(pVM, pDevIns, iSubDev, iRegion);
3827 if (pFirstMmio)
3828 {
3829 Assert(pFirstMmio->fFlags & PGMREGMMIORANGE_F_FIRST_CHUNK);
3830 if (!(pFirstMmio->fFlags & PGMREGMMIORANGE_F_MAPPED))
3831 {
3832 /*
3833 * NOTE! Current implementation does not support multiple ranges.
3834 * Implement when there is a real world need and thus a testcase.
3835 */
3836 AssertLogRelMsgStmt(pFirstMmio->fFlags & PGMREGMMIORANGE_F_LAST_CHUNK,
3837 ("%s: %#x\n", pFirstMmio->RamRange.pszDesc, pFirstMmio->fFlags),
3838 rc = VERR_NOT_SUPPORTED);
3839 if (RT_SUCCESS(rc))
3840 {
3841 /*
3842 * Make the change.
3843 */
3844 Log(("PGMR3PhysMMIOExReduce: %s changes from %RGp bytes (%RGp) to %RGp bytes.\n",
3845 pFirstMmio->RamRange.pszDesc, pFirstMmio->RamRange.cb, pFirstMmio->cbReal, cbRegion));
3846
3847 AssertLogRelMsgStmt(cbRegion <= pFirstMmio->cbReal,
3848 ("%s: cbRegion=%#RGp cbReal=%#RGp\n", pFirstMmio->RamRange.pszDesc, cbRegion, pFirstMmio->cbReal),
3849 rc = VERR_OUT_OF_RANGE);
3850 if (RT_SUCCESS(rc))
3851 {
3852 pFirstMmio->RamRange.cb = cbRegion;
3853 }
3854 }
3855 }
3856 else
3857 rc = VERR_WRONG_ORDER;
3858 }
3859 else
3860 rc = VERR_NOT_FOUND;
3861
3862 pgmUnlock(pVM);
3863 return rc;
3864}
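//
// A minimal usage sketch (illustrative assumptions only): a device's
// PDMPCIDEV::pfnRegionLoadChangeHookR3 callback could shrink a region registered
// with a larger default size before an old saved state is applied.  The 2 MB size
// and the sub-device/region indexes are invented for the example; the call must be
// made while the VM state is 'creating' or 'loading' and the region is still
// unmapped, as validated above.
//
//     int rc = PGMR3PhysMMIOExReduce(pVM, pDevIns, 0 /*iSubDev*/, 2 /*iRegion*/, _2M /*cbRegion*/);
//     AssertLogRelRCReturn(rc, rc);
//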
3865
3866
3867/**
3868 * Checks whether the given address is the base address of an MMIO2 or
3869 * pre-registered MMIO region.
3870 *
3871 * @returns true/false accordingly.
3872 * @param pVM The cross context VM structure.
3873 * @param pDevIns The owner of the memory, optional.
3874 * @param GCPhys The address to check.
3875 */
3876VMMR3DECL(bool) PGMR3PhysMMIOExIsBase(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys)
3877{
3878 /*
3879 * Validate input
3880 */
3881 VM_ASSERT_EMT_RETURN(pVM, false);
3882 AssertPtrReturn(pDevIns, false);
3883 AssertReturn(GCPhys != NIL_RTGCPHYS, false);
3884 AssertReturn(GCPhys != 0, false);
3885 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), false);
3886
3887 /*
3888 * Search the list.
3889 */
3890 pgmLock(pVM);
3891 for (PPGMREGMMIORANGE pCurMmio = pVM->pgm.s.pRegMmioRangesR3; pCurMmio; pCurMmio = pCurMmio->pNextR3)
3892 if (pCurMmio->RamRange.GCPhys == GCPhys)
3893 {
3894 Assert(pCurMmio->fFlags & PGMREGMMIORANGE_F_MAPPED);
3895 bool fRet = RT_BOOL(pCurMmio->fFlags & PGMREGMMIORANGE_F_FIRST_CHUNK);
3896 pgmUnlock(pVM);
3897 return fRet;
3898 }
3899 pgmUnlock(pVM);
3900 return false;
3901}
3902
3903
3904/**
3905 * Gets the HC physical address of a page in the MMIO2 region.
3906 *
3907 * This API is intended for MMHyper and shouldn't be called
3908 * by anyone else...
3909 *
3910 * @returns VBox status code.
3911 * @param pVM The cross context VM structure.
3912 * @param pDevIns The owner of the memory, optional.
3913 * @param iSubDev Sub-device number.
3914 * @param iRegion The region.
3915 * @param off The page expressed as an offset into the MMIO2 region.
3916 * @param pHCPhys Where to store the result.
3917 */
3918VMMR3_INT_DECL(int) PGMR3PhysMMIO2GetHCPhys(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev, uint32_t iRegion,
3919 RTGCPHYS off, PRTHCPHYS pHCPhys)
3920{
3921 /*
3922 * Validate input
3923 */
3924 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
3925 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
3926 AssertReturn(iSubDev <= UINT8_MAX, VERR_INVALID_PARAMETER);
3927 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
3928
3929 pgmLock(pVM);
3930 PPGMREGMMIORANGE pCurMmio = pgmR3PhysMMIOExFind(pVM, pDevIns, iSubDev, iRegion);
3931 AssertReturn(pCurMmio, VERR_NOT_FOUND);
3932 AssertReturn(pCurMmio->fFlags & (PGMREGMMIORANGE_F_MMIO2 | PGMREGMMIORANGE_F_FIRST_CHUNK), VERR_WRONG_TYPE);
3933
3934 while ( off >= pCurMmio->RamRange.cb
3935 && !(pCurMmio->fFlags & PGMREGMMIORANGE_F_LAST_CHUNK))
3936 {
3937 off -= pCurMmio->RamRange.cb;
3938 pCurMmio = pCurMmio->pNextR3;
3939 }
3940 AssertReturn(off < pCurMmio->RamRange.cb, VERR_INVALID_PARAMETER);
3941
3942 PCPGMPAGE pPage = &pCurMmio->RamRange.aPages[off >> PAGE_SHIFT];
3943 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage);
3944 pgmUnlock(pVM);
3945 return VINF_SUCCESS;
3946}
3947
3948
3949/**
3950 * Maps a portion of an MMIO2 region into kernel space (host).
3951 *
3952 * The kernel mapping will become invalid when the MMIO2 memory is deregistered
3953 * or the VM is terminated.
3954 *
3955 * @return VBox status code.
3956 *
3957 * @param pVM The cross context VM structure.
3958 * @param pDevIns The device owning the MMIO2 memory.
3959 * @param iSubDev The sub-device number.
3960 * @param iRegion The region.
3961 * @param off The offset into the region. Must be page aligned.
3962 * @param cb The number of bytes to map. Must be page aligned.
3963 * @param pszDesc Mapping description.
3964 * @param pR0Ptr Where to store the R0 address.
3965 */
3966VMMR3_INT_DECL(int) PGMR3PhysMMIO2MapKernel(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev, uint32_t iRegion,
3967 RTGCPHYS off, RTGCPHYS cb, const char *pszDesc, PRTR0PTR pR0Ptr)
3968{
3969 /*
3970 * Validate input.
3971 */
3972 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
3973 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
3974 AssertReturn(iSubDev <= UINT8_MAX, VERR_INVALID_PARAMETER);
3975 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
3976
3977 PPGMREGMMIORANGE pFirstRegMmio = pgmR3PhysMMIOExFind(pVM, pDevIns, iSubDev, iRegion);
3978 AssertReturn(pFirstRegMmio, VERR_NOT_FOUND);
3979 AssertReturn(pFirstRegMmio->fFlags & (PGMREGMMIORANGE_F_MMIO2 | PGMREGMMIORANGE_F_FIRST_CHUNK), VERR_WRONG_TYPE);
3980 AssertReturn(off < pFirstRegMmio->RamRange.cb, VERR_INVALID_PARAMETER);
3981 AssertReturn(cb <= pFirstRegMmio->RamRange.cb, VERR_INVALID_PARAMETER);
3982 AssertReturn(off + cb <= pFirstRegMmio->RamRange.cb, VERR_INVALID_PARAMETER);
3983 NOREF(pszDesc);
3984
3985 /*
3986 * Pass the request on to the support library/driver.
3987 */
3988#if defined(RT_OS_WINDOWS) || defined(RT_OS_LINUX) || defined(RT_OS_OS2) /** @todo Fully implement RTR0MemObjMapKernelEx everywhere. */
3989 AssertLogRelReturn(off == 0, VERR_NOT_SUPPORTED);
3990 AssertLogRelReturn(pFirstRegMmio->fFlags & PGMREGMMIORANGE_F_LAST_CHUNK, VERR_NOT_SUPPORTED);
3991 int rc = SUPR3PageMapKernel(pFirstRegMmio->pvR3, 0 /*off*/, pFirstRegMmio->RamRange.cb, 0 /*fFlags*/, pR0Ptr);
3992#else
3993 int rc = SUPR3PageMapKernel(pFirstRegMmio->pvR3, off, cb, 0 /*fFlags*/, pR0Ptr);
3994#endif
3995
3996 return rc;
3997}
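//
// A minimal usage sketch (illustrative only): mapping the start of an MMIO2 region
// into ring-0 so the R0 part of a device can access it directly.  The device
// instance, region indexes and description string are assumptions; note that on the
// platforms listed in the #if above only off == 0 with a single-chunk region is
// accepted and the whole region is mapped.
//
//     RTR0PTR R0PtrMmio2 = NIL_RTR0PTR;
//     int rc = PGMR3PhysMMIO2MapKernel(pVM, pDevIns, 0 /*iSubDev*/, 0 /*iRegion*/,
//                                      0 /*off*/, PAGE_SIZE /*cb*/, "example-r0", &R0PtrMmio2);
//     if (RT_SUCCESS(rc))
//     {
//         /* R0PtrMmio2 remains valid until the MMIO2 region is deregistered or the VM terminates. */
//     }
//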
3998
3999
4000/**
4001 * Changes the region number of an MMIO2 or pre-registered MMIO region.
4002 *
4003 * This is only for dealing with saved state issues, nothing else.
4004 *
4005 * @return VBox status code.
4006 *
4007 * @param pVM The cross context VM structure.
4008 * @param pDevIns The device owning the MMIO2 memory.
4009 * @param iSubDev The sub-device number.
4010 * @param iRegion The region.
4011 * @param iNewRegion The new region index.
4012 *
4013 * @sa @bugref{9359}
4014 */
4015VMMR3_INT_DECL(int) PGMR3PhysMMIOExChangeRegionNo(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev, uint32_t iRegion,
4016 uint32_t iNewRegion)
4017{
4018 /*
4019 * Validate input.
4020 */
4021 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
4022 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
4023 AssertReturn(iSubDev <= UINT8_MAX, VERR_INVALID_PARAMETER);
4024 AssertReturn(iRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
4025 AssertReturn(iNewRegion <= UINT8_MAX, VERR_INVALID_PARAMETER);
4026
4027 AssertReturn(pVM->enmVMState == VMSTATE_LOADING, VERR_INVALID_STATE);
4028
4029 PPGMREGMMIORANGE pFirstRegMmio = pgmR3PhysMMIOExFind(pVM, pDevIns, iSubDev, iRegion);
4030 AssertReturn(pFirstRegMmio, VERR_NOT_FOUND);
4031 AssertReturn(pgmR3PhysMMIOExFind(pVM, pDevIns, iSubDev, iNewRegion) == NULL, VERR_RESOURCE_IN_USE);
4032
4033 /*
4034 * Make the change.
4035 */
4036 pFirstRegMmio->iRegion = (uint8_t)iNewRegion;
4037
4038 return VINF_SUCCESS;
4039}
4040
4041
4042/**
4043 * Worker for PGMR3PhysRomRegister.
4044 *
4045 * This is here to simplify lock management, i.e. the caller does all the
4046 * locking and we can simply return without needing to remember to unlock
4047 * anything first.
4048 *
4049 * @returns VBox status code.
4050 * @param pVM The cross context VM structure.
4051 * @param pDevIns The device instance owning the ROM.
4052 * @param GCPhys First physical address in the range.
4053 * Must be page aligned!
4054 * @param cb The size of the range (in bytes).
4055 * Must be page aligned!
4056 * @param pvBinary Pointer to the binary data backing the ROM image.
4057 * @param cbBinary The size of the binary data pvBinary points to.
4058 * This must be less or equal to @a cb.
4059 * @param fFlags Mask of flags. PGMPHYS_ROM_FLAGS_SHADOWED
4060 * and/or PGMPHYS_ROM_FLAGS_PERMANENT_BINARY.
4061 * @param pszDesc Pointer to description string. This must not be freed.
4062 */
4063static int pgmR3PhysRomRegisterLocked(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTGCPHYS cb,
4064 const void *pvBinary, uint32_t cbBinary, uint32_t fFlags, const char *pszDesc)
4065{
4066 /*
4067 * Validate input.
4068 */
4069 AssertPtrReturn(pDevIns, VERR_INVALID_PARAMETER);
4070 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
4071 AssertReturn(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
4072 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
4073 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
4074 AssertPtrReturn(pvBinary, VERR_INVALID_PARAMETER);
4075 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
4076 AssertReturn(!(fFlags & ~(PGMPHYS_ROM_FLAGS_SHADOWED | PGMPHYS_ROM_FLAGS_PERMANENT_BINARY)), VERR_INVALID_PARAMETER);
4077 VM_ASSERT_STATE_RETURN(pVM, VMSTATE_CREATING, VERR_VM_INVALID_VM_STATE);
4078
4079 const uint32_t cPages = cb >> PAGE_SHIFT;
4080
4081 /*
4082 * Find the ROM location in the ROM list first.
4083 */
4084 PPGMROMRANGE pRomPrev = NULL;
4085 PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3;
4086 while (pRom && GCPhysLast >= pRom->GCPhys)
4087 {
4088 if ( GCPhys <= pRom->GCPhysLast
4089 && GCPhysLast >= pRom->GCPhys)
4090 AssertLogRelMsgFailedReturn(("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
4091 GCPhys, GCPhysLast, pszDesc,
4092 pRom->GCPhys, pRom->GCPhysLast, pRom->pszDesc),
4093 VERR_PGM_RAM_CONFLICT);
4094 /* next */
4095 pRomPrev = pRom;
4096 pRom = pRom->pNextR3;
4097 }
4098
4099 /*
4100 * Find the RAM location and check for conflicts.
4101 *
4102 * Conflict detection is a bit different from that for RAM
4103 * registration since a ROM can be located within a RAM
4104 * range. So, what we have to check for is other memory
4105 * types (other than RAM, that is) and that we don't span
4106 * more than one RAM range (lazy).
4107 */
4108 bool fRamExists = false;
4109 PPGMRAMRANGE pRamPrev = NULL;
4110 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3;
4111 while (pRam && GCPhysLast >= pRam->GCPhys)
4112 {
4113 if ( GCPhys <= pRam->GCPhysLast
4114 && GCPhysLast >= pRam->GCPhys)
4115 {
4116 /* completely within? */
4117 AssertLogRelMsgReturn( GCPhys >= pRam->GCPhys
4118 && GCPhysLast <= pRam->GCPhysLast,
4119 ("%RGp-%RGp (%s) falls partly outside %RGp-%RGp (%s)\n",
4120 GCPhys, GCPhysLast, pszDesc,
4121 pRam->GCPhys, pRam->GCPhysLast, pRam->pszDesc),
4122 VERR_PGM_RAM_CONFLICT);
4123 fRamExists = true;
4124 break;
4125 }
4126
4127 /* next */
4128 pRamPrev = pRam;
4129 pRam = pRam->pNextR3;
4130 }
4131 if (fRamExists)
4132 {
4133 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
4134 uint32_t cPagesLeft = cPages;
4135 while (cPagesLeft-- > 0)
4136 {
4137 AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM,
4138 ("%RGp (%R[pgmpage]) isn't a RAM page - registering %RGp-%RGp (%s).\n",
4139 pRam->GCPhys + ((RTGCPHYS)(uintptr_t)(pPage - &pRam->aPages[0]) << PAGE_SHIFT),
4140 pPage, GCPhys, GCPhysLast, pszDesc), VERR_PGM_RAM_CONFLICT);
4141 Assert(PGM_PAGE_IS_ZERO(pPage));
4142 pPage++;
4143 }
4144 }
4145
4146 /*
4147 * Update the base memory reservation if necessary.
4148 */
4149 uint32_t cExtraBaseCost = fRamExists ? 0 : cPages;
4150 if (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
4151 cExtraBaseCost += cPages;
4152 if (cExtraBaseCost)
4153 {
4154 int rc = MMR3IncreaseBaseReservation(pVM, cExtraBaseCost);
4155 if (RT_FAILURE(rc))
4156 return rc;
4157 }
4158
4159 /*
4160 * Allocate memory for the virgin copy of the RAM.
4161 */
4162 PGMMALLOCATEPAGESREQ pReq;
4163 int rc = GMMR3AllocatePagesPrepare(pVM, &pReq, cPages, GMMACCOUNT_BASE);
4164 AssertRCReturn(rc, rc);
4165
4166 for (uint32_t iPage = 0; iPage < cPages; iPage++)
4167 {
4168 pReq->aPages[iPage].HCPhysGCPhys = GCPhys + (iPage << PAGE_SHIFT);
4169 pReq->aPages[iPage].idPage = NIL_GMM_PAGEID;
4170 pReq->aPages[iPage].idSharedPage = NIL_GMM_PAGEID;
4171 }
4172
4173 rc = GMMR3AllocatePagesPerform(pVM, pReq);
4174 if (RT_FAILURE(rc))
4175 {
4176 GMMR3AllocatePagesCleanup(pReq);
4177 return rc;
4178 }
4179
4180 /*
4181 * Allocate the new ROM range and RAM range (if necessary).
4182 */
4183 PPGMROMRANGE pRomNew;
4184 rc = MMHyperAlloc(pVM, RT_UOFFSETOF_DYN(PGMROMRANGE, aPages[cPages]), 0, MM_TAG_PGM_PHYS, (void **)&pRomNew);
4185 if (RT_SUCCESS(rc))
4186 {
4187 PPGMRAMRANGE pRamNew = NULL;
4188 if (!fRamExists)
4189 rc = MMHyperAlloc(pVM, RT_UOFFSETOF_DYN(PGMRAMRANGE, aPages[cPages]), sizeof(PGMPAGE), MM_TAG_PGM_PHYS, (void **)&pRamNew);
4190 if (RT_SUCCESS(rc))
4191 {
4192 /*
4193 * Initialize and insert the RAM range (if required).
4194 */
4195 PPGMROMPAGE pRomPage = &pRomNew->aPages[0];
4196 if (!fRamExists)
4197 {
4198 pRamNew->pSelfR0 = MMHyperCCToR0(pVM, pRamNew);
4199 pRamNew->GCPhys = GCPhys;
4200 pRamNew->GCPhysLast = GCPhysLast;
4201 pRamNew->cb = cb;
4202 pRamNew->pszDesc = pszDesc;
4203 pRamNew->fFlags = PGM_RAM_RANGE_FLAGS_AD_HOC_ROM;
4204 pRamNew->pvR3 = NULL;
4205 pRamNew->paLSPages = NULL;
4206
4207 PPGMPAGE pPage = &pRamNew->aPages[0];
4208 for (uint32_t iPage = 0; iPage < cPages; iPage++, pPage++, pRomPage++)
4209 {
4210 PGM_PAGE_INIT(pPage,
4211 pReq->aPages[iPage].HCPhysGCPhys,
4212 pReq->aPages[iPage].idPage,
4213 PGMPAGETYPE_ROM,
4214 PGM_PAGE_STATE_ALLOCATED);
4215
4216 pRomPage->Virgin = *pPage;
4217 }
4218
4219 pVM->pgm.s.cAllPages += cPages;
4220 pgmR3PhysLinkRamRange(pVM, pRamNew, pRamPrev);
4221 }
4222 else
4223 {
4224 PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT];
4225 for (uint32_t iPage = 0; iPage < cPages; iPage++, pPage++, pRomPage++)
4226 {
4227 PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_ROM);
4228 PGM_PAGE_SET_HCPHYS(pVM, pPage, pReq->aPages[iPage].HCPhysGCPhys);
4229 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
4230 PGM_PAGE_SET_PAGEID(pVM, pPage, pReq->aPages[iPage].idPage);
4231 PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_DONTCARE);
4232 PGM_PAGE_SET_PTE_INDEX(pVM, pPage, 0);
4233 PGM_PAGE_SET_TRACKING(pVM, pPage, 0);
4234
4235 pRomPage->Virgin = *pPage;
4236 }
4237
4238 pRamNew = pRam;
4239
4240 pVM->pgm.s.cZeroPages -= cPages;
4241 }
4242 pVM->pgm.s.cPrivatePages += cPages;
4243
4244 /* Flush physical page map TLB. */
4245 pgmPhysInvalidatePageMapTLB(pVM);
4246
4247
4248 /* Notify NEM before we register handlers. */
4249 uint32_t const fNemNotify = (fRamExists ? NEM_NOTIFY_PHYS_ROM_F_REPLACE : 0)
4250 | (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED ? NEM_NOTIFY_PHYS_ROM_F_SHADOW : 0);
4251 rc = NEMR3NotifyPhysRomRegisterEarly(pVM, GCPhys, cb, fNemNotify);
4252
4253 /*
4254 * !HACK ALERT! REM + (Shadowed) ROM ==> mess.
4255 *
4256 * If it's shadowed we'll register the handler after the ROM notification
4257 * so we get the access handler callbacks that we should. If it isn't
4258 * shadowed we'll do it the other way around to make REM use the built-in
4259 * ROM behavior and not the handler behavior (which is to route all access
4260 * to PGM atm).
4261 */
4262 if (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
4263 {
4264#ifdef VBOX_WITH_REM
4265 REMR3NotifyPhysRomRegister(pVM, GCPhys, cb, NULL, true /* fShadowed */);
4266#endif
4267 if (RT_SUCCESS(rc))
4268 rc = PGMHandlerPhysicalRegister(pVM, GCPhys, GCPhysLast, pVM->pgm.s.hRomPhysHandlerType,
4269 pRomNew, MMHyperCCToR0(pVM, pRomNew), MMHyperCCToRC(pVM, pRomNew),
4270 pszDesc);
4271 }
4272 else
4273 {
4274 if (RT_SUCCESS(rc))
4275 rc = PGMHandlerPhysicalRegister(pVM, GCPhys, GCPhysLast, pVM->pgm.s.hRomPhysHandlerType,
4276 pRomNew, MMHyperCCToR0(pVM, pRomNew), MMHyperCCToRC(pVM, pRomNew),
4277 pszDesc);
4278#ifdef VBOX_WITH_REM
4279 REMR3NotifyPhysRomRegister(pVM, GCPhys, cb, NULL, false /* fShadowed */);
4280#endif
4281 }
4282 if (RT_SUCCESS(rc))
4283 {
4284 /*
4285 * Copy the image over to the virgin pages.
4286 * This must be done after linking in the RAM range.
4287 */
4288 size_t cbBinaryLeft = cbBinary;
4289 PPGMPAGE pRamPage = &pRamNew->aPages[(GCPhys - pRamNew->GCPhys) >> PAGE_SHIFT];
4290 for (uint32_t iPage = 0; iPage < cPages; iPage++, pRamPage++)
4291 {
4292 void *pvDstPage;
4293 rc = pgmPhysPageMap(pVM, pRamPage, GCPhys + (iPage << PAGE_SHIFT), &pvDstPage);
4294 if (RT_FAILURE(rc))
4295 {
4296 VMSetError(pVM, rc, RT_SRC_POS, "Failed to map virgin ROM page at %RGp", GCPhys);
4297 break;
4298 }
4299 if (cbBinaryLeft >= PAGE_SIZE)
4300 {
4301 memcpy(pvDstPage, (uint8_t const *)pvBinary + ((size_t)iPage << PAGE_SHIFT), PAGE_SIZE);
4302 cbBinaryLeft -= PAGE_SIZE;
4303 }
4304 else
4305 {
4306 ASMMemZeroPage(pvDstPage); /* (shouldn't be necessary, but can't hurt either) */
4307 if (cbBinaryLeft > 0)
4308 {
4309 memcpy(pvDstPage, (uint8_t const *)pvBinary + ((size_t)iPage << PAGE_SHIFT), cbBinaryLeft);
4310 cbBinaryLeft = 0;
4311 }
4312 }
4313 }
4314 if (RT_SUCCESS(rc))
4315 {
4316 /*
4317 * Initialize the ROM range.
4318 * Note that the Virgin member of the pages has already been initialized above.
4319 */
4320 pRomNew->GCPhys = GCPhys;
4321 pRomNew->GCPhysLast = GCPhysLast;
4322 pRomNew->cb = cb;
4323 pRomNew->fFlags = fFlags;
4324 pRomNew->idSavedState = UINT8_MAX;
4325 pRomNew->cbOriginal = cbBinary;
4326 pRomNew->pszDesc = pszDesc;
4327 pRomNew->pvOriginal = fFlags & PGMPHYS_ROM_FLAGS_PERMANENT_BINARY
4328 ? pvBinary : RTMemDup(pvBinary, cbBinary);
4329 if (pRomNew->pvOriginal)
4330 {
4331 for (unsigned iPage = 0; iPage < cPages; iPage++)
4332 {
4333 PPGMROMPAGE pPage = &pRomNew->aPages[iPage];
4334 pPage->enmProt = PGMROMPROT_READ_ROM_WRITE_IGNORE;
4335 PGM_PAGE_INIT_ZERO(&pPage->Shadow, pVM, PGMPAGETYPE_ROM_SHADOW);
4336 }
4337
4338 /* update the page count stats for the shadow pages. */
4339 if (fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
4340 {
4341 pVM->pgm.s.cZeroPages += cPages;
4342 pVM->pgm.s.cAllPages += cPages;
4343 }
4344
4345 /*
4346 * Insert the ROM range, tell REM and return successfully.
4347 */
4348 pRomNew->pNextR3 = pRom;
4349 pRomNew->pNextR0 = pRom ? MMHyperCCToR0(pVM, pRom) : NIL_RTR0PTR;
4350
4351 if (pRomPrev)
4352 {
4353 pRomPrev->pNextR3 = pRomNew;
4354 pRomPrev->pNextR0 = MMHyperCCToR0(pVM, pRomNew);
4355 }
4356 else
4357 {
4358 pVM->pgm.s.pRomRangesR3 = pRomNew;
4359 pVM->pgm.s.pRomRangesR0 = MMHyperCCToR0(pVM, pRomNew);
4360 }
4361
4362 pgmPhysInvalidatePageMapTLB(pVM);
4363 GMMR3AllocatePagesCleanup(pReq);
4364
4365 /* Notify NEM again. */
4366 return NEMR3NotifyPhysRomRegisterLate(pVM, GCPhys, cb, fNemNotify);
4367 }
4368
4369 /* bail out */
4370 rc = VERR_NO_MEMORY;
4371 }
4372
4373 int rc2 = PGMHandlerPhysicalDeregister(pVM, GCPhys);
4374 AssertRC(rc2);
4375 }
4376
4377 if (!fRamExists)
4378 {
4379 pgmR3PhysUnlinkRamRange2(pVM, pRamNew, pRamPrev);
4380 MMHyperFree(pVM, pRamNew);
4381 }
4382 }
4383 MMHyperFree(pVM, pRomNew);
4384 }
4385
4386 /** @todo Purge the mapping cache or something... */
4387 GMMR3FreeAllocatedPages(pVM, pReq);
4388 GMMR3AllocatePagesCleanup(pReq);
4389 return rc;
4390}
4391
4392
4393/**
4394 * Registers a ROM image.
4395 *
4396 * Shadowed ROM images require double the amount of backing memory, so
4397 * don't use that unless you have to. Shadowing of ROM images is a process
4398 * where we can select where the reads go and where the writes go. On real
4399 * hardware the chipset provides means to configure this. We provide
4400 * PGMR3PhysRomProtect() for this purpose.
4401 *
4402 * A read-only copy of the ROM image will always be kept around while we
4403 * will allocate RAM pages for the changes on demand (unless all memory
4404 * is configured to be preallocated).
4405 *
4406 * @returns VBox status code.
4407 * @param pVM The cross context VM structure.
4408 * @param pDevIns The device instance owning the ROM.
4409 * @param GCPhys First physical address in the range.
4410 * Must be page aligned!
4411 * @param cb The size of the range (in bytes).
4412 * Must be page aligned!
4413 * @param pvBinary Pointer to the binary data backing the ROM image.
4414 * @param cbBinary The size of the binary data pvBinary points to.
4415 * This must be less or equal to @a cb.
4416 * @param fFlags Mask of flags. PGMPHYS_ROM_FLAGS_SHADOWED
4417 * and/or PGMPHYS_ROM_FLAGS_PERMANENT_BINARY.
4418 * @param pszDesc Pointer to description string. This must not be freed.
4419 *
4420 * @remark There is no way to remove the ROM yet, either automatically on device
4421 * cleanup or manually from the device. This isn't difficult in any way, it's
4422 * just not something we expect to be necessary for a while.
4423 */
4424VMMR3DECL(int) PGMR3PhysRomRegister(PVM pVM, PPDMDEVINS pDevIns, RTGCPHYS GCPhys, RTGCPHYS cb,
4425 const void *pvBinary, uint32_t cbBinary, uint32_t fFlags, const char *pszDesc)
4426{
4427 Log(("PGMR3PhysRomRegister: pDevIns=%p GCPhys=%RGp(-%RGp) cb=%RGp pvBinary=%p cbBinary=%#x fFlags=%#x pszDesc=%s\n",
4428 pDevIns, GCPhys, GCPhys + cb, cb, pvBinary, cbBinary, fFlags, pszDesc));
4429 pgmLock(pVM);
4430 int rc = pgmR3PhysRomRegisterLocked(pVM, pDevIns, GCPhys, cb, pvBinary, cbBinary, fFlags, pszDesc);
4431 pgmUnlock(pVM);
4432 return rc;
4433}
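//
// A minimal usage sketch (illustrative only): a firmware device registering a
// shadowed 128 KB system BIOS image during VM construction.  The address, size,
// the pvBios/cbBios variables and the description string are assumptions invented
// for the example; the real BIOS/EFI devices go through the PDM device helpers.
//
//     /* pvBios points to a 128 KB image the device has loaded; cbBios == _128K. */
//     int rc = PGMR3PhysRomRegister(pVM, pDevIns, 0xe0000 /*GCPhys*/, _128K /*cb*/,
//                                   pvBios, cbBios, PGMPHYS_ROM_FLAGS_SHADOWED,
//                                   "Example BIOS");
//     AssertLogRelRCReturn(rc, rc);
//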
4434
4435
4436/**
4437 * Called by PGMR3MemSetup to reset the shadow, switch to the virgin, and verify
4438 * that the virgin part is untouched.
4439 *
4440 * This is done after the normal memory has been cleared.
4441 *
4442 * ASSUMES that the caller owns the PGM lock.
4443 *
4444 * @param pVM The cross context VM structure.
4445 */
4446int pgmR3PhysRomReset(PVM pVM)
4447{
4448 PGM_LOCK_ASSERT_OWNER(pVM);
4449 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
4450 {
4451 const uint32_t cPages = pRom->cb >> PAGE_SHIFT;
4452
4453 if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
4454 {
4455 /*
4456 * Reset the physical handler.
4457 */
4458 int rc = PGMR3PhysRomProtect(pVM, pRom->GCPhys, pRom->cb, PGMROMPROT_READ_ROM_WRITE_IGNORE);
4459 AssertRCReturn(rc, rc);
4460
4461 /*
4462 * What we do with the shadow pages depends on the memory
4463 * preallocation option. If not enabled, we'll just throw
4464 * out all the dirty pages and replace them with the zero page.
4465 */
4466 if (!pVM->pgm.s.fRamPreAlloc)
4467 {
4468 /* Free the dirty pages. */
4469 uint32_t cPendingPages = 0;
4470 PGMMFREEPAGESREQ pReq;
4471 rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
4472 AssertRCReturn(rc, rc);
4473
4474 for (uint32_t iPage = 0; iPage < cPages; iPage++)
4475 if ( !PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow)
4476 && !PGM_PAGE_IS_BALLOONED(&pRom->aPages[iPage].Shadow))
4477 {
4478 Assert(PGM_PAGE_GET_STATE(&pRom->aPages[iPage].Shadow) == PGM_PAGE_STATE_ALLOCATED);
4479 rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, &pRom->aPages[iPage].Shadow,
4480 pRom->GCPhys + (iPage << PAGE_SHIFT),
4481 (PGMPAGETYPE)PGM_PAGE_GET_TYPE(&pRom->aPages[iPage].Shadow));
4482 AssertLogRelRCReturn(rc, rc);
4483 }
4484
4485 if (cPendingPages)
4486 {
4487 rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);
4488 AssertLogRelRCReturn(rc, rc);
4489 }
4490 GMMR3FreePagesCleanup(pReq);
4491 }
4492 else
4493 {
4494 /* clear all the shadow pages. */
4495 for (uint32_t iPage = 0; iPage < cPages; iPage++)
4496 {
4497 if (PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow))
4498 continue;
4499 Assert(!PGM_PAGE_IS_BALLOONED(&pRom->aPages[iPage].Shadow));
4500 void *pvDstPage;
4501 const RTGCPHYS GCPhys = pRom->GCPhys + (iPage << PAGE_SHIFT);
4502 rc = pgmPhysPageMakeWritableAndMap(pVM, &pRom->aPages[iPage].Shadow, GCPhys, &pvDstPage);
4503 if (RT_FAILURE(rc))
4504 break;
4505 ASMMemZeroPage(pvDstPage);
4506 }
4507 AssertRCReturn(rc, rc);
4508 }
4509 }
4510
4511 /*
4512 * Restore the original ROM pages after a saved state load.
4513 * Also, in strict builds check that ROM pages remain unmodified.
4514 */
4515#ifndef VBOX_STRICT
4516 if (pVM->pgm.s.fRestoreRomPagesOnReset)
4517#endif
4518 {
4519 size_t cbSrcLeft = pRom->cbOriginal;
4520 uint8_t const *pbSrcPage = (uint8_t const *)pRom->pvOriginal;
4521 uint32_t cRestored = 0;
4522 for (uint32_t iPage = 0; iPage < cPages && cbSrcLeft > 0; iPage++, pbSrcPage += PAGE_SIZE)
4523 {
4524 const RTGCPHYS GCPhys = pRom->GCPhys + (iPage << PAGE_SHIFT);
4525 void const *pvDstPage;
4526 int rc = pgmPhysPageMapReadOnly(pVM, &pRom->aPages[iPage].Virgin, GCPhys, &pvDstPage);
4527 if (RT_FAILURE(rc))
4528 break;
4529
4530 if (memcmp(pvDstPage, pbSrcPage, RT_MIN(cbSrcLeft, PAGE_SIZE)))
4531 {
4532 if (pVM->pgm.s.fRestoreRomPagesOnReset)
4533 {
4534 void *pvDstPageW;
4535 rc = pgmPhysPageMap(pVM, &pRom->aPages[iPage].Virgin, GCPhys, &pvDstPageW);
4536 AssertLogRelRCReturn(rc, rc);
4537 memcpy(pvDstPageW, pbSrcPage, RT_MIN(cbSrcLeft, PAGE_SIZE));
4538 cRestored++;
4539 }
4540 else
4541 LogRel(("pgmR3PhysRomReset: %RGp: ROM page changed (%s)\n", GCPhys, pRom->pszDesc));
4542 }
4543 cbSrcLeft -= RT_MIN(cbSrcLeft, PAGE_SIZE);
4544 }
4545 if (cRestored > 0)
4546 LogRel(("PGM: ROM \"%s\": Reloaded %u of %u pages.\n", pRom->pszDesc, cRestored, cPages));
4547 }
4548 }
4549
4550 /* Clear the ROM restore flag now as we only need to do this once after
4551 loading saved state. */
4552 pVM->pgm.s.fRestoreRomPagesOnReset = false;
4553
4554 return VINF_SUCCESS;
4555}
4556
4557
4558/**
4559 * Called by PGMR3Term to free resources.
4560 *
4561 * ASSUMES that the caller owns the PGM lock.
4562 *
4563 * @param pVM The cross context VM structure.
4564 */
4565void pgmR3PhysRomTerm(PVM pVM)
4566{
4567 /*
4568 * Free the heap copy of the original bits.
4569 */
4570 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
4571 {
4572 if ( pRom->pvOriginal
4573 && !(pRom->fFlags & PGMPHYS_ROM_FLAGS_PERMANENT_BINARY))
4574 {
4575 RTMemFree((void *)pRom->pvOriginal);
4576 pRom->pvOriginal = NULL;
4577 }
4578 }
4579}
4580
4581
4582/**
4583 * Change the shadowing of a range of ROM pages.
4584 *
4585 * This is intended for implementing chipset-specific memory registers
4586 * and will not be very strict about the input. It will silently ignore
4587 * any pages that are not part of a shadowed ROM.
4588 *
4589 * @returns VBox status code.
4590 * @retval VINF_PGM_SYNC_CR3
4591 *
4592 * @param pVM The cross context VM structure.
4593 * @param GCPhys Where to start. Page aligned.
4594 * @param cb How much to change. Page aligned.
4595 * @param enmProt The new ROM protection.
4596 */
4597VMMR3DECL(int) PGMR3PhysRomProtect(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, PGMROMPROT enmProt)
4598{
4599 /*
4600 * Check input
4601 */
4602 if (!cb)
4603 return VINF_SUCCESS;
4604 AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
4605 AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
4606 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
4607 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
4608 AssertReturn(enmProt >= PGMROMPROT_INVALID && enmProt <= PGMROMPROT_END, VERR_INVALID_PARAMETER);
4609
4610 /*
4611 * Process the request.
4612 */
4613 pgmLock(pVM);
4614 int rc = VINF_SUCCESS;
4615 bool fFlushTLB = false;
4616 for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
4617 {
4618 if ( GCPhys <= pRom->GCPhysLast
4619 && GCPhysLast >= pRom->GCPhys
4620 && (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED))
4621 {
4622 /*
4623 * Iterate the relevant pages and make the necessary changes.
4624 */
4625 bool fChanges = false;
4626 uint32_t const cPages = pRom->GCPhysLast <= GCPhysLast
4627 ? pRom->cb >> PAGE_SHIFT
4628 : (GCPhysLast - pRom->GCPhys + 1) >> PAGE_SHIFT;
4629 for (uint32_t iPage = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT;
4630 iPage < cPages;
4631 iPage++)
4632 {
4633 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
4634 if (PGMROMPROT_IS_ROM(pRomPage->enmProt) != PGMROMPROT_IS_ROM(enmProt))
4635 {
4636 fChanges = true;
4637
4638 /* flush references to the page. */
4639 PPGMPAGE pRamPage = pgmPhysGetPage(pVM, pRom->GCPhys + (iPage << PAGE_SHIFT));
4640 int rc2 = pgmPoolTrackUpdateGCPhys(pVM, pRom->GCPhys + (iPage << PAGE_SHIFT), pRamPage,
4641 true /*fFlushPTEs*/, &fFlushTLB);
4642 if (rc2 != VINF_SUCCESS && (rc == VINF_SUCCESS || RT_FAILURE(rc2)))
4643 rc = rc2;
4644 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pRamPage);
4645
4646 PPGMPAGE pOld = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Virgin : &pRomPage->Shadow;
4647 PPGMPAGE pNew = PGMROMPROT_IS_ROM(pRomPage->enmProt) ? &pRomPage->Shadow : &pRomPage->Virgin;
4648
4649 *pOld = *pRamPage;
4650 *pRamPage = *pNew;
4651 /** @todo preserve the volatile flags (handlers) when these have been moved out of HCPhys! */
4652
4653 /* Tell NEM about the backing and protection change. */
4654 if (VM_IS_NEM_ENABLED(pVM))
4655 {
4656 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pNew);
4657 NEMHCNotifyPhysPageChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pOld), PGM_PAGE_GET_HCPHYS(pNew),
4658 pgmPhysPageCalcNemProtection(pRamPage, enmType), enmType, &u2State);
4659 PGM_PAGE_SET_NEM_STATE(pRamPage, u2State);
4660 }
4661 }
4662 pRomPage->enmProt = enmProt;
4663 }
4664
4665 /*
4666 * Reset the access handler if we made changes, no need
4667 * to optimize this.
4668 */
4669 if (fChanges)
4670 {
4671 int rc2 = PGMHandlerPhysicalReset(pVM, pRom->GCPhys);
4672 if (RT_FAILURE(rc2))
4673 {
4674 pgmUnlock(pVM);
4675 AssertRC(rc);
4676 return rc2;
4677 }
4678 }
4679
4680 /* Advance - cb isn't updated. */
4681 GCPhys = pRom->GCPhys + (cPages << PAGE_SHIFT);
4682 }
4683 }
4684 pgmUnlock(pVM);
4685 if (fFlushTLB)
4686 PGM_INVL_ALL_VCPU_TLBS(pVM);
4687
4688 return rc;
4689}
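//
// A minimal usage sketch (illustrative only): chipset emulation forcing a shadowed
// legacy BIOS range back to "read from ROM, ignore writes", for instance when the
// guest clears the shadowing bits of a PAM-style register.  The 0xf0000/64 KB range
// is an assumption made up for the example; rc may be VINF_PGM_SYNC_CR3 as
// documented above.
//
//     int rc = PGMR3PhysRomProtect(pVM, 0xf0000 /*GCPhys*/, _64K /*cb*/,
//                                  PGMROMPROT_READ_ROM_WRITE_IGNORE);
//     AssertLogRelRC(rc);
//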
4690
4691
4692/**
4693 * Sets the Address Gate 20 state.
4694 *
4695 * @param pVCpu The cross context virtual CPU structure.
4696 * @param fEnable True if the gate should be enabled.
4697 * False if the gate should be disabled.
4698 */
4699VMMDECL(void) PGMR3PhysSetA20(PVMCPU pVCpu, bool fEnable)
4700{
4701 LogFlow(("PGMR3PhysSetA20 %d (was %d)\n", fEnable, pVCpu->pgm.s.fA20Enabled));
4702 if (pVCpu->pgm.s.fA20Enabled != fEnable)
4703 {
4704#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4705 PCCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
4706 if ( CPUMIsGuestInVmxRootMode(pCtx)
4707 && !fEnable)
4708 {
4709 Log(("Cannot enter A20M mode while in VMX root mode\n"));
4710 return;
4711 }
4712#endif
4713 pVCpu->pgm.s.fA20Enabled = fEnable;
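        /* Note: !fEnable is 1 when the gate is being disabled, so the mask set below
           becomes ~((RTGCPHYS)1 << 20) and clears address bit 20; when enabling it
           becomes ~(RTGCPHYS)0 and no bits are masked. */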
4714 pVCpu->pgm.s.GCPhysA20Mask = ~((RTGCPHYS)!fEnable << 20);
4715#ifdef VBOX_WITH_REM
4716 REMR3A20Set(pVCpu->pVMR3, pVCpu, fEnable);
4717#endif
4718 NEMR3NotifySetA20(pVCpu, fEnable);
4719#ifdef PGM_WITH_A20
4720 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
4721 pgmR3RefreshShadowModeAfterA20Change(pVCpu);
4722 HMFlushTlb(pVCpu);
4723#endif
4724 IEMTlbInvalidateAllPhysical(pVCpu);
4725 STAM_REL_COUNTER_INC(&pVCpu->pgm.s.cA20Changes);
4726 }
4727}
4728
4729
4730/**
4731 * Tree enumeration callback for dealing with age rollover.
4732 * It will perform a simple compression of the current age.
4733 */
4734static DECLCALLBACK(int) pgmR3PhysChunkAgeingRolloverCallback(PAVLU32NODECORE pNode, void *pvUser)
4735{
4736 /* Age compression - ASSUMES iNow == 4. */
4737 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
4738 if (pChunk->iLastUsed >= UINT32_C(0xffffff00))
4739 pChunk->iLastUsed = 3;
4740 else if (pChunk->iLastUsed >= UINT32_C(0xfffff000))
4741 pChunk->iLastUsed = 2;
4742 else if (pChunk->iLastUsed)
4743 pChunk->iLastUsed = 1;
4744 else /* iLastUsed = 0 */
4745 pChunk->iLastUsed = 4;
4746
4747 NOREF(pvUser);
4748 return 0;
4749}
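// Illustrative summary of the compression above, assuming iNow has just wrapped to 4:
// chunks whose iLastUsed was 0xffffff00 or higher become age 3, those at 0xfffff000 or
// higher become age 2, any other non-zero value becomes 1, and a zero value becomes 4
// (i.e. it is treated as if it had just been used).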
4750
4751
4752/**
4753 * The structure passed in the pvUser argument of pgmR3PhysChunkUnmapCandidateCallback().
4754 */
4755typedef struct PGMR3PHYSCHUNKUNMAPCB
4756{
4757 PVM pVM; /**< Pointer to the VM. */
4758 PPGMCHUNKR3MAP pChunk; /**< The chunk to unmap. */
4759} PGMR3PHYSCHUNKUNMAPCB, *PPGMR3PHYSCHUNKUNMAPCB;
4760
4761
4762/**
4763 * Callback used to find the mapping that's been unused for
4764 * the longest time.
4765 */
4766static DECLCALLBACK(int) pgmR3PhysChunkUnmapCandidateCallback(PAVLU32NODECORE pNode, void *pvUser)
4767{
4768 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
4769 PPGMR3PHYSCHUNKUNMAPCB pArg = (PPGMR3PHYSCHUNKUNMAPCB)pvUser;
4770
4771 /*
4772 * Check for locks and compare when last used.
4773 */
4774 if (pChunk->cRefs)
4775 return 0;
4776 if (pChunk->cPermRefs)
4777 return 0;
4778 if ( pArg->pChunk
4779 && pChunk->iLastUsed >= pArg->pChunk->iLastUsed)
4780 return 0;
4781
4782 /*
4783 * Check that it's not in any of the TLBs.
4784 */
4785 PVM pVM = pArg->pVM;
4786 if ( pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(pChunk->Core.Key)].idChunk
4787 == pChunk->Core.Key)
4788 {
4789 pChunk = NULL;
4790 return 0;
4791 }
4792#ifdef VBOX_STRICT
4793 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
4794 {
4795 Assert(pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk != pChunk);
4796 Assert(pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk != pChunk->Core.Key);
4797 }
4798#endif
4799
4800 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbR0.aEntries); i++)
4801 if (pVM->pgm.s.PhysTlbR0.aEntries[i].pMap == pChunk)
4802 return 0;
4803 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbR3.aEntries); i++)
4804 if (pVM->pgm.s.PhysTlbR3.aEntries[i].pMap == pChunk)
4805 return 0;
4806
4807 pArg->pChunk = pChunk;
4808 return 0;
4809}
4810
4811
4812/**
4813 * Finds a good candidate for unmapping when the ring-3 mapping cache is full.
4814 *
4815 * The candidate will not be part of any TLBs, so no need to flush
4816 * anything afterwards.
4817 *
4818 * @returns Chunk id.
4819 * @param pVM The cross context VM structure.
4820 */
4821static int32_t pgmR3PhysChunkFindUnmapCandidate(PVM pVM)
4822{
4823 PGM_LOCK_ASSERT_OWNER(pVM);
4824
4825 /*
4826 * Enumerate the age tree starting with the left most node.
4827 */
4828 STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pStats)->StatChunkFindCandidate, a);
4829 PGMR3PHYSCHUNKUNMAPCB Args;
4830 Args.pVM = pVM;
4831 Args.pChunk = NULL;
4832 RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkUnmapCandidateCallback, &Args);
4833 Assert(Args.pChunk);
4834 if (Args.pChunk)
4835 {
4836 Assert(Args.pChunk->cRefs == 0);
4837 Assert(Args.pChunk->cPermRefs == 0);
4838 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->StatChunkFindCandidate, a);
4839 return Args.pChunk->Core.Key;
4840 }
4841
4842 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->StatChunkFindCandidate, a);
4843 return INT32_MAX;
4844}
4845
4846
4847/**
4848 * Rendezvous callback used by pgmR3PhysUnmapChunk that unmaps a chunk.
4849 *
4850 * This is only called on one of the EMTs while the other ones are waiting for
4851 * it to complete this function.
4852 *
4853 * @returns VINF_SUCCESS (VBox strict status code).
4854 * @param pVM The cross context VM structure.
4855 * @param pVCpu The cross context virtual CPU structure of the calling EMT. Unused.
4856 * @param pvUser User pointer. Unused
4857 *
4858 */
4859static DECLCALLBACK(VBOXSTRICTRC) pgmR3PhysUnmapChunkRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
4860{
4861 int rc = VINF_SUCCESS;
4862 pgmLock(pVM);
4863 NOREF(pVCpu); NOREF(pvUser);
4864
4865 if (pVM->pgm.s.ChunkR3Map.c >= pVM->pgm.s.ChunkR3Map.cMax)
4866 {
4867 /* Flush the pgm pool cache; call the internal rendezvous handler as we're already in a rendezvous handler here. */
4868 /** @todo also not really efficient to unmap a chunk that contains PD
4869 * or PT pages. */
4870 pgmR3PoolClearAllRendezvous(pVM, pVM->apCpusR3[0], NULL /* no need to flush the REM TLB as we already did that above */);
4871
4872 /*
4873 * Request the ring-0 part to unmap a chunk to make space in the mapping cache.
4874 */
4875 GMMMAPUNMAPCHUNKREQ Req;
4876 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
4877 Req.Hdr.cbReq = sizeof(Req);
4878 Req.pvR3 = NULL;
4879 Req.idChunkMap = NIL_GMM_CHUNKID;
4880 Req.idChunkUnmap = pgmR3PhysChunkFindUnmapCandidate(pVM);
4881 if (Req.idChunkUnmap != INT32_MAX)
4882 {
4883 STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pStats)->StatChunkUnmap, a);
4884 rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, 0, &Req.Hdr);
4885 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->StatChunkUnmap, a);
4886 if (RT_SUCCESS(rc))
4887 {
4888 /*
4889 * Remove the unmapped one.
4890 */
4891 PPGMCHUNKR3MAP pUnmappedChunk = (PPGMCHUNKR3MAP)RTAvlU32Remove(&pVM->pgm.s.ChunkR3Map.pTree, Req.idChunkUnmap);
4892 AssertRelease(pUnmappedChunk);
4893 AssertRelease(!pUnmappedChunk->cRefs);
4894 AssertRelease(!pUnmappedChunk->cPermRefs);
4895 pUnmappedChunk->pv = NULL;
4896 pUnmappedChunk->Core.Key = UINT32_MAX;
4897#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
4898 MMR3HeapFree(pUnmappedChunk);
4899#else
4900 MMR3UkHeapFree(pVM, pUnmappedChunk, MM_TAG_PGM_CHUNK_MAPPING);
4901#endif
4902 pVM->pgm.s.ChunkR3Map.c--;
4903 pVM->pgm.s.cUnmappedChunks++;
4904
4905 /*
4906 * Flush dangling PGM pointers (R3 & R0 ptrs to GC physical addresses).
4907 */
4908 /** @todo We should not flush chunks which include cr3 mappings. */
4909 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
4910 {
4911 PPGMCPU pPGM = &pVM->apCpusR3[idCpu]->pgm.s;
4912
4913 pPGM->pGst32BitPdR3 = NULL;
4914 pPGM->pGstPaePdptR3 = NULL;
4915 pPGM->pGstAmd64Pml4R3 = NULL;
4916#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
4917 pPGM->pGst32BitPdR0 = NIL_RTR0PTR;
4918 pPGM->pGstPaePdptR0 = NIL_RTR0PTR;
4919 pPGM->pGstAmd64Pml4R0 = NIL_RTR0PTR;
4920#endif
4921 for (unsigned i = 0; i < RT_ELEMENTS(pPGM->apGstPaePDsR3); i++)
4922 {
4923 pPGM->apGstPaePDsR3[i] = NULL;
4924#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
4925 pPGM->apGstPaePDsR0[i] = NIL_RTR0PTR;
4926#endif
4927 }
4928
4929 /* Flush REM TLBs. */
4930 CPUMSetChangedFlags(pVM->apCpusR3[idCpu], CPUM_CHANGED_GLOBAL_TLB_FLUSH);
4931 }
4932#ifdef VBOX_WITH_REM
4933 /* Flush REM translation blocks. */
4934 REMFlushTBs(pVM);
4935#endif
4936 }
4937 }
4938 }
4939 pgmUnlock(pVM);
4940 return rc;
4941}
4942
4943/**
4944 * Unmaps a chunk to free up virtual address space (request packet handler for
4945 * pgmR3PhysChunkMap).
4946 *
4947 * @param pVM The cross context VM structure.
4948 */
4949void pgmR3PhysUnmapChunk(PVM pVM)
4950{
4951 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, pgmR3PhysUnmapChunkRendezvous, NULL);
4952 AssertRC(rc);
4953}
4954
4955
4956/**
4957 * Maps the given chunk into the ring-3 mapping cache.
4958 *
4959 * This will call ring-0.
4960 *
4961 * @returns VBox status code.
4962 * @param pVM The cross context VM structure.
4963 * @param idChunk The chunk in question.
4964 * @param ppChunk Where to store the chunk tracking structure.
4965 *
4966 * @remarks Called from within the PGM critical section.
4967 * @remarks Can be called from any thread!
4968 */
4969int pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk)
4970{
4971 int rc;
4972
4973 PGM_LOCK_ASSERT_OWNER(pVM);
4974
4975 /*
4976 * Move the chunk time forward.
4977 */
4978 pVM->pgm.s.ChunkR3Map.iNow++;
4979 if (pVM->pgm.s.ChunkR3Map.iNow == 0)
4980 {
4981 pVM->pgm.s.ChunkR3Map.iNow = 4;
4982 RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingRolloverCallback, NULL);
4983 }
4984
4985 /*
4986 * Allocate a new tracking structure first.
4987 */
4988#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
4989 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)MMR3HeapAllocZ(pVM, MM_TAG_PGM_CHUNK_MAPPING, sizeof(*pChunk));
4990#else
4991 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)MMR3UkHeapAllocZ(pVM, MM_TAG_PGM_CHUNK_MAPPING, sizeof(*pChunk), NULL);
4992#endif
4993 AssertReturn(pChunk, VERR_NO_MEMORY);
4994 pChunk->Core.Key = idChunk;
4995 pChunk->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
4996
4997 /*
4998 * Request the ring-0 part to map the chunk in question.
4999 */
5000 GMMMAPUNMAPCHUNKREQ Req;
5001 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
5002 Req.Hdr.cbReq = sizeof(Req);
5003 Req.pvR3 = NULL;
5004 Req.idChunkMap = idChunk;
5005 Req.idChunkUnmap = NIL_GMM_CHUNKID;
5006
5007 /* Must be callable from any thread, so can't use VMMR3CallR0. */
5008 STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pStats)->StatChunkMap, a);
5009 rc = SUPR3CallVMMR0Ex(VMCC_GET_VMR0_FOR_CALL(pVM), NIL_VMCPUID, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, 0, &Req.Hdr);
5010 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->StatChunkMap, a);
5011 if (RT_SUCCESS(rc))
5012 {
5013 pChunk->pv = Req.pvR3;
5014
5015 /*
5016 * If we're running out of virtual address space, then we should
5017 * unmap another chunk.
5018 *
5019 * Currently, an unmap operation requires that all other virtual CPUs
5020 * are idling and not by chance making use of the memory we're
5021 * unmapping. So, we create an async unmap operation here.
5022 *
5023 * Now, when creating or restoring a saved state this won't work very
5024 * well since we may want to restore all guest RAM + a little something.
5025 * So, we have to do the unmap synchronously. Fortunately for us
5026 * though, during these operations the other virtual CPUs are inactive
5027 * and it should be safe to do this.
5028 */
5029 /** @todo Eventually we should lock all memory when used and do
5030 * map+unmap as one kernel call without any rendezvous or
5031 * other precautions. */
5032 if (pVM->pgm.s.ChunkR3Map.c + 1 >= pVM->pgm.s.ChunkR3Map.cMax)
5033 {
5034 switch (VMR3GetState(pVM))
5035 {
5036 case VMSTATE_LOADING:
5037 case VMSTATE_SAVING:
5038 {
5039 PVMCPU pVCpu = VMMGetCpu(pVM);
5040 if ( pVCpu
5041 && pVM->pgm.s.cDeprecatedPageLocks == 0)
5042 {
5043 pgmR3PhysUnmapChunkRendezvous(pVM, pVCpu, NULL);
5044 break;
5045 }
5046 }
5047 RT_FALL_THRU();
5048 default:
5049 rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE, (PFNRT)pgmR3PhysUnmapChunk, 1, pVM);
5050 AssertRC(rc);
5051 break;
5052 }
5053 }
5054
5055 /*
5056 * Update the tree. We must do this after any unmapping to make sure
5057 * the chunk we're going to return isn't unmapped by accident.
5058 */
5059 AssertPtr(Req.pvR3);
5060 bool fRc = RTAvlU32Insert(&pVM->pgm.s.ChunkR3Map.pTree, &pChunk->Core);
5061 AssertRelease(fRc);
5062 pVM->pgm.s.ChunkR3Map.c++;
5063 pVM->pgm.s.cMappedChunks++;
5064 }
5065 else
5066 {
5067 /** @todo this may fail because of /proc/sys/vm/max_map_count, so we
5068 * should probably restrict ourselves on Linux. */
5069 AssertRC(rc);
5070#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
5071 MMR3HeapFree(pChunk);
5072#else
5073 MMR3UkHeapFree(pVM, pChunk, MM_TAG_PGM_CHUNK_MAPPING);
5074#endif
5075 pChunk = NULL;
5076 }
5077
5078 *ppChunk = pChunk;
5079 return rc;
5080}
5081
5082
5083/**
5084 * For VMMCALLRING3_PGM_MAP_CHUNK, considered internal.
5085 *
5086 * @returns see pgmR3PhysChunkMap.
5087 * @param pVM The cross context VM structure.
5088 * @param idChunk The chunk to map.
5089 */
5090VMMR3DECL(int) PGMR3PhysChunkMap(PVM pVM, uint32_t idChunk)
5091{
5092 PPGMCHUNKR3MAP pChunk;
5093 int rc;
5094
5095 pgmLock(pVM);
5096 rc = pgmR3PhysChunkMap(pVM, idChunk, &pChunk);
5097 pgmUnlock(pVM);
5098 return rc;
5099}
5100
5101
5102/**
5103 * Invalidates the TLB for the ring-3 mapping cache.
5104 *
5105 * @param pVM The cross context VM structure.
5106 */
5107VMMR3DECL(void) PGMR3PhysChunkInvalidateTLB(PVM pVM)
5108{
5109 pgmLock(pVM);
5110 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
5111 {
5112 pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk = NIL_GMM_CHUNKID;
5113 pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk = NULL;
5114 }
5115 /* The page map TLB references chunks, so invalidate that one too. */
5116 pgmPhysInvalidatePageMapTLB(pVM);
5117 pgmUnlock(pVM);
5118}
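/*
 * Illustrative sketch, not part of the original file: the TLB cleared above is a small
 * direct-mapped cache in front of the ChunkR3Map AVL tree.  The index calculation below
 * (a plain modulo) is only a stand-in for the real macro in PGMInternal.h.
 */
#if 0 /* example only */
    unsigned const iTlbe = idChunk % RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries);   /* hypothetical index function */
    PPGMCHUNKR3MAP pChunk;
    if (pVM->pgm.s.ChunkR3Map.Tlb.aEntries[iTlbe].idChunk == idChunk)
        pChunk = pVM->pgm.s.ChunkR3Map.Tlb.aEntries[iTlbe].pChunk;                      /* TLB hit. */
    else
        pChunk = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);    /* Miss: consult the tree. */
#endif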
5119
5120
5121/**
5122 * Response to VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE to allocate a large
5123 * (2MB) page for use with a nested paging PDE.
5124 *
5125 * @returns The following VBox status codes.
5126 * @retval VINF_SUCCESS on success.
5127 * @retval VINF_EM_NO_MEMORY if we're out of memory.
5128 *
5129 * @param pVM The cross context VM structure.
5130 * @param GCPhys GC physical start address of the 2 MB range
5131 */
5132VMMR3DECL(int) PGMR3PhysAllocateLargeHandyPage(PVM pVM, RTGCPHYS GCPhys)
5133{
5134#ifdef PGM_WITH_LARGE_PAGES
5135 uint64_t u64TimeStamp1, u64TimeStamp2;
5136
5137 pgmLock(pVM);
5138
5139 STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pStats)->StatAllocLargePage, a);
5140 u64TimeStamp1 = RTTimeMilliTS();
5141 int rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE, 0, NULL);
5142 u64TimeStamp2 = RTTimeMilliTS();
5143 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->StatAllocLargePage, a);
5144 if (RT_SUCCESS(rc))
5145 {
5146 Assert(pVM->pgm.s.cLargeHandyPages == 1);
5147
5148 uint32_t idPage = pVM->pgm.s.aLargeHandyPage[0].idPage;
5149 RTHCPHYS HCPhys = pVM->pgm.s.aLargeHandyPage[0].HCPhysGCPhys;
5150
5151 void *pv;
5152
5153 /* Map the large page into our address space.
5154 *
5155 * Note: assuming that within the 2 MB range:
5156 * - the host-physical backing is contiguous: GCPhys + PAGE_SIZE maps to HCPhys + PAGE_SIZE (the whole point of this exercise)
5157 * - the ring-3 (user space) mapping is contiguous as well
5158 * - page id (GCPhys) + 1 = page id (GCPhys + PAGE_SIZE)
5159 */
5160 rc = pgmPhysPageMapByPageID(pVM, idPage, HCPhys, &pv);
5161 AssertLogRelMsg(RT_SUCCESS(rc), ("idPage=%#x HCPhysGCPhys=%RHp rc=%Rrc\n", idPage, HCPhys, rc));
5162
5163 if (RT_SUCCESS(rc))
5164 {
5165 /*
5166 * Clear the pages.
5167 */
5168 STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pStats)->StatClearLargePage, b);
5169 for (unsigned i = 0; i < _2M/PAGE_SIZE; i++)
5170 {
5171 ASMMemZeroPage(pv);
5172
5173 PPGMPAGE pPage;
5174 rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
5175 AssertRC(rc);
5176
5177 Assert(PGM_PAGE_IS_ZERO(pPage));
5178 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRZPageReplaceZero);
5179 pVM->pgm.s.cZeroPages--;
5180
5181 /*
5182 * Do the PGMPAGE modifications.
5183 */
5184 pVM->pgm.s.cPrivatePages++;
5185 PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhys);
5186 PGM_PAGE_SET_PAGEID(pVM, pPage, idPage);
5187 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
5188 PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_PDE);
5189 PGM_PAGE_SET_PTE_INDEX(pVM, pPage, 0);
5190 PGM_PAGE_SET_TRACKING(pVM, pPage, 0);
5191
5192 /* Somewhat dirty assumption that page ids are increasing. */
5193 idPage++;
5194
5195 HCPhys += PAGE_SIZE;
5196 GCPhys += PAGE_SIZE;
5197
5198 pv = (void *)((uintptr_t)pv + PAGE_SIZE);
5199
5200 Log3(("PGMR3PhysAllocateLargePage: idPage=%#x HCPhys=%RGp\n", idPage, HCPhys));
5201 }
5202 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->StatClearLargePage, b);
5203
5204 /* Flush all TLBs. */
5205 PGM_INVL_ALL_VCPU_TLBS(pVM);
5206 pgmPhysInvalidatePageMapTLB(pVM);
5207 }
5208 pVM->pgm.s.cLargeHandyPages = 0;
5209 }
5210
5211 if (RT_SUCCESS(rc))
5212 {
5213 static uint32_t cTimeOut = 0;
5214 uint64_t u64TimeStampDelta = u64TimeStamp2 - u64TimeStamp1;
5215
5216 if (u64TimeStampDelta > 100)
5217 {
5218 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatLargePageOverflow);
5219 if ( ++cTimeOut > 10
5220 || u64TimeStampDelta > 1000 /* more than one second forces an early retirement from allocating large pages. */)
5221 {
5222 /* If repeated attempts to allocate a large page take more than 100 ms, then we fall back to normal 4k pages.
5223 * E.g. Vista 64 tries to move memory around, which takes a huge amount of time.
5224 */
5225 LogRel(("PGMR3PhysAllocateLargePage: allocating large pages takes too long (last attempt %RU64 ms; nr of timeouts %d); DISABLE\n", u64TimeStampDelta, cTimeOut));
5226 PGMSetLargePageUsage(pVM, false);
5227 }
5228 }
5229 else
5230 if (cTimeOut > 0)
5231 cTimeOut--;
5232 }
5233
5234 pgmUnlock(pVM);
5235 return rc;
5236#else
5237 RT_NOREF(pVM, GCPhys);
5238 return VERR_NOT_IMPLEMENTED;
5239#endif /* PGM_WITH_LARGE_PAGES */
5240}
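/*
 * Illustrative sketch, not part of the original file: the clearing loop above relies on
 * GCPhys, HCPhys and the GMM page id advancing in lockstep across the 512 pages of the
 * 2 MB range (page i: GCPhys + i*PAGE_SIZE <-> HCPhys + i*PAGE_SIZE, page id idPage + i).
 * A strict-build cross check could look like this; GCPhysBase, HCPhysBase and idPageBase
 * are made-up names for the values at the start of the range.
 */
#if 0 /* example only */
    for (unsigned i = 0; i < _2M / PAGE_SIZE; i++)
    {
        PPGMPAGE pPage;
        int rc2 = pgmPhysGetPageEx(pVM, GCPhysBase + i * PAGE_SIZE, &pPage);
        AssertRC(rc2);
        Assert(PGM_PAGE_GET_HCPHYS(pPage) == HCPhysBase + i * PAGE_SIZE);
        Assert(PGM_PAGE_GET_PAGEID(pPage) == idPageBase + i);
    }
#endif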
5241
5242
5243/**
5244 * Response to VM_FF_PGM_NEED_HANDY_PAGES and VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES.
5245 *
5246 * This function will also work the VM_FF_PGM_NO_MEMORY force action flag, to
5247 * signal and clear the out of memory condition. When contracted, this API is
5248 * used to try clear the condition when the user wants to resume.
5249 * used to try to clear the condition when the user wants to resume.
5250 * @returns The following VBox status codes.
5251 * @retval VINF_SUCCESS on success. FFs cleared.
5252 * @retval VINF_EM_NO_MEMORY if we're out of memory. The FF is not cleared in
5253 * this case and it gets accompanied by VM_FF_PGM_NO_MEMORY.
5254 *
5255 * @param pVM The cross context VM structure.
5256 *
5257 * @remarks The VINF_EM_NO_MEMORY status is for the benefit of the FF processing
5258 * in EM.cpp and shouldn't be propagated outside TRPM, HM, EM and
5259 * pgmPhysEnsureHandyPage. There is one exception to this in the \#PF
5260 * handler.
5261 */
5262VMMR3DECL(int) PGMR3PhysAllocateHandyPages(PVM pVM)
5263{
5264 pgmLock(pVM);
5265
5266 /*
5267 * Allocate more pages, noting down the index of the first new page.
5268 */
5269 uint32_t iClear = pVM->pgm.s.cHandyPages;
5270 AssertMsgReturn(iClear <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d", iClear), VERR_PGM_HANDY_PAGE_IPE);
5271 Log(("PGMR3PhysAllocateHandyPages: %d -> %d\n", iClear, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
5272 int rcAlloc = VINF_SUCCESS;
5273 int rcSeed = VINF_SUCCESS;
5274 int rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
5275 while (rc == VERR_GMM_SEED_ME)
5276 {
5277 void *pvChunk;
5278 rcAlloc = rc = SUPR3PageAlloc(GMM_CHUNK_SIZE >> PAGE_SHIFT, &pvChunk);
5279 if (RT_SUCCESS(rc))
5280 {
5281 rcSeed = rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_SEED_CHUNK, (uintptr_t)pvChunk, NULL);
5282 if (RT_FAILURE(rc))
5283 SUPR3PageFree(pvChunk, GMM_CHUNK_SIZE >> PAGE_SHIFT);
5284 }
5285 if (RT_SUCCESS(rc))
5286 rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
5287 }
5288
5289 /** @todo We should split this up into an allocate and a flush operation; sometimes you want to flush and not allocate more (which will trigger the VM account limit error). */
5290 if ( rc == VERR_GMM_HIT_VM_ACCOUNT_LIMIT
5291 && pVM->pgm.s.cHandyPages > 0)
5292 {
5293 /* Still handy pages left, so don't panic. */
5294 rc = VINF_SUCCESS;
5295 }
5296
5297 if (RT_SUCCESS(rc))
5298 {
5299 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc));
5300 Assert(pVM->pgm.s.cHandyPages > 0);
5301 VM_FF_CLEAR(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
5302 VM_FF_CLEAR(pVM, VM_FF_PGM_NO_MEMORY);
5303
5304#ifdef VBOX_STRICT
5305 uint32_t i;
5306 for (i = iClear; i < pVM->pgm.s.cHandyPages; i++)
5307 if ( pVM->pgm.s.aHandyPages[i].idPage == NIL_GMM_PAGEID
5308 || pVM->pgm.s.aHandyPages[i].idSharedPage != NIL_GMM_PAGEID
5309 || (pVM->pgm.s.aHandyPages[i].HCPhysGCPhys & PAGE_OFFSET_MASK))
5310 break;
5311 if (i != pVM->pgm.s.cHandyPages)
5312 {
5313 RTAssertMsg1Weak(NULL, __LINE__, __FILE__, __FUNCTION__);
5314 RTAssertMsg2Weak("i=%d iClear=%d cHandyPages=%d\n", i, iClear, pVM->pgm.s.cHandyPages);
5315 for (uint32_t j = iClear; j < pVM->pgm.s.cHandyPages; j++)
5316 RTAssertMsg2Add("%03d: idPage=%d HCPhysGCPhys=%RHp idSharedPage=%d%s\n", j,
5317 pVM->pgm.s.aHandyPages[j].idPage,
5318 pVM->pgm.s.aHandyPages[j].HCPhysGCPhys,
5319 pVM->pgm.s.aHandyPages[j].idSharedPage,
5320 j == i ? " <---" : "");
5321 RTAssertPanic();
5322 }
5323#endif
5324 /*
5325 * Clear the pages.
5326 */
5327 while (iClear < pVM->pgm.s.cHandyPages)
5328 {
5329 PGMMPAGEDESC pPage = &pVM->pgm.s.aHandyPages[iClear];
5330 void *pv;
5331 rc = pgmPhysPageMapByPageID(pVM, pPage->idPage, pPage->HCPhysGCPhys, &pv);
5332 AssertLogRelMsgBreak(RT_SUCCESS(rc),
5333 ("%u/%u: idPage=%#x HCPhysGCPhys=%RHp rc=%Rrc\n",
5334 iClear, pVM->pgm.s.cHandyPages, pPage->idPage, pPage->HCPhysGCPhys, rc));
5335 ASMMemZeroPage(pv);
5336 iClear++;
5337 Log3(("PGMR3PhysAllocateHandyPages: idPage=%#x HCPhys=%RGp\n", pPage->idPage, pPage->HCPhysGCPhys));
5338 }
5339 }
5340 else
5341 {
5342 uint64_t cAllocPages, cMaxPages, cBalloonPages;
5343
5344 /*
5345 * We should never get here unless there is a genuine shortage of
5346 * memory (or some internal error). Flag the error so the VM can be
5347 * suspended ASAP and the user informed. If we're totally out of
5348 * handy pages we will return failure.
5349 */
5350 /* Report the failure. */
5351 LogRel(("PGM: Failed to procure handy pages; rc=%Rrc rcAlloc=%Rrc rcSeed=%Rrc cHandyPages=%#x\n"
5352 " cAllPages=%#x cPrivatePages=%#x cSharedPages=%#x cZeroPages=%#x\n",
5353 rc, rcAlloc, rcSeed,
5354 pVM->pgm.s.cHandyPages,
5355 pVM->pgm.s.cAllPages,
5356 pVM->pgm.s.cPrivatePages,
5357 pVM->pgm.s.cSharedPages,
5358 pVM->pgm.s.cZeroPages));
5359
5360 if (GMMR3QueryMemoryStats(pVM, &cAllocPages, &cMaxPages, &cBalloonPages) == VINF_SUCCESS)
5361 {
5362 LogRel(("GMM: Statistics:\n"
5363 " Allocated pages: %RX64\n"
5364 " Maximum pages: %RX64\n"
5365 " Ballooned pages: %RX64\n", cAllocPages, cMaxPages, cBalloonPages));
5366 }
5367
5368 if ( rc != VERR_NO_MEMORY
5369 && rc != VERR_NO_PHYS_MEMORY
5370 && rc != VERR_LOCK_FAILED)
5371 {
5372 for (uint32_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
5373 {
5374 LogRel(("PGM: aHandyPages[#%#04x] = {.HCPhysGCPhys=%RHp, .idPage=%#08x, .idSharedPage=%#08x}\n",
5375 i, pVM->pgm.s.aHandyPages[i].HCPhysGCPhys, pVM->pgm.s.aHandyPages[i].idPage,
5376 pVM->pgm.s.aHandyPages[i].idSharedPage));
5377 uint32_t const idPage = pVM->pgm.s.aHandyPages[i].idPage;
5378 if (idPage != NIL_GMM_PAGEID)
5379 {
5380 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3;
5381 pRam;
5382 pRam = pRam->pNextR3)
5383 {
5384 uint32_t const cPages = pRam->cb >> PAGE_SHIFT;
5385 for (uint32_t iPage = 0; iPage < cPages; iPage++)
5386 if (PGM_PAGE_GET_PAGEID(&pRam->aPages[iPage]) == idPage)
5387 LogRel(("PGM: Used by %RGp %R[pgmpage] (%s)\n",
5388 pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pRam->aPages[iPage], pRam->pszDesc));
5389 }
5390 }
5391 }
5392 }
5393
5394 if (rc == VERR_NO_MEMORY)
5395 {
5396 uint64_t cbHostRamAvail = 0;
5397 int rc2 = RTSystemQueryAvailableRam(&cbHostRamAvail);
5398 if (RT_SUCCESS(rc2))
5399 LogRel(("Host RAM: %RU64MB available\n", cbHostRamAvail / _1M));
5400 else
5401 LogRel(("Cannot determine the amount of available host memory\n"));
5402 }
5403
5404 /* Set the FFs and adjust rc. */
5405 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
5406 VM_FF_SET(pVM, VM_FF_PGM_NO_MEMORY);
5407 if ( rc == VERR_NO_MEMORY
5408 || rc == VERR_NO_PHYS_MEMORY
5409 || rc == VERR_LOCK_FAILED)
5410 rc = VINF_EM_NO_MEMORY;
5411 }
5412
5413 pgmUnlock(pVM);
5414 return rc;
5415}
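/*
 * Illustrative sketch, not part of the original file: a ring-3 caller typically reacts to
 * the force-action flag and treats VINF_EM_NO_MEMORY as "suspend and tell the user".
 * This is a simplified version of what the EM loop does, not the actual EM code.
 */
#if 0 /* example only */
    if (VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
    {
        int rc = PGMR3PhysAllocateHandyPages(pVM);
        if (rc == VINF_EM_NO_MEMORY)
        {
            /* VM_FF_PGM_NO_MEMORY is still set; suspend the VM and inform the user. */
        }
        else
            AssertRC(rc);
    }
#endif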
5416
5417
5418/**
5419 * Frees the specified RAM page and replaces it with the ZERO page.
5420 *
5421 * This is used by ballooning, remapping MMIO2, RAM reset and state loading.
5422 *
5423 * @param pVM The cross context VM structure.
5424 * @param pReq Pointer to the request.
5425 * @param pcPendingPages Where the number of pages waiting to be freed is
5426 * kept. This will normally be incremented.
5427 * @param pPage Pointer to the page structure.
5428 * @param GCPhys The guest physical address of the page, if applicable.
5429 * @param enmNewType New page type for NEM notification, since several
5430 * callers will change the type upon successful return.
5431 *
5432 * @remarks The caller must own the PGM lock.
5433 */
5434int pgmPhysFreePage(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t *pcPendingPages, PPGMPAGE pPage, RTGCPHYS GCPhys,
5435 PGMPAGETYPE enmNewType)
5436{
5437 /*
5438 * Assert sanity.
5439 */
5440 PGM_LOCK_ASSERT_OWNER(pVM);
5441 if (RT_UNLIKELY( PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
5442 && PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_ROM_SHADOW))
5443 {
5444 AssertMsgFailed(("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage));
5445 return VMSetError(pVM, VERR_PGM_PHYS_NOT_RAM, RT_SRC_POS, "GCPhys=%RGp type=%d", GCPhys, PGM_PAGE_GET_TYPE(pPage));
5446 }
5447
5448 /** @todo What about ballooning of large pages??! */
5449 Assert( PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
5450 && PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE_DISABLED);
5451
5452 if ( PGM_PAGE_IS_ZERO(pPage)
5453 || PGM_PAGE_IS_BALLOONED(pPage))
5454 return VINF_SUCCESS;
5455
5456 const uint32_t idPage = PGM_PAGE_GET_PAGEID(pPage);
5457 Log3(("pgmPhysFreePage: idPage=%#x GCPhys=%RGp pPage=%R[pgmpage]\n", idPage, GCPhys, pPage));
5458 if (RT_UNLIKELY( idPage == NIL_GMM_PAGEID
5459 || idPage > GMM_PAGEID_LAST
5460 || PGM_PAGE_GET_CHUNKID(pPage) == NIL_GMM_CHUNKID))
5461 {
5462 AssertMsgFailed(("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, pPage));
5463 return VMSetError(pVM, VERR_PGM_PHYS_INVALID_PAGE_ID, RT_SRC_POS, "GCPhys=%RGp idPage=%#x", GCPhys, idPage);
5464 }
5465 const RTHCPHYS HCPhysPrev = PGM_PAGE_GET_HCPHYS(pPage);
5466
5467 /* update page count stats. */
5468 if (PGM_PAGE_IS_SHARED(pPage))
5469 pVM->pgm.s.cSharedPages--;
5470 else
5471 pVM->pgm.s.cPrivatePages--;
5472 pVM->pgm.s.cZeroPages++;
5473
5474 /* Deal with write monitored pages. */
5475 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
5476 {
5477 PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
5478 pVM->pgm.s.cWrittenToPages++;
5479 }
5480
5481 /*
5482 * pPage = ZERO page.
5483 */
5484 PGM_PAGE_SET_HCPHYS(pVM, pPage, pVM->pgm.s.HCPhysZeroPg);
5485 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
5486 PGM_PAGE_SET_PAGEID(pVM, pPage, NIL_GMM_PAGEID);
5487 PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_DONTCARE);
5488 PGM_PAGE_SET_PTE_INDEX(pVM, pPage, 0);
5489 PGM_PAGE_SET_TRACKING(pVM, pPage, 0);
5490
5491 /* Flush physical page map TLB entry. */
5492 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
5493
5494 /* Notify NEM. */
5495 /** @todo consider doing batch NEM notifications. */
5496 if (VM_IS_NEM_ENABLED(pVM))
5497 {
5498 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
5499 NEMHCNotifyPhysPageChanged(pVM, GCPhys, HCPhysPrev, pVM->pgm.s.HCPhysZeroPg,
5500 pgmPhysPageCalcNemProtection(pPage, enmNewType), enmNewType, &u2State);
5501 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
5502 }
5503
5504 /*
5505 * Make sure it's not in the handy page array.
5506 */
5507 for (uint32_t i = pVM->pgm.s.cHandyPages; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
5508 {
5509 if (pVM->pgm.s.aHandyPages[i].idPage == idPage)
5510 {
5511 pVM->pgm.s.aHandyPages[i].idPage = NIL_GMM_PAGEID;
5512 break;
5513 }
5514 if (pVM->pgm.s.aHandyPages[i].idSharedPage == idPage)
5515 {
5516 pVM->pgm.s.aHandyPages[i].idSharedPage = NIL_GMM_PAGEID;
5517 break;
5518 }
5519 }
5520
5521 /*
5522 * Push it onto the page array.
5523 */
5524 uint32_t iPage = *pcPendingPages;
5525 Assert(iPage < PGMPHYS_FREE_PAGE_BATCH_SIZE);
5526 *pcPendingPages += 1;
5527
5528 pReq->aPages[iPage].idPage = idPage;
5529
5530 if (iPage + 1 < PGMPHYS_FREE_PAGE_BATCH_SIZE)
5531 return VINF_SUCCESS;
5532
5533 /*
5534 * Flush the pages.
5535 */
5536 int rc = GMMR3FreePagesPerform(pVM, pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE);
5537 if (RT_SUCCESS(rc))
5538 {
5539 GMMR3FreePagesRePrep(pVM, pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
5540 *pcPendingPages = 0;
5541 }
5542 return rc;
5543}
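/*
 * Illustrative sketch, not part of the original file: callers drive pgmPhysFreePage as a
 * batch -- prepare a request of PGMPHYS_FREE_PAGE_BATCH_SIZE pages, feed pages through
 * pgmPhysFreePage (which flushes full batches itself via GMMR3FreePagesPerform), then
 * flush any remainder and clean up.  The GMMR3FreePagesPrepare/Cleanup signatures are
 * recalled from GMM and should be double-checked; PGMPAGETYPE_INVALID is merely an
 * example value for the new-type parameter.
 */
#if 0 /* example only */
    PGMMFREEPAGESREQ pReq;
    uint32_t         cPendingPages = 0;
    int rc = GMMR3FreePagesPrepare(pVM, &pReq, PGMPHYS_FREE_PAGE_BATCH_SIZE, GMMACCOUNT_BASE);
    AssertLogRelRCReturn(rc, rc);

    /* ... for each page that should be freed ... */
    rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, GCPhys, PGMPAGETYPE_INVALID);

    if (RT_SUCCESS(rc) && cPendingPages)
        rc = GMMR3FreePagesPerform(pVM, pReq, cPendingPages);   /* Flush the final partial batch. */
    GMMR3FreePagesCleanup(pReq);
#endif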
5544
5545
5546/**
5547 * Converts a GC physical address to a HC ring-3 pointer, with some
5548 * additional checks.
5549 *
5550 * @returns VBox status code.
5551 * @retval VINF_SUCCESS on success.
5552 * @retval VINF_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
5553 * access handler of some kind.
5554 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
5555 * accesses or is odd in any way.
5556 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
5557 *
5558 * @param pVM The cross context VM structure.
5559 * @param GCPhys The GC physical address to convert. Since this is only
5560 * used for filling the REM TLB, the A20 mask must be
5561 * applied before calling this API.
5562 * @param fWritable Whether write access is required.
5563 * @param ppv Where to store the pointer corresponding to GCPhys on
5564 * success.
5565 */
5566VMMR3DECL(int) PGMR3PhysTlbGCPhys2Ptr(PVM pVM, RTGCPHYS GCPhys, bool fWritable, void **ppv)
5567{
5568 pgmLock(pVM);
5569 PGM_A20_ASSERT_MASKED(VMMGetCpu(pVM), GCPhys);
5570
5571 PPGMRAMRANGE pRam;
5572 PPGMPAGE pPage;
5573 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
5574 if (RT_SUCCESS(rc))
5575 {
5576 if (PGM_PAGE_IS_BALLOONED(pPage))
5577 rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
5578 else if (!PGM_PAGE_HAS_ANY_HANDLERS(pPage))
5579 rc = VINF_SUCCESS;
5580 else
5581 {
5582 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
5583 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
5584 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
5585 {
5586 /** @todo Handle TLB loads of virtual handlers so ./test.sh can be made to work
5587 * in -norawr0 mode. */
5588 if (fWritable)
5589 rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
5590 }
5591 else
5592 {
5593 /* Temporarily disabled physical handler(s): since the recompiler
5594 doesn't get notified when the handler is reset, we'll have to
5595 pretend it's operating normally. */
5596 if (pgmHandlerPhysicalIsAll(pVM, GCPhys))
5597 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
5598 else
5599 rc = VINF_PGM_PHYS_TLB_CATCH_WRITE;
5600 }
5601 }
5602 if (RT_SUCCESS(rc))
5603 {
5604 int rc2;
5605
5606 /* Make sure what we return is writable. */
5607 if (fWritable)
5608 switch (PGM_PAGE_GET_STATE(pPage))
5609 {
5610 case PGM_PAGE_STATE_ALLOCATED:
5611 break;
5612 case PGM_PAGE_STATE_BALLOONED:
5613 AssertFailed();
5614 break;
5615 case PGM_PAGE_STATE_ZERO:
5616 case PGM_PAGE_STATE_SHARED:
5617 if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
5618 break;
5619 RT_FALL_THRU();
5620 case PGM_PAGE_STATE_WRITE_MONITORED:
5621 rc2 = pgmPhysPageMakeWritable(pVM, pPage, GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK);
5622 AssertLogRelRCReturn(rc2, rc2);
5623 break;
5624 }
5625
5626 /* Get a ring-3 mapping of the address. */
5627 PPGMPAGER3MAPTLBE pTlbe;
5628 rc2 = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
5629 AssertLogRelRCReturn(rc2, rc2);
5630 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
5631 /** @todo mapping/locking hell; this isn't horribly efficient since
5632 * pgmPhysPageLoadIntoTlb will repeat the lookup we've done here. */
5633
5634 Log6(("PGMR3PhysTlbGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage] *ppv=%p\n", GCPhys, rc, pPage, *ppv));
5635 }
5636 else
5637 Log6(("PGMR3PhysTlbGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage]\n", GCPhys, rc, pPage));
5638
5639 /* else: handler catching all access, no pointer returned. */
5640 }
5641 else
5642 rc = VERR_PGM_PHYS_TLB_UNASSIGNED;
5643
5644 pgmUnlock(pVM);
5645 return rc;
5646}
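/*
 * Illustrative sketch, not part of the original file: how a TLB-filling caller might use
 * the API above.  The return codes are the ones documented in the function header; the
 * caller is assumed to have applied the A20 mask to GCPhys already.
 */
#if 0 /* example only */
    void *pv = NULL;
    int rc = PGMR3PhysTlbGCPhys2Ptr(pVM, GCPhys, true /*fWritable*/, &pv);
    if (rc == VINF_SUCCESS)
    {
        /* pv may be read and written directly. */
    }
    else if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
    {
        /* pv is good for reads; writes must go through PGMPhysWrite so handlers fire. */
    }
    else
    {
        /* VERR_PGM_PHYS_TLB_CATCH_ALL / VERR_PGM_PHYS_TLB_UNASSIGNED: no direct mapping. */
    }
#endif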
5647