VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp

Last change on this file was 108132, checked in by vboxsync, 5 weeks ago

VMM/PGM: Merge and deduplicate code targeting x86 & amd64 in PGM.cpp. Don't bother compiling pool stuff on arm and darwin.amd64. jiraref:VBP-1531

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 212.0 KB
 
1/* $Id: PGMAllPhys.cpp 108132 2025-02-10 11:05:23Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_PGM_PHYS
33#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
34#ifdef IN_RING0
35# define VBOX_VMM_TARGET_X86
36#endif
37#include <VBox/vmm/pgm.h>
38#include <VBox/vmm/trpm.h>
39#include <VBox/vmm/vmm.h>
40#include <VBox/vmm/iem.h>
41#include <VBox/vmm/iom.h>
42#include <VBox/vmm/em.h>
43#include <VBox/vmm/nem.h>
44#include "PGMInternal.h"
45#include <VBox/vmm/vmcc.h>
46#include "PGMInline.h"
47#include <VBox/param.h>
48#include <VBox/err.h>
49#include <iprt/assert.h>
50#include <iprt/string.h>
51#include <VBox/log.h>
52#ifdef IN_RING3
53# include <iprt/thread.h>
54#elif defined(IN_RING0)
55# include <iprt/mem.h>
56# include <iprt/memobj.h>
57#endif
58
59
60/*********************************************************************************************************************************
61* Defined Constants And Macros *
62*********************************************************************************************************************************/
63/** Enable the physical TLB. */
64#define PGM_WITH_PHYS_TLB
65
66/** @def PGM_HANDLER_PHYS_IS_VALID_STATUS
67 * Checks if valid physical access handler return code (normal handler, not PF).
68 *
69 * Checks if the given strict status code is one of the expected ones for a
70 * physical access handler in the current context.
71 *
72 * @returns true or false.
73 * @param a_rcStrict The status code.
74 * @param a_fWrite Whether it is a write or read being serviced.
75 *
76 * @remarks We wish to keep the list of statuses here as short as possible.
77 * When changing, please make sure to update the PGMPhysRead,
78 * PGMPhysWrite, PGMPhysReadGCPtr and PGMPhysWriteGCPtr docs too.
79 */
80#ifdef IN_RING3
81# define PGM_HANDLER_PHYS_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
82 ( (a_rcStrict) == VINF_SUCCESS \
83 || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT)
84#elif defined(IN_RING0)
85#define PGM_HANDLER_PHYS_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
86 ( (a_rcStrict) == VINF_SUCCESS \
87 || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT \
88 \
89 || (a_rcStrict) == ((a_fWrite) ? VINF_IOM_R3_MMIO_WRITE : VINF_IOM_R3_MMIO_READ) \
90 || (a_rcStrict) == VINF_IOM_R3_MMIO_READ_WRITE \
91 || ((a_rcStrict) == VINF_IOM_R3_MMIO_COMMIT_WRITE && (a_fWrite)) \
92 \
93 || (a_rcStrict) == VINF_EM_RAW_EMULATE_INSTR \
94 || (a_rcStrict) == VINF_EM_DBG_STOP \
95 || (a_rcStrict) == VINF_EM_DBG_EVENT \
96 || (a_rcStrict) == VINF_EM_DBG_BREAKPOINT \
97 || (a_rcStrict) == VINF_EM_OFF \
98 || (a_rcStrict) == VINF_EM_SUSPEND \
99 || (a_rcStrict) == VINF_EM_RESET \
100 )
101#else
102# error "Context?"
103#endif
104
105/** @def PGM_HANDLER_VIRT_IS_VALID_STATUS
106 * Checks if valid virtual access handler return code (normal handler, not PF).
107 *
108 * Checks if the given strict status code is one of the expected ones for a
109 * virtual access handler in the current context.
110 *
111 * @returns true or false.
112 * @param a_rcStrict The status code.
113 * @param a_fWrite Whether it is a write or read being serviced.
114 *
115 * @remarks We wish to keep the list of statuses here as short as possible.
116 * When changing, please make sure to update the PGMPhysRead,
117 * PGMPhysWrite, PGMPhysReadGCPtr and PGMPhysWriteGCPtr docs too.
118 */
119#ifdef IN_RING3
120# define PGM_HANDLER_VIRT_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
121 ( (a_rcStrict) == VINF_SUCCESS \
122 || (a_rcStrict) == VINF_PGM_HANDLER_DO_DEFAULT)
123#elif defined(IN_RING0)
124# define PGM_HANDLER_VIRT_IS_VALID_STATUS(a_rcStrict, a_fWrite) \
125 (false /* no virtual handlers in ring-0! */ )
126#else
127# error "Context?"
128#endif
129
130
131
132/**
133 * Calculate the actual table size.
134 *
135 * The memory is laid out like this:
136 * - PGMPHYSHANDLERTREE (8 bytes)
137 * - Allocation bitmap (8-byte size align)
138 * - Slab of PGMPHYSHANDLER. Start is 64 byte aligned.
139 */
140uint32_t pgmHandlerPhysicalCalcTableSizes(uint32_t *pcEntries, uint32_t *pcbTreeAndBitmap)
141{
142 /*
143 * A minimum of 64 entries and a maximum of ~64K.
144 */
145 uint32_t cEntries = *pcEntries;
146 if (cEntries <= 64)
147 cEntries = 64;
148 else if (cEntries >= _64K)
149 cEntries = _64K;
150 else
151 cEntries = RT_ALIGN_32(cEntries, 16);
152
153 /*
154 * Do the initial calculation.
155 */
156 uint32_t cbBitmap = RT_ALIGN_32(cEntries, 64) / 8;
157 uint32_t cbTreeAndBitmap = RT_ALIGN_32(sizeof(PGMPHYSHANDLERTREE) + cbBitmap, 64);
158 uint32_t cbTable = cEntries * sizeof(PGMPHYSHANDLER);
159 uint32_t cbTotal = cbTreeAndBitmap + cbTable;
160
161 /*
162 * Align the total and try use up extra space from that.
163 */
164 uint32_t cbTotalAligned = RT_ALIGN_32(cbTotal, RT_MAX(HOST_PAGE_SIZE, _16K));
165 uint32_t cAvail = cbTotalAligned - cbTotal;
166 cAvail /= sizeof(PGMPHYSHANDLER);
167 if (cAvail >= 1)
168 for (;;)
169 {
170 cbBitmap = RT_ALIGN_32(cEntries, 64) / 8;
171 cbTreeAndBitmap = RT_ALIGN_32(sizeof(PGMPHYSHANDLERTREE) + cbBitmap, 64);
172 cbTable = cEntries * sizeof(PGMPHYSHANDLER);
173 cbTotal = cbTreeAndBitmap + cbTable;
174 if (cbTotal <= cbTotalAligned)
175 break;
176 cEntries--;
177 Assert(cEntries >= 16);
178 }
179
180 /*
181 * Return the result.
182 */
183 *pcbTreeAndBitmap = cbTreeAndBitmap;
184 *pcEntries = cEntries;
185 return cbTotalAligned;
186}
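/* Illustrative sketch, not part of the upstream file: one way a caller might
   use pgmHandlerPhysicalCalcTableSizes().  The concrete numbers in the
   comments assume a 4 KiB host page size and only demonstrate the rounding
   behaviour implemented above. */
#if 0 /* example only */
    uint32_t cEntries        = 100; /* between 64 and _64K, so rounded up to a multiple of 16 -> 112 */
    uint32_t cbTreeAndBitmap = 0;
    uint32_t cbAlloc         = pgmHandlerPhysicalCalcTableSizes(&cEntries, &cbTreeAndBitmap);
    /* cbAlloc is aligned to RT_MAX(HOST_PAGE_SIZE, _16K); the PGMPHYSHANDLER
       slab starts cbTreeAndBitmap bytes into the allocation, and cEntries now
       holds the final (possibly adjusted) entry count. */
#endif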
187
188
189
190/*********************************************************************************************************************************
191* Access Handlers for ROM and MMIO2 *
192*********************************************************************************************************************************/
193
194#ifndef IN_RING3
195
196/**
197 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
198 * \#PF access handler callback for guest ROM range write access.}
199 *
200 * @remarks The @a uUser argument is the PGMROMRANGE::GCPhys value.
201 */
202DECLCALLBACK(VBOXSTRICTRC) pgmPhysRomWritePfHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTX pCtx,
203 RTGCPTR pvFault, RTGCPHYS GCPhysFault, uint64_t uUser)
204
205{
206 AssertReturn(uUser < RT_ELEMENTS(pVM->pgmr0.s.apRomRanges), VINF_EM_RAW_EMULATE_INSTR);
207 PPGMROMRANGE const pRom = pVM->pgmr0.s.apRomRanges[uUser];
208 AssertReturn(pRom, VINF_EM_RAW_EMULATE_INSTR);
209
210 uint32_t const iPage = (GCPhysFault - pRom->GCPhys) >> GUEST_PAGE_SHIFT;
211 AssertReturn(iPage < (pRom->cb >> GUEST_PAGE_SHIFT), VERR_INTERNAL_ERROR_3);
212#ifdef IN_RING0
213 AssertReturn(iPage < pVM->pgmr0.s.acRomRangePages[uUser], VERR_INTERNAL_ERROR_2);
214#endif
215
216 RT_NOREF(uErrorCode, pvFault);
217 Assert(uErrorCode & X86_TRAP_PF_RW); /* This shall not be used for read access! */
218
219 int rc;
220 switch (pRom->aPages[iPage].enmProt)
221 {
222 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
223 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
224 {
225 /*
226 * If it's a simple instruction which doesn't change the cpu state
227 * we will simply skip it. Otherwise we'll have to defer it to REM.
228 */
229 uint32_t cbOp;
230 PDISSTATE pDis = &pVCpu->pgm.s.Dis;
231 rc = EMInterpretDisasCurrent(pVCpu, pDis, &cbOp);
232 if ( RT_SUCCESS(rc)
233 && pDis->uCpuMode == DISCPUMODE_32BIT /** @todo why does this matter? */
234 && !(pDis->x86.fPrefix & (DISPREFIX_REPNE | DISPREFIX_REP | DISPREFIX_SEG)))
235 {
236 switch (pDis->x86.bOpCode)
237 {
238 /** @todo Find other instructions we can safely skip, possibly
239 * adding this kind of detection to DIS or EM. */
240 case OP_MOV:
241 pCtx->rip += cbOp;
242 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZGuestROMWriteHandled);
243 return VINF_SUCCESS;
244 }
245 }
246 break;
247 }
248
249 case PGMROMPROT_READ_RAM_WRITE_RAM:
250 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
251 rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK);
252 AssertRC(rc);
253 break; /** @todo Must edit the shadow PT and restart the instruction, not use the interpreter! */
254
255 case PGMROMPROT_READ_ROM_WRITE_RAM:
256 /* Handle it in ring-3 because it's *way* easier there. */
257 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
258 break;
259
260 default:
261 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
262 pRom->aPages[iPage].enmProt, iPage, GCPhysFault),
263 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
264 }
265
266 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZGuestROMWriteUnhandled);
267 return VINF_EM_RAW_EMULATE_INSTR;
268}
269
270#endif /* !IN_RING3 */
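/* Reader's note, not part of the upstream file: summary of how the two ROM
   write handlers (ring-0/RC #PF handler above, ring-3/all-context handler
   below) treat the four PGMROMPROT modes:
     - READ_ROM_WRITE_IGNORE / READ_RAM_WRITE_IGNORE: the write is dropped;
       the #PF path tries to skip a simple MOV, otherwise it falls back to
       instruction emulation.
     - READ_ROM_WRITE_RAM: the ring-3 handler copies the data into the shadow
       page; the #PF path marks the page written and leaves the work to ring-3.
     - READ_RAM_WRITE_RAM: handled like the previous case by the ring-3
       handler; the #PF path temporarily disables the page handler via
       PGMHandlerPhysicalPageTempOff() before the write is retried/emulated. */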
271
272
273/**
274 * @callback_method_impl{FNPGMPHYSHANDLER,
275 * Access handler callback for ROM write accesses.}
276 *
277 * @remarks The @a uUser argument is the PGMROMRANGE::GCPhys value.
278 */
279DECLCALLBACK(VBOXSTRICTRC)
280pgmPhysRomWriteHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
281 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, uint64_t uUser)
282{
283 AssertReturn(uUser < RT_ELEMENTS(pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRomRanges), VERR_INTERNAL_ERROR_3);
284 PPGMROMRANGE const pRom = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRomRanges[uUser];
285 AssertReturn(pRom, VERR_INTERNAL_ERROR_3);
286
287 uint32_t const iPage = (GCPhys - pRom->GCPhys) >> GUEST_PAGE_SHIFT;
288 AssertReturn(iPage < (pRom->cb >> GUEST_PAGE_SHIFT), VERR_INTERNAL_ERROR_2);
289#ifdef IN_RING0
290 AssertReturn(iPage < pVM->pgmr0.s.acRomRangePages[uUser], VERR_INTERNAL_ERROR_2);
291#endif
292 PPGMROMPAGE const pRomPage = &pRom->aPages[iPage];
293
294 Log5(("pgmPhysRomWriteHandler: %d %c %#08RGp %#04zx\n", pRomPage->enmProt, enmAccessType == PGMACCESSTYPE_READ ? 'R' : 'W', GCPhys, cbBuf));
295 RT_NOREF(pVCpu, pvPhys, enmOrigin);
296
297 if (enmAccessType == PGMACCESSTYPE_READ)
298 {
299 switch (pRomPage->enmProt)
300 {
301 /*
302 * Take the default action.
303 */
304 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
305 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
306 case PGMROMPROT_READ_ROM_WRITE_RAM:
307 case PGMROMPROT_READ_RAM_WRITE_RAM:
308 return VINF_PGM_HANDLER_DO_DEFAULT;
309
310 default:
311 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
312 pRom->aPages[iPage].enmProt, iPage, GCPhys),
313 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
314 }
315 }
316 else
317 {
318 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
319 switch (pRomPage->enmProt)
320 {
321 /*
322 * Ignore writes.
323 */
324 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
325 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
326 return VINF_SUCCESS;
327
328 /*
329 * Write to the RAM page.
330 */
331 case PGMROMPROT_READ_ROM_WRITE_RAM:
332 case PGMROMPROT_READ_RAM_WRITE_RAM: /* yes this will get here too, it's *way* simpler that way. */
333 {
334 /* This should be impossible now, pvPhys doesn't work across pages any longer. */
335 Assert(((GCPhys - pRom->GCPhys + cbBuf - 1) >> GUEST_PAGE_SHIFT) == iPage);
336
337 /*
338 * Take the lock, do lazy allocation, map the page and copy the data.
339 *
340 * Note that we have to bypass the mapping TLB since it works on
341 * guest physical addresses and entering the shadow page would
342 * kind of screw things up...
343 */
344 PGM_LOCK_VOID(pVM);
345
346 PPGMPAGE pShadowPage = &pRomPage->Shadow;
347 if (!PGMROMPROT_IS_ROM(pRomPage->enmProt))
348 {
349 pShadowPage = pgmPhysGetPage(pVM, GCPhys);
350 AssertLogRelMsgReturnStmt(pShadowPage, ("%RGp\n", GCPhys), PGM_UNLOCK(pVM), VERR_PGM_PHYS_PAGE_GET_IPE);
351 }
352
353 void *pvDstPage;
354 int rc;
355#if defined(VBOX_WITH_PGM_NEM_MODE) && defined(IN_RING3)
356 if (PGM_IS_IN_NEM_MODE(pVM) && PGMROMPROT_IS_ROM(pRomPage->enmProt))
357 {
358 pvDstPage = &pRom->pbR3Alternate[GCPhys - pRom->GCPhys];
359 rc = VINF_SUCCESS;
360 }
361 else
362#endif
363 {
364 rc = pgmPhysPageMakeWritableAndMap(pVM, pShadowPage, GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK, &pvDstPage);
365 if (RT_SUCCESS(rc))
366 pvDstPage = (uint8_t *)pvDstPage + (GCPhys & GUEST_PAGE_OFFSET_MASK);
367 }
368 if (RT_SUCCESS(rc))
369 {
370 memcpy((uint8_t *)pvDstPage + (GCPhys & GUEST_PAGE_OFFSET_MASK), pvBuf, cbBuf);
371 pRomPage->LiveSave.fWrittenTo = true;
372
373 AssertMsg( rc == VINF_SUCCESS
374 || ( rc == VINF_PGM_SYNC_CR3
375 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
376 , ("%Rrc\n", rc));
377 rc = VINF_SUCCESS;
378 }
379
380 PGM_UNLOCK(pVM);
381 return rc;
382 }
383
384 default:
385 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
386 pRom->aPages[iPage].enmProt, iPage, GCPhys),
387 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
388 }
389 }
390}
391
392
393/**
394 * Common worker for pgmPhysMmio2WriteHandler and pgmPhysMmio2WritePfHandler.
395 */
396static VBOXSTRICTRC pgmPhysMmio2WriteHandlerCommon(PVMCC pVM, PVMCPUCC pVCpu, uint64_t hMmio2, RTGCPHYS GCPhys, RTGCPTR GCPtr)
397{
398 /*
399 * Get the MMIO2 range.
400 */
401 AssertReturn(hMmio2 < RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges), VERR_INTERNAL_ERROR_3);
402 AssertReturn(hMmio2 != 0, VERR_INTERNAL_ERROR_3);
403 PPGMREGMMIO2RANGE const pMmio2 = &pVM->pgm.s.aMmio2Ranges[hMmio2 - 1];
404 Assert(pMmio2->idMmio2 == hMmio2);
405 AssertReturn((pMmio2->fFlags & PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES) == PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES,
406 VERR_INTERNAL_ERROR_4);
407
408 /*
409 * Get the page and make sure it's an MMIO2 page.
410 */
411 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
412 AssertReturn(pPage, VINF_EM_RAW_EMULATE_INSTR);
413 AssertReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2, VINF_EM_RAW_EMULATE_INSTR);
414
415 /*
416 * Set the dirty flag so we can avoid scanning all the pages when it isn't dirty.
417 * (The PGM_PAGE_HNDL_PHYS_STATE_DISABLED handler state indicates that a single
418 * page is dirty, saving the need for additional storage (bitmap).)
419 */
420 pMmio2->fFlags |= PGMREGMMIO2RANGE_F_IS_DIRTY;
421
422 /*
423 * Disable the handler for this page.
424 */
425 int rc = PGMHandlerPhysicalPageTempOff(pVM, pMmio2->GCPhys, GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK);
426 AssertRC(rc);
427#ifndef IN_RING3
428 if (RT_SUCCESS(rc) && GCPtr != ~(RTGCPTR)0)
429 {
430 rc = PGMShwMakePageWritable(pVCpu, GCPtr, PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT);
431 AssertMsgReturn(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT,
432 ("PGMShwModifyPage -> GCPtr=%RGv rc=%d\n", GCPtr, rc), rc);
433 }
434#else
435 RT_NOREF(pVCpu, GCPtr);
436#endif
437 return VINF_SUCCESS;
438}
439
440
441#ifndef IN_RING3
442/**
443 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
444 * \#PF access handler callback for guest MMIO2 dirty page tracing.}
445 *
446 * @remarks The @a uUser is the MMIO2 index.
447 */
448DECLCALLBACK(VBOXSTRICTRC) pgmPhysMmio2WritePfHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTX pCtx,
449 RTGCPTR pvFault, RTGCPHYS GCPhysFault, uint64_t uUser)
450{
451 RT_NOREF(pVCpu, uErrorCode, pCtx);
452 VBOXSTRICTRC rcStrict = PGM_LOCK(pVM); /* We should already have it, but just make sure we do. */
453 if (RT_SUCCESS(rcStrict))
454 {
455 rcStrict = pgmPhysMmio2WriteHandlerCommon(pVM, pVCpu, uUser, GCPhysFault, pvFault);
456 PGM_UNLOCK(pVM);
457 }
458 return rcStrict;
459}
460#endif /* !IN_RING3 */
461
462
463/**
464 * @callback_method_impl{FNPGMPHYSHANDLER,
465 * Access handler callback for MMIO2 dirty page tracing.}
466 *
467 * @remarks The @a uUser is the MMIO2 index.
468 */
469DECLCALLBACK(VBOXSTRICTRC)
470pgmPhysMmio2WriteHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
471 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, uint64_t uUser)
472{
473 VBOXSTRICTRC rcStrict = PGM_LOCK(pVM); /* We should already have it, but just make sure we do. */
474 if (RT_SUCCESS(rcStrict))
475 {
476 rcStrict = pgmPhysMmio2WriteHandlerCommon(pVM, pVCpu, uUser, GCPhys, ~(RTGCPTR)0);
477 PGM_UNLOCK(pVM);
478 if (rcStrict == VINF_SUCCESS)
479 rcStrict = VINF_PGM_HANDLER_DO_DEFAULT;
480 }
481 RT_NOREF(pvPhys, pvBuf, cbBuf, enmAccessType, enmOrigin);
482 return rcStrict;
483}
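/* Reader's note, not part of the upstream file: rough life cycle of the MMIO2
   dirty-page tracking driven by the two handlers above:
     1. The MMIO2 region is registered with
        PGMPHYS_MMIO2_FLAGS_TRACK_DIRTY_PAGES, so writes are intercepted here.
     2. On the first write to a page, pgmPhysMmio2WriteHandlerCommon() sets
        PGMREGMMIO2RANGE_F_IS_DIRTY on the range and calls
        PGMHandlerPhysicalPageTempOff() so subsequent writes to that page are
        no longer intercepted.
     3. The temporarily disabled per-page handler state is what marks a page
        dirty (see the comment in the common worker), so the consumer that
        later queries and resets the dirty state re-arms the handlers to
        restart tracking. */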
484
485
486
487/*********************************************************************************************************************************
488* RAM Ranges *
489*********************************************************************************************************************************/
490
491#ifdef VBOX_STRICT
492/**
493 * Asserts that the RAM range structures are sane.
494 */
495DECLHIDDEN(bool) pgmPhysAssertRamRangesLocked(PVMCC pVM, bool fInUpdate, bool fRamRelaxed)
496{
497 bool fRet = true;
498
499 /*
500 * Check the generation ID. This is stable since we own the PGM lock.
501 */
502 AssertStmt((pVM->pgm.s.RamRangeUnion.idGeneration & 1U) == (unsigned)fInUpdate, fRet = false);
503
504 /*
505 * Check the entry count and max ID.
506 */
507 uint32_t const idRamRangeMax = pVM->pgm.s.idRamRangeMax;
508 /* Since this is set to the highest ID, it cannot be the same as the table size. */
509 AssertStmt(idRamRangeMax < RT_ELEMENTS(pVM->pgm.s.apRamRanges), fRet = false);
510
511 /* Because ID=0 is reserved, it's one less than the table size and at most the
512 same as the max ID. */
513 uint32_t const cLookupEntries = pVM->pgm.s.RamRangeUnion.cLookupEntries;
514 AssertStmt(cLookupEntries < RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup), fRet = false);
515 AssertStmt(cLookupEntries <= idRamRangeMax, fRet = false);
516
517 /*
518 * Check the pointer table(s).
519 */
520 /* The first entry shall be empty. */
521 AssertStmt(pVM->pgm.s.apRamRanges[0] == NULL, fRet = false);
522# ifdef IN_RING0
523 AssertStmt(pVM->pgmr0.s.apRamRanges[0] == NULL, fRet = false);
524 AssertStmt(pVM->pgmr0.s.acRamRangePages[0] == 0, fRet = false);
525# endif
526
527 uint32_t cMappedRanges = 0;
528 for (uint32_t idRamRange = 1; idRamRange <= idRamRangeMax; idRamRange++)
529 {
530# ifdef IN_RING0
531 PPGMRAMRANGE const pRamRange = pVM->pgmr0.s.apRamRanges[idRamRange];
532 AssertContinueStmt(pRamRange, fRet = false);
533 AssertStmt(pVM->pgm.s.apRamRanges[idRamRange] != NIL_RTR3PTR, fRet = false);
534 AssertStmt( (pRamRange->cb >> GUEST_PAGE_SHIFT) == pVM->pgmr0.s.acRamRangePages[idRamRange]
535 || ( (pRamRange->cb >> GUEST_PAGE_SHIFT) < pVM->pgmr0.s.acRamRangePages[idRamRange]
536 && !(pRamRange->fFlags & PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO_EX)),
537 fRet = false);
538# else
539 PPGMRAMRANGE const pRamRange = pVM->pgm.s.apRamRanges[idRamRange];
540 AssertContinueStmt(pRamRange, fRet = false);
541# endif
542 AssertStmt(pRamRange->idRange == idRamRange, fRet = false);
543 if (pRamRange->GCPhys != NIL_RTGCPHYS)
544 {
545 cMappedRanges++;
546 AssertStmt((pRamRange->GCPhys & GUEST_PAGE_OFFSET_MASK) == 0, fRet = false);
547 AssertStmt((pRamRange->GCPhysLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK, fRet = false);
548 AssertStmt(pRamRange->GCPhysLast > pRamRange->GCPhys, fRet = false);
549 AssertStmt(pRamRange->GCPhysLast - pRamRange->GCPhys + 1U == pRamRange->cb, fRet = false);
550 }
551 else
552 {
553 AssertStmt(pRamRange->GCPhysLast == NIL_RTGCPHYS, fRet = false);
554 AssertStmt(PGM_RAM_RANGE_IS_AD_HOC(pRamRange) || fRamRelaxed, fRet = false);
555 }
556 }
557
558 /*
559 * Check that the lookup table is sorted and contains the right information.
560 */
561 AssertMsgStmt(cMappedRanges == cLookupEntries,
562 ("cMappedRanges=%#x cLookupEntries=%#x\n", cMappedRanges, cLookupEntries),
563 fRet = false);
564 RTGCPHYS GCPhysPrev = ~(RTGCPHYS)0;
565 for (uint32_t idxLookup = 0; idxLookup < cLookupEntries; idxLookup++)
566 {
567 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]);
568 AssertContinueStmt(idRamRange > 0 && idRamRange <= idRamRangeMax, fRet = false);
569 PPGMRAMRANGE const pRamRange = pVM->CTX_EXPR(pgm,pgmr0,pgmrc).s.apRamRanges[idRamRange];
570 AssertContinueStmt(pRamRange, fRet = false);
571
572 AssertStmt(pRamRange->idRange == idRamRange, fRet = false);
573 AssertStmt(pRamRange->GCPhys == PGMRAMRANGELOOKUPENTRY_GET_FIRST(pVM->pgm.s.aRamRangeLookup[idxLookup]),
574 fRet = false);
575 AssertStmt(pRamRange->GCPhysLast == pVM->pgm.s.aRamRangeLookup[idxLookup].GCPhysLast, fRet = false);
576
577 AssertStmt(pRamRange->GCPhys >= GCPhysPrev + 1U, fRet = false);
578 GCPhysPrev = pRamRange->GCPhysLast;
579 }
580
581 return fRet;
582}
583#endif /* VBOX_STRICT */
584
585
586/**
587 * Invalidates the RAM range TLBs.
588 *
589 * @param pVM The cross context VM structure.
590 */
591void pgmPhysInvalidRamRangeTlbs(PVMCC pVM)
592{
593 PGM_LOCK_VOID(pVM);
594
595 /* This is technically only required when freeing the PCNet MMIO2 range
596 during ancient saved state loading. The code freeing the RAM range
597 will make sure this function is called in both rings. */
598 RT_ZERO(pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRangesTlb);
599 VMCC_FOR_EACH_VMCPU_STMT(pVM, RT_ZERO(pVCpu->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRangesTlb));
600
601 PGM_UNLOCK(pVM);
602}
603
604
605/**
606 * Tests if a value of type RTGCPHYS is negative if the type had been signed
607 * instead of unsigned.
608 *
609 * @returns @c true if negative, @c false if positive or zero.
610 * @param a_GCPhys The value to test.
611 * @todo Move me to iprt/types.h.
612 */
613#define RTGCPHYS_IS_NEGATIVE(a_GCPhys) ((a_GCPhys) & ((RTGCPHYS)1 << (sizeof(RTGCPHYS)*8 - 1)))
614
615
616/**
617 * Slow worker for pgmPhysGetRange.
618 *
619 * @copydoc pgmPhysGetRange
620 * @note Caller owns the PGM lock.
621 */
622DECLHIDDEN(PPGMRAMRANGE) pgmPhysGetRangeSlow(PVMCC pVM, RTGCPHYS GCPhys)
623{
624 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
625
626 uint32_t idxEnd = RT_MIN(pVM->pgm.s.RamRangeUnion.cLookupEntries, RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup));
627 uint32_t idxStart = 0;
628 for (;;)
629 {
630 uint32_t idxLookup = idxStart + (idxEnd - idxStart) / 2;
631 RTGCPHYS const GCPhysEntryFirst = PGMRAMRANGELOOKUPENTRY_GET_FIRST(pVM->pgm.s.aRamRangeLookup[idxLookup]);
632 RTGCPHYS const cbEntryMinus1 = pVM->pgm.s.aRamRangeLookup[idxLookup].GCPhysLast - GCPhysEntryFirst;
633 RTGCPHYS const off = GCPhys - GCPhysEntryFirst;
634 if (off <= cbEntryMinus1)
635 {
636 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]);
637 AssertReturn(idRamRange < RT_ELEMENTS(pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges), NULL);
638 PPGMRAMRANGE const pRamRange = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges[idRamRange];
639 Assert(pRamRange);
640 pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRangesTlb[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRamRange;
641 return pRamRange;
642 }
643 if (RTGCPHYS_IS_NEGATIVE(off))
644 {
645 if (idxStart < idxLookup)
646 idxEnd = idxLookup;
647 else
648 break;
649 }
650 else
651 {
652 idxLookup += 1;
653 if (idxLookup < idxEnd)
654 idxStart = idxLookup;
655 else
656 break;
657 }
658 }
659 return NULL;
660}
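/* Reader's note, not part of the upstream file: the loop above (and the nearly
   identical loops in the other slow lookup workers below) is a binary search
   over the sorted aRamRangeLookup table using one unsigned subtraction per
   probe:  off = GCPhys - GCPhysEntryFirst.  If GCPhys falls inside the entry,
   off <= cbEntryMinus1 and the search is done.  If GCPhys lies below the
   entry, the subtraction wraps and the top bit of the result is set, which
   RTGCPHYS_IS_NEGATIVE() detects, so the search continues in the lower half;
   otherwise it continues in the upper half.  For example, with an entry
   starting at 0x100000 and GCPhys = 0xF0000, off is 0xFFFFFFFFFFFF0000, the
   top bit is set, and the upper half is discarded. */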
661
662
663/**
664 * Slow worker for pgmPhysGetRangeAtOrAbove.
665 *
666 * @copydoc pgmPhysGetRangeAtOrAbove
667 */
668DECLHIDDEN(PPGMRAMRANGE) pgmPhysGetRangeAtOrAboveSlow(PVMCC pVM, RTGCPHYS GCPhys)
669{
670 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
671
672 uint32_t idRamRangeLastLeft = UINT32_MAX;
673 uint32_t idxEnd = RT_MIN(pVM->pgm.s.RamRangeUnion.cLookupEntries, RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup));
674 uint32_t idxStart = 0;
675 for (;;)
676 {
677 uint32_t idxLookup = idxStart + (idxEnd - idxStart) / 2;
678 RTGCPHYS const GCPhysEntryFirst = PGMRAMRANGELOOKUPENTRY_GET_FIRST(pVM->pgm.s.aRamRangeLookup[idxLookup]);
679 RTGCPHYS const cbEntryMinus1 = pVM->pgm.s.aRamRangeLookup[idxLookup].GCPhysLast - GCPhysEntryFirst;
680 RTGCPHYS const off = GCPhys - GCPhysEntryFirst;
681 if (off <= cbEntryMinus1)
682 {
683 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]);
684 AssertReturn(idRamRange < RT_ELEMENTS(pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges), NULL);
685 PPGMRAMRANGE const pRamRange = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges[idRamRange];
686 Assert(pRamRange);
687 pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRangesTlb[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRamRange;
688 return pRamRange;
689 }
690 if (RTGCPHYS_IS_NEGATIVE(off))
691 {
692 idRamRangeLastLeft = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]);
693 if (idxStart < idxLookup)
694 idxEnd = idxLookup;
695 else
696 break;
697 }
698 else
699 {
700 idxLookup += 1;
701 if (idxLookup < idxEnd)
702 idxStart = idxLookup;
703 else
704 break;
705 }
706 }
707 if (idRamRangeLastLeft != UINT32_MAX)
708 {
709 AssertReturn(idRamRangeLastLeft < RT_ELEMENTS(pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges), NULL);
710 PPGMRAMRANGE const pRamRange = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges[idRamRangeLastLeft];
711 Assert(pRamRange);
712 return pRamRange;
713 }
714 return NULL;
715}
716
717
718/**
719 * Slow worker for pgmPhysGetPage.
720 *
721 * @copydoc pgmPhysGetPage
722 */
723DECLHIDDEN(PPGMPAGE) pgmPhysGetPageSlow(PVMCC pVM, RTGCPHYS GCPhys)
724{
725 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
726
727 uint32_t idxEnd = RT_MIN(pVM->pgm.s.RamRangeUnion.cLookupEntries, RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup));
728 uint32_t idxStart = 0;
729 for (;;)
730 {
731 uint32_t idxLookup = idxStart + (idxEnd - idxStart) / 2;
732 RTGCPHYS const GCPhysEntryFirst = PGMRAMRANGELOOKUPENTRY_GET_FIRST(pVM->pgm.s.aRamRangeLookup[idxLookup]);
733 RTGCPHYS const cbEntryMinus1 = pVM->pgm.s.aRamRangeLookup[idxLookup].GCPhysLast - GCPhysEntryFirst;
734 RTGCPHYS const off = GCPhys - GCPhysEntryFirst;
735 if (off <= cbEntryMinus1)
736 {
737 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]);
738 AssertReturn(idRamRange < RT_ELEMENTS(pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges), NULL);
739 PPGMRAMRANGE const pRamRange = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges[idRamRange];
740 AssertReturn(pRamRange, NULL);
741 pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRangesTlb[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRamRange;
742
743 /* Get the page. */
744 Assert(off < pRamRange->cb);
745 RTGCPHYS const idxPage = off >> GUEST_PAGE_SHIFT;
746#ifdef IN_RING0
747 AssertReturn(idxPage < pVM->pgmr0.s.acRamRangePages[idRamRange], NULL);
748#endif
749 return &pRamRange->aPages[idxPage];
750 }
751 if (RTGCPHYS_IS_NEGATIVE(off))
752 {
753 if (idxStart < idxLookup)
754 idxEnd = idxLookup;
755 else
756 break;
757 }
758 else
759 {
760 idxLookup += 1;
761 if (idxLookup < idxEnd)
762 idxStart = idxLookup;
763 else
764 break;
765 }
766 }
767 return NULL;
768}
769
770
771/**
772 * Slow worker for pgmPhysGetPageEx.
773 *
774 * @copydoc pgmPhysGetPageEx
775 */
776DECLHIDDEN(int) pgmPhysGetPageExSlow(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
777{
778 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
779
780 uint32_t idxEnd = RT_MIN(pVM->pgm.s.RamRangeUnion.cLookupEntries, RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup));
781 uint32_t idxStart = 0;
782 for (;;)
783 {
784 uint32_t idxLookup = idxStart + (idxEnd - idxStart) / 2;
785 RTGCPHYS const GCPhysEntryFirst = PGMRAMRANGELOOKUPENTRY_GET_FIRST(pVM->pgm.s.aRamRangeLookup[idxLookup]);
786 RTGCPHYS const cbEntryMinus1 = pVM->pgm.s.aRamRangeLookup[idxLookup].GCPhysLast - GCPhysEntryFirst;
787 RTGCPHYS const off = GCPhys - GCPhysEntryFirst;
788 if (off <= cbEntryMinus1)
789 {
790 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]);
791 AssertReturn(idRamRange < RT_ELEMENTS(pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges), VERR_PGM_PHYS_RAM_LOOKUP_IPE);
792 PPGMRAMRANGE const pRamRange = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges[idRamRange];
793 AssertReturn(pRamRange, VERR_PGM_PHYS_RAM_LOOKUP_IPE);
794 pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRangesTlb[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRamRange;
795
796 /* Get the page. */
797 Assert(off < pRamRange->cb);
798 RTGCPHYS const idxPage = off >> GUEST_PAGE_SHIFT;
799#ifdef IN_RING0
800 AssertReturn(idxPage < pVM->pgmr0.s.acRamRangePages[idRamRange], VERR_PGM_PHYS_RAM_LOOKUP_IPE);
801#endif
802 *ppPage = &pRamRange->aPages[idxPage];
803 return VINF_SUCCESS;
804 }
805 if (RTGCPHYS_IS_NEGATIVE(off))
806 {
807 if (idxStart < idxLookup)
808 idxEnd = idxLookup;
809 else
810 break;
811 }
812 else
813 {
814 idxLookup += 1;
815 if (idxLookup < idxEnd)
816 idxStart = idxLookup;
817 else
818 break;
819 }
820 }
821
822 *ppPage = NULL;
823 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
824}
825
826
827/**
828 * Slow worker for pgmPhysGetPageAndRangeEx.
829 *
830 * @copydoc pgmPhysGetPageAndRangeEx
831 */
832DECLHIDDEN(int) pgmPhysGetPageAndRangeExSlow(PVMCC pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
833{
834 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,RamRangeTlbMisses));
835
836 uint32_t idxEnd = RT_MIN(pVM->pgm.s.RamRangeUnion.cLookupEntries, RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup));
837 uint32_t idxStart = 0;
838 for (;;)
839 {
840 uint32_t idxLookup = idxStart + (idxEnd - idxStart) / 2;
841 RTGCPHYS const GCPhysEntryFirst = PGMRAMRANGELOOKUPENTRY_GET_FIRST(pVM->pgm.s.aRamRangeLookup[idxLookup]);
842 RTGCPHYS const cbEntryMinus1 = pVM->pgm.s.aRamRangeLookup[idxLookup].GCPhysLast - GCPhysEntryFirst;
843 RTGCPHYS const off = GCPhys - GCPhysEntryFirst;
844 if (off <= cbEntryMinus1)
845 {
846 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]);
847 AssertReturn(idRamRange < RT_ELEMENTS(pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges), VERR_PGM_PHYS_RAM_LOOKUP_IPE);
848 PPGMRAMRANGE const pRamRange = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges[idRamRange];
849 AssertReturn(pRamRange, VERR_PGM_PHYS_RAM_LOOKUP_IPE);
850 pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRangesTlb[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRamRange;
851
852 /* Get the page. */
853 Assert(off < pRamRange->cb);
854 RTGCPHYS const idxPage = off >> GUEST_PAGE_SHIFT;
855#ifdef IN_RING0
856 AssertReturn(idxPage < pVM->pgmr0.s.acRamRangePages[idRamRange], VERR_PGM_PHYS_RAM_LOOKUP_IPE);
857#endif
858 *ppRam = pRamRange;
859 *ppPage = &pRamRange->aPages[idxPage];
860 return VINF_SUCCESS;
861 }
862 if (RTGCPHYS_IS_NEGATIVE(off))
863 {
864 if (idxStart < idxLookup)
865 idxEnd = idxLookup;
866 else
867 break;
868 }
869 else
870 {
871 idxLookup += 1;
872 if (idxLookup < idxEnd)
873 idxStart = idxLookup;
874 else
875 break;
876 }
877 }
878
879 *ppRam = NULL;
880 *ppPage = NULL;
881 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
882}
883
884
885/**
886 * Slow worker for pgmPhysGetPageAndRangeExLockless.
887 *
888 * @copydoc pgmPhysGetPageAndRangeExLockless
889 */
890DECLHIDDEN(int) pgmPhysGetPageAndRangeExSlowLockless(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys,
891 PGMPAGE volatile **ppPage, PGMRAMRANGE volatile **ppRam)
892{
893 STAM_REL_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,RamRangeTlbMisses));
894
895 PGM::PGMRAMRANGEGENANDLOOKUPCOUNT RamRangeUnion;
896 RamRangeUnion.u64Combined = ASMAtomicUoReadU64(&pVM->pgm.s.RamRangeUnion.u64Combined);
897
898 uint32_t idxEnd = RT_MIN(RamRangeUnion.cLookupEntries, RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup));
899 uint32_t idxStart = 0;
900 for (;;)
901 {
902 /* Read the entry as atomically as possible: */
903 uint32_t idxLookup = idxStart + (idxEnd - idxStart) / 2;
904 PGMRAMRANGELOOKUPENTRY Entry;
905#if (RTASM_HAVE_READ_U128+0) & 1
906 Entry.u128Normal = ASMAtomicUoReadU128U(&pVM->pgm.s.aRamRangeLookup[idxLookup].u128Volatile);
907#else
908 Entry.u128Normal.s.Lo = pVM->pgm.s.aRamRangeLookup[idxLookup].u128Volatile.s.Lo;
909 Entry.u128Normal.s.Hi = pVM->pgm.s.aRamRangeLookup[idxLookup].u128Volatile.s.Hi;
910 ASMCompilerBarrier(); /*paranoia^2*/
911 if (RT_LIKELY(Entry.u128Normal.s.Lo == pVM->pgm.s.aRamRangeLookup[idxLookup].u128Volatile.s.Lo))
912 { /* likely */ }
913 else
914 break;
915#endif
916
917 /* Check how GCPhys relates to the entry: */
918 RTGCPHYS const GCPhysEntryFirst = PGMRAMRANGELOOKUPENTRY_GET_FIRST(Entry);
919 RTGCPHYS const cbEntryMinus1 = Entry.GCPhysLast - GCPhysEntryFirst;
920 RTGCPHYS const off = GCPhys - GCPhysEntryFirst;
921 if (off <= cbEntryMinus1)
922 {
923 /* We seem to have a match. If, however, anything doesn't match up
924 bail and redo owning the lock. No asserting here as we may be
925 racing removal/insertion. */
926 if (!RTGCPHYS_IS_NEGATIVE(off))
927 {
928 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(Entry);
929 if (idRamRange < RT_ELEMENTS(pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges))
930 {
931 PPGMRAMRANGE const pRamRange = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges[idRamRange];
932 if (pRamRange)
933 {
934 if ( pRamRange->GCPhys == GCPhysEntryFirst
935 && pRamRange->cb == cbEntryMinus1 + 1U)
936 {
937 RTGCPHYS const idxPage = off >> GUEST_PAGE_SHIFT;
938#ifdef IN_RING0
939 if (idxPage < pVM->pgmr0.s.acRamRangePages[idRamRange])
940#endif
941 {
942 pVCpu->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRangesTlb[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRamRange;
943 *ppRam = pRamRange;
944 *ppPage = &pRamRange->aPages[idxPage];
945 return VINF_SUCCESS;
946 }
947 }
948 }
949 }
950 }
951 break;
952 }
953 if (RTGCPHYS_IS_NEGATIVE(off))
954 {
955 if (idxStart < idxLookup)
956 idxEnd = idxLookup;
957 else
958 break;
959 }
960 else
961 {
962 idxLookup += 1;
963 if (idxLookup < idxEnd)
964 idxStart = idxLookup;
965 else
966 break;
967 }
968 }
969
970 /*
971 * If we get down here, we do the lookup again but while owning the PGM lock.
972 */
973 *ppRam = NULL;
974 *ppPage = NULL;
975 STAM_REL_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,RamRangeTlbLocking));
976
977 PGM_LOCK_VOID(pVM);
978 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, (PPGMPAGE *)ppPage, (PPGMRAMRANGE *)ppRam);
979 PGM_UNLOCK(pVM);
980
981 PGMRAMRANGE volatile * const pRam = *ppRam;
982 if (pRam)
983 pVCpu->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRangesTlb[PGM_RAMRANGE_TLB_IDX(GCPhys)] = (PPGMRAMRANGE)pRam;
984 return rc;
985}
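/* Reader's note, not part of the upstream file: the lockless worker above is
   an optimistic lookup.  It snapshots the generation/count union and each
   128-bit lookup entry as atomically as the host allows, validates that the
   resolved range still matches the entry (same GCPhys and size), and on any
   mismatch or race it falls back to repeating the lookup while holding the
   PGM lock.  The fallbacks are counted by the RamRangeTlbLocking statistic,
   so frequent fallbacks are visible in the release stats. */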
986
987
988/**
989 * Common worker for pgmR3PhysAllocateRamRange, PGMR0PhysAllocateRamRangeReq,
990 * and pgmPhysMmio2RegisterWorker2.
991 */
992DECLHIDDEN(int) pgmPhysRamRangeAllocCommon(PVMCC pVM, uint32_t cPages, uint32_t fFlags, uint32_t *pidNewRange)
993{
994
995 /*
996 * Allocate the RAM range structure and map it into ring-3.
997 */
998 size_t const cbRamRange = RT_ALIGN_Z(RT_UOFFSETOF_DYN(PGMRAMRANGE, aPages[cPages]), HOST_PAGE_SIZE);
999#ifdef IN_RING0
1000 RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ;
1001 int rc = RTR0MemObjAllocPage(&hMemObj, cbRamRange, false /*fExecutable*/);
1002#else
1003 PPGMRAMRANGE pRamRange;
1004 int rc = SUPR3PageAlloc(cbRamRange >> HOST_PAGE_SHIFT, 0 /*fFlags*/, (void **)&pRamRange);
1005#endif
1006 if (RT_SUCCESS(rc))
1007 {
1008 /* Zero the memory and do basic range init before mapping it into userland. */
1009#ifdef IN_RING0
1010 PPGMRAMRANGE const pRamRange = (PPGMRAMRANGE)RTR0MemObjAddress(hMemObj);
1011 if (!RTR0MemObjWasZeroInitialized(hMemObj))
1012#endif
1013 RT_BZERO(pRamRange, cbRamRange);
1014
1015 pRamRange->GCPhys = NIL_RTGCPHYS;
1016 pRamRange->cb = (RTGCPHYS)cPages << GUEST_PAGE_SHIFT;
1017 pRamRange->GCPhysLast = NIL_RTGCPHYS;
1018 pRamRange->fFlags = fFlags;
1019 pRamRange->idRange = UINT32_MAX / 2;
1020
1021#ifdef IN_RING0
1022 /* Map it into userland. */
1023 RTR0MEMOBJ hMapObj = NIL_RTR0MEMOBJ;
1024 rc = RTR0MemObjMapUser(&hMapObj, hMemObj, (RTR3PTR)-1, 0 /*uAlignment*/,
1025 RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
1026 if (RT_SUCCESS(rc))
1027#endif
1028 {
1029 /*
1030 * Grab the lock (unlikely to fail or block as caller typically owns it already).
1031 */
1032 rc = PGM_LOCK(pVM);
1033 if (RT_SUCCESS(rc))
1034 {
1035 /*
1036 * Allocate a range ID.
1037 */
1038 uint32_t idRamRange = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.idRamRangeMax + 1;
1039 if (idRamRange != 0 && idRamRange < RT_ELEMENTS(pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges))
1040 {
1041#ifdef IN_RING0
1042 if (pVM->pgmr0.s.apRamRanges[idRamRange] == NULL)
1043#endif
1044 {
1045 if (pVM->pgm.s.apRamRanges[idRamRange] == NIL_RTR3PTR)
1046 {
1047 /*
1048 * Commit it.
1049 */
1050#ifdef IN_RING0
1051 pVM->pgmr0.s.apRamRanges[idRamRange] = pRamRange;
1052 pVM->pgmr0.s.acRamRangePages[idRamRange] = cPages;
1053 pVM->pgmr0.s.ahRamRangeMemObjs[idRamRange] = hMemObj;
1054 pVM->pgmr0.s.ahRamRangeMapObjs[idRamRange] = hMapObj;
1055 pVM->pgmr0.s.idRamRangeMax = idRamRange;
1056#endif
1057
1058 pVM->pgm.s.idRamRangeMax = idRamRange;
1059#ifdef IN_RING0
1060 pVM->pgm.s.apRamRanges[idRamRange] = RTR0MemObjAddressR3(hMapObj);
1061#else
1062 pVM->pgm.s.apRamRanges[idRamRange] = pRamRange;
1063#endif
1064
1065 pRamRange->idRange = idRamRange;
1066 *pidNewRange = idRamRange;
1067
1068 PGM_UNLOCK(pVM);
1069 return VINF_SUCCESS;
1070 }
1071 }
1072
1073 /*
1074 * Bail out.
1075 */
1076 rc = VERR_INTERNAL_ERROR_5;
1077 }
1078 else
1079 rc = VERR_PGM_TOO_MANY_RAM_RANGES;
1080 PGM_UNLOCK(pVM);
1081 }
1082#ifdef IN_RING0
1083 RTR0MemObjFree(hMapObj, false /*fFreeMappings*/);
1084#endif
1085 }
1086#ifdef IN_RING0
1087 RTR0MemObjFree(hMemObj, true /*fFreeMappings*/);
1088#else
1089 SUPR3PageFree(pRamRange, cbRamRange >> HOST_PAGE_SHIFT);
1090#endif
1091 }
1092 *pidNewRange = UINT32_MAX;
1093 return rc;
1094}
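/* Reader's note, not part of the upstream file: pgmPhysRamRangeAllocCommon()
   shows the dual-context allocation pattern used for these structures.  When
   built for ring-0, the backing memory is an RTR0MEMOBJ that gets mapped into
   the ring-3 address space with RTR0MemObjMapUser(); when built for ring-3,
   SUPR3PageAlloc() is used directly.  Either way the ring-3 visible pointer
   ends up in pVM->pgm.s.apRamRanges[idRamRange], while the ring-0 pointer and
   memory object handles (if any) are kept in the pgmr0 side of the VM
   structure. */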
1095
1096
1097#ifdef IN_RING0
1098/**
1099 * This is called during VM initialization to allocate a RAM range.
1100 *
1101 * The range is not entered into the lookup table, that is something the caller
1102 * has to do. The PGMPAGE entries are zero'ed, but otherwise uninitialized.
1103 *
1104 * @returns VBox status code.
1105 * @param pGVM Pointer to the global VM structure.
1106 * @param pReq Where to get the parameters and return the range ID.
1107 * @thread EMT(0)
1108 */
1109VMMR0_INT_DECL(int) PGMR0PhysAllocateRamRangeReq(PGVM pGVM, PPGMPHYSALLOCATERAMRANGEREQ pReq)
1110{
1111 /*
1112 * Validate input (ASSUME pReq is a copy and can't be modified by ring-3
1113 * while we're here).
1114 */
1115 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
1116 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x < %#zx\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
1117
1118 AssertReturn(pReq->cbGuestPage == GUEST_PAGE_SIZE, VERR_INCOMPATIBLE_CONFIG);
1119
1120 AssertReturn(pReq->cGuestPages > 0, VERR_OUT_OF_RANGE);
1121 AssertReturn(pReq->cGuestPages <= PGM_MAX_PAGES_PER_RAM_RANGE, VERR_OUT_OF_RANGE);
1122
1123 AssertMsgReturn(!(pReq->fFlags & ~(uint32_t)PGM_RAM_RANGE_FLAGS_VALID_MASK), ("fFlags=%#RX32\n", pReq->fFlags),
1124 VERR_INVALID_FLAGS);
1125
1126 /** @todo better VM state guard, enmVMState is ring-3 writable. */
1127 VMSTATE const enmState = pGVM->enmVMState;
1128 AssertMsgReturn(enmState == VMSTATE_CREATING, ("enmState=%d\n", enmState), VERR_VM_INVALID_VM_STATE);
1129 VM_ASSERT_EMT0_RETURN(pGVM, VERR_VM_THREAD_NOT_EMT);
1130
1131 /*
1132 * Call common worker.
1133 */
1134 return pgmPhysRamRangeAllocCommon(pGVM, pReq->cGuestPages, pReq->fFlags, &pReq->idNewRange);
1135}
1136#endif /* IN_RING0 */
1137
1138
1139/**
1140 * Frees a RAM range.
1141 *
1142 * This is not a typical occurrence. Currently only used for a special MMIO2
1143 * saved state compatibility scenario involving PCNet and state saved before
1144 * VBox v4.3.6.
1145 */
1146static int pgmPhysRamRangeFree(PVMCC pVM, PPGMRAMRANGE pRamRange)
1147{
1148 /*
1149 * Some basic input validation.
1150 */
1151 AssertPtrReturn(pRamRange, VERR_INVALID_PARAMETER);
1152 uint32_t const idRamRange = ASMAtomicReadU32(&pRamRange->idRange);
1153 ASMCompilerBarrier();
1154 AssertReturn(idRamRange < RT_ELEMENTS(pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges), VERR_INVALID_PARAMETER);
1155 AssertReturn(pRamRange == pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges[idRamRange], VERR_INVALID_PARAMETER);
1156 AssertReturn(pRamRange->GCPhys == NIL_RTGCPHYS, VERR_RESOURCE_BUSY);
1157
1158 /*
1159 * Kill the range pointers and associated data.
1160 */
1161 pVM->pgm.s.apRamRanges[idRamRange] = NIL_RTR3PTR;
1162#ifdef IN_RING0
1163 pVM->pgmr0.s.apRamRanges[idRamRange] = NULL;
1164#endif
1165
1166 /*
1167 * Zap the pages and other RAM ranges properties to ensure there aren't any
1168 * stale references to anything hanging around should the freeing go awry.
1169 */
1170#ifdef IN_RING0
1171 uint32_t const cPages = pVM->pgmr0.s.acRamRangePages[idRamRange];
1172 pVM->pgmr0.s.acRamRangePages[idRamRange] = 0;
1173#else
1174 uint32_t const cPages = pRamRange->cb >> GUEST_PAGE_SHIFT;
1175#endif
1176 RT_BZERO(pRamRange->aPages, cPages * sizeof(pRamRange->aPages[0]));
1177
1178 pRamRange->fFlags = UINT32_MAX;
1179 pRamRange->cb = NIL_RTGCPHYS;
1180 pRamRange->pbR3 = NIL_RTR3PTR;
1181 pRamRange->pszDesc = NIL_RTR3PTR;
1182 pRamRange->paLSPages = NIL_RTR3PTR;
1183 pRamRange->idRange = UINT32_MAX / 8;
1184
1185 /*
1186 * Free the RAM range itself.
1187 */
1188#ifdef IN_RING0
1189 Assert(pVM->pgmr0.s.ahRamRangeMapObjs[idRamRange] != NIL_RTR0MEMOBJ);
1190 int rc = RTR0MemObjFree(pVM->pgmr0.s.ahRamRangeMapObjs[idRamRange], true /*fFreeMappings*/);
1191 if (RT_SUCCESS(rc))
1192 {
1193 pVM->pgmr0.s.ahRamRangeMapObjs[idRamRange] = NIL_RTR0MEMOBJ;
1194 rc = RTR0MemObjFree(pVM->pgmr0.s.ahRamRangeMemObjs[idRamRange], true /*fFreeMappings*/);
1195 if (RT_SUCCESS(rc))
1196 pVM->pgmr0.s.ahRamRangeMemObjs[idRamRange] = NIL_RTR0MEMOBJ;
1197 }
1198#else
1199 size_t const cbRamRange = RT_ALIGN_Z(RT_UOFFSETOF_DYN(PGMRAMRANGE, aPages[cPages]), HOST_PAGE_SIZE);
1200 int rc = SUPR3PageFree(pRamRange, cbRamRange >> HOST_PAGE_SHIFT);
1201#endif
1202
1203 /*
1204 * Decrease the max ID if removal was successful and this was the final
1205 * RAM range entry.
1206 */
1207 if ( RT_SUCCESS(rc)
1208 && idRamRange == pVM->CTX_EXPR(pgm, pgmr0, pgm).s.idRamRangeMax)
1209 {
1210 pVM->pgm.s.idRamRangeMax = idRamRange - 1;
1211#ifdef IN_RING0
1212 pVM->pgmr0.s.idRamRangeMax = idRamRange - 1;
1213#endif
1214 }
1215
1216 /*
1217 * Make sure the RAM range TLB does not contain any stale pointers to this range.
1218 */
1219 pgmPhysInvalidRamRangeTlbs(pVM);
1220 return rc;
1221}
1222
1223
1224
1225/*********************************************************************************************************************************
1226* MMIO2 *
1227*********************************************************************************************************************************/
1228
1229/**
1230 * Calculates the number of chunks
1231 *
1232 * @returns Number of registration chunks needed.
1233 * @param cb The size of the MMIO/MMIO2 range.
1234 * @param pcPagesPerChunk Where to return the number of guest pages tracked by
1235 * each chunk. Optional.
1236 */
1237DECLHIDDEN(uint16_t) pgmPhysMmio2CalcChunkCount(RTGCPHYS cb, uint32_t *pcPagesPerChunk)
1238{
1239 /*
1240 * This is the same calculation as PGMR3PhysRegisterRam does, except we'll be
1241 * needing a few extra bytes for the PGMREGMMIO2RANGE structure.
1242 *
1243 * Note! In addition, we've got a 24 bit sub-page range for MMIO2 ranges, leaving
1244 * us with an absolute maximum of 16777215 pages per chunk (close to 64 GB).
1245 */
1246 AssertCompile(PGM_MAX_PAGES_PER_RAM_RANGE < _16M);
1247 uint32_t const cPagesPerChunk = PGM_MAX_PAGES_PER_RAM_RANGE;
1248
1249 if (pcPagesPerChunk)
1250 *pcPagesPerChunk = cPagesPerChunk;
1251
1252 /* Calc the number of chunks we need. */
1253 RTGCPHYS const cGuestPages = cb >> GUEST_PAGE_SHIFT;
1254 uint16_t cChunks = (uint16_t)((cGuestPages + cPagesPerChunk - 1) / cPagesPerChunk);
1255#ifdef IN_RING3
1256 AssertRelease((RTGCPHYS)cChunks * cPagesPerChunk >= cGuestPages);
1257#else
1258 AssertReturn((RTGCPHYS)cChunks * cPagesPerChunk >= cGuestPages, 0);
1259#endif
1260 return cChunks;
1261}
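/* Illustrative sketch, not part of the upstream file: the chunk count is a
   plain ceiling division.  Assuming, purely for this example, a chunk capacity
   of 1M guest pages (4 GiB), a 6 GiB MMIO2 region (1.5M guest pages) needs two
   chunks, the first tracking 1M pages and the second the remaining 512K. */
#if 0 /* example only */
    uint32_t cPagesPerChunk = 0;
    uint16_t cChunks = pgmPhysMmio2CalcChunkCount(6 * (RTGCPHYS)_1G, &cPagesPerChunk);
    /* cChunks == (cGuestPages + cPagesPerChunk - 1) / cPagesPerChunk */
#endif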
1262
1263
1264/**
1265 * Worker for PGMR3PhysMmio2Register and PGMR0PhysMmio2RegisterReq.
1266 *
1267 * (The caller already knows which MMIO2 region ID will be assigned and how many
1268 * chunks will be used, so no output parameters required.)
1269 */
1270DECLHIDDEN(int) pgmPhysMmio2RegisterWorker(PVMCC pVM, uint32_t const cGuestPages, uint8_t const idMmio2,
1271 const uint8_t cChunks, PPDMDEVINSR3 const pDevIns, uint8_t
1272 const iSubDev, uint8_t const iRegion, uint32_t const fFlags)
1273{
1274 /*
1275 * Get the number of pages per chunk.
1276 */
1277 uint32_t cGuestPagesPerChunk;
1278 AssertReturn(pgmPhysMmio2CalcChunkCount((RTGCPHYS)cGuestPages << GUEST_PAGE_SHIFT, &cGuestPagesPerChunk) == cChunks,
1279 VERR_PGM_PHYS_MMIO_EX_IPE);
1280 Assert(idMmio2 != 0);
1281
1282 /*
1283 * The first thing we need to do is the allocate the memory that will be
1284 * backing the whole range.
1285 */
1286 RTGCPHYS const cbMmio2Backing = (RTGCPHYS)cGuestPages << GUEST_PAGE_SHIFT;
1287 uint32_t const cHostPages = (cbMmio2Backing + HOST_PAGE_SIZE - 1U) >> HOST_PAGE_SHIFT;
1288 size_t const cbMmio2Aligned = cHostPages << HOST_PAGE_SHIFT;
1289 R3PTRTYPE(uint8_t *) pbMmio2BackingR3 = NIL_RTR3PTR;
1290#ifdef IN_RING0
1291 RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ;
1292# ifndef VBOX_WITH_LINEAR_HOST_PHYS_MEM
1293 int rc = RTR0MemObjAllocPage(&hMemObj, cbMmio2Aligned, false /*fExecutable*/);
1294# else
1295 int rc = RTR0MemObjAllocPhysNC(&hMemObj, cbMmio2Aligned, NIL_RTHCPHYS);
1296# endif
1297#else /* !IN_RING0 */
1298 AssertReturn(PGM_IS_IN_NEM_MODE(pVM), VERR_INTERNAL_ERROR_4);
1299 int rc = SUPR3PageAlloc(cHostPages, pVM->pgm.s.fUseLargePages ? SUP_PAGE_ALLOC_F_LARGE_PAGES : 0, (void **)&pbMmio2BackingR3);
1300#endif /* !IN_RING0 */
1301 if (RT_SUCCESS(rc))
1302 {
1303 /*
1304 * Make sure it is initialized to zeros before it's mapped to userland.
1305 */
1306#ifdef IN_RING0
1307# ifndef VBOX_WITH_LINEAR_HOST_PHYS_MEM
1308 uint8_t *pbMmio2BackingR0 = (uint8_t *)RTR0MemObjAddress(hMemObj);
1309 AssertPtr(pbMmio2BackingR0);
1310# endif
1311 rc = RTR0MemObjZeroInitialize(hMemObj, false /*fForce*/);
1312 AssertRCReturnStmt(rc, RTR0MemObjFree(hMemObj, true /*fFreeMappings*/), rc);
1313#else
1314 RT_BZERO(pbMmio2BackingR3, cbMmio2Aligned);
1315#endif
1316
1317#ifdef IN_RING0
1318 /*
1319 * Map it into ring-3.
1320 */
1321 RTR0MEMOBJ hMapObj = NIL_RTR0MEMOBJ;
1322 rc = RTR0MemObjMapUser(&hMapObj, hMemObj, (RTR3PTR)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
1323 if (RT_SUCCESS(rc))
1324 {
1325 pbMmio2BackingR3 = RTR0MemObjAddressR3(hMapObj);
1326#endif
1327
1328 /*
1329 * Create the MMIO2 registration records and associated RAM ranges.
1330 * The RAM range allocation may fail here.
1331 */
1332 RTGCPHYS offMmio2Backing = 0;
1333 uint32_t cGuestPagesLeft = cGuestPages;
1334 for (uint32_t iChunk = 0, idx = idMmio2 - 1; iChunk < cChunks; iChunk++, idx++)
1335 {
1336 uint32_t const cPagesTrackedByChunk = RT_MIN(cGuestPagesLeft, cGuestPagesPerChunk);
1337
1338 /*
1339 * Allocate the RAM range for this chunk.
1340 */
1341 uint32_t idRamRange = UINT32_MAX;
1342 rc = pgmPhysRamRangeAllocCommon(pVM, cPagesTrackedByChunk, PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO_EX, &idRamRange);
1343 if (RT_FAILURE(rc))
1344 {
1345 /* We only zap the pointers to the backing storage.
1346 PGMR3Term and friends will clean up the RAM ranges and stuff. */
1347 while (iChunk-- > 0)
1348 {
1349 idx--;
1350#ifdef IN_RING0
1351 pVM->pgmr0.s.acMmio2RangePages[idx] = 0;
1352# ifndef VBOX_WITH_LINEAR_HOST_PHYS_MEM
1353 pVM->pgmr0.s.apbMmio2Backing[idx] = NULL;
1354# endif
1355#endif
1356
1357 PPGMREGMMIO2RANGE const pMmio2 = &pVM->pgm.s.aMmio2Ranges[idx];
1358 pMmio2->pbR3 = NIL_RTR3PTR;
1359
1360 PPGMRAMRANGE const pRamRange = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apMmio2RamRanges[idx];
1361 pRamRange->pbR3 = NIL_RTR3PTR;
1362 RT_BZERO(&pRamRange->aPages[0], sizeof(pRamRange->aPages) * cGuestPagesPerChunk);
1363 }
1364 break;
1365 }
1366
1367 pVM->pgm.s.apMmio2RamRanges[idx] = pVM->pgm.s.apRamRanges[idRamRange];
1368#ifdef IN_RING0
1369 pVM->pgmr0.s.apMmio2RamRanges[idx] = pVM->pgmr0.s.apRamRanges[idRamRange];
1370 pVM->pgmr0.s.acMmio2RangePages[idx] = cPagesTrackedByChunk;
1371#endif
1372
1373 /* Initialize the RAM range. */
1374 PPGMRAMRANGE const pRamRange = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges[idRamRange];
1375 pRamRange->pbR3 = pbMmio2BackingR3 + offMmio2Backing;
1376 uint32_t iDstPage = cPagesTrackedByChunk;
1377#ifdef IN_RING0
1378 AssertRelease(HOST_PAGE_SHIFT == GUEST_PAGE_SHIFT);
1379 while (iDstPage-- > 0)
1380 {
1381 RTHCPHYS HCPhys = RTR0MemObjGetPagePhysAddr(hMemObj, iDstPage + (offMmio2Backing >> HOST_PAGE_SHIFT));
1382 Assert(HCPhys != NIL_RTHCPHYS);
1383 PGM_PAGE_INIT(&pRamRange->aPages[iDstPage], HCPhys, PGM_MMIO2_PAGEID_MAKE(idMmio2, iDstPage),
1384 PGMPAGETYPE_MMIO2, PGM_PAGE_STATE_ALLOCATED);
1385 }
1386#else
1387 Assert(PGM_IS_IN_NEM_MODE(pVM));
1388 while (iDstPage-- > 0)
1389 PGM_PAGE_INIT(&pRamRange->aPages[iDstPage], UINT64_C(0x0000ffffffff0000),
1390 PGM_MMIO2_PAGEID_MAKE(idMmio2, iDstPage),
1391 PGMPAGETYPE_MMIO2, PGM_PAGE_STATE_ALLOCATED);
1392#endif
1393
1394 /*
1395 * Initialize the MMIO2 registration structure.
1396 */
1397 PPGMREGMMIO2RANGE const pMmio2 = &pVM->pgm.s.aMmio2Ranges[idx];
1398 pMmio2->pDevInsR3 = pDevIns;
1399 pMmio2->pbR3 = pbMmio2BackingR3 + offMmio2Backing;
1400 pMmio2->fFlags = 0;
1401 if (iChunk == 0)
1402 pMmio2->fFlags |= PGMREGMMIO2RANGE_F_FIRST_CHUNK;
1403 if (iChunk + 1 == cChunks)
1404 pMmio2->fFlags |= PGMREGMMIO2RANGE_F_LAST_CHUNK;
1405 if (fFlags & PGMPHYS_MMIO2_FLAGS_TRACK_DIRTY_PAGES)
1406 pMmio2->fFlags |= PGMREGMMIO2RANGE_F_TRACK_DIRTY_PAGES;
1407
1408 pMmio2->iSubDev = iSubDev;
1409 pMmio2->iRegion = iRegion;
1410 pMmio2->idSavedState = UINT8_MAX;
1411 pMmio2->idMmio2 = idMmio2 + iChunk;
1412 pMmio2->idRamRange = idRamRange;
1413 Assert(pMmio2->idRamRange == idRamRange);
1414 pMmio2->GCPhys = NIL_RTGCPHYS;
1415 pMmio2->cbReal = (RTGCPHYS)cPagesTrackedByChunk << GUEST_PAGE_SHIFT;
1416 pMmio2->pPhysHandlerR3 = NIL_RTR3PTR; /* Pre-alloc is done by ring-3 caller. */
1417 pMmio2->paLSPages = NIL_RTR3PTR;
1418
1419#if defined(IN_RING0) && !defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
1420 pVM->pgmr0.s.apbMmio2Backing[idx] = &pbMmio2BackingR0[offMmio2Backing];
1421#endif
1422
1423 /* Advance */
1424 cGuestPagesLeft -= cPagesTrackedByChunk;
1425 offMmio2Backing += (RTGCPHYS)cPagesTrackedByChunk << GUEST_PAGE_SHIFT;
1426 } /* chunk alloc loop */
1427 Assert(cGuestPagesLeft == 0 || RT_FAILURE_NP(rc));
1428 if (RT_SUCCESS(rc))
1429 {
1430 /*
1431 * Account for pages and ring-0 memory objects.
1432 */
1433 pVM->pgm.s.cAllPages += cGuestPages;
1434 pVM->pgm.s.cPrivatePages += cGuestPages;
1435#ifdef IN_RING0
1436 pVM->pgmr0.s.ahMmio2MemObjs[idMmio2 - 1] = hMemObj;
1437 pVM->pgmr0.s.ahMmio2MapObjs[idMmio2 - 1] = hMapObj;
1438#endif
1439 pVM->pgm.s.cMmio2Ranges = idMmio2 + cChunks - 1U;
1440
1441 /*
1442 * Done!
1443 */
1444 return VINF_SUCCESS;
1445 }
1446
1447 /*
1448 * Bail.
1449 */
1450#ifdef IN_RING0
1451 RTR0MemObjFree(hMapObj, true /*fFreeMappings*/);
1452 }
1453 RTR0MemObjFree(hMemObj, true /*fFreeMappings*/);
1454#else
1455 SUPR3PageFree(pbMmio2BackingR3, cHostPages);
1456#endif
1457 }
1458 else
1459 LogRel(("pgmPhysMmio2RegisterWorker: Failed to allocate %RGp bytes of MMIO2 backing memory: %Rrc\n", cbMmio2Aligned, rc));
1460 return rc;
1461}
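/* Reader's note, not part of the upstream file: a registration that needs more
   than one chunk ends up as cChunks consecutive PGMREGMMIO2RANGE entries with
   consecutive idMmio2 values.  Only the first entry carries
   PGMREGMMIO2RANGE_F_FIRST_CHUNK and only the last carries
   PGMREGMMIO2RANGE_F_LAST_CHUNK, which is exactly what
   pgmPhysMmio2DeregisterWorker() below re-checks before tearing a
   registration down. */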
1462
1463
1464#ifdef IN_RING0
1465/**
1466 * This is called during VM initialization to create an MMIO2 range.
1467 *
1468 * This does everything except setting the PGMRAMRANGE::pszDesc to a non-zero
1469 * value and preallocating the access handler for dirty bitmap tracking.
1470 *
1471 * The caller already knows which MMIO2 ID will be assigned to the registration
1472 * and how many chunks it requires, so there are no output fields in the request
1473 * structure.
1474 *
1475 * @returns VBox status code.
1476 * @param pGVM Pointer to the global VM structure.
1477 * @param pReq Where to get the parameters.
1478 * @thread EMT(0)
1479 */
1480VMMR0_INT_DECL(int) PGMR0PhysMmio2RegisterReq(PGVM pGVM, PPGMPHYSMMIO2REGISTERREQ pReq)
1481{
1482 /*
1483 * Validate input (ASSUME pReq is a copy and can't be modified by ring-3
1484 * while we're here).
1485 */
1486 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
1487 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x < %#zx\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
1488
1489 /** @todo better VM state guard, enmVMState is ring-3 writable. */
1490 VMSTATE const enmState = pGVM->enmVMState;
1491 AssertMsgReturn( enmState == VMSTATE_CREATING
1492 || enmState == VMSTATE_LOADING /* pre 4.3.6 state loading needs to ignore a MMIO2 region in PCNet. */
1493 , ("enmState=%d\n", enmState), VERR_VM_INVALID_VM_STATE);
1494 VM_ASSERT_EMT0_RETURN(pGVM, VERR_VM_THREAD_NOT_EMT);
1495
1496 AssertReturn(pReq->cbGuestPage == GUEST_PAGE_SIZE, VERR_INCOMPATIBLE_CONFIG);
1497 AssertReturn(GUEST_PAGE_SIZE == HOST_PAGE_SIZE, VERR_INCOMPATIBLE_CONFIG);
1498
1499 AssertReturn(pReq->cGuestPages > 0, VERR_OUT_OF_RANGE);
1500 AssertReturn(pReq->cGuestPages <= PGM_MAX_PAGES_PER_MMIO2_REGION, VERR_OUT_OF_RANGE);
1501 AssertReturn(pReq->cGuestPages <= (MM_MMIO_64_MAX >> GUEST_PAGE_SHIFT), VERR_OUT_OF_RANGE);
1502
1503 AssertMsgReturn(!(pReq->fFlags & ~PGMPHYS_MMIO2_FLAGS_VALID_MASK), ("fFlags=%#x\n", pReq->fFlags), VERR_INVALID_FLAGS);
1504
1505 AssertMsgReturn( pReq->cChunks > 0
1506 && pReq->cChunks < PGM_MAX_MMIO2_RANGES
1507 && pReq->cChunks == pgmPhysMmio2CalcChunkCount((RTGCPHYS)pReq->cGuestPages << GUEST_PAGE_SHIFT, NULL),
1508 ("cChunks=%#x cGuestPages=%#x\n", pReq->cChunks, pReq->cGuestPages),
1509 VERR_INVALID_PARAMETER);
1510
1511 AssertMsgReturn( pReq->idMmio2 != 0
1512 && pReq->idMmio2 <= PGM_MAX_MMIO2_RANGES
1513 && (unsigned)pReq->idMmio2 + pReq->cChunks - 1U <= PGM_MAX_MMIO2_RANGES,
1514 ("idMmio2=%#x cChunks=%#x\n", pReq->idMmio2, pReq->cChunks),
1515 VERR_INVALID_PARAMETER);
1516
1517 for (uint32_t iChunk = 0, idx = pReq->idMmio2 - 1; iChunk < pReq->cChunks; iChunk++, idx++)
1518 {
1519 AssertReturn(pGVM->pgmr0.s.ahMmio2MapObjs[idx] == NIL_RTR0MEMOBJ, VERR_INVALID_STATE);
1520 AssertReturn(pGVM->pgmr0.s.ahMmio2MemObjs[idx] == NIL_RTR0MEMOBJ, VERR_INVALID_STATE);
1521 AssertReturn(pGVM->pgmr0.s.apMmio2RamRanges[idx] == NULL, VERR_INVALID_STATE);
1522 }
1523
1524 /*
1525 * Make sure we're owning the PGM lock (caller should be), recheck idMmio2
1526 * and call the worker function we share with ring-3.
1527 */
1528 int rc = PGM_LOCK(pGVM);
1529 AssertRCReturn(rc, rc);
1530
1531 AssertReturnStmt(pGVM->pgm.s.cMmio2Ranges + 1U == pReq->idMmio2,
1532 PGM_UNLOCK(pGVM), VERR_INVALID_PARAMETER);
1533 AssertReturnStmt(pGVM->pgmr0.s.idRamRangeMax + 1U + pReq->cChunks <= RT_ELEMENTS(pGVM->pgmr0.s.apRamRanges),
1534 PGM_UNLOCK(pGVM), VERR_PGM_TOO_MANY_RAM_RANGES);
1535
1536 rc = pgmPhysMmio2RegisterWorker(pGVM, pReq->cGuestPages, pReq->idMmio2, pReq->cChunks,
1537 pReq->pDevIns, pReq->iSubDev, pReq->iRegion, pReq->fFlags);
1538
1539 PGM_UNLOCK(pGVM);
1540 return rc;
1541}
1542#endif /* IN_RING0 */
1543
1544
1545
1546/**
1547 * Worker for PGMR3PhysMmio2Deregister & PGMR0PhysMmio2DeregisterReq.
1548 */
1549DECLHIDDEN(int) pgmPhysMmio2DeregisterWorker(PVMCC pVM, uint8_t idMmio2, uint8_t cChunks, PPDMDEVINSR3 pDevIns)
1550{
1551 /*
1552 * The caller shall have made sure all this is true, but we check again
1553 * since we're paranoid.
1554 */
1555 AssertReturn(idMmio2 > 0 && idMmio2 <= RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges), VERR_INTERNAL_ERROR_2);
1556 AssertReturn(cChunks >= 1, VERR_INTERNAL_ERROR_2);
1557 uint8_t const idxFirst = idMmio2 - 1U;
1558 AssertReturn(idxFirst + cChunks <= pVM->pgm.s.cMmio2Ranges, VERR_INTERNAL_ERROR_2);
1559 uint32_t cGuestPages = 0; /* (For accounting and calculating backing memory size) */
1560 for (uint32_t iChunk = 0, idx = idxFirst; iChunk < cChunks; iChunk++, idx++)
1561 {
1562 AssertReturn(pVM->pgm.s.aMmio2Ranges[idx].pDevInsR3 == pDevIns, VERR_NOT_OWNER);
1563 AssertReturn(!(pVM->pgm.s.aMmio2Ranges[idx].fFlags & PGMREGMMIO2RANGE_F_MAPPED), VERR_RESOURCE_BUSY);
1564 AssertReturn(pVM->pgm.s.aMmio2Ranges[idx].GCPhys == NIL_RTGCPHYS, VERR_INVALID_STATE);
1565 if (iChunk == 0)
1566 AssertReturn(pVM->pgm.s.aMmio2Ranges[idx].fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK, VERR_INVALID_PARAMETER);
1567 else
1568 AssertReturn(!(pVM->pgm.s.aMmio2Ranges[idx].fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK), VERR_INVALID_PARAMETER);
1569 if (iChunk + 1 == cChunks)
1570 AssertReturn(pVM->pgm.s.aMmio2Ranges[idx].fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK, VERR_INVALID_PARAMETER);
1571 else
1572 AssertReturn(!(pVM->pgm.s.aMmio2Ranges[idx].fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK), VERR_INVALID_PARAMETER);
1573 AssertReturn(pVM->pgm.s.aMmio2Ranges[idx].pPhysHandlerR3 == NIL_RTR3PTR, VERR_INVALID_STATE); /* caller shall free this */
1574
1575#ifdef IN_RING0
1576 cGuestPages += pVM->pgmr0.s.acMmio2RangePages[idx];
1577#else
1578 cGuestPages += pVM->pgm.s.aMmio2Ranges[idx].cbReal >> GUEST_PAGE_SHIFT;
1579#endif
1580
1581 PPGMRAMRANGE const pRamRange = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apMmio2RamRanges[idx];
1582 AssertPtrReturn(pRamRange, VERR_INVALID_STATE);
1583 AssertReturn(pRamRange->fFlags & PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO_EX, VERR_INVALID_STATE);
1584 AssertReturn(pRamRange->GCPhys == NIL_RTGCPHYS, VERR_INVALID_STATE);
1585 AssertReturn(pRamRange->GCPhysLast == NIL_RTGCPHYS, VERR_INVALID_STATE);
1586 }
1587
1588 /*
1589 * Remove everything except the backing memory first. We work the ranges
1590 * in reverse so that we can reduce the max RAM range ID when possible.
1591 */
1592#ifdef IN_RING3
1593 uint8_t * const pbMmio2Backing = pVM->pgm.s.aMmio2Ranges[idxFirst].pbR3;
1594 RTGCPHYS const cbMmio2Backing = RT_ALIGN_T((RTGCPHYS)cGuestPages << GUEST_PAGE_SHIFT, HOST_PAGE_SIZE, RTGCPHYS);
1595#endif
1596
1597 int rc = VINF_SUCCESS;
1598 uint32_t iChunk = cChunks;
1599 while (iChunk-- > 0)
1600 {
1601 uint32_t const idx = idxFirst + iChunk;
1602 PPGMRAMRANGE const pRamRange = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apMmio2RamRanges[idx];
1603
1604 /* Zap the MMIO2 region data. */
1605 pVM->pgm.s.apMmio2RamRanges[idx] = NIL_RTR3PTR;
1606#ifdef IN_RING0
1607 pVM->pgmr0.s.apMmio2RamRanges[idx] = NULL;
1608 pVM->pgmr0.s.acMmio2RangePages[idx] = 0;
1609#endif
1610 pVM->pgm.s.aMmio2Ranges[idx].pDevInsR3 = NIL_RTR3PTR;
1611 pVM->pgm.s.aMmio2Ranges[idx].pbR3 = NIL_RTR3PTR;
1612 pVM->pgm.s.aMmio2Ranges[idx].fFlags = 0;
1613 pVM->pgm.s.aMmio2Ranges[idx].iSubDev = UINT8_MAX;
1614 pVM->pgm.s.aMmio2Ranges[idx].iRegion = UINT8_MAX;
1615 pVM->pgm.s.aMmio2Ranges[idx].idSavedState = UINT8_MAX;
1616 pVM->pgm.s.aMmio2Ranges[idx].idMmio2 = UINT8_MAX;
1617 pVM->pgm.s.aMmio2Ranges[idx].idRamRange = UINT16_MAX;
1618 pVM->pgm.s.aMmio2Ranges[idx].GCPhys = NIL_RTGCPHYS;
1619 pVM->pgm.s.aMmio2Ranges[idx].cbReal = 0;
1620 pVM->pgm.s.aMmio2Ranges[idx].pPhysHandlerR3 = NIL_RTR3PTR;
1621 pVM->pgm.s.aMmio2Ranges[idx].paLSPages = NIL_RTR3PTR;
1622
1623 /* Free the RAM range. */
1624 int rc2 = pgmPhysRamRangeFree(pVM, pRamRange);
1625 AssertLogRelMsgStmt(RT_SUCCESS(rc2), ("rc2=%Rrc idx=%u chunk=%u/%u\n", rc2, idx, iChunk + 1, cChunks),
1626 rc = RT_SUCCESS(rc) ? rc2 : rc);
1627 }
1628
1629 /*
1630 * Final removal frees up the backing memory.
1631 */
1632#ifdef IN_RING3
1633 int const rcBacking = SUPR3PageFree(pbMmio2Backing, cbMmio2Backing >> HOST_PAGE_SHIFT);
1634 AssertLogRelMsgStmt(RT_SUCCESS(rcBacking), ("rc=%Rrc %p LB %#zx\n", rcBacking, pbMmio2Backing, cbMmio2Backing),
1635 rc = RT_SUCCESS(rc) ? rcBacking : rc);
1636#else
1637 int rcBacking = RTR0MemObjFree(pVM->pgmr0.s.ahMmio2MapObjs[idxFirst], true /*fFreeMappings*/);
1638 AssertLogRelMsgStmt(RT_SUCCESS(rcBacking),
1639 ("rc=%Rrc ahMmio2MapObjs[%u]=%p\n", rcBacking, pVM->pgmr0.s.ahMmio2MapObjs[idxFirst], idxFirst),
1640 rc = RT_SUCCESS(rc) ? rcBacking : rc);
1641 if (RT_SUCCESS(rcBacking))
1642 {
1643 pVM->pgmr0.s.ahMmio2MapObjs[idxFirst] = NIL_RTR0MEMOBJ;
1644
1645 rcBacking = RTR0MemObjFree(pVM->pgmr0.s.ahMmio2MemObjs[idxFirst], true /*fFreeMappings*/);
1646 AssertLogRelMsgStmt(RT_SUCCESS(rcBacking),
1647 ("rc=%Rrc ahMmio2MemObjs[%u]=%p\n", rcBacking, pVM->pgmr0.s.ahMmio2MemObjs[idxFirst], idxFirst),
1648 rc = RT_SUCCESS(rc) ? rcBacking : rc);
1649 if (RT_SUCCESS(rcBacking))
1650 pVM->pgmr0.s.ahMmio2MemObjs[idxFirst] = NIL_RTR0MEMOBJ;
1651 }
1652#endif
1653
1654 /*
1655 * Decrease the MMIO2 count if these were the last ones.
1656 */
1657 if (idxFirst + cChunks == pVM->pgm.s.cMmio2Ranges)
1658 pVM->pgm.s.cMmio2Ranges = idxFirst;
1659
1660 /*
1661 * Update page count stats.
1662 */
1663 pVM->pgm.s.cAllPages -= cGuestPages;
1664 pVM->pgm.s.cPrivatePages -= cGuestPages;
1665
1666 return rc;
1667}
1668
1669
1670#ifdef IN_RING0
1671/**
1672 * This is called during VM state loading to deregister an obsolete MMIO2 range.
1673 *
1674 * This does everything except TLB flushing and releasing the access handler.
1675 * The ranges must be unmapped and without preallocated access handlers.
1676 *
1677 * @returns VBox status code.
1678 * @param pGVM Pointer to the global VM structure.
1679 * @param pReq Where to get the parameters.
1680 * @thread EMT(0)
1681 */
1682VMMR0_INT_DECL(int) PGMR0PhysMmio2DeregisterReq(PGVM pGVM, PPGMPHYSMMIO2DEREGISTERREQ pReq)
1683{
1684 /*
1685 * Validate input (ASSUME pReq is a copy and can't be modified by ring-3
1686 * while we're here).
1687 */
1688 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
1689 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x < %#zx\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
1690
1691 /** @todo better VM state guard, enmVMState is ring-3 writable. */
1692 /* Only LOADING, as this is special purpose for removing an unwanted PCNet MMIO2 region. */
1693 VMSTATE const enmState = pGVM->enmVMState;
1694 AssertMsgReturn(enmState == VMSTATE_LOADING, ("enmState=%d\n", enmState), VERR_VM_INVALID_VM_STATE);
1695 VM_ASSERT_EMT0_RETURN(pGVM, VERR_VM_THREAD_NOT_EMT);
1696
1697 AssertMsgReturn( pReq->cChunks > 0
1698 && pReq->cChunks < PGM_MAX_MMIO2_RANGES,
1699 ("idMmio2=%#x cChunks=%#x\n", pReq->idMmio2, pReq->cChunks),
1700 VERR_INVALID_PARAMETER);
1701
1702 AssertMsgReturn( pReq->idMmio2 != 0
1703 && pReq->idMmio2 <= PGM_MAX_MMIO2_RANGES
1704 && (unsigned)pReq->idMmio2 + pReq->cChunks - 1U <= PGM_MAX_MMIO2_RANGES,
1705 ("idMmio2=%#x cChunks=%#x\n", pReq->idMmio2, pReq->cChunks),
1706 VERR_INVALID_PARAMETER);
1707
1708 /*
1709 * Validate that the requested range is for exactly one MMIO2 registration.
1710 *
1711 * This is safe to do w/o the lock because registration and deregistration
1712 * is restricted to EMT0, and we're on EMT0 so can't race ourselves.
1713 */
1714
1715 /* Check that the first entry is valid and has a memory object for the backing memory. */
1716 uint32_t idx = pReq->idMmio2 - 1;
1717 AssertReturn(pGVM->pgmr0.s.apMmio2RamRanges[idx] != NULL, VERR_INVALID_STATE);
1718 AssertReturn(pGVM->pgmr0.s.ahMmio2MemObjs[idx] != NIL_RTR0MEMOBJ, VERR_INVALID_STATE);
1719
1720 /* Any additional regions must also have RAM ranges, but shall not have any backing memory. */
1721 idx++;
1722 for (uint32_t iChunk = 1; iChunk < pReq->cChunks; iChunk++, idx++)
1723 {
1724 AssertReturn(pGVM->pgmr0.s.apMmio2RamRanges[idx] != NULL, VERR_INVALID_STATE);
1725 AssertReturn(pGVM->pgmr0.s.ahMmio2MemObjs[idx] == NIL_RTR0MEMOBJ, VERR_INVALID_STATE);
1726 }
1727
1728 /* Check that the next entry is for a different region. */
1729 AssertReturn( idx >= RT_ELEMENTS(pGVM->pgmr0.s.apMmio2RamRanges)
1730 || pGVM->pgmr0.s.apMmio2RamRanges[idx] == NULL
1731 || pGVM->pgmr0.s.ahMmio2MemObjs[idx] != NIL_RTR0MEMOBJ,
1732 VERR_INVALID_PARAMETER);
1733
1734 /*
1735 * Make sure we're owning the PGM lock (caller should be) and call the
1736 * common worker code.
1737 */
1738 int rc = PGM_LOCK(pGVM);
1739 AssertRCReturn(rc, rc);
1740
1741 rc = pgmPhysMmio2DeregisterWorker(pGVM, pReq->idMmio2, pReq->cChunks, pReq->pDevIns);
1742
1743 PGM_UNLOCK(pGVM);
1744 return rc;
1745}
1746#endif /* IN_RING0 */
1747
1748
1749
1750
1751/*********************************************************************************************************************************
1752* ROM *
1753*********************************************************************************************************************************/
1754
1755
1756/**
1757 * Common worker for pgmR3PhysRomRegisterLocked and
1758 * PGMR0PhysRomAllocateRangeReq.
1759 */
1760DECLHIDDEN(int) pgmPhysRomRangeAllocCommon(PVMCC pVM, uint32_t cPages, uint8_t idRomRange, uint32_t fFlags)
1761{
1762 /*
1763 * Allocate the ROM range structure and map it into ring-3.
1764 */
1765 size_t const cbRomRange = RT_ALIGN_Z(RT_UOFFSETOF_DYN(PGMROMRANGE, aPages[cPages]), HOST_PAGE_SIZE);
1766#ifdef IN_RING0
1767 RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ;
1768 int rc = RTR0MemObjAllocPage(&hMemObj, cbRomRange, false /*fExecutable*/);
1769#else
1770 PPGMROMRANGE pRomRange;
1771 int rc = SUPR3PageAlloc(cbRomRange >> HOST_PAGE_SHIFT, 0 /*fFlags*/, (void **)&pRomRange);
1772#endif
1773 if (RT_SUCCESS(rc))
1774 {
1775 /* Zero the memory and do basic range init before mapping it into userland. */
1776#ifdef IN_RING0
1777 PPGMROMRANGE const pRomRange = (PPGMROMRANGE)RTR0MemObjAddress(hMemObj);
1778 if (!RTR0MemObjWasZeroInitialized(hMemObj))
1779#endif
1780 RT_BZERO(pRomRange, cbRomRange);
1781
1782 pRomRange->GCPhys = NIL_RTGCPHYS;
1783 pRomRange->GCPhysLast = NIL_RTGCPHYS;
1784 pRomRange->cb = (RTGCPHYS)cPages << GUEST_PAGE_SHIFT;
1785 pRomRange->fFlags = fFlags;
1786 pRomRange->idSavedState = UINT8_MAX;
1787 pRomRange->idRamRange = UINT16_MAX;
1788 pRomRange->cbOriginal = 0;
1789 pRomRange->pvOriginal = NIL_RTR3PTR;
1790 pRomRange->pszDesc = NIL_RTR3PTR;
1791
1792#ifdef IN_RING0
1793 /* Map it into userland. */
1794 RTR0MEMOBJ hMapObj = NIL_RTR0MEMOBJ;
1795 rc = RTR0MemObjMapUser(&hMapObj, hMemObj, (RTR3PTR)-1, 0 /*uAlignment*/,
1796 RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
1797 if (RT_SUCCESS(rc))
1798#endif
1799 {
1800 /*
1801 * Grab the lock (unlikely to fail or block as caller typically owns it already).
1802 */
1803 rc = PGM_LOCK(pVM);
1804 if (RT_SUCCESS(rc))
1805 {
1806 /*
1807 * Check that idRomRange is still free.
1808 */
1809 if (idRomRange < RT_ELEMENTS(pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRomRanges))
1810 {
1811#ifdef IN_RING0
1812 if (pVM->pgmr0.s.apRomRanges[idRomRange] == NULL)
1813#endif
1814 {
1815 if ( pVM->pgm.s.apRomRanges[idRomRange] == NIL_RTR3PTR
1816 && pVM->pgm.s.cRomRanges == idRomRange)
1817 {
1818 /*
1819 * Commit it.
1820 */
1821#ifdef IN_RING0
1822 pVM->pgmr0.s.apRomRanges[idRomRange] = pRomRange;
1823 pVM->pgmr0.s.acRomRangePages[idRomRange] = cPages;
1824 pVM->pgmr0.s.ahRomRangeMemObjs[idRomRange] = hMemObj;
1825 pVM->pgmr0.s.ahRomRangeMapObjs[idRomRange] = hMapObj;
1826#endif
1827
1828 pVM->pgm.s.cRomRanges = idRomRange + 1;
1829#ifdef IN_RING0
1830 pVM->pgm.s.apRomRanges[idRomRange] = RTR0MemObjAddressR3(hMapObj);
1831#else
1832 pVM->pgm.s.apRomRanges[idRomRange] = pRomRange;
1833#endif
1834
1835 PGM_UNLOCK(pVM);
1836 return VINF_SUCCESS;
1837 }
1838 }
1839
1840 /*
1841 * Bail out.
1842 */
1843 rc = VERR_INTERNAL_ERROR_5;
1844 }
1845 else
1846 rc = VERR_PGM_TOO_MANY_ROM_RANGES;
1847 PGM_UNLOCK(pVM);
1848 }
1849#ifdef IN_RING0
1850 RTR0MemObjFree(hMapObj, false /*fFreeMappings*/);
1851#endif
1852 }
1853#ifdef IN_RING0
1854 RTR0MemObjFree(hMemObj, true /*fFreeMappings*/);
1855#else
1856 SUPR3PageFree(pRomRange, cbRomRange >> HOST_PAGE_SHIFT);
1857#endif
1858 }
1859 return rc;
1860}
1861
1862
1863#ifdef IN_RING0
1864/**
1865 * This is called during VM initialization to allocate a ROM range.
1866 *
1867 * The page array is zeroed, the rest is initialized as best we can based on the
1868 * information in @a pReq.
1869 *
1870 * @returns VBox status code.
1871 * @param pGVM Pointer to the global VM structure.
1872 * @param pReq Where to get the parameters and return the range ID.
1873 * @thread EMT(0)
1874 */
1875VMMR0_INT_DECL(int) PGMR0PhysRomAllocateRangeReq(PGVM pGVM, PPGMPHYSROMALLOCATERANGEREQ pReq)
1876{
1877 /*
1878 * Validate input (ASSUME pReq is a copy and can't be modified by ring-3
1879 * while we're here).
1880 */
1881 AssertPtrReturn(pReq, VERR_INVALID_POINTER);
1882 AssertMsgReturn(pReq->Hdr.cbReq == sizeof(*pReq), ("%#x < %#zx\n", pReq->Hdr.cbReq, sizeof(*pReq)), VERR_INVALID_PARAMETER);
1883
1884 AssertReturn(pReq->cbGuestPage == GUEST_PAGE_SIZE, VERR_INCOMPATIBLE_CONFIG);
1885
1886 AssertReturn(pReq->cGuestPages > 0, VERR_OUT_OF_RANGE);
1887 AssertReturn(pReq->cGuestPages <= PGM_MAX_PAGES_PER_ROM_RANGE, VERR_OUT_OF_RANGE);
1888
1889 AssertMsgReturn(!(pReq->fFlags & ~(uint32_t)PGMPHYS_ROM_FLAGS_VALID_MASK), ("fFlags=%#RX32\n", pReq->fFlags),
1890 VERR_INVALID_FLAGS);
1891
1892 AssertReturn(pReq->idRomRange < RT_ELEMENTS(pGVM->pgmr0.s.apRomRanges), VERR_OUT_OF_RANGE);
1893 AssertReturn(pReq->idRomRange == pGVM->pgm.s.cRomRanges, VERR_OUT_OF_RANGE);
1894
1895 /** @todo better VM state guard, enmVMState is ring-3 writable. */
1896 VMSTATE const enmState = pGVM->enmVMState;
1897 AssertMsgReturn(enmState == VMSTATE_CREATING, ("enmState=%d\n", enmState), VERR_VM_INVALID_VM_STATE);
1898 VM_ASSERT_EMT0_RETURN(pGVM, VERR_VM_THREAD_NOT_EMT);
1899
1900 /*
1901 * Call common worker.
1902 */
1903 return pgmPhysRomRangeAllocCommon(pGVM, pReq->cGuestPages, pReq->idRomRange, pReq->fFlags);
1904}
1905#endif /* IN_RING0 */
1906
1907
1908/*********************************************************************************************************************************
1909* Other stuff
1910*********************************************************************************************************************************/
1911
1912
1913
1914/**
1915 * Checks if Address Gate 20 is enabled or not.
1916 *
1917 * @returns true if enabled.
1918 * @returns false if disabled.
1919 * @param pVCpu The cross context virtual CPU structure.
1920 */
1921VMMDECL(bool) PGMPhysIsA20Enabled(PVMCPU pVCpu)
1922{
1923 /* Must check that pVCpu isn't NULL here because the PDM device helpers are a little lazy. */
1924 LogFlow(("PGMPhysIsA20Enabled %d\n", pVCpu && pVCpu->pgm.s.fA20Enabled));
1925 return pVCpu && pVCpu->pgm.s.fA20Enabled;
1926}
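/*
 * Illustrative sketch (not part of the original source): how a caller might apply the
 * A20 gate state queried above. When A20 is disabled, bit 20 of the physical address
 * is masked off, so accesses just above 1 MB wrap back to low memory. The helper name
 * is made up for illustration only.
 */
#if 0 /* example only */
static RTGCPHYS pgmSampleApplyA20(PVMCPU pVCpu, RTGCPHYS GCPhys)
{
    if (!PGMPhysIsA20Enabled(pVCpu))
        GCPhys &= ~(RTGCPHYS)RT_BIT_32(20); /* clear address bit 20 */
    return GCPhys;
}
#endif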
1927
1928
1929/**
1930 * Validates a GC physical address.
1931 *
1932 * @returns true if valid.
1933 * @returns false if invalid.
1934 * @param pVM The cross context VM structure.
1935 * @param GCPhys The physical address to validate.
1936 */
1937VMMDECL(bool) PGMPhysIsGCPhysValid(PVMCC pVM, RTGCPHYS GCPhys)
1938{
1939 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
1940 return pPage != NULL;
1941}
1942
1943
1944/**
1945 * Checks if a GC physical address is a normal page,
1946 * i.e. not ROM, MMIO or reserved.
1947 *
1948 * @returns true if normal.
1949 * @returns false if invalid, ROM, MMIO or reserved page.
1950 * @param pVM The cross context VM structure.
1951 * @param GCPhys The physical address to check.
1952 */
1953VMMDECL(bool) PGMPhysIsGCPhysNormal(PVMCC pVM, RTGCPHYS GCPhys)
1954{
1955 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
1956 return pPage
1957 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM;
1958}
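/*
 * Illustrative sketch (not part of the original source): combining the two predicates
 * above to classify a guest physical address. Purely an example; the helper name is
 * hypothetical.
 */
#if 0 /* example only */
static const char *pgmSampleClassifyGCPhys(PVMCC pVM, RTGCPHYS GCPhys)
{
    if (!PGMPhysIsGCPhysValid(pVM, GCPhys))
        return "invalid";               /* no PGMPAGE for this address */
    if (PGMPhysIsGCPhysNormal(pVM, GCPhys))
        return "normal RAM";            /* plain RAM, not ROM/MMIO/reserved */
    return "ROM/MMIO/reserved";
}
#endif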
1959
1960
1961/**
1962 * Converts a GC physical address to a HC physical address.
1963 *
1964 * @returns VINF_SUCCESS on success.
1965 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
1966 * page but has no physical backing.
1967 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
1968 * GC physical address.
1969 *
1970 * @param pVM The cross context VM structure.
1971 * @param GCPhys The GC physical address to convert.
1972 * @param pHCPhys Where to store the HC physical address on success.
1973 */
1974VMM_INT_DECL(int) PGMPhysGCPhys2HCPhys(PVMCC pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
1975{
1976 PGM_LOCK_VOID(pVM);
1977 PPGMPAGE pPage;
1978 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1979 if (RT_SUCCESS(rc))
1980 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & GUEST_PAGE_OFFSET_MASK);
1981 PGM_UNLOCK(pVM);
1982 return rc;
1983}
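/*
 * Illustrative sketch (not part of the original source): translating a guest physical
 * address to its host physical counterpart. PGMPhysGCPhys2HCPhys takes the PGM lock
 * itself, so only a valid pVM is needed here. Hypothetical helper name.
 */
#if 0 /* example only */
static void pgmSampleLogHCPhys(PVMCC pVM, RTGCPHYS GCPhys)
{
    RTHCPHYS HCPhys;
    int rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys, &HCPhys);
    if (RT_SUCCESS(rc))
        Log(("GCPhys %RGp -> HCPhys %RHp\n", GCPhys, HCPhys));
    else
        Log(("GCPhys %RGp has no backing (rc=%Rrc)\n", GCPhys, rc));
}
#endif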
1984
1985
1986/**
1987 * Invalidates all page mapping TLBs.
1988 *
1989 * @param pVM The cross context VM structure.
1990 * @param fInRendezvous Set if we're in a rendezvous.
1991 */
1992void pgmPhysInvalidatePageMapTLB(PVMCC pVM, bool fInRendezvous)
1993{
1994 PGM_LOCK_VOID(pVM);
1995 STAM_COUNTER_INC(&pVM->pgm.s.Stats.StatPageMapTlbFlushes);
1996
1997 /* Clear the R3 & R0 TLBs completely. */
1998 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbR0.aEntries); i++)
1999 {
2000 pVM->pgm.s.PhysTlbR0.aEntries[i].GCPhys = NIL_RTGCPHYS;
2001 pVM->pgm.s.PhysTlbR0.aEntries[i].pPage = 0;
2002 pVM->pgm.s.PhysTlbR0.aEntries[i].pv = 0;
2003 }
2004
2005 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbR3.aEntries); i++)
2006 {
2007 pVM->pgm.s.PhysTlbR3.aEntries[i].GCPhys = NIL_RTGCPHYS;
2008 pVM->pgm.s.PhysTlbR3.aEntries[i].pPage = 0;
2009 pVM->pgm.s.PhysTlbR3.aEntries[i].pMap = 0;
2010 pVM->pgm.s.PhysTlbR3.aEntries[i].pv = 0;
2011 }
2012
2013 /* For the per VCPU lockless TLBs, we only invalidate the GCPhys members so that
2014 anyone concurrently using an entry can safely continue to do so while any
2015 subsequent attempt to use it will fail. (Emulating a scenario where we
2016 lost the PGM lock race and the concurrent TLB user won it.) */
2017 VMCC_FOR_EACH_VMCPU(pVM)
2018 {
2019 if (!fInRendezvous && pVCpu != VMMGetCpu(pVM))
2020 for (unsigned idx = 0; idx < RT_ELEMENTS(pVCpu->pgm.s.PhysTlb.aEntries); idx++)
2021 ASMAtomicWriteU64(&pVCpu->pgm.s.PhysTlb.aEntries[idx].GCPhys, NIL_RTGCPHYS);
2022 else
2023 for (unsigned idx = 0; idx < RT_ELEMENTS(pVCpu->pgm.s.PhysTlb.aEntries); idx++)
2024 pVCpu->pgm.s.PhysTlb.aEntries[idx].GCPhys = NIL_RTGCPHYS;
2025 }
2026 VMCC_FOR_EACH_VMCPU_END(pVM);
2027
2028 IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID, IEMTLBPHYSFLUSHREASON_MISC);
2029 PGM_UNLOCK(pVM);
2030}
2031
2032
2033/**
2034 * Invalidates a page mapping TLB entry.
2035 *
2036 * @param pVM The cross context VM structure.
2037 * @param GCPhys The guest physical address of the entry to flush.
2038 *
2039 * @note Caller is responsible for calling IEMTlbInvalidateAllPhysicalAllCpus
2040 * when needed.
2041 */
2042void pgmPhysInvalidatePageMapTLBEntry(PVMCC pVM, RTGCPHYS GCPhys)
2043{
2044 PGM_LOCK_ASSERT_OWNER(pVM);
2045
2046 STAM_COUNTER_INC(&pVM->pgm.s.Stats.StatPageMapTlbFlushEntry);
2047
2048 unsigned const idx = PGM_PAGER3MAPTLB_IDX(GCPhys);
2049
2050 pVM->pgm.s.PhysTlbR0.aEntries[idx].GCPhys = NIL_RTGCPHYS;
2051 pVM->pgm.s.PhysTlbR0.aEntries[idx].pPage = 0;
2052 pVM->pgm.s.PhysTlbR0.aEntries[idx].pv = 0;
2053
2054 pVM->pgm.s.PhysTlbR3.aEntries[idx].GCPhys = NIL_RTGCPHYS;
2055 pVM->pgm.s.PhysTlbR3.aEntries[idx].pPage = 0;
2056 pVM->pgm.s.PhysTlbR3.aEntries[idx].pMap = 0;
2057 pVM->pgm.s.PhysTlbR3.aEntries[idx].pv = 0;
2058
2059 /* For the per VCPU lockless TLBs, we only invalidate the GCPhys member so that
2060 anyone concurrently using the entry can safely continue to do so while any
2061 subsequent attempt to use it will fail. (Emulating a scenario where we
2062 lost the PGM lock race and the concurrent TLB user won it.) */
2063 VMCC_FOR_EACH_VMCPU(pVM)
2064 {
2065 ASMAtomicWriteU64(&pVCpu->pgm.s.PhysTlb.aEntries[idx].GCPhys, NIL_RTGCPHYS);
2066 }
2067 VMCC_FOR_EACH_VMCPU_END(pVM);
2068}
2069
2070#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
2071
2072/**
2073 * Makes sure that there is at least one handy page ready for use.
2074 *
2075 * This will also take the appropriate actions when reaching water-marks.
2076 *
2077 * @returns VBox status code.
2078 * @retval VINF_SUCCESS on success.
2079 * @retval VERR_EM_NO_MEMORY if we're really out of memory.
2080 *
2081 * @param pVM The cross context VM structure.
2082 *
2083 * @remarks Must be called from within the PGM critical section. It may
2084 * nip back to ring-3/0 in some cases.
2085 */
2086static int pgmPhysEnsureHandyPage(PVMCC pVM)
2087{
2088 AssertMsg(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", pVM->pgm.s.cHandyPages));
2089
2090 /*
2091 * Do we need to do anything special?
2092 */
2093# ifdef IN_RING3
2094 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_R3_ALLOC))
2095# else
2096 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_RZ_TO_R3))
2097# endif
2098 {
2099 /*
2100 * Allocate pages only if we're out of them, or in ring-3, almost out.
2101 */
2102# ifdef IN_RING3
2103 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_R3_ALLOC)
2104# else
2105 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_ALLOC)
2106# endif
2107 {
2108 Log(("PGM: cHandyPages=%u out of %u -> allocate more; VM_FF_PGM_NO_MEMORY=%RTbool\n",
2109 pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) ));
2110# ifdef IN_RING3
2111 int rc = PGMR3PhysAllocateHandyPages(pVM);
2112# else
2113 int rc = pgmR0PhysAllocateHandyPages(pVM, VMMGetCpuId(pVM), false /*fRing3*/);
2114# endif
2115 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2116 {
2117 if (RT_FAILURE(rc))
2118 return rc;
2119 AssertMsgReturn(rc == VINF_EM_NO_MEMORY, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
2120 if (!pVM->pgm.s.cHandyPages)
2121 {
2122 LogRel(("PGM: no more handy pages!\n"));
2123 return VERR_EM_NO_MEMORY;
2124 }
2125 Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
2126 Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY));
2127# ifndef IN_RING3
2128 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3); /* paranoia */
2129# endif
2130 }
2131 AssertMsgReturn( pVM->pgm.s.cHandyPages > 0
2132 && pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages),
2133 ("%u\n", pVM->pgm.s.cHandyPages),
2134 VERR_PGM_HANDY_PAGE_IPE);
2135 }
2136 else
2137 {
2138 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_SET_FF)
2139 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
2140# ifndef IN_RING3
2141 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_TO_R3)
2142 {
2143 Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
2144 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
2145 }
2146# endif
2147 }
2148 }
2149
2150 return VINF_SUCCESS;
2151}
2152
2153
2154/**
2155 * Replace a zero or shared page with new page that we can write to.
2156 *
2157 * @returns The following VBox status codes.
2158 * @retval VINF_SUCCESS on success, pPage is modified.
2159 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
2160 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
2161 *
2162 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
2163 *
2164 * @param pVM The cross context VM structure.
2165 * @param pPage The physical page tracking structure. This will
2166 * be modified on success.
2167 * @param GCPhys The address of the page.
2168 *
2169 * @remarks Must be called from within the PGM critical section. It may
2170 * nip back to ring-3/0 in some cases.
2171 *
2172 * @remarks This function shouldn't really fail, however if it does
2173 * it probably means we've screwed up the size of handy pages and/or
2174 * the low-water mark. Or, that some device I/O is causing a lot of
2175 * pages to be allocated while the host is in a low-memory
2176 * condition. The latter should be handled elsewhere and in a more
2177 * controlled manner, it's on the @bugref{3170} todo list...
2178 */
2179int pgmPhysAllocPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
2180{
2181 LogFlow(("pgmPhysAllocPage: %R[pgmpage] %RGp\n", pPage, GCPhys));
2182
2183 /*
2184 * Prereqs.
2185 */
2186 PGM_LOCK_ASSERT_OWNER(pVM);
2187 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
2188 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage));
2189
2190# ifdef PGM_WITH_LARGE_PAGES
2191 /*
2192 * Try allocate a large page if applicable.
2193 */
2194 if ( PGMIsUsingLargePages(pVM)
2195 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
2196 && !VM_IS_NEM_ENABLED(pVM)) /** @todo NEM: Implement large pages support. */
2197 {
2198 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
2199 PPGMPAGE pBasePage;
2200
2201 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pBasePage);
2202 AssertRCReturn(rc, rc); /* paranoia; can't happen. */
2203 if (PGM_PAGE_GET_PDE_TYPE(pBasePage) == PGM_PAGE_PDE_TYPE_DONTCARE)
2204 {
2205 rc = pgmPhysAllocLargePage(pVM, GCPhys);
2206 if (rc == VINF_SUCCESS)
2207 return rc;
2208 }
2209 /* Mark the base as type page table, so we don't check over and over again. */
2210 PGM_PAGE_SET_PDE_TYPE(pVM, pBasePage, PGM_PAGE_PDE_TYPE_PT);
2211
2212 /* fall back to 4KB pages. */
2213 }
2214# endif
2215
2216 /*
2217 * Flush any shadow page table mappings of the page.
2218 * When VBOX_WITH_NEW_LAZY_PAGE_ALLOC isn't defined, there shouldn't be any.
2219 */
2220 bool fFlushTLBs = false;
2221 int rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhys, pPage, true /*fFlushTLBs*/, &fFlushTLBs);
2222 AssertMsgReturn(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc), RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_STATUS);
2223
2224 /*
2225 * Ensure that we've got a page handy, take it and use it.
2226 */
2227 int rc2 = pgmPhysEnsureHandyPage(pVM);
2228 if (RT_FAILURE(rc2))
2229 {
2230 if (fFlushTLBs)
2231 PGM_INVL_ALL_VCPU_TLBS(pVM);
2232 Assert(rc2 == VERR_EM_NO_MEMORY);
2233 return rc2;
2234 }
2235 /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */
2236 PGM_LOCK_ASSERT_OWNER(pVM);
2237 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
2238 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage));
2239
2240 uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
2241 AssertMsg(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", iHandyPage));
2242 Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_GMMPAGEDESC_PHYS);
2243 Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
2244 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
2245 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
2246
2247 /*
2248 * There are one or two actions to be taken the next time we allocate handy pages:
2249 * - Tell the GMM (global memory manager) what the page is being used for.
2250 * (Speeds up replacement operations - sharing and defragmenting.)
2251 * - If the current backing is shared, it must be freed.
2252 */
2253 const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
2254 pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
2255
2256 void const *pvSharedPage = NULL;
2257 if (!PGM_PAGE_IS_SHARED(pPage))
2258 {
2259 Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
2260 STAM_COUNTER_INC(&pVM->pgm.s.Stats.StatRZPageReplaceZero);
2261 pVM->pgm.s.cZeroPages--;
2262 }
2263 else
2264 {
2265 /* Mark this shared page for freeing/dereferencing. */
2266 pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
2267 Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
2268
2269 Log(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
2270 GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
2271 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageReplaceShared));
2272 pVM->pgm.s.cSharedPages--;
2273
2274 /* Grab the address of the page so we can make a copy later on. (safe) */
2275 rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvSharedPage);
2276 AssertRC(rc);
2277 }
2278
2279 /*
2280 * Do the PGMPAGE modifications.
2281 */
2282 pVM->pgm.s.cPrivatePages++;
2283 PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhys);
2284 PGM_PAGE_SET_PAGEID(pVM, pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
2285 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
2286 PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_PT);
2287 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
2288 IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID,
2289 !pvSharedPage
2290 ? IEMTLBPHYSFLUSHREASON_ALLOCATED : IEMTLBPHYSFLUSHREASON_ALLOCATED_FROM_SHARED);
2291
2292 /* Copy the shared page contents to the replacement page. */
2293 if (!pvSharedPage)
2294 { /* likely */ }
2295 else
2296 {
2297 /* Get the virtual address of the new page. */
2298 PGMPAGEMAPLOCK PgMpLck;
2299 void *pvNewPage;
2300 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvNewPage, &PgMpLck); AssertRC(rc);
2301 if (RT_SUCCESS(rc))
2302 {
2303 memcpy(pvNewPage, pvSharedPage, GUEST_PAGE_SIZE); /** @todo write ASMMemCopyPage */
2304 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2305 }
2306 }
2307
2308 if ( fFlushTLBs
2309 && rc != VINF_PGM_GCPHYS_ALIASED)
2310 PGM_INVL_ALL_VCPU_TLBS(pVM);
2311
2312 /*
2313 * Notify NEM about the mapping change for this page.
2314 *
2315 * Note! Shadow ROM pages are complicated as they can definitely be
2316 * allocated while not visible, so play safe.
2317 */
2318 if (VM_IS_NEM_ENABLED(pVM))
2319 {
2320 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
2321 if ( enmType != PGMPAGETYPE_ROM_SHADOW
2322 || pgmPhysGetPage(pVM, GCPhys) == pPage)
2323 {
2324 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
2325 rc2 = NEMHCNotifyPhysPageAllocated(pVM, GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, HCPhys,
2326 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
2327 if (RT_SUCCESS(rc))
2328 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
2329 else
2330 rc = rc2;
2331 }
2332 }
2333
2334 return rc;
2335}
2336
2337# ifdef PGM_WITH_LARGE_PAGES
2338
2339/**
2340 * Replace a 2 MB range of zero pages with new pages that we can write to.
2341 *
2342 * @returns The following VBox status codes.
2343 * @retval VINF_SUCCESS on success, pPage is modified.
2344 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
2345 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
2346 *
2347 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
2348 *
2349 * @param pVM The cross context VM structure.
2350 * @param GCPhys The address of the page.
2351 *
2352 * @remarks Must be called from within the PGM critical section. It may block
2353 * on GMM and host mutexes/locks, leaving HM context.
2354 */
2355int pgmPhysAllocLargePage(PVMCC pVM, RTGCPHYS GCPhys)
2356{
2357 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
2358 LogFlow(("pgmPhysAllocLargePage: %RGp base %RGp\n", GCPhys, GCPhysBase));
2359 Assert(!VM_IS_NEM_ENABLED(pVM)); /** @todo NEM: Large page support. */
2360
2361 /*
2362 * Check Prereqs.
2363 */
2364 PGM_LOCK_ASSERT_OWNER(pVM);
2365 Assert(PGMIsUsingLargePages(pVM));
2366
2367 /*
2368 * All the pages must be unallocated RAM pages, i.e. mapping the ZERO page.
2369 */
2370 PPGMPAGE pFirstPage;
2371 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pFirstPage);
2372 if ( RT_SUCCESS(rc)
2373 && PGM_PAGE_GET_TYPE(pFirstPage) == PGMPAGETYPE_RAM
2374 && PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ZERO)
2375 {
2376 /*
2377 * Further they should have PDE type set to PGM_PAGE_PDE_TYPE_DONTCARE,
2378 * since they are unallocated.
2379 */
2380 unsigned uPDEType = PGM_PAGE_GET_PDE_TYPE(pFirstPage);
2381 Assert(uPDEType != PGM_PAGE_PDE_TYPE_PDE);
2382 if (uPDEType == PGM_PAGE_PDE_TYPE_DONTCARE)
2383 {
2384 /*
2385 * Now, make sure all the other pages in the 2 MB range are in the same state.
2386 */
2387 GCPhys = GCPhysBase;
2388 unsigned cLeft = _2M / GUEST_PAGE_SIZE;
2389 while (cLeft-- > 0)
2390 {
2391 PPGMPAGE pSubPage = pgmPhysGetPage(pVM, GCPhys);
2392 if ( pSubPage
2393 && PGM_PAGE_GET_TYPE(pSubPage) == PGMPAGETYPE_RAM /* Anything other than ram implies monitoring. */
2394 && PGM_PAGE_GET_STATE(pSubPage) == PGM_PAGE_STATE_ZERO) /* Allocated, monitored or shared means we can't use a large page here */
2395 {
2396 Assert(PGM_PAGE_GET_PDE_TYPE(pSubPage) == PGM_PAGE_PDE_TYPE_DONTCARE);
2397 GCPhys += GUEST_PAGE_SIZE;
2398 }
2399 else
2400 {
2401 LogFlow(("pgmPhysAllocLargePage: Found page %RGp with wrong attributes (type=%d; state=%d); cancel check.\n",
2402 GCPhys, pSubPage ? PGM_PAGE_GET_TYPE(pSubPage) : -1, pSubPage ? PGM_PAGE_GET_STATE(pSubPage) : -1));
2403
2404 /* Failed. Mark as requiring a PT so we don't check the whole thing again in the future. */
2405 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRefused);
2406 PGM_PAGE_SET_PDE_TYPE(pVM, pFirstPage, PGM_PAGE_PDE_TYPE_PT);
2407 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
2408 }
2409 }
2410
2411 /*
2412 * Do the allocation.
2413 */
2414# ifdef IN_RING3
2415 rc = VMMR3CallR0(pVM, VMMR0_DO_PGM_ALLOCATE_LARGE_PAGE, GCPhysBase, NULL);
2416# elif defined(IN_RING0)
2417 rc = pgmR0PhysAllocateLargePage(pVM, VMMGetCpuId(pVM), GCPhysBase);
2418# else
2419# error "Port me"
2420# endif
2421 if (RT_SUCCESS(rc))
2422 {
2423 Assert(PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ALLOCATED);
2424 pVM->pgm.s.cLargePages++;
2425 return VINF_SUCCESS;
2426 }
2427
2428 /* If we fail once, it most likely means the host's memory is too
2429 fragmented; don't bother trying again. */
2430 LogFlow(("pgmPhysAllocLargePage failed with %Rrc\n", rc));
2431 return rc;
2432 }
2433 }
2434 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
2435}
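/*
 * Illustrative sketch (not part of the original source): the 2 MB base/offset split
 * used by the large page code above. X86_PDE2M_PAE_PG_MASK keeps the bits above the
 * 2 MB boundary, so every small page of a candidate large page shares one GCPhysBase.
 * Hypothetical helper name.
 */
#if 0 /* example only */
static bool pgmSampleIsSame2MbRange(RTGCPHYS GCPhys1, RTGCPHYS GCPhys2)
{
    /* Two guest addresses can only be backed by the same large page if their
       2 MB-aligned bases match. */
    return (GCPhys1 & X86_PDE2M_PAE_PG_MASK) == (GCPhys2 & X86_PDE2M_PAE_PG_MASK);
}
#endif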
2436
2437
2438/**
2439 * Recheck the entire 2 MB range to see if we can use it again as a large page.
2440 *
2441 * @returns The following VBox status codes.
2442 * @retval VINF_SUCCESS on success, the large page can be used again
2443 * @retval VERR_PGM_INVALID_LARGE_PAGE_RANGE if it can't be reused
2444 *
2445 * @param pVM The cross context VM structure.
2446 * @param GCPhys The address of the page.
2447 * @param pLargePage Page structure of the base page
2448 */
2449int pgmPhysRecheckLargePage(PVMCC pVM, RTGCPHYS GCPhys, PPGMPAGE pLargePage)
2450{
2451 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRecheck);
2452
2453 Assert(!VM_IS_NEM_ENABLED(pVM)); /** @todo NEM: Large page support. */
2454
2455 AssertCompile(X86_PDE2M_PAE_PG_MASK == EPT_PDE2M_PG_MASK); /* Paranoia: Caller uses this for guest EPT tables as well. */
2456 GCPhys &= X86_PDE2M_PAE_PG_MASK;
2457
2458 /* Check the base page. */
2459 Assert(PGM_PAGE_GET_PDE_TYPE(pLargePage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
2460 if ( PGM_PAGE_GET_STATE(pLargePage) != PGM_PAGE_STATE_ALLOCATED
2461 || PGM_PAGE_GET_TYPE(pLargePage) != PGMPAGETYPE_RAM
2462 || PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
2463 {
2464 LogFlow(("pgmPhysRecheckLargePage: checks failed for base page %x %x %x\n", PGM_PAGE_GET_STATE(pLargePage), PGM_PAGE_GET_TYPE(pLargePage), PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage)));
2465 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
2466 }
2467
2468 STAM_PROFILE_START(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,IsValidLargePage), a);
2469 /* Check all remaining pages in the 2 MB range. */
2470 unsigned i;
2471 GCPhys += GUEST_PAGE_SIZE;
2472 for (i = 1; i < _2M / GUEST_PAGE_SIZE; i++)
2473 {
2474 PPGMPAGE pPage;
2475 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
2476 AssertRCBreak(rc);
2477
2478 if ( PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
2479 || PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
2480 || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
2481 || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
2482 {
2483 LogFlow(("pgmPhysRecheckLargePage: checks failed for page %d; %x %x %x\n", i, PGM_PAGE_GET_STATE(pPage), PGM_PAGE_GET_TYPE(pPage), PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)));
2484 break;
2485 }
2486
2487 GCPhys += GUEST_PAGE_SIZE;
2488 }
2489 STAM_PROFILE_STOP(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,IsValidLargePage), a);
2490
2491 if (i == _2M / GUEST_PAGE_SIZE)
2492 {
2493 PGM_PAGE_SET_PDE_TYPE(pVM, pLargePage, PGM_PAGE_PDE_TYPE_PDE);
2494 pVM->pgm.s.cLargePagesDisabled--;
2495 Log(("pgmPhysRecheckLargePage: page %RGp can be reused!\n", GCPhys - _2M));
2496 return VINF_SUCCESS;
2497 }
2498
2499 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
2500}
2501
2502# endif /* PGM_WITH_LARGE_PAGES */
2503#endif /* !VBOX_WITH_ONLY_PGM_NEM_MODE */
2504
2505
2506
2507/**
2508 * Deal with a write monitored page.
2509 *
2510 * @param pVM The cross context VM structure.
2511 * @param pPage The physical page tracking structure.
2512 * @param GCPhys The guest physical address of the page.
2513 * PGMPhysReleasePageMappingLock() passes NIL_RTGCPHYS in a
2514 * very unlikely situation where it is okay that we let NEM
2515 * fix the page access in a lazy fashion.
2516 *
2517 * @remarks Called from within the PGM critical section.
2518 */
2519void pgmPhysPageMakeWriteMonitoredWritable(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
2520{
2521 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED);
2522 PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
2523 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
2524 if (PGM_PAGE_IS_CODE_PAGE(pPage))
2525 {
2526 PGM_PAGE_CLEAR_CODE_PAGE(pVM, pPage);
2527 IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID, IEMTLBPHYSFLUSHREASON_MADE_WRITABLE);
2528 }
2529
2530 Assert(pVM->pgm.s.cMonitoredPages > 0);
2531 pVM->pgm.s.cMonitoredPages--;
2532 pVM->pgm.s.cWrittenToPages++;
2533
2534#ifdef VBOX_WITH_NATIVE_NEM
2535 /*
2536 * Notify NEM about the protection change so we won't spin forever.
2537 *
2538 * Note! NEM needs to be able to lazily correct page protection as we cannot
2539 * really get it 100% right here, it seems. The page pool does this too.
2540 */
2541 if (VM_IS_NEM_ENABLED(pVM) && GCPhys != NIL_RTGCPHYS)
2542 {
2543 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
2544 PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
2545 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
2546 NEMHCNotifyPhysPageProtChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage),
2547 pRam ? PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhys) : NULL,
2548 pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
2549 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
2550 }
2551#else
2552 RT_NOREF(GCPhys);
2553#endif
2554}
2555
2556
2557/**
2558 * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
2559 *
2560 * @returns VBox strict status code.
2561 * @retval VINF_SUCCESS on success.
2562 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
2563 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
2564 *
2565 * @param pVM The cross context VM structure.
2566 * @param pPage The physical page tracking structure.
2567 * @param GCPhys The address of the page.
2568 *
2569 * @remarks Called from within the PGM critical section.
2570 */
2571int pgmPhysPageMakeWritable(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
2572{
2573 PGM_LOCK_ASSERT_OWNER(pVM);
2574 switch (PGM_PAGE_GET_STATE(pPage))
2575 {
2576 case PGM_PAGE_STATE_WRITE_MONITORED:
2577 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, GCPhys);
2578 RT_FALL_THRU();
2579 default: /* to shut up GCC */
2580 case PGM_PAGE_STATE_ALLOCATED:
2581 return VINF_SUCCESS;
2582
2583 /*
2584 * Zero pages can be dummy pages for MMIO or reserved memory,
2585 * so we need to check the flags before joining cause with
2586 * shared page replacement.
2587 */
2588 case PGM_PAGE_STATE_ZERO:
2589 if (PGM_PAGE_IS_MMIO(pPage))
2590 return VERR_PGM_PHYS_PAGE_RESERVED;
2591 RT_FALL_THRU();
2592 case PGM_PAGE_STATE_SHARED:
2593#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
2594 return pgmPhysAllocPage(pVM, pPage, GCPhys);
2595#else
2596 AssertFailed(); /** @todo not sure if we make use of ZERO pages or not in NEM-mode, but I can't see how pgmPhysAllocPage would work. */
2597 return VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE;
2598#endif
2599
2600 /* Not allowed to write to ballooned pages. */
2601 case PGM_PAGE_STATE_BALLOONED:
2602 return VERR_PGM_PHYS_PAGE_BALLOONED;
2603 }
2604}
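/*
 * Illustrative sketch (not part of the original source): the typical pattern for
 * making a page writable before modifying it. Assumes the caller already owns the
 * PGM lock; note that VINF_PGM_SYNC_CR3 is still a success status. Hypothetical
 * helper name.
 */
#if 0 /* example only */
static int pgmSampleMakeGCPhysWritable(PVMCC pVM, RTGCPHYS GCPhys)
{
    PGM_LOCK_ASSERT_OWNER(pVM);
    PPGMPAGE pPage;
    int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
    if (RT_SUCCESS(rc))
        rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
    return rc;
}
#endif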
2605
2606#if 0 /* unused */
2607/**
2608 * Internal usage: Map the page specified by its GMM ID.
2609 *
2610 * This is similar to pgmPhysPageMap
2611 *
2612 * @returns VBox status code.
2613 *
2614 * @param pVM The cross context VM structure.
2615 * @param idPage The Page ID.
2616 * @param HCPhys The physical address (for SUPR0HCPhysToVirt).
2617 * @param ppv Where to store the mapping address.
2618 *
2619 * @remarks Called from within the PGM critical section. The mapping is only
2620 * valid while you are inside this section.
2621 */
2622int pgmPhysPageMapByPageID(PVMCC pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
2623{
2624 /*
2625 * Validation.
2626 */
2627 PGM_LOCK_ASSERT_OWNER(pVM);
2628 AssertReturn(HCPhys && !(HCPhys & GUEST_PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2629 const uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;
2630 AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
2631
2632#ifdef IN_RING0
2633# ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
2634 return SUPR0HCPhysToVirt(HCPhys & ~(RTHCPHYS)GUEST_PAGE_OFFSET_MASK, ppv);
2635# else
2636 return GMMR0PageIdToVirt(pVM, idPage, ppv);
2637# endif
2638
2639#else
2640 /*
2641 * Find/make Chunk TLB entry for the mapping chunk.
2642 */
2643 PPGMCHUNKR3MAP pMap;
2644 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
2645 if (pTlbe->idChunk == idChunk)
2646 {
2647 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
2648 pMap = pTlbe->pChunk;
2649 }
2650 else
2651 {
2652 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
2653
2654 /*
2655 * Find the chunk, map it if necessary.
2656 */
2657 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
2658 if (pMap)
2659 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
2660 else
2661 {
2662 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
2663 if (RT_FAILURE(rc))
2664 return rc;
2665 }
2666
2667 /*
2668 * Enter it into the Chunk TLB.
2669 */
2670 pTlbe->idChunk = idChunk;
2671 pTlbe->pChunk = pMap;
2672 }
2673
2674 *ppv = (uint8_t *)pMap->pv + ((idPage & GMM_PAGEID_IDX_MASK) << GUEST_PAGE_SHIFT);
2675 return VINF_SUCCESS;
2676#endif
2677}
2678#endif /* unused */
2679
2680/**
2681 * Maps a page into the current virtual address space so it can be accessed.
2682 *
2683 * @returns VBox status code.
2684 * @retval VINF_SUCCESS on success.
2685 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
2686 *
2687 * @param pVM The cross context VM structure.
2688 * @param pPage The physical page tracking structure.
2689 * @param GCPhys The address of the page.
2690 * @param ppMap Where to store the address of the mapping tracking structure.
2691 * @param ppv Where to store the mapping address of the page. The page
2692 * offset is masked off!
2693 *
2694 * @remarks Called from within the PGM critical section.
2695 */
2696static int pgmPhysPageMapCommon(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
2697{
2698 PGM_LOCK_ASSERT_OWNER(pVM);
2699 NOREF(GCPhys);
2700
2701 /*
2702 * Special cases: MMIO2 and specially aliased MMIO pages.
2703 */
2704 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2
2705 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
2706 {
2707 *ppMap = NULL;
2708
2709 /* Decode the page id to a page in a MMIO2 ram range. */
2710 uint8_t const idMmio2 = PGM_MMIO2_PAGEID_GET_MMIO2_ID(PGM_PAGE_GET_PAGEID(pPage));
2711 uint32_t const iPage = PGM_MMIO2_PAGEID_GET_IDX(PGM_PAGE_GET_PAGEID(pPage));
2712 AssertLogRelMsgReturn((uint8_t)(idMmio2 - 1U) < RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges),
2713 ("idMmio2=%u size=%u type=%u GCPHys=%#RGp Id=%u State=%u", idMmio2,
2714 RT_ELEMENTS(pVM->pgm.s.aMmio2Ranges), PGM_PAGE_GET_TYPE(pPage), GCPhys,
2715 pPage->s.idPage, pPage->s.uStateY),
2716 VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
2717 PPGMREGMMIO2RANGE const pMmio2Range = &pVM->pgm.s.aMmio2Ranges[idMmio2 - 1];
2718 AssertLogRelReturn(pMmio2Range, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
2719 AssertLogRelReturn(pMmio2Range->idMmio2 == idMmio2, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
2720#ifndef IN_RING0
2721 uint32_t const idRamRange = pMmio2Range->idRamRange;
2722 AssertLogRelReturn(idRamRange < RT_ELEMENTS(pVM->pgm.s.apRamRanges), VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
2723 PPGMRAMRANGE const pRamRange = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges[idRamRange];
2724 AssertLogRelReturn(pRamRange, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
2725 AssertLogRelReturn(iPage < (pRamRange->cb >> GUEST_PAGE_SHIFT), VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
2726 *ppv = pMmio2Range->pbR3 + ((uintptr_t)iPage << GUEST_PAGE_SHIFT);
2727 return VINF_SUCCESS;
2728
2729#else /* IN_RING0 */
2730 AssertLogRelReturn(iPage < pVM->pgmr0.s.acMmio2RangePages[idMmio2 - 1], VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
2731# ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
2732 return SUPR0HCPhysToVirt(PGM_PAGE_GET_HCPHYS(pPage), ppv);
2733# else
2734 AssertPtr(pVM->pgmr0.s.apbMmio2Backing[idMmio2 - 1]);
2735 *ppv = pVM->pgmr0.s.apbMmio2Backing[idMmio2 - 1] + ((uintptr_t)iPage << GUEST_PAGE_SHIFT);
2736 return VINF_SUCCESS;
2737# endif
2738#endif
2739 }
2740
2741#ifdef VBOX_WITH_PGM_NEM_MODE
2742# ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
2743 if (pVM->pgm.s.fNemMode)
2744# endif
2745 {
2746# ifdef IN_RING3
2747 /*
2748 * Find the corresponding RAM range and use that to locate the mapping address.
2749 */
2750 /** @todo Use the page ID for some kind of indexing as we do with MMIO2 above. */
2751 PPGMRAMRANGE const pRam = pgmPhysGetRange(pVM, GCPhys);
2752 AssertLogRelMsgReturn(pRam, ("%RGp\n", GCPhys), VERR_INTERNAL_ERROR_3);
2753 size_t const idxPage = (GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT;
2754 Assert(pPage == &pRam->aPages[idxPage]);
2755 *ppMap = NULL;
2756 *ppv = (uint8_t *)pRam->pbR3 + (idxPage << GUEST_PAGE_SHIFT);
2757 return VINF_SUCCESS;
2758# else
2759 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
2760# endif
2761 }
2762#endif /* VBOX_WITH_PGM_NEM_MODE */
2763#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
2764
2765 const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
2766 if (idChunk == NIL_GMM_CHUNKID)
2767 {
2768 AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage),
2769 VERR_PGM_PHYS_PAGE_MAP_IPE_1);
2770 if (!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
2771 {
2772 AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage),
2773 VERR_PGM_PHYS_PAGE_MAP_IPE_3);
2774 AssertMsgReturn(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("pPage=%R[pgmpage]\n", pPage),
2775 VERR_PGM_PHYS_PAGE_MAP_IPE_4);
2776 *ppv = pVM->pgm.s.abZeroPg;
2777 }
2778 else
2779 *ppv = pVM->pgm.s.abZeroPg;
2780 *ppMap = NULL;
2781 return VINF_SUCCESS;
2782 }
2783
2784# if defined(IN_RING0) && defined(VBOX_WITH_LINEAR_HOST_PHYS_MEM)
2785 /*
2786 * Just use the physical address.
2787 */
2788 *ppMap = NULL;
2789 return SUPR0HCPhysToVirt(PGM_PAGE_GET_HCPHYS(pPage), ppv);
2790
2791# elif defined(IN_RING0)
2792 /*
2793 * Go by page ID thru GMMR0.
2794 */
2795 *ppMap = NULL;
2796 return GMMR0PageIdToVirt(pVM, PGM_PAGE_GET_PAGEID(pPage), ppv);
2797
2798# else
2799 /*
2800 * Find/make Chunk TLB entry for the mapping chunk.
2801 */
2802 PPGMCHUNKR3MAP pMap;
2803 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
2804 if (pTlbe->idChunk == idChunk)
2805 {
2806 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
2807 pMap = pTlbe->pChunk;
2808 AssertPtr(pMap->pv);
2809 }
2810 else
2811 {
2812 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
2813
2814 /*
2815 * Find the chunk, map it if necessary.
2816 */
2817 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
2818 if (pMap)
2819 {
2820 AssertPtr(pMap->pv);
2821 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
2822 }
2823 else
2824 {
2825 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
2826 if (RT_FAILURE(rc))
2827 return rc;
2828 AssertPtr(pMap->pv);
2829 }
2830
2831 /*
2832 * Enter it into the Chunk TLB.
2833 */
2834 pTlbe->idChunk = idChunk;
2835 pTlbe->pChunk = pMap;
2836 }
2837
2838 *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << GUEST_PAGE_SHIFT);
2839 *ppMap = pMap;
2840 return VINF_SUCCESS;
2841# endif /* !IN_RING0 */
2842#endif /* !VBOX_WITH_ONLY_PGM_NEM_MODE */
2843}
2844
2845
2846/**
2847 * Combination of pgmPhysPageMakeWritable and pgmPhysPageMapWritable.
2848 *
2849 * This is typically used in paths where we cannot use the TLB methods (like ROM
2850 * pages) or where there is no point in using them since we won't get many hits.
2851 *
2852 * @returns VBox strict status code.
2853 * @retval VINF_SUCCESS on success.
2854 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
2855 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
2856 *
2857 * @param pVM The cross context VM structure.
2858 * @param pPage The physical page tracking structure.
2859 * @param GCPhys The address of the page.
2860 * @param ppv Where to store the mapping address of the page. The page
2861 * offset is masked off!
2862 *
2863 * @remarks Called from within the PGM critical section. The mapping is only
2864 * valid while you are inside this section.
2865 */
2866int pgmPhysPageMakeWritableAndMap(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
2867{
2868 int rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
2869 if (RT_SUCCESS(rc))
2870 {
2871 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* returned */, ("%Rrc\n", rc));
2872 PPGMPAGEMAP pMapIgnore;
2873 int rc2 = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
2874 if (RT_FAILURE(rc2)) /* preserve rc */
2875 rc = rc2;
2876 }
2877 return rc;
2878}
2879
2880
2881/**
2882 * Maps a page into the current virtual address space so it can be accessed for
2883 * both writing and reading.
2884 *
2885 * This is typically used in paths where we cannot use the TLB methods (like ROM
2886 * pages) or where there is no point in using them since we won't get many hits.
2887 *
2888 * @returns VBox status code.
2889 * @retval VINF_SUCCESS on success.
2890 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
2891 *
2892 * @param pVM The cross context VM structure.
2893 * @param pPage The physical page tracking structure. Must be in the
2894 * allocated state.
2895 * @param GCPhys The address of the page.
2896 * @param ppv Where to store the mapping address of the page. The page
2897 * offset is masked off!
2898 *
2899 * @remarks Called from within the PGM critical section. The mapping is only
2900 * valid while you are inside this section.
2901 */
2902int pgmPhysPageMap(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
2903{
2904 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
2905 PPGMPAGEMAP pMapIgnore;
2906 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
2907}
2908
2909
2910/**
2911 * Maps a page into the current virtual address space so it can be accessed for
2912 * reading.
2913 *
2914 * This is typically used in paths where we cannot use the TLB methods (like ROM
2915 * pages) or where there is no point in using them since we won't get many hits.
2916 *
2917 * @returns VBox status code.
2918 * @retval VINF_SUCCESS on success.
2919 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
2920 *
2921 * @param pVM The cross context VM structure.
2922 * @param pPage The physical page tracking structure.
2923 * @param GCPhys The address of the page.
2924 * @param ppv Where to store the mapping address of the page. The page
2925 * offset is masked off!
2926 *
2927 * @remarks Called from within the PGM critical section. The mapping is only
2928 * valid while you are inside this section.
2929 */
2930int pgmPhysPageMapReadOnly(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv)
2931{
2932 PPGMPAGEMAP pMapIgnore;
2933 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, (void **)ppv);
2934}
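/*
 * Illustrative sketch (not part of the original source): reading the raw contents of
 * a guest page via the read-only mapping helper, similar to the shared page copy done
 * in pgmPhysAllocPage. The mapping is only valid while the PGM lock is held.
 * Hypothetical helper name.
 */
#if 0 /* example only */
static int pgmSamplePeekPage(PVMCC pVM, RTGCPHYS GCPhys, uint8_t *pbDst)
{
    PGM_LOCK_ASSERT_OWNER(pVM);
    PPGMPAGE pPage;
    int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
    if (RT_SUCCESS(rc))
    {
        void const *pvSrc;
        rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvSrc);
        if (RT_SUCCESS(rc))
            memcpy(pbDst, pvSrc, GUEST_PAGE_SIZE);
    }
    return rc;
}
#endif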
2935
2936
2937/**
2938 * Load a guest page into the ring-3 physical TLB.
2939 *
2940 * @returns VBox status code.
2941 * @retval VINF_SUCCESS on success
2942 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
2943 * @param pVM The cross context VM structure.
2944 * @param GCPhys The guest physical address in question.
2945 */
2946int pgmPhysPageLoadIntoTlb(PVMCC pVM, RTGCPHYS GCPhys)
2947{
2948 PGM_LOCK_ASSERT_OWNER(pVM);
2949
2950 /*
2951 * Find the ram range and page and hand it over to the with-page function.
2952 * 99.8% of requests are expected to be in the first range.
2953 */
2954 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
2955 if (!pPage)
2956 {
2957 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageMapTlbMisses));
2958 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2959 }
2960
2961 return pgmPhysPageLoadIntoTlbWithPage(pVM, pPage, GCPhys);
2962}
2963
2964
2965/**
2966 * Load a guest page into the ring-3 physical TLB.
2967 *
2968 * @returns VBox status code.
2969 * @retval VINF_SUCCESS on success
2970 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
2971 *
2972 * @param pVM The cross context VM structure.
2973 * @param pPage Pointer to the PGMPAGE structure corresponding to
2974 * GCPhys.
2975 * @param GCPhys The guest physical address in question.
2976 */
2977int pgmPhysPageLoadIntoTlbWithPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
2978{
2979 PGM_LOCK_ASSERT_OWNER(pVM);
2980 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PageMapTlbMisses));
2981
2982 /*
2983 * Map the page.
2984 * Make a special case for the zero page as it is kind of special.
2985 */
2986 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTX_SUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
2987 if ( !PGM_PAGE_IS_ZERO(pPage)
2988 && !PGM_PAGE_IS_BALLOONED(pPage))
2989 {
2990 void *pv;
2991 PPGMPAGEMAP pMap;
2992 int rc = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMap, &pv);
2993 if (RT_FAILURE(rc))
2994 return rc;
2995#ifndef IN_RING0
2996 pTlbe->pMap = pMap;
2997#endif
2998 pTlbe->pv = pv;
2999 Assert(!((uintptr_t)pTlbe->pv & GUEST_PAGE_OFFSET_MASK));
3000 }
3001 else
3002 {
3003#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
3004 AssertMsg(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("%RGp/%R[pgmpage]\n", GCPhys, pPage));
3005#endif
3006#ifndef IN_RING0
3007 pTlbe->pMap = NULL;
3008#endif
3009 pTlbe->pv = pVM->pgm.s.abZeroPg;
3010 }
3011#ifdef PGM_WITH_PHYS_TLB
3012 if ( PGM_PAGE_GET_TYPE(pPage) < PGMPAGETYPE_ROM_SHADOW
3013 || PGM_PAGE_GET_TYPE(pPage) > PGMPAGETYPE_ROM)
3014 pTlbe->GCPhys = GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
3015 else
3016 pTlbe->GCPhys = NIL_RTGCPHYS; /* ROM: Problematic because of the two pages. :-/ */
3017#else
3018 pTlbe->GCPhys = NIL_RTGCPHYS;
3019#endif
3020 pTlbe->pPage = pPage;
3021 return VINF_SUCCESS;
3022}
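/*
 * Illustrative sketch (not part of the original source): servicing a physical TLB miss
 * by taking the PGM lock and (re)loading the shared TLB entry for the address.
 * Hypothetical helper name.
 */
#if 0 /* example only */
static int pgmSamplePrimePhysTlb(PVMCC pVM, RTGCPHYS GCPhys)
{
    PGM_LOCK_VOID(pVM);
    int rc = pgmPhysPageLoadIntoTlb(pVM, GCPhys); /* VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if unmapped */
    PGM_UNLOCK(pVM);
    return rc;
}
#endif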
3023
3024
3025#ifdef IN_RING3 /** @todo Need to ensure a ring-0 version gets invalidated safely. */
3026/**
3027 * Load a guest page into the lockless ring-3 physical TLB for the calling EMT.
3028 *
3029 * @returns VBox status code.
3030 * @retval VINF_SUCCESS on success
3031 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
3032 *
3033 * @param pVCpu The cross context virtual CPU structure.
3034 * @param pPage Pointer to the PGMPAGE structure corresponding to
3035 * GCPhys.
3036 * @param GCPhys The guest physical address in question.
3037 */
3038DECLHIDDEN(int) pgmPhysPageLoadIntoLocklessTlbWithPage(PVMCPUCC pVCpu, PPGMPAGE pPage, RTGCPHYS GCPhys)
3039{
3040 STAM_REL_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,PageMapTlbMisses));
3041 PPGMPAGEMAPTLBE const pLocklessTlbe = &pVCpu->pgm.s.PhysTlb.aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
3042 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3043
3044 PGM_LOCK_VOID(pVM);
3045
3046 PPGMPAGEMAPTLBE pSharedTlbe;
3047 int rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pSharedTlbe);
3048 if (RT_SUCCESS(rc))
3049 *pLocklessTlbe = *pSharedTlbe;
3050
3051 PGM_UNLOCK(pVM);
3052 return rc;
3053}
3054#endif /* IN_RING3 */
3055
3056
3057/**
3058 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
3059 * own the PGM lock and therefore not need to lock the mapped page.
3060 *
3061 * @returns VBox status code.
3062 * @retval VINF_SUCCESS on success.
3063 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
3064 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
3065 *
3066 * @param pVM The cross context VM structure.
3067 * @param GCPhys The guest physical address of the page that should be mapped.
3068 * @param pPage Pointer to the PGMPAGE structure for the page.
3069 * @param ppv Where to store the address corresponding to GCPhys.
3070 *
3071 * @internal
3072 * @deprecated Use pgmPhysGCPhys2CCPtrInternalEx.
3073 */
3074int pgmPhysGCPhys2CCPtrInternalDepr(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
3075{
3076 int rc;
3077 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
3078 PGM_LOCK_ASSERT_OWNER(pVM);
3079 pVM->pgm.s.cDeprecatedPageLocks++;
3080
3081 /*
3082 * Make sure the page is writable.
3083 */
3084 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
3085 {
3086 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
3087 if (RT_FAILURE(rc))
3088 return rc;
3089 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
3090 }
3091 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0 || PGM_IS_IN_NEM_MODE(pVM));
3092
3093 /*
3094 * Get the mapping address.
3095 */
3096 PPGMPAGEMAPTLBE pTlbe;
3097 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
3098 if (RT_FAILURE(rc))
3099 return rc;
3100 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
3101 return VINF_SUCCESS;
3102}
3103
3104
3105/**
3106 * Locks a page mapping for writing.
3107 *
3108 * @param pVM The cross context VM structure.
3109 * @param pPage The page.
3110 * @param pTlbe The mapping TLB entry for the page.
3111 * @param pLock The lock structure (output).
3112 */
3113DECLINLINE(void) pgmPhysPageMapLockForWriting(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
3114{
3115# ifndef IN_RING0
3116 PPGMPAGEMAP pMap = pTlbe->pMap;
3117 if (pMap)
3118 pMap->cRefs++;
3119# else
3120 RT_NOREF(pTlbe);
3121# endif
3122
3123 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
3124 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
3125 {
3126 if (cLocks == 0)
3127 pVM->pgm.s.cWriteLockedPages++;
3128 PGM_PAGE_INC_WRITE_LOCKS(pPage);
3129 }
3130 else if (cLocks != PGM_PAGE_MAX_LOCKS)
3131 {
3132 PGM_PAGE_INC_WRITE_LOCKS(pPage);
3133 AssertMsgFailed(("%R[pgmpage] is entering permanent write locked state!\n", pPage));
3134# ifndef IN_RING0
3135 if (pMap)
3136 pMap->cRefs++; /* Extra ref to prevent it from going away. */
3137# endif
3138 }
3139
3140 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
3141# ifndef IN_RING0
3142 pLock->pvMap = pMap;
3143# else
3144 pLock->pvMap = NULL;
3145# endif
3146}
3147
3148/**
3149 * Locks a page mapping for reading.
3150 *
3151 * @param pVM The cross context VM structure.
3152 * @param pPage The page.
3153 * @param pTlbe The mapping TLB entry for the page.
3154 * @param pLock The lock structure (output).
3155 */
3156DECLINLINE(void) pgmPhysPageMapLockForReading(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
3157{
3158# ifndef IN_RING0
3159 PPGMPAGEMAP pMap = pTlbe->pMap;
3160 if (pMap)
3161 pMap->cRefs++;
3162# else
3163 RT_NOREF(pTlbe);
3164# endif
3165
3166 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
3167 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
3168 {
3169 if (cLocks == 0)
3170 pVM->pgm.s.cReadLockedPages++;
3171 PGM_PAGE_INC_READ_LOCKS(pPage);
3172 }
3173 else if (cLocks != PGM_PAGE_MAX_LOCKS)
3174 {
3175 PGM_PAGE_INC_READ_LOCKS(pPage);
3176 AssertMsgFailed(("%R[pgmpage] is entering permanent read locked state!\n", pPage));
3177# ifndef IN_RING0
3178 if (pMap)
3179 pMap->cRefs++; /* Extra ref to prevent it from going away. */
3180# endif
3181 }
3182
3183 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
3184# ifndef IN_RING0
3185 pLock->pvMap = pMap;
3186# else
3187 pLock->pvMap = NULL;
3188# endif
3189}
3190
3191
3192/**
3193 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
3194 * own the PGM lock and have access to the page structure.
3195 *
3196 * @returns VBox status code.
3197 * @retval VINF_SUCCESS on success.
3198 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
3199 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
3200 *
3201 * @param pVM The cross context VM structure.
3202 * @param GCPhys The guest physical address of the page that should be mapped.
3203 * @param pPage Pointer to the PGMPAGE structure for the page.
3204 * @param ppv Where to store the address corresponding to GCPhys.
3205 * @param pLock Where to store the lock information that
3206 * pgmPhysReleaseInternalPageMappingLock needs.
3207 *
3208 * @internal
3209 */
3210int pgmPhysGCPhys2CCPtrInternal(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
3211{
3212 int rc;
3213 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
3214 PGM_LOCK_ASSERT_OWNER(pVM);
3215
3216 /*
3217 * Make sure the page is writable.
3218 */
3219 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
3220 {
3221 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
3222 if (RT_FAILURE(rc))
3223 return rc;
3224 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
3225 }
3226 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0 || PGM_IS_IN_NEM_MODE(pVM));
3227
3228 /*
3229 * Do the job.
3230 */
3231 PPGMPAGEMAPTLBE pTlbe;
3232 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
3233 if (RT_FAILURE(rc))
3234 return rc;
3235 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
3236 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
3237 return VINF_SUCCESS;
3238}
3239
3240
3241/**
3242 * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
3243 * own the PGM lock and have access to the page structure.
3244 *
3245 * @returns VBox status code.
3246 * @retval VINF_SUCCESS on success.
3247 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
3248 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
3249 *
3250 * @param pVM The cross context VM structure.
3251 * @param GCPhys The guest physical address of the page that should be mapped.
3252 * @param pPage Pointer to the PGMPAGE structure for the page.
3253 * @param ppv Where to store the address corresponding to GCPhys.
3254 * @param pLock Where to store the lock information that
3255 * pgmPhysReleaseInternalPageMappingLock needs.
3256 *
3257 * @internal
3258 */
3259int pgmPhysGCPhys2CCPtrInternalReadOnly(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv, PPGMPAGEMAPLOCK pLock)
3260{
3261 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
3262 PGM_LOCK_ASSERT_OWNER(pVM);
3263 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0 || PGM_IS_IN_NEM_MODE(pVM));
3264
3265 /*
3266 * Do the job.
3267 */
3268 PPGMPAGEMAPTLBE pTlbe;
3269 int rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
3270 if (RT_FAILURE(rc))
3271 return rc;
3272 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
3273 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
3274 return VINF_SUCCESS;
3275}
3276
3277
3278/**
3279 * Requests the mapping of a guest page into the current context.
3280 *
3281 * This API should only be used for short-term mappings, as it will consume scarce
3282 * resources (R0 and GC) in the mapping cache. When you're done with the page,
3283 * call PGMPhysReleasePageMappingLock() ASAP to release it.
3284 *
3285 * This API will assume your intention is to write to the page, and will
3286 * therefore replace shared and zero pages. If you do not intend to modify
3287 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
3288 *
3289 * @returns VBox status code.
3290 * @retval VINF_SUCCESS on success.
3291 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
3292 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
3293 *
3294 * @param pVM The cross context VM structure.
3295 * @param GCPhys The guest physical address of the page that should be
3296 * mapped.
3297 * @param ppv Where to store the address corresponding to GCPhys.
3298 * @param pLock Where to store the lock information that
3299 * PGMPhysReleasePageMappingLock needs.
3300 *
3301 * @remarks The caller is responsible for dealing with access handlers.
3302 * @todo Add an informational return code for pages with access handlers?
3303 *
3304 * @remark Avoid calling this API from within critical sections (other than
3305 * the PGM one) because of the deadlock risk. External threads may
3306 * need to delegate jobs to the EMTs.
3307 * @remarks Only one page is mapped! Make no assumption about what's after or
3308 * before the returned page!
3309 * @thread Any thread.
3310 */
3311VMM_INT_DECL(int) PGMPhysGCPhys2CCPtr(PVMCC pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
3312{
3313 int rc = PGM_LOCK(pVM);
3314 AssertRCReturn(rc, rc);
3315
3316 /*
3317 * Query the Physical TLB entry for the page (may fail).
3318 */
3319 PPGMPAGEMAPTLBE pTlbe;
3320 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
3321 if (RT_SUCCESS(rc))
3322 {
3323 /*
3324 * If the page is shared, the zero page, or being write monitored
3325 * it must be converted to a page that's writable if possible.
3326 */
3327 PPGMPAGE pPage = pTlbe->pPage;
3328 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
3329 {
3330 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
3331 if (RT_SUCCESS(rc))
3332 {
3333 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
3334 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
3335 }
3336 }
3337 if (RT_SUCCESS(rc))
3338 {
3339 /*
3340 * Now, just perform the locking and calculate the return address.
3341 */
3342 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
3343 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
3344 }
3345 }
3346
3347 PGM_UNLOCK(pVM);
3348 return rc;
3349}
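/*
 * Editor's note: the following is an illustrative usage sketch, not part of the
 * original source. It assumes a caller that already has a valid PVMCC and guest
 * physical address; the helper name pgmSamplePokeGuestByte is made up. The point
 * it shows is the required pairing: every successful PGMPhysGCPhys2CCPtr must be
 * matched by PGMPhysReleasePageMappingLock ASAP, and only the single mapped page
 * may be touched.
 */
#if 0 /* illustrative sketch only */
static int pgmSamplePokeGuestByte(PVMCC pVM, RTGCPHYS GCPhys, uint8_t bValue)
{
    void           *pv;
    PGMPAGEMAPLOCK  Lock;
    int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pv, &Lock); /* write intent: shared/zero pages get replaced */
    if (RT_SUCCESS(rc))
    {
        *(uint8_t *)pv = bValue;                    /* stay within this one guest page */
        PGMPhysReleasePageMappingLock(pVM, &Lock);  /* release quickly, the mapping cache is scarce */
    }
    return rc; /* e.g. VERR_PGM_PHYS_PAGE_RESERVED or VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS on failure */
}
#endif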
3350
3351
3352/**
3353 * Requests the mapping of a guest page into the current context.
3354 *
3355 * This API should only be used for short-term mappings, as it will consume scarce
3356 * resources (R0 and GC) in the mapping cache. When you're done with the page,
3357 * call PGMPhysReleasePageMappingLock() ASAP to release it.
3358 *
3359 * @returns VBox status code.
3360 * @retval VINF_SUCCESS on success.
3361 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
3362 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
3363 *
3364 * @param pVM The cross context VM structure.
3365 * @param GCPhys The guest physical address of the page that should be
3366 * mapped.
3367 * @param ppv Where to store the address corresponding to GCPhys.
3368 * @param pLock Where to store the lock information that
3369 * PGMPhysReleasePageMappingLock needs.
3370 *
3371 * @remarks The caller is responsible for dealing with access handlers.
3372 * @todo Add an informational return code for pages with access handlers?
3373 *
3374 * @remarks Avoid calling this API from within critical sections (other than
3375 * the PGM one) because of the deadlock risk.
3376 * @remarks Only one page is mapped! Make no assumption about what's after or
3377 * before the returned page!
3378 * @thread Any thread.
3379 */
3380VMM_INT_DECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVMCC pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
3381{
3382 int rc = PGM_LOCK(pVM);
3383 AssertRCReturn(rc, rc);
3384
3385 /*
3386 * Query the Physical TLB entry for the page (may fail).
3387 */
3388 PPGMPAGEMAPTLBE pTlbe;
3389 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
3390 if (RT_SUCCESS(rc))
3391 {
3392 /* MMIO pages don't have any readable backing. */
3393 PPGMPAGE pPage = pTlbe->pPage;
3394 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)))
3395 rc = VERR_PGM_PHYS_PAGE_RESERVED;
3396 else
3397 {
3398 /*
3399 * Now, just perform the locking and calculate the return address.
3400 */
3401 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
3402 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
3403 }
3404 }
3405
3406 PGM_UNLOCK(pVM);
3407 return rc;
3408}
3409
3410
3411/**
3412 * Requests the mapping of a guest page given by virtual address into the current context.
3413 *
3414 * This API should only be used for short-term mappings, as it will consume
3415 * scarce resources (R0 and GC) in the mapping cache. When you're done
3416 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
3417 *
3418 * This API will assume your intention is to write to the page, and will
3419 * therefore replace shared and zero pages. If you do not intend to modify
3420 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
3421 *
3422 * @returns VBox status code.
3423 * @retval VINF_SUCCESS on success.
3424 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
3425 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
3426 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
3427 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
3428 *
3429 * @param pVCpu The cross context virtual CPU structure.
3430 * @param GCPtr The guest pointer (virtual address) of the page that should be
3431 * mapped.
3432 * @param ppv Where to store the address corresponding to GCPtr.
3433 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
3434 *
3435 * @remark Avoid calling this API from within critical sections (other than
3436 * the PGM one) because of the deadlock risk.
3437 * @thread EMT
3438 */
3439VMM_INT_DECL(int) PGMPhysGCPtr2CCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock)
3440{
3441 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
3442 RTGCPHYS GCPhys;
3443 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
3444 if (RT_SUCCESS(rc))
3445 rc = PGMPhysGCPhys2CCPtr(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
3446 return rc;
3447}
3448
3449
3450/**
3451 * Requests the mapping of a guest page given by virtual address into the current context.
3452 *
3453 * This API should only be used for short-term mappings, as it will consume
3454 * scarce resources (R0 and GC) in the mapping cache. When you're done
3455 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
3456 *
3457 * @returns VBox status code.
3458 * @retval VINF_SUCCESS on success.
3459 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
3460 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
3461 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
3462 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
3463 *
3464 * @param pVCpu The cross context virtual CPU structure.
3465 * @param GCPtr The guest pointer (virtual address) of the page that should be
3466 * mapped.
3467 * @param ppv Where to store the address corresponding to GCPtr.
3468 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
3469 *
3470 * @remark Avoid calling this API from within critical sections (other than
3471 * the PGM one) because of the deadlock risk.
3472 * @thread EMT(pVCpu)
3473 */
3474VMM_INT_DECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVMCPUCC pVCpu, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
3475{
3476 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
3477 RTGCPHYS GCPhys;
3478 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
3479 if (RT_SUCCESS(rc))
3480 rc = PGMPhysGCPhys2CCPtrReadOnly(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
3481 return rc;
3482}
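/*
 * Editor's note: illustrative sketch, not part of the original source. The two
 * GCPtr-based variants above first translate the guest virtual address via
 * PGMPhysGCPtr2GCPhys and must therefore be called on the EMT of pVCpu. The
 * helper name pgmSamplePeekGuestByteByPtr is made up.
 */
#if 0 /* illustrative sketch only */
static int pgmSamplePeekGuestByteByPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint8_t *pbValue)
{
    void const     *pv;
    PGMPAGEMAPLOCK  Lock;
    int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtr, &pv, &Lock); /* guest page walk + mapping */
    if (RT_SUCCESS(rc))
    {
        *pbValue = *(uint8_t const *)pv;
        PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &Lock);
    }
    return rc; /* can also be VERR_PAGE_NOT_PRESENT / VERR_PAGE_TABLE_NOT_PRESENT from the page walk */
}
#endif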
3483
3484
3485/**
3486 * Release the mapping of a guest page.
3487 *
3488 * This is the counterpart of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly,
3489 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
3490 *
3491 * @param pVM The cross context VM structure.
3492 * @param pLock The lock structure initialized by the mapping function.
3493 */
3494VMMDECL(void) PGMPhysReleasePageMappingLock(PVMCC pVM, PPGMPAGEMAPLOCK pLock)
3495{
3496# ifndef IN_RING0
3497 PPGMPAGEMAP pMap = (PPGMPAGEMAP)pLock->pvMap;
3498# endif
3499 PPGMPAGE pPage = (PPGMPAGE)(pLock->uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
3500 bool fWriteLock = (pLock->uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
3501
3502 pLock->uPageAndType = 0;
3503 pLock->pvMap = NULL;
3504
3505 PGM_LOCK_VOID(pVM);
3506 if (fWriteLock)
3507 {
3508 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
3509 Assert(cLocks > 0);
3510 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
3511 {
3512 if (cLocks == 1)
3513 {
3514 Assert(pVM->pgm.s.cWriteLockedPages > 0);
3515 pVM->pgm.s.cWriteLockedPages--;
3516 }
3517 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
3518 }
3519
3520 if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_WRITE_MONITORED)
3521 { /* probably extremely likely */ }
3522 else
3523 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, NIL_RTGCPHYS);
3524 }
3525 else
3526 {
3527 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
3528 Assert(cLocks > 0);
3529 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
3530 {
3531 if (cLocks == 1)
3532 {
3533 Assert(pVM->pgm.s.cReadLockedPages > 0);
3534 pVM->pgm.s.cReadLockedPages--;
3535 }
3536 PGM_PAGE_DEC_READ_LOCKS(pPage);
3537 }
3538 }
3539
3540# ifndef IN_RING0
3541 if (pMap)
3542 {
3543 Assert(pMap->cRefs >= 1);
3544 pMap->cRefs--;
3545 }
3546# endif
3547 PGM_UNLOCK(pVM);
3548}
3549
3550
3551#ifdef IN_RING3
3552/**
3553 * Release the mapping of multiple guest pages.
3554 *
3555 * This is the counterpart to PGMR3PhysBulkGCPhys2CCPtrExternal() and
3556 * PGMR3PhysBulkGCPhys2CCPtrReadOnlyExternal().
3557 *
3558 * @param pVM The cross context VM structure.
3559 * @param cPages Number of pages to unlock.
3560 * @param paLocks Array of lock structures initialized by the mapping
3561 * function.
3562 */
3563VMMDECL(void) PGMPhysBulkReleasePageMappingLocks(PVMCC pVM, uint32_t cPages, PPGMPAGEMAPLOCK paLocks)
3564{
3565 Assert(cPages > 0);
3566 bool const fWriteLock = (paLocks[0].uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
3567#ifdef VBOX_STRICT
3568 for (uint32_t i = 1; i < cPages; i++)
3569 {
3570 Assert(fWriteLock == ((paLocks[i].uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE));
3571 AssertPtr(paLocks[i].uPageAndType);
3572 }
3573#endif
3574
3575 PGM_LOCK_VOID(pVM);
3576 if (fWriteLock)
3577 {
3578 /*
3579 * Write locks:
3580 */
3581 for (uint32_t i = 0; i < cPages; i++)
3582 {
3583 PPGMPAGE pPage = (PPGMPAGE)(paLocks[i].uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
3584 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
3585 Assert(cLocks > 0);
3586 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
3587 {
3588 if (cLocks == 1)
3589 {
3590 Assert(pVM->pgm.s.cWriteLockedPages > 0);
3591 pVM->pgm.s.cWriteLockedPages--;
3592 }
3593 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
3594 }
3595
3596 if (PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_WRITE_MONITORED)
3597 { /* probably extremely likely */ }
3598 else
3599 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage, NIL_RTGCPHYS);
3600
3601 PPGMPAGEMAP pMap = (PPGMPAGEMAP)paLocks[i].pvMap;
3602 if (pMap)
3603 {
3604 Assert(pMap->cRefs >= 1);
3605 pMap->cRefs--;
3606 }
3607
3608 /* Yield the lock: */
3609 if ((i & 1023) == 1023 && i + 1 < cPages)
3610 {
3611 PGM_UNLOCK(pVM);
3612 PGM_LOCK_VOID(pVM);
3613 }
3614 }
3615 }
3616 else
3617 {
3618 /*
3619 * Read locks:
3620 */
3621 for (uint32_t i = 0; i < cPages; i++)
3622 {
3623 PPGMPAGE pPage = (PPGMPAGE)(paLocks[i].uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
3624 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
3625 Assert(cLocks > 0);
3626 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
3627 {
3628 if (cLocks == 1)
3629 {
3630 Assert(pVM->pgm.s.cReadLockedPages > 0);
3631 pVM->pgm.s.cReadLockedPages--;
3632 }
3633 PGM_PAGE_DEC_READ_LOCKS(pPage);
3634 }
3635
3636 PPGMPAGEMAP pMap = (PPGMPAGEMAP)paLocks[i].pvMap;
3637 if (pMap)
3638 {
3639 Assert(pMap->cRefs >= 1);
3640 pMap->cRefs--;
3641 }
3642
3643 /* Yield the lock: */
3644 if ((i & 1023) == 1023 && i + 1 < cPages)
3645 {
3646 PGM_UNLOCK(pVM);
3647 PGM_LOCK_VOID(pVM);
3648 }
3649 }
3650 }
3651 PGM_UNLOCK(pVM);
3652
3653 RT_BZERO(paLocks, sizeof(paLocks[0]) * cPages);
3654}
3655#endif /* IN_RING3 */
3656
3657
3658/**
3659 * Release the internal mapping of a guest page.
3660 *
3661 * This is the counterpart of pgmPhysGCPhys2CCPtrInternalEx and
3662 * pgmPhysGCPhys2CCPtrInternalReadOnly.
3663 *
3664 * @param pVM The cross context VM structure.
3665 * @param pLock The lock structure initialized by the mapping function.
3666 *
3667 * @remarks Caller must hold the PGM lock.
3668 */
3669void pgmPhysReleaseInternalPageMappingLock(PVMCC pVM, PPGMPAGEMAPLOCK pLock)
3670{
3671 PGM_LOCK_ASSERT_OWNER(pVM);
3672 PGMPhysReleasePageMappingLock(pVM, pLock); /* lazy for now */
3673}
3674
3675
3676/**
3677 * Converts a GC physical address to a HC ring-3 pointer.
3678 *
3679 * @returns VINF_SUCCESS on success.
3680 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
3681 * page but has no physical backing.
3682 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
3683 * GC physical address.
3684 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
3685 * a dynamic RAM chunk boundary.
3686 *
3687 * @param pVM The cross context VM structure.
3688 * @param GCPhys The GC physical address to convert.
3689 * @param pR3Ptr Where to store the R3 pointer on success.
3690 *
3691 * @deprecated Avoid when possible!
3692 */
3693int pgmPhysGCPhys2R3Ptr(PVMCC pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
3694{
3695/** @todo this is kind of hacky and needs some more work. */
3696#ifndef DEBUG_sandervl
3697 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
3698#endif
3699
3700 Log(("pgmPhysGCPhys2R3Ptr(,%RGp,): don't use this API!\n", GCPhys)); /** @todo eliminate this API! */
3701 PGM_LOCK_VOID(pVM);
3702
3703 PPGMRAMRANGE pRam;
3704 PPGMPAGE pPage;
3705 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
3706 if (RT_SUCCESS(rc))
3707 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
3708
3709 PGM_UNLOCK(pVM);
3710 Assert(rc <= VINF_SUCCESS);
3711 return rc;
3712}
3713
3714
3715/**
3716 * Special lockless guest physical to current context pointer convertor.
3717 *
3718 * This is mainly for the page table walking and such.
3719 */
3720int pgmPhysGCPhys2CCPtrLockless(PVMCPUCC pVCpu, RTGCPHYS GCPhys, void **ppv)
3721{
3722 VMCPU_ASSERT_EMT(pVCpu);
3723
3724 /*
3725 * Get the RAM range and page structure.
3726 */
3727 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
3728 PGMRAMRANGE volatile *pRam;
3729 PGMPAGE volatile *pPage;
3730 int rc = pgmPhysGetPageAndRangeExLockless(pVM, pVCpu, GCPhys, &pPage, &pRam);
3731 if (RT_SUCCESS(rc))
3732 {
3733 /*
3734 * Now, make sure it's writable (typically it is).
3735 */
3736 if (RT_LIKELY(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED))
3737 { /* likely, typically */ }
3738 else
3739 {
3740 PGM_LOCK_VOID(pVM);
3741 rc = pgmPhysPageMakeWritable(pVM, (PPGMPAGE)pPage, GCPhys);
3742 if (RT_SUCCESS(rc))
3743 rc = pgmPhysGetPageAndRangeExLockless(pVM, pVCpu, GCPhys, &pPage, &pRam);
3744 PGM_UNLOCK(pVM);
3745 if (RT_FAILURE(rc))
3746 return rc;
3747 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
3748 }
3749 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0 || PGM_IS_IN_NEM_MODE(pVM));
3750
3751 /*
3752 * Get the mapping address.
3753 */
3754 uint8_t *pb;
3755#ifdef IN_RING3
3756 if (PGM_IS_IN_NEM_MODE(pVM))
3757 pb = &pRam->pbR3[(RTGCPHYS)(uintptr_t)(pPage - &pRam->aPages[0]) << GUEST_PAGE_SHIFT];
3758 else
3759#endif
3760 {
3761#ifdef IN_RING3
3762# ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
3763 PPGMPAGEMAPTLBE pTlbe;
3764 rc = pgmPhysPageQueryLocklessTlbeWithPage(pVCpu, (PPGMPAGE)pPage, GCPhys, &pTlbe);
3765 AssertLogRelRCReturn(rc, rc);
3766 pb = (uint8_t *)pTlbe->pv;
3767 RT_NOREF(pVM);
3768# endif
3769#else /** @todo a safe lockless page TLB in ring-0 needs to ensure it gets the right invalidations. Later. */
3770 PGM_LOCK(pVM);
3771 PPGMPAGEMAPTLBE pTlbe;
3772 rc = pgmPhysPageQueryTlbeWithPage(pVM, (PPGMPAGE)pPage, GCPhys, &pTlbe);
3773 AssertLogRelRCReturnStmt(rc, PGM_UNLOCK(pVM), rc);
3774 pb = (uint8_t *)pTlbe->pv;
3775 PGM_UNLOCK(pVM);
3776 RT_NOREF(pVCpu);
3777#endif
3778 }
3779 *ppv = (void *)((uintptr_t)pb | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
3780 return VINF_SUCCESS;
3781 }
3782 Assert(rc <= VINF_SUCCESS);
3783 return rc;
3784}
3785
3786
3787/**
3788 * Converts a guest pointer to a GC physical address.
3789 *
3790 * This uses the current CR3/CR0/CR4 of the guest.
3791 *
3792 * @returns VBox status code.
3793 * @param pVCpu The cross context virtual CPU structure.
3794 * @param GCPtr The guest pointer to convert.
3795 * @param pGCPhys Where to store the GC physical address.
3796 * @thread EMT(pVCpu)
3797 */
3798VMMDECL(int) PGMPhysGCPtr2GCPhys(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
3799{
3800 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
3801 PGMPTWALK Walk;
3802 int rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtr, &Walk);
3803 if (pGCPhys && RT_SUCCESS(rc))
3804 *pGCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtr & GUEST_PAGE_OFFSET_MASK);
3805 return rc;
3806}
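/*
 * Editor's note: illustrative fragment, not part of the original source; pVCpu
 * and GCPtr are assumed from the surrounding context. It only restates what the
 * function above documents: the page-offset bits of GCPtr are carried over into
 * the returned guest physical address unchanged.
 */
#if 0 /* illustrative sketch only */
    RTGCPHYS GCPhysOut;
    int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhysOut);
    if (RT_SUCCESS(rc))
        Assert((GCPhysOut & GUEST_PAGE_OFFSET_MASK) == ((RTGCUINTPTR)GCPtr & GUEST_PAGE_OFFSET_MASK));
#endif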
3807
3808
3809/**
3810 * Converts a guest pointer to a HC physical address.
3811 *
3812 * This uses the current CR3/CR0/CR4 of the guest.
3813 *
3814 * @returns VBox status code.
3815 * @param pVCpu The cross context virtual CPU structure.
3816 * @param GCPtr The guest pointer to convert.
3817 * @param pHCPhys Where to store the HC physical address.
3818 * @thread EMT(pVCpu)
3819 */
3820VMM_INT_DECL(int) PGMPhysGCPtr2HCPhys(PVMCPUCC pVCpu, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
3821{
3822 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
3823 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3824 PGMPTWALK Walk;
3825 int rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtr, &Walk);
3826 if (RT_SUCCESS(rc))
3827 rc = PGMPhysGCPhys2HCPhys(pVM, Walk.GCPhys | ((RTGCUINTPTR)GCPtr & GUEST_PAGE_OFFSET_MASK), pHCPhys);
3828 return rc;
3829}
3830
3831
3832
3833#undef LOG_GROUP
3834#define LOG_GROUP LOG_GROUP_PGM_PHYS_ACCESS
3835
3836
3837#if defined(IN_RING3) && defined(SOME_UNUSED_FUNCTION)
3838/**
3839 * Cache PGMPhys memory access
3840 *
3841 * @param pVM The cross context VM structure.
3842 * @param pCache Cache structure pointer
3843 * @param GCPhys GC physical address
3844 * @param pbR3 HC pointer corresponding to physical page
3845 *
3846 * @thread EMT.
3847 */
3848static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbR3)
3849{
3850 uint32_t iCacheIndex;
3851
3852 Assert(VM_IS_EMT(pVM));
3853
3854 GCPhys &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
3855 pbR3 = (uint8_t *)((uintptr_t)pbR3 & ~(uintptr_t)GUEST_PAGE_OFFSET_MASK);
3856
3857 iCacheIndex = ((GCPhys >> GUEST_PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);
3858
3859 ASMBitSet(&pCache->aEntries, iCacheIndex);
3860
3861 pCache->Entry[iCacheIndex].GCPhys = GCPhys;
3862 pCache->Entry[iCacheIndex].pbR3 = pbR3;
3863}
3864#endif /* IN_RING3 */
3865
3866
3867/**
3868 * Deals with reading from a page with one or more ALL access handlers.
3869 *
3870 * @returns Strict VBox status code in ring-0 and raw-mode, ignorable in ring-3.
3871 * See PGM_HANDLER_PHYS_IS_VALID_STATUS and
3872 * PGM_HANDLER_VIRT_IS_VALID_STATUS for details.
3873 *
3874 * @param pVM The cross context VM structure.
3875 * @param pPage The page descriptor.
3876 * @param GCPhys The physical address to start reading at.
3877 * @param pvBuf Where to put the bits we read.
3878 * @param cb How much to read - less than or equal to a page.
3879 * @param enmOrigin The origin of this call.
3880 */
3881static VBOXSTRICTRC pgmPhysReadHandler(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void *pvBuf, size_t cb,
3882 PGMACCESSORIGIN enmOrigin)
3883{
3884 /*
3885 * The most frequent access here is MMIO and shadowed ROM.
3886 * The current code ASSUMES all these access handlers cover full pages!
3887 */
3888
3889 /*
3890 * Whatever we do we need the source page, map it first.
3891 */
3892 PGMPAGEMAPLOCK PgMpLck;
3893 const void *pvSrc = NULL;
3894 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc, &PgMpLck);
3895/** @todo Check how this can work for MMIO pages? */
3896 if (RT_FAILURE(rc))
3897 {
3898 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
3899 GCPhys, pPage, rc));
3900 memset(pvBuf, 0xff, cb);
3901 return VINF_SUCCESS;
3902 }
3903
3904 VBOXSTRICTRC rcStrict = VINF_PGM_HANDLER_DO_DEFAULT;
3905
3906 /*
3907 * Deal with any physical handlers.
3908 */
3909 PVMCPUCC pVCpu = VMMGetCpu(pVM);
3910 if ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL
3911 || PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
3912 {
3913 PPGMPHYSHANDLER pCur;
3914 rc = pgmHandlerPhysicalLookup(pVM, GCPhys, &pCur);
3915 if (RT_SUCCESS(rc))
3916 {
3917 Assert(pCur && GCPhys >= pCur->Key && GCPhys <= pCur->KeyLast);
3918 Assert((pCur->Key & GUEST_PAGE_OFFSET_MASK) == 0);
3919 Assert((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK);
3920#ifndef IN_RING3
3921 if (enmOrigin != PGMACCESSORIGIN_IEM)
3922 {
3923 /* Cannot reliably handle informational status codes in this context */
3924 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
3925 return VERR_PGM_PHYS_WR_HIT_HANDLER;
3926 }
3927#endif
3928 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
3929 PFNPGMPHYSHANDLER const pfnHandler = pCurType->pfnHandler; Assert(pfnHandler);
3930 uint64_t const uUser = !pCurType->fRing0DevInsIdx ? pCur->uUser
3931 : (uintptr_t)PDMDeviceRing0IdxToInstance(pVM, pCur->uUser);
3932
3933 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pCur->pszDesc) ));
3934 STAM_PROFILE_START(&pCur->Stat, h);
3935 PGM_LOCK_ASSERT_OWNER(pVM);
3936
3937 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
3938 PGM_UNLOCK(pVM);
3939 /* If the access originates from a device, make sure the buffer is initialized
3940 as a guard against leaking heap, stack and other info via badly written
3941 MMIO handling. @bugref{10651} */
3942 if (enmOrigin == PGMACCESSORIGIN_DEVICE)
3943 memset(pvBuf, 0xff, cb);
3944 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, enmOrigin, uUser);
3945 PGM_LOCK_VOID(pVM);
3946
3947 STAM_PROFILE_STOP(&pCur->Stat, h); /* no locking needed, entry is unlikely reused before we get here. */
3948 pCur = NULL; /* might not be valid anymore. */
3949 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict, false),
3950 ("rcStrict=%Rrc GCPhys=%RGp\n", VBOXSTRICTRC_VAL(rcStrict), GCPhys));
3951 if ( rcStrict != VINF_PGM_HANDLER_DO_DEFAULT
3952 && !PGM_PHYS_RW_IS_SUCCESS(rcStrict))
3953 {
3954 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
3955 return rcStrict;
3956 }
3957 }
3958 else if (rc == VERR_NOT_FOUND)
3959 AssertLogRelMsgFailed(("rc=%Rrc GCPhys=%RGp cb=%#x\n", rc, GCPhys, cb));
3960 else
3961 AssertLogRelMsgFailedReturn(("rc=%Rrc GCPhys=%RGp cb=%#x\n", rc, GCPhys, cb), rc);
3962 }
3963
3964 /*
3965 * Take the default action.
3966 */
3967 if (rcStrict == VINF_PGM_HANDLER_DO_DEFAULT)
3968 {
3969 memcpy(pvBuf, pvSrc, cb);
3970 rcStrict = VINF_SUCCESS;
3971 }
3972 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
3973 return rcStrict;
3974}
3975
3976
3977/**
3978 * Read physical memory.
3979 *
3980 * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
3981 * want to ignore those.
3982 *
3983 * @returns Strict VBox status code in raw-mode and ring-0, normal VBox status
3984 * code in ring-3. Use PGM_PHYS_RW_IS_SUCCESS to check.
3985 * @retval VINF_SUCCESS in all context - read completed.
3986 *
3987 * @retval VINF_EM_OFF in RC and R0 - read completed.
3988 * @retval VINF_EM_SUSPEND in RC and R0 - read completed.
3989 * @retval VINF_EM_RESET in RC and R0 - read completed.
3990 * @retval VINF_EM_HALT in RC and R0 - read completed.
3991 * @retval VINF_SELM_SYNC_GDT in RC only - read completed.
3992 *
3993 * @retval VINF_EM_DBG_STOP in RC and R0 - read completed.
3994 * @retval VINF_EM_DBG_BREAKPOINT in RC and R0 - read completed.
3995 * @retval VINF_EM_RAW_EMULATE_INSTR in RC and R0 only.
3996 *
3997 * @retval VINF_IOM_R3_MMIO_READ in RC and R0.
3998 * @retval VINF_IOM_R3_MMIO_READ_WRITE in RC and R0.
3999 *
4000 * @retval VINF_PATM_CHECK_PATCH_PAGE in RC only.
4001 *
4002 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in RC and R0 for access origins that
4003 * haven't been cleared for strict status codes yet.
4004 *
4005 * @param pVM The cross context VM structure.
4006 * @param GCPhys Physical address start reading from.
4007 * @param pvBuf Where to put the read bits.
4008 * @param cbRead How many bytes to read.
4009 * @param enmOrigin The origin of this call.
4010 */
4011VMMDECL(VBOXSTRICTRC) PGMPhysRead(PVMCC pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead, PGMACCESSORIGIN enmOrigin)
4012{
4013 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
4014 LogFlow(("PGMPhysRead: %RGp %d\n", GCPhys, cbRead));
4015
4016 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysRead));
4017 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysReadBytes), cbRead);
4018
4019 PGM_LOCK_VOID(pVM);
4020
4021 /*
4022 * Copy loop on ram ranges.
4023 */
4024 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
4025 for (;;)
4026 {
4027 PPGMRAMRANGE const pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
4028
4029 /* Inside range or not? */
4030 if (pRam && GCPhys >= pRam->GCPhys)
4031 {
4032 /*
4033 * Must work our way thru this page by page.
4034 */
4035 RTGCPHYS off = GCPhys - pRam->GCPhys;
4036 while (off < pRam->cb)
4037 {
4038 unsigned iPage = off >> GUEST_PAGE_SHIFT;
4039 PPGMPAGE pPage = &pRam->aPages[iPage];
4040 size_t cb = GUEST_PAGE_SIZE - (off & GUEST_PAGE_OFFSET_MASK);
4041 if (cb > cbRead)
4042 cb = cbRead;
4043
4044 /*
4045 * Normal page? Get the pointer to it.
4046 */
4047 if ( !PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)
4048 && !PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
4049 {
4050 /*
4051 * Get the pointer to the page.
4052 */
4053 PGMPAGEMAPLOCK PgMpLck;
4054 const void *pvSrc;
4055 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc, &PgMpLck);
4056 if (RT_SUCCESS(rc))
4057 {
4058 memcpy(pvBuf, pvSrc, cb);
4059 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
4060 }
4061 else
4062 {
4063 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
4064 pRam->GCPhys + off, pPage, rc));
4065 memset(pvBuf, 0xff, cb);
4066 }
4067 }
4068 /*
4069 * Have ALL/MMIO access handlers.
4070 */
4071 else
4072 {
4073 VBOXSTRICTRC rcStrict2 = pgmPhysReadHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb, enmOrigin);
4074 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
4075 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
4076 else
4077 {
4078 /* Set the remaining buffer to a known value. */
4079 memset(pvBuf, 0xff, cbRead);
4080 PGM_UNLOCK(pVM);
4081 return rcStrict2;
4082 }
4083 }
4084
4085 /* next page */
4086 if (cb >= cbRead)
4087 {
4088 PGM_UNLOCK(pVM);
4089 return rcStrict;
4090 }
4091 cbRead -= cb;
4092 off += cb;
4093 pvBuf = (char *)pvBuf + cb;
4094 } /* walk pages in ram range. */
4095
4096 GCPhys = pRam->GCPhysLast + 1;
4097 }
4098 else
4099 {
4100 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
4101
4102 /*
4103 * Unassigned address space.
4104 */
4105 size_t cb = pRam ? pRam->GCPhys - GCPhys : ~(size_t)0;
4106 if (cb >= cbRead)
4107 {
4108 memset(pvBuf, 0xff, cbRead);
4109 break;
4110 }
4111 memset(pvBuf, 0xff, cb);
4112
4113 cbRead -= cb;
4114 pvBuf = (char *)pvBuf + cb;
4115 GCPhys += cb;
4116 }
4117
4118 } /* Ram range walk */
4119
4120 PGM_UNLOCK(pVM);
4121 return rcStrict;
4122}
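/*
 * Editor's note: illustrative caller-side fragment, not part of the original
 * source; pVM and GCPhys are assumed from the surrounding context. Because
 * PGMPhysRead returns strict status codes in ring-0/raw-mode, callers test
 * PGM_PHYS_RW_IS_SUCCESS instead of comparing with VINF_SUCCESS. The buffer
 * size and the PGMACCESSORIGIN_DEVICE origin are arbitrary examples.
 */
#if 0 /* illustrative sketch only */
    uint8_t abBuf[64];
    VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, abBuf, sizeof(abBuf), PGMACCESSORIGIN_DEVICE);
    if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    { /* abBuf is fully filled; unassigned space reads back as 0xff. */ }
    else
    { /* e.g. a VINF_IOM_R3_MMIO_READ style status that has to be handled in ring-3. */ }
#endif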
4123
4124
4125/**
4126 * Deals with writing to a page with one or more WRITE or ALL access handlers.
4127 *
4128 * @returns Strict VBox status code in ring-0 and raw-mode, ignorable in ring-3.
4129 * See PGM_HANDLER_PHYS_IS_VALID_STATUS and
4130 * PGM_HANDLER_VIRT_IS_VALID_STATUS for details.
4131 *
4132 * @param pVM The cross context VM structure.
4133 * @param pPage The page descriptor.
4134 * @param GCPhys The physical address to start writing at.
4135 * @param pvBuf What to write.
4136 * @param cbWrite How much to write - less than or equal to a page.
4137 * @param enmOrigin The origin of this call.
4138 */
4139static VBOXSTRICTRC pgmPhysWriteHandler(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite,
4140 PGMACCESSORIGIN enmOrigin)
4141{
4142 PGMPAGEMAPLOCK PgMpLck;
4143 void *pvDst = NULL;
4144 VBOXSTRICTRC rcStrict;
4145
4146 /*
4147 * Give priority to physical handlers (like #PF does).
4148 *
4149 * Hope for a lonely physical handler first that covers the whole write
4150 * area. This should be a pretty frequent case with MMIO and the heavy
4151 * usage of full page handlers in the page pool.
4152 */
4153 PVMCPUCC pVCpu = VMMGetCpu(pVM);
4154 PPGMPHYSHANDLER pCur;
4155 rcStrict = pgmHandlerPhysicalLookup(pVM, GCPhys, &pCur);
4156 if (RT_SUCCESS(rcStrict))
4157 {
4158 Assert(GCPhys >= pCur->Key && GCPhys <= pCur->KeyLast);
4159#ifndef IN_RING3
4160 if (enmOrigin != PGMACCESSORIGIN_IEM)
4161 /* Cannot reliably handle informational status codes in this context */
4162 return VERR_PGM_PHYS_WR_HIT_HANDLER;
4163#endif
4164 size_t cbRange = pCur->KeyLast - GCPhys + 1;
4165 if (cbRange > cbWrite)
4166 cbRange = cbWrite;
4167
4168 Assert(PGMPHYSHANDLER_GET_TYPE(pVM, pCur)->pfnHandler);
4169 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n",
4170 GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
4171 if (!PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
4172 rcStrict = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
4173 else
4174 rcStrict = VINF_SUCCESS;
4175 if (RT_SUCCESS(rcStrict))
4176 {
4177 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
4178 PFNPGMPHYSHANDLER const pfnHandler = pCurType->pfnHandler;
4179 uint64_t const uUser = !pCurType->fRing0DevInsIdx ? pCur->uUser
4180 : (uintptr_t)PDMDeviceRing0IdxToInstance(pVM, pCur->uUser);
4181 STAM_PROFILE_START(&pCur->Stat, h);
4182
4183 /* Most handlers will want to release the PGM lock for deadlock prevention
4184 (esp. MMIO), though some PGM internal ones like the page pool and MMIO2
4185 dirty page trackers will want to keep it for performance reasons. */
4186 PGM_LOCK_ASSERT_OWNER(pVM);
4187 if (pCurType->fKeepPgmLock)
4188 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, uUser);
4189 else
4190 {
4191 PGM_UNLOCK(pVM);
4192 rcStrict = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, uUser);
4193 PGM_LOCK_VOID(pVM);
4194 }
4195
4196 STAM_PROFILE_STOP(&pCur->Stat, h); /* no locking needed, entry is unlikely reused before we get here. */
4197 pCur = NULL; /* might not be valid anymore. */
4198 if (rcStrict == VINF_PGM_HANDLER_DO_DEFAULT)
4199 {
4200 if (pvDst)
4201 memcpy(pvDst, pvBuf, cbRange);
4202 rcStrict = VINF_SUCCESS;
4203 }
4204 else
4205 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict, true),
4206 ("rcStrict=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n",
4207 VBOXSTRICTRC_VAL(rcStrict), GCPhys, pPage, pCur ? R3STRING(pCur->pszDesc) : ""));
4208 }
4209 else
4210 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
4211 GCPhys, pPage, VBOXSTRICTRC_VAL(rcStrict)), rcStrict);
4212 if (RT_LIKELY(cbRange == cbWrite) || !PGM_PHYS_RW_IS_SUCCESS(rcStrict))
4213 {
4214 if (pvDst)
4215 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
4216 return rcStrict;
4217 }
4218
4219 /* more fun to be had below */
4220 cbWrite -= cbRange;
4221 GCPhys += cbRange;
4222 pvBuf = (uint8_t *)pvBuf + cbRange;
4223 pvDst = (uint8_t *)pvDst + cbRange;
4224 }
4225 else if (rcStrict == VERR_NOT_FOUND) /* The handler is somewhere else in the page, deal with it below. */
4226 rcStrict = VINF_SUCCESS;
4227 else
4228 AssertMsgFailedReturn(("rcStrict=%Rrc GCPhys=%RGp\n", VBOXSTRICTRC_VAL(rcStrict), GCPhys), rcStrict);
4229 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage)); /* MMIO handlers are all GUEST_PAGE_SIZEed! */
4230
4231 /*
4232 * Deal with all the odd ends (used to be deal with virt+phys).
4233 */
4234 Assert(rcStrict != VINF_PGM_HANDLER_DO_DEFAULT);
4235
4236 /* We need a writable destination page. */
4237 if (!pvDst)
4238 {
4239 int rc2 = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
4240 AssertLogRelMsgReturn(RT_SUCCESS(rc2),
4241 ("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n", GCPhys, pPage, rc2),
4242 rc2);
4243 }
4244
4245 /** @todo clean up this code some more now that there are no virtual handlers any
4246 * more. */
4247 /* The loop state (big + ugly). */
4248 PPGMPHYSHANDLER pPhys = NULL;
4249 uint32_t offPhys = GUEST_PAGE_SIZE;
4250 uint32_t offPhysLast = GUEST_PAGE_SIZE;
4251 bool fMorePhys = PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage);
4252
4253 /* The loop. */
4254 for (;;)
4255 {
4256 if (fMorePhys && !pPhys)
4257 {
4258 rcStrict = pgmHandlerPhysicalLookup(pVM, GCPhys, &pPhys);
4259 if (RT_SUCCESS_NP(rcStrict))
4260 {
4261 offPhys = 0;
4262 offPhysLast = pPhys->KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
4263 }
4264 else
4265 {
4266 AssertMsgReturn(rcStrict == VERR_NOT_FOUND, ("%Rrc GCPhys=%RGp\n", VBOXSTRICTRC_VAL(rcStrict), GCPhys), rcStrict);
4267
4268 rcStrict = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookupMatchingOrAbove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
4269 GCPhys, &pPhys);
4270 AssertMsgReturn(RT_SUCCESS(rcStrict) || rcStrict == VERR_NOT_FOUND,
4271 ("%Rrc GCPhys=%RGp\n", VBOXSTRICTRC_VAL(rcStrict), GCPhys), rcStrict);
4272
4273 if ( RT_SUCCESS(rcStrict)
4274 && pPhys->Key <= GCPhys + (cbWrite - 1))
4275 {
4276 offPhys = pPhys->Key - GCPhys;
4277 offPhysLast = pPhys->KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
4278 Assert(pPhys->KeyLast - pPhys->Key < _4G);
4279 }
4280 else
4281 {
4282 pPhys = NULL;
4283 fMorePhys = false;
4284 offPhys = offPhysLast = GUEST_PAGE_SIZE;
4285 }
4286 }
4287 }
4288
4289 /*
4290 * Handle access to space without handlers (that's easy).
4291 */
4292 VBOXSTRICTRC rcStrict2 = VINF_PGM_HANDLER_DO_DEFAULT;
4293 uint32_t cbRange = (uint32_t)cbWrite;
4294 Assert(cbRange == cbWrite);
4295
4296 /*
4297 * Physical handler.
4298 */
4299 if (!offPhys)
4300 {
4301#ifndef IN_RING3
4302 if (enmOrigin != PGMACCESSORIGIN_IEM)
4303 /* Cannot reliably handle informational status codes in this context */
4304 return VERR_PGM_PHYS_WR_HIT_HANDLER;
4305#endif
4306 if (cbRange > offPhysLast + 1)
4307 cbRange = offPhysLast + 1;
4308
4309 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pPhys);
4310 PFNPGMPHYSHANDLER const pfnHandler = pCurType->pfnHandler;
4311 uint64_t const uUser = !pCurType->fRing0DevInsIdx ? pPhys->uUser
4312 : (uintptr_t)PDMDeviceRing0IdxToInstance(pVM, pPhys->uUser);
4313
4314 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
4315 STAM_PROFILE_START(&pPhys->Stat, h);
4316
4317 /* Most handlers will want to release the PGM lock for deadlock prevention
4318 (esp. MMIO), though some PGM internal ones like the page pool and MMIO2
4319 dirty page trackers will want to keep it for performance reasons. */
4320 PGM_LOCK_ASSERT_OWNER(pVM);
4321 if (pCurType->fKeepPgmLock)
4322 rcStrict2 = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, uUser);
4323 else
4324 {
4325 PGM_UNLOCK(pVM);
4326 rcStrict2 = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, uUser);
4327 PGM_LOCK_VOID(pVM);
4328 }
4329
4330 STAM_PROFILE_STOP(&pPhys->Stat, h); /* no locking needed, entry is unlikely reused before we get here. */
4331 pPhys = NULL; /* might not be valid anymore. */
4332 AssertLogRelMsg(PGM_HANDLER_PHYS_IS_VALID_STATUS(rcStrict2, true),
4333 ("rcStrict2=%Rrc (rcStrict=%Rrc) GCPhys=%RGp pPage=%R[pgmpage] %s\n", VBOXSTRICTRC_VAL(rcStrict2),
4334 VBOXSTRICTRC_VAL(rcStrict), GCPhys, pPage, pPhys ? R3STRING(pPhys->pszDesc) : ""));
4335 }
4336
4337 /*
4338 * Execute the default action and merge the status codes.
4339 */
4340 if (rcStrict2 == VINF_PGM_HANDLER_DO_DEFAULT)
4341 {
4342 memcpy(pvDst, pvBuf, cbRange);
4343 rcStrict2 = VINF_SUCCESS;
4344 }
4345 else if (!PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
4346 {
4347 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
4348 return rcStrict2;
4349 }
4350 else
4351 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
4352
4353 /*
4354 * Advance if we've got more stuff to do.
4355 */
4356 if (cbRange >= cbWrite)
4357 {
4358 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
4359 return rcStrict;
4360 }
4361
4362
4363 cbWrite -= cbRange;
4364 GCPhys += cbRange;
4365 pvBuf = (uint8_t *)pvBuf + cbRange;
4366 pvDst = (uint8_t *)pvDst + cbRange;
4367
4368 offPhys -= cbRange;
4369 offPhysLast -= cbRange;
4370 }
4371}
4372
4373
4374/**
4375 * Write to physical memory.
4376 *
4377 * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
4378 * want to ignore those.
4379 *
4380 * @returns Strict VBox status code in raw-mode and ring-0, normal VBox status
4381 * code in ring-3. Use PGM_PHYS_RW_IS_SUCCESS to check.
4382 * @retval VINF_SUCCESS in all context - write completed.
4383 *
4384 * @retval VINF_EM_OFF in RC and R0 - write completed.
4385 * @retval VINF_EM_SUSPEND in RC and R0 - write completed.
4386 * @retval VINF_EM_RESET in RC and R0 - write completed.
4387 * @retval VINF_EM_HALT in RC and R0 - write completed.
4388 * @retval VINF_SELM_SYNC_GDT in RC only - write completed.
4389 *
4390 * @retval VINF_EM_DBG_STOP in RC and R0 - write completed.
4391 * @retval VINF_EM_DBG_BREAKPOINT in RC and R0 - write completed.
4392 * @retval VINF_EM_RAW_EMULATE_INSTR in RC and R0 only.
4393 *
4394 * @retval VINF_IOM_R3_MMIO_WRITE in RC and R0.
4395 * @retval VINF_IOM_R3_MMIO_READ_WRITE in RC and R0.
4396 * @retval VINF_IOM_R3_MMIO_COMMIT_WRITE in RC and R0.
4397 *
4398 * @retval VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT in RC only - write completed.
4399 * @retval VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT in RC only.
4400 * @retval VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT in RC only.
4401 * @retval VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT in RC only.
4402 * @retval VINF_CSAM_PENDING_ACTION in RC only.
4403 * @retval VINF_PATM_CHECK_PATCH_PAGE in RC only.
4404 *
4405 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in RC and R0 for access origins that
4406 * haven't been cleared for strict status codes yet.
4407 *
4408 *
4409 * @param pVM The cross context VM structure.
4410 * @param GCPhys Physical address to write to.
4411 * @param pvBuf What to write.
4412 * @param cbWrite How many bytes to write.
4413 * @param enmOrigin Who is calling.
4414 */
4415VMMDECL(VBOXSTRICTRC) PGMPhysWrite(PVMCC pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite, PGMACCESSORIGIN enmOrigin)
4416{
4417 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()! enmOrigin=%d\n", enmOrigin));
4418 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
4419 LogFlow(("PGMPhysWrite: %RGp %d\n", GCPhys, cbWrite));
4420
4421 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysWrite));
4422 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysWriteBytes), cbWrite);
4423
4424 PGM_LOCK_VOID(pVM);
4425
4426 /*
4427 * Copy loop on ram ranges.
4428 */
4429 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
4430 for (;;)
4431 {
4432 PPGMRAMRANGE const pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
4433
4434 /* Inside range or not? */
4435 if (pRam && GCPhys >= pRam->GCPhys)
4436 {
4437 /*
4438 * Must work our way thru this page by page.
4439 */
4440 RTGCPTR off = GCPhys - pRam->GCPhys;
4441 while (off < pRam->cb)
4442 {
4443 RTGCPTR iPage = off >> GUEST_PAGE_SHIFT;
4444 PPGMPAGE pPage = &pRam->aPages[iPage];
4445 size_t cb = GUEST_PAGE_SIZE - (off & GUEST_PAGE_OFFSET_MASK);
4446 if (cb > cbWrite)
4447 cb = cbWrite;
4448
4449 /*
4450 * Normal page? Get the pointer to it.
4451 */
4452 if ( !PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
4453 && !PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
4454 {
4455 PGMPAGEMAPLOCK PgMpLck;
4456 void *pvDst;
4457 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst, &PgMpLck);
4458 if (RT_SUCCESS(rc))
4459 {
4460 Assert(!PGM_PAGE_IS_BALLOONED(pPage));
4461 memcpy(pvDst, pvBuf, cb);
4462 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
4463 }
4464 /* Ignore writes to ballooned pages. */
4465 else if (!PGM_PAGE_IS_BALLOONED(pPage))
4466 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
4467 pRam->GCPhys + off, pPage, rc));
4468 }
4469 /*
4470 * Active WRITE or ALL access handlers.
4471 */
4472 else
4473 {
4474 VBOXSTRICTRC rcStrict2 = pgmPhysWriteHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb, enmOrigin);
4475 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
4476 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
4477 else
4478 {
4479 PGM_UNLOCK(pVM);
4480 return rcStrict2;
4481 }
4482 }
4483
4484 /* next page */
4485 if (cb >= cbWrite)
4486 {
4487 PGM_UNLOCK(pVM);
4488 return rcStrict;
4489 }
4490
4491 cbWrite -= cb;
4492 off += cb;
4493 pvBuf = (const char *)pvBuf + cb;
4494 } /* walk pages in ram range */
4495
4496 GCPhys = pRam->GCPhysLast + 1;
4497 }
4498 else
4499 {
4500 /*
4501 * Unassigned address space, skip it.
4502 */
4503 if (!pRam)
4504 break;
4505 size_t cb = pRam->GCPhys - GCPhys;
4506 if (cb >= cbWrite)
4507 break;
4508 cbWrite -= cb;
4509 pvBuf = (const char *)pvBuf + cb;
4510 GCPhys += cb;
4511 }
4512
4513 } /* Ram range walk */
4514
4515 PGM_UNLOCK(pVM);
4516 return rcStrict;
4517}
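/*
 * Editor's note: illustrative caller-side fragment, not part of the original
 * source; pVM, GCPhys and abBuf are assumed from the surrounding context. It
 * mirrors the read case: in ring-0, origins that have not been cleared for
 * strict status codes may get VERR_PGM_PHYS_WR_HIT_HANDLER back and have to
 * redo the write in ring-3.
 */
#if 0 /* illustrative sketch only */
    VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM, GCPhys, abBuf, sizeof(abBuf), PGMACCESSORIGIN_DEVICE);
    if (!PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    { /* e.g. VERR_PGM_PHYS_WR_HIT_HANDLER; fall back to ring-3. */ }
#endif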
4518
4519
4520/**
4521 * Read from guest physical memory by GC physical address, bypassing
4522 * MMIO and access handlers.
4523 *
4524 * @returns VBox status code.
4525 * @param pVM The cross context VM structure.
4526 * @param pvDst The destination address.
4527 * @param GCPhysSrc The source address (GC physical address).
4528 * @param cb The number of bytes to read.
4529 */
4530VMMDECL(int) PGMPhysSimpleReadGCPhys(PVMCC pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
4531{
4532 /*
4533 * Treat the first page as a special case.
4534 */
4535 if (!cb)
4536 return VINF_SUCCESS;
4537
4538 /* map the 1st page */
4539 void const *pvSrc;
4540 PGMPAGEMAPLOCK Lock;
4541 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
4542 if (RT_FAILURE(rc))
4543 return rc;
4544
4545 /* optimize for the case where access is completely within the first page. */
4546 size_t cbPage = GUEST_PAGE_SIZE - (GCPhysSrc & GUEST_PAGE_OFFSET_MASK);
4547 if (RT_LIKELY(cb <= cbPage))
4548 {
4549 memcpy(pvDst, pvSrc, cb);
4550 PGMPhysReleasePageMappingLock(pVM, &Lock);
4551 return VINF_SUCCESS;
4552 }
4553
4554 /* copy to the end of the page. */
4555 memcpy(pvDst, pvSrc, cbPage);
4556 PGMPhysReleasePageMappingLock(pVM, &Lock);
4557 GCPhysSrc += cbPage;
4558 pvDst = (uint8_t *)pvDst + cbPage;
4559 cb -= cbPage;
4560
4561 /*
4562 * Page by page.
4563 */
4564 for (;;)
4565 {
4566 /* map the page */
4567 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
4568 if (RT_FAILURE(rc))
4569 return rc;
4570
4571 /* last page? */
4572 if (cb <= GUEST_PAGE_SIZE)
4573 {
4574 memcpy(pvDst, pvSrc, cb);
4575 PGMPhysReleasePageMappingLock(pVM, &Lock);
4576 return VINF_SUCCESS;
4577 }
4578
4579 /* copy the entire page and advance */
4580 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
4581 PGMPhysReleasePageMappingLock(pVM, &Lock);
4582 GCPhysSrc += GUEST_PAGE_SIZE;
4583 pvDst = (uint8_t *)pvDst + GUEST_PAGE_SIZE;
4584 cb -= GUEST_PAGE_SIZE;
4585 }
4586 /* won't ever get here. */
4587}
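/*
 * Editor's note: illustrative contrast, not part of the original source; pVM and
 * GCPhysSrc are assumed from the surrounding context. Unlike PGMPhysRead above,
 * the Simple variant bypasses MMIO and access handlers and returns a plain
 * status code, so no device emulation runs as a side effect of the read.
 */
#if 0 /* illustrative sketch only */
    uint8_t abRaw[GUEST_PAGE_SIZE];
    int rc = PGMPhysSimpleReadGCPhys(pVM, abRaw, GCPhysSrc, sizeof(abRaw)); /* handlers are not invoked */
#endif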
4588
4589
4590/**
4591 * Write to guest physical memory by GC physical address, bypassing
4592 * MMIO and access handlers.
4593 *
4594 * Use PGMPhysWrite() if you want access handlers and MMIO to be respected.
4595 *
4596 * @returns VBox status code.
4597 * @param pVM The cross context VM structure.
4598 * @param GCPhysDst The GC physical address of the destination.
4599 * @param pvSrc The source buffer.
4600 * @param cb The number of bytes to write.
4601 */
4602VMMDECL(int) PGMPhysSimpleWriteGCPhys(PVMCC pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb)
4603{
4604 LogFlow(("PGMPhysSimpleWriteGCPhys: %RGp %zu\n", GCPhysDst, cb));
4605
4606 /*
4607 * Treat the first page as a special case.
4608 */
4609 if (!cb)
4610 return VINF_SUCCESS;
4611
4612 /* map the 1st page */
4613 void *pvDst;
4614 PGMPAGEMAPLOCK Lock;
4615 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
4616 if (RT_FAILURE(rc))
4617 return rc;
4618
4619 /* optimize for the case where access is completely within the first page. */
4620 size_t cbPage = GUEST_PAGE_SIZE - (GCPhysDst & GUEST_PAGE_OFFSET_MASK);
4621 if (RT_LIKELY(cb <= cbPage))
4622 {
4623 memcpy(pvDst, pvSrc, cb);
4624 PGMPhysReleasePageMappingLock(pVM, &Lock);
4625 return VINF_SUCCESS;
4626 }
4627
4628 /* copy to the end of the page. */
4629 memcpy(pvDst, pvSrc, cbPage);
4630 PGMPhysReleasePageMappingLock(pVM, &Lock);
4631 GCPhysDst += cbPage;
4632 pvSrc = (const uint8_t *)pvSrc + cbPage;
4633 cb -= cbPage;
4634
4635 /*
4636 * Page by page.
4637 */
4638 for (;;)
4639 {
4640 /* map the page */
4641 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
4642 if (RT_FAILURE(rc))
4643 return rc;
4644
4645 /* last page? */
4646 if (cb <= GUEST_PAGE_SIZE)
4647 {
4648 memcpy(pvDst, pvSrc, cb);
4649 PGMPhysReleasePageMappingLock(pVM, &Lock);
4650 return VINF_SUCCESS;
4651 }
4652
4653 /* copy the entire page and advance */
4654 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
4655 PGMPhysReleasePageMappingLock(pVM, &Lock);
4656 GCPhysDst += GUEST_PAGE_SIZE;
4657 pvSrc = (const uint8_t *)pvSrc + GUEST_PAGE_SIZE;
4658 cb -= GUEST_PAGE_SIZE;
4659 }
4660 /* won't ever get here. */
4661}
4662
4663
4664/**
4665 * Read from guest physical memory referenced by GC pointer.
4666 *
4667 * This function uses the current CR3/CR0/CR4 of the guest and will
4668 * bypass access handlers and not set any accessed bits.
4669 *
4670 * @returns VBox status code.
4671 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
4672 * @param pvDst The destination address.
4673 * @param GCPtrSrc The source address (GC pointer).
4674 * @param cb The number of bytes to read.
4675 */
4676VMMDECL(int) PGMPhysSimpleReadGCPtr(PVMCPUCC pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
4677{
4678 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4679/** @todo fix the macro / state handling: VMCPU_ASSERT_EMT_OR_GURU(pVCpu); */
4680
4681 /*
4682 * Treat the first page as a special case.
4683 */
4684 if (!cb)
4685 return VINF_SUCCESS;
4686
4687 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleRead));
4688 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleReadBytes), cb);
4689
4690 /* Take the PGM lock here, because many called functions take the lock for a very short period. That's counter-productive
4691 * when many VCPUs are fighting for the lock.
4692 */
4693 PGM_LOCK_VOID(pVM);
4694
4695 /* map the 1st page */
4696 void const *pvSrc;
4697 PGMPAGEMAPLOCK Lock;
4698 int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
4699 if (RT_FAILURE(rc))
4700 {
4701 PGM_UNLOCK(pVM);
4702 return rc;
4703 }
4704
4705 /* optimize for the case where access is completely within the first page. */
4706 size_t cbPage = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
4707 if (RT_LIKELY(cb <= cbPage))
4708 {
4709 memcpy(pvDst, pvSrc, cb);
4710 PGMPhysReleasePageMappingLock(pVM, &Lock);
4711 PGM_UNLOCK(pVM);
4712 return VINF_SUCCESS;
4713 }
4714
4715 /* copy to the end of the page. */
4716 memcpy(pvDst, pvSrc, cbPage);
4717 PGMPhysReleasePageMappingLock(pVM, &Lock);
4718 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbPage);
4719 pvDst = (uint8_t *)pvDst + cbPage;
4720 cb -= cbPage;
4721
4722 /*
4723 * Page by page.
4724 */
4725 for (;;)
4726 {
4727 /* map the page */
4728 rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
4729 if (RT_FAILURE(rc))
4730 {
4731 PGM_UNLOCK(pVM);
4732 return rc;
4733 }
4734
4735 /* last page? */
4736 if (cb <= GUEST_PAGE_SIZE)
4737 {
4738 memcpy(pvDst, pvSrc, cb);
4739 PGMPhysReleasePageMappingLock(pVM, &Lock);
4740 PGM_UNLOCK(pVM);
4741 return VINF_SUCCESS;
4742 }
4743
4744 /* copy the entire page and advance */
4745 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
4746 PGMPhysReleasePageMappingLock(pVM, &Lock);
4747 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + GUEST_PAGE_SIZE);
4748 pvDst = (uint8_t *)pvDst + GUEST_PAGE_SIZE;
4749 cb -= GUEST_PAGE_SIZE;
4750 }
4751 /* won't ever get here. */
4752}
4753
4754
4755/**
4756 * Write to guest physical memory referenced by GC pointer.
4757 *
4758 * This function uses the current CR3/CR0/CR4 of the guest and will
4759 * bypass access handlers and not set dirty or accessed bits.
4760 *
4761 * @returns VBox status code.
4762 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
4763 * @param GCPtrDst The destination address (GC pointer).
4764 * @param pvSrc The source address.
4765 * @param cb The number of bytes to write.
4766 */
4767VMMDECL(int) PGMPhysSimpleWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
4768{
4769 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4770 VMCPU_ASSERT_EMT(pVCpu);
4771
4772 /*
4773 * Treat the first page as a special case.
4774 */
4775 if (!cb)
4776 return VINF_SUCCESS;
4777
4778 STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleWrite));
4779 STAM_COUNTER_ADD(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysSimpleWriteBytes), cb);
4780
4781 /* map the 1st page */
4782 void *pvDst;
4783 PGMPAGEMAPLOCK Lock;
4784 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
4785 if (RT_FAILURE(rc))
4786 return rc;
4787
4788 /* optimize for the case where access is completely within the first page. */
4789 size_t cbPage = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
4790 if (RT_LIKELY(cb <= cbPage))
4791 {
4792 memcpy(pvDst, pvSrc, cb);
4793 PGMPhysReleasePageMappingLock(pVM, &Lock);
4794 return VINF_SUCCESS;
4795 }
4796
4797 /* copy to the end of the page. */
4798 memcpy(pvDst, pvSrc, cbPage);
4799 PGMPhysReleasePageMappingLock(pVM, &Lock);
4800 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
4801 pvSrc = (const uint8_t *)pvSrc + cbPage;
4802 cb -= cbPage;
4803
4804 /*
4805 * Page by page.
4806 */
4807 for (;;)
4808 {
4809 /* map the page */
4810 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
4811 if (RT_FAILURE(rc))
4812 return rc;
4813
4814 /* last page? */
4815 if (cb <= GUEST_PAGE_SIZE)
4816 {
4817 memcpy(pvDst, pvSrc, cb);
4818 PGMPhysReleasePageMappingLock(pVM, &Lock);
4819 return VINF_SUCCESS;
4820 }
4821
4822 /* copy the entire page and advance */
4823 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
4824 PGMPhysReleasePageMappingLock(pVM, &Lock);
4825 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + GUEST_PAGE_SIZE);
4826 pvSrc = (const uint8_t *)pvSrc + GUEST_PAGE_SIZE;
4827 cb -= GUEST_PAGE_SIZE;
4828 }
4829 /* won't ever get here. */
4830}
4831
4832
4833/**
4834 * Write to guest physical memory referenced by GC pointer and update the PTE.
4835 *
4836 * This function uses the current CR3/CR0/CR4 of the guest and will
4837 * bypass access handlers but will set any dirty and accessed bits in the PTE.
4838 *
4839 * If you don't want to set the dirty bit, use PGMPhysSimpleWriteGCPtr().
4840 *
4841 * @returns VBox status code.
4842 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
4843 * @param GCPtrDst The destination address (GC pointer).
4844 * @param pvSrc The source address.
4845 * @param cb The number of bytes to write.
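 *
 * @note  Illustrative usage sketch; GCPtrTss and offField are hypothetical.
 *        Use this variant when the guest should observe the page as dirty:
 * @code
 *      uint16_t const uPrevTaskLink = 0;
 *      int rc = PGMPhysSimpleDirtyWriteGCPtr(pVCpu, GCPtrTss + offField, &uPrevTaskLink, sizeof(uPrevTaskLink));
 *      AssertRCReturn(rc, rc);
 * @endcode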
4846 */
4847VMMDECL(int) PGMPhysSimpleDirtyWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
4848{
4849 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4850 VMCPU_ASSERT_EMT(pVCpu);
4851
4852 /*
4853 * Treat the first page as a special case.
4854 * Btw. this is the same code as in PGMPhysSimpleWriteGCPtr except for the PGMGstModifyPage.
4855 */
4856 if (!cb)
4857 return VINF_SUCCESS;
4858
4859 /* map the 1st page */
4860 void *pvDst;
4861 PGMPAGEMAPLOCK Lock;
4862 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
4863 if (RT_FAILURE(rc))
4864 return rc;
4865
4866 /* optimize for the case where access is completely within the first page. */
4867 size_t cbPage = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
4868 if (RT_LIKELY(cb <= cbPage))
4869 {
4870 memcpy(pvDst, pvSrc, cb);
4871 PGMPhysReleasePageMappingLock(pVM, &Lock);
4872#ifdef VBOX_VMM_TARGET_X86
4873 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
4874#elif !defined(VBOX_VMM_TARGET_ARMV8)
4875# error "misconfig"
4876#endif
4877 return VINF_SUCCESS;
4878 }
4879
4880 /* copy to the end of the page. */
4881 memcpy(pvDst, pvSrc, cbPage);
4882 PGMPhysReleasePageMappingLock(pVM, &Lock);
4883#ifdef VBOX_VMM_TARGET_X86
4884 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
4885#elif !defined(VBOX_VMM_TARGET_ARMV8)
4886# error "misconfig"
4887#endif
4888 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
4889 pvSrc = (const uint8_t *)pvSrc + cbPage;
4890 cb -= cbPage;
4891
4892 /*
4893 * Page by page.
4894 */
4895 for (;;)
4896 {
4897 /* map the page */
4898 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
4899 if (RT_FAILURE(rc))
4900 return rc;
4901
4902 /* last page? */
4903 if (cb <= GUEST_PAGE_SIZE)
4904 {
4905 memcpy(pvDst, pvSrc, cb);
4906 PGMPhysReleasePageMappingLock(pVM, &Lock);
4907#ifdef VBOX_VMM_TARGET_X86
4908 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
4909#elif !defined(VBOX_VMM_TARGET_ARMV8)
4910# error "misconfig"
4911#endif
4912 return VINF_SUCCESS;
4913 }
4914
4915 /* copy the entire page and advance */
4916 memcpy(pvDst, pvSrc, GUEST_PAGE_SIZE);
4917 PGMPhysReleasePageMappingLock(pVM, &Lock);
4918#ifdef VBOX_VMM_TARGET_X86
4919 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
4920#elif !defined(VBOX_VMM_TARGET_ARMV8)
4921# error "misconfig"
4922#endif
4923 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + GUEST_PAGE_SIZE);
4924 pvSrc = (const uint8_t *)pvSrc + GUEST_PAGE_SIZE;
4925 cb -= GUEST_PAGE_SIZE;
4926 }
4927 /* won't ever get here. */
4928}
4929
4930
4931/**
4932 * Read from guest physical memory referenced by GC pointer.
4933 *
4934 * This function uses the current CR3/CR0/CR4 of the guest and will
4935 * respect access handlers and set accessed bits.
4936 *
4937 * @returns Strict VBox status, see PGMPhysRead for details.
4938 * @retval VERR_PAGE_TABLE_NOT_PRESENT if there is no page mapped at the
4939 * specified virtual address.
4940 *
4941 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
4942 * @param pvDst The destination address.
4943 * @param GCPtrSrc The source address (GC pointer).
4944 * @param cb The number of bytes to read.
4945 * @param enmOrigin Who is calling.
4946 * @thread EMT(pVCpu)
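 *
 * @note  Illustrative usage sketch; pvDst, GCPtrSrc and cb come from the
 *        caller, and PGMACCESSORIGIN_DEBUGGER is merely one possible origin:
 * @code
 *      VBOXSTRICTRC rcStrict = PGMPhysReadGCPtr(pVCpu, pvDst, GCPtrSrc, cb, PGMACCESSORIGIN_DEBUGGER);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;    // may be an error or an informational handler status
 * @endcode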
4947 */
4948VMMDECL(VBOXSTRICTRC) PGMPhysReadGCPtr(PVMCPUCC pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
4949{
4950 int rc;
4951 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
4952 VMCPU_ASSERT_EMT(pVCpu);
4953
4954 /*
4955 * Anything to do?
4956 */
4957 if (!cb)
4958 return VINF_SUCCESS;
4959
4960 LogFlow(("PGMPhysReadGCPtr: %RGv %zu\n", GCPtrSrc, cb));
4961
4962 /*
4963 * Optimize reads within a single page.
4964 */
4965 if (((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK) + cb <= GUEST_PAGE_SIZE)
4966 {
4967 /* Convert virtual to physical address + flags */
4968 PGMPTWALK Walk;
4969 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrSrc, &Walk);
4970 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
4971 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
4972
4973#ifdef VBOX_VMM_TARGET_X86
4974 /* mark the guest page as accessed. */
4975 if (!(Walk.fEffective & X86_PTE_A))
4976 {
4977 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
4978 AssertRC(rc);
4979 }
4980#elif !defined(VBOX_VMM_TARGET_ARMV8)
4981# error "misconfig"
4982#endif
4983 return PGMPhysRead(pVM, GCPhys, pvDst, cb, enmOrigin);
4984 }
4985
4986 /*
4987 * Page by page.
4988 */
4989 for (;;)
4990 {
4991 /* Convert virtual to physical address + flags */
4992 PGMPTWALK Walk;
4993 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrSrc, &Walk);
4994 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
4995 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
4996
4997#ifdef VBOX_VMM_TARGET_X86
4998 /* mark the guest page as accessed. */
4999 if (!(Walk.fEffective & X86_PTE_A))
5000 {
5001 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
5002 AssertRC(rc);
5003 }
5004#elif !defined(VBOX_VMM_TARGET_ARMV8)
5005# error "misconfig"
5006#endif
5007
5008 /* copy */
5009 size_t cbRead = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & GUEST_PAGE_OFFSET_MASK);
5010 if (cbRead < cb)
5011 {
5012 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pvDst, cbRead, enmOrigin);
5013 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5014 { /* likely */ }
5015 else
5016 return rcStrict;
5017 }
5018 else /* Last page (cbRead is GUEST_PAGE_SIZE, we only need cb!) */
5019 return PGMPhysRead(pVM, GCPhys, pvDst, cb, enmOrigin);
5020
5021 /* next */
5022 Assert(cb > cbRead);
5023 cb -= cbRead;
5024 pvDst = (uint8_t *)pvDst + cbRead;
5025 GCPtrSrc += cbRead;
5026 }
5027}
5028
5029
5030/**
5031 * Write to guest physical memory referenced by GC pointer.
5032 *
5033 * This function uses the current CR3/CR0/CR4 of the guest and will
5034 * respect access handlers and set dirty and accessed bits.
5035 *
5036 * @returns Strict VBox status, see PGMPhysWrite for details.
5037 * @retval VERR_PAGE_TABLE_NOT_PRESENT if there is no page mapped at the
5038 * specified virtual address.
5039 *
5040 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
5041 * @param GCPtrDst The destination address (GC pointer).
5042 * @param pvSrc The source address.
5043 * @param cb The number of bytes to write.
5044 * @param enmOrigin Who is calling.
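 *
 * @note  Illustrative usage sketch; the origin value is only an example and
 *        informational handler statuses are simply passed up here:
 * @code
 *      VBOXSTRICTRC rcStrict = PGMPhysWriteGCPtr(pVCpu, GCPtrDst, pvSrc, cb, PGMACCESSORIGIN_DEBUGGER);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 * @endcode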
5045 */
5046VMMDECL(VBOXSTRICTRC) PGMPhysWriteGCPtr(PVMCPUCC pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
5047{
5048 int rc;
5049 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5050 VMCPU_ASSERT_EMT(pVCpu);
5051
5052 /*
5053 * Anything to do?
5054 */
5055 if (!cb)
5056 return VINF_SUCCESS;
5057
5058 LogFlow(("PGMPhysWriteGCPtr: %RGv %zu\n", GCPtrDst, cb));
5059
5060 /*
5061 * Optimize writes within a single page.
5062 */
5063 if (((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK) + cb <= GUEST_PAGE_SIZE)
5064 {
5065 /* Convert virtual to physical address + flags */
5066 PGMPTWALK Walk;
5067 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrDst, &Walk);
5068 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
5069 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
5070
5071 /* Mention when we ignore X86_PTE_RW... */
5072 if (!(Walk.fEffective & X86_PTE_RW))
5073 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
5074
5075#ifdef VBOX_VMM_TARGET_X86
5076 /* Mark the guest page as accessed and dirty if necessary. */
5077 if ((Walk.fEffective & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
5078 {
5079 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
5080 AssertRC(rc);
5081 }
5082#elif !defined(VBOX_VMM_TARGET_ARMV8)
5083# error "misconfig"
5084#endif
5085
5086 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb, enmOrigin);
5087 }
5088
5089 /*
5090 * Page by page.
5091 */
5092 for (;;)
5093 {
5094 /* Convert virtual to physical address + flags */
5095 PGMPTWALK Walk;
5096 rc = PGMGstGetPage(pVCpu, (RTGCUINTPTR)GCPtrDst, &Walk);
5097 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
5098 RTGCPHYS const GCPhys = Walk.GCPhys | ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
5099
5100 /* Mention when we ignore X86_PTE_RW... */
5101 if (!(Walk.fEffective & X86_PTE_RW))
5102 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
5103
5104#ifdef VBOX_VMM_TARGET_X86
5105 /* Mark the guest page as accessed and dirty if necessary. */
5106 if ((Walk.fEffective & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
5107 {
5108 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
5109 AssertRC(rc);
5110 }
5111#elif !defined(VBOX_VMM_TARGET_ARMV8)
5112# error "misconfig"
5113#endif
5114
5115 /* copy */
5116 size_t cbWrite = GUEST_PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & GUEST_PAGE_OFFSET_MASK);
5117 if (cbWrite < cb)
5118 {
5119 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM, GCPhys, pvSrc, cbWrite, enmOrigin);
5120 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5121 { /* likely */ }
5122 else
5123 return rcStrict;
5124 }
5125 else /* Last page (cbWrite is GUEST_PAGE_SIZE, we only need cb!) */
5126 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb, enmOrigin);
5127
5128 /* next */
5129 Assert(cb > cbWrite);
5130 cb -= cbWrite;
5131 pvSrc = (uint8_t *)pvSrc + cbWrite;
5132 GCPtrDst += cbWrite;
5133 }
5134}
5135
5136
5137/**
5138 * Return the page type of the specified physical address.
5139 *
5140 * @returns The page type.
5141 * @param pVM The cross context VM structure.
5142 * @param GCPhys Guest physical address
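 *
 * @note  Illustrative usage sketch:
 * @code
 *      PGMPAGETYPE const enmType = PGMPhysGetPageType(pVM, GCPhys);
 *      bool const fIsMmio = enmType == PGMPAGETYPE_MMIO;
 *      bool const fExists = enmType != PGMPAGETYPE_INVALID;
 * @endcode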
5143 */
5144VMM_INT_DECL(PGMPAGETYPE) PGMPhysGetPageType(PVMCC pVM, RTGCPHYS GCPhys)
5145{
5146 PGM_LOCK_VOID(pVM);
5147 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
5148 PGMPAGETYPE enmPgType = pPage ? (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage) : PGMPAGETYPE_INVALID;
5149 PGM_UNLOCK(pVM);
5150
5151 return enmPgType;
5152}
5153
5154
5155/** Helper for PGMPhysIemGCPhys2PtrNoLock. */
5156DECL_FORCE_INLINE(int)
5157pgmPhyIemGCphys2PtrNoLockReturnNoNothing(uint64_t uTlbPhysRev, R3R0PTRTYPE(uint8_t *) *ppb, uint64_t *pfTlb,
5158 RTGCPHYS GCPhys, PCPGMPAGE pPageCopy)
5159{
5160 *pfTlb |= uTlbPhysRev
5161 | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
5162 *ppb = NULL;
5163 Log6(("PGMPhysIemGCPhys2PtrNoLock: GCPhys=%RGp *ppb=NULL *pfTlb=%#RX64 PageCopy=%R[pgmpage] NO\n", GCPhys,
5164 uTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3, pPageCopy));
5165 RT_NOREF(GCPhys, pPageCopy);
5166 return VINF_SUCCESS;
5167}
5168
5169
5170/** Helper for PGMPhysIemGCPhys2PtrNoLock. */
5171DECL_FORCE_INLINE(int)
5172pgmPhyIemGCphys2PtrNoLockReturnReadOnly(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uTlbPhysRev, RTGCPHYS GCPhys, PCPGMPAGE pPageCopy,
5173 PPGMRAMRANGE pRam, PPGMPAGE pPage, R3R0PTRTYPE(uint8_t *) *ppb, uint64_t *pfTlb)
5174{
5175 RT_NOREF(GCPhys);
5176 if (!PGM_PAGE_IS_CODE_PAGE(pPageCopy))
5177 *pfTlb |= uTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE;
5178 else
5179 *pfTlb |= uTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_CODE_PAGE;
5180
5181#ifdef IN_RING3
5182 if (PGM_IS_IN_NEM_MODE(pVM))
5183 *ppb = &pRam->pbR3[(RTGCPHYS)(uintptr_t)(pPage - &pRam->aPages[0]) << GUEST_PAGE_SHIFT];
5184 else
5185#endif
5186 {
5187#ifdef IN_RING3
5188# ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
5189 PPGMPAGEMAPTLBE pTlbe;
5190 int rc = pgmPhysPageQueryLocklessTlbeWithPage(pVCpu, pPage, GCPhys, &pTlbe);
5191 AssertLogRelRCReturn(rc, rc);
5192 *ppb = (uint8_t *)pTlbe->pv;
5193 RT_NOREF(pVM);
5194# endif
5195#else /** @todo a safe lockless page TLB in ring-0 needs to ensure it gets the right invalidations. Later. */
5196 PGM_LOCK(pVM);
5197 PPGMPAGEMAPTLBE pTlbe;
5198 int rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
5199 AssertLogRelRCReturnStmt(rc, PGM_UNLOCK(pVM), rc);
5200 *ppb = (uint8_t *)pTlbe->pv;
5201 PGM_UNLOCK(pVM);
5202 RT_NOREF(pVCpu);
5203#endif
5204 }
5205 Log6(("PGMPhysIemGCPhys2PtrNoLock: GCPhys=%RGp *ppb=%p *pfTlb=%#RX64 PageCopy=%R[pgmpage] RO\n", GCPhys, *ppb, *pfTlb, pPageCopy));
5206 RT_NOREF(pRam, pVM, pVCpu);
5207 return VINF_SUCCESS;
5208}
5209
5210
5211/** Helper for PGMPhysIemGCPhys2PtrNoLock. */
5212DECL_FORCE_INLINE(int)
5213pgmPhyIemGCphys2PtrNoLockReturnReadWrite(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uTlbPhysRev, RTGCPHYS GCPhys, PCPGMPAGE pPageCopy,
5214 PPGMRAMRANGE pRam, PPGMPAGE pPage, R3R0PTRTYPE(uint8_t *) *ppb, uint64_t *pfTlb)
5215{
5216 Assert(!PGM_PAGE_IS_CODE_PAGE(pPageCopy));
5217 RT_NOREF(pPageCopy, GCPhys);
5218 *pfTlb |= uTlbPhysRev;
5219
5220#ifdef IN_RING3
5221 if (PGM_IS_IN_NEM_MODE(pVM))
5222 *ppb = &pRam->pbR3[(RTGCPHYS)(uintptr_t)(pPage - &pRam->aPages[0]) << GUEST_PAGE_SHIFT];
5223 else
5224#endif
5225 {
5226#ifdef IN_RING3
5227# ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
5228 PPGMPAGEMAPTLBE pTlbe;
5229 int rc = pgmPhysPageQueryLocklessTlbeWithPage(pVCpu, pPage, GCPhys, &pTlbe);
5230 AssertLogRelRCReturn(rc, rc);
5231 *ppb = (uint8_t *)pTlbe->pv;
5232 RT_NOREF(pVM);
5233# endif
5234#else /** @todo a safe lockless page TLB in ring-0 needs to ensure it gets the right invalidations. Later. */
5235 PGM_LOCK(pVM);
5236 PPGMPAGEMAPTLBE pTlbe;
5237 int rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
5238 AssertLogRelRCReturnStmt(rc, PGM_UNLOCK(pVM), rc);
5239 *ppb = (uint8_t *)pTlbe->pv;
5240 PGM_UNLOCK(pVM);
5241 RT_NOREF(pVCpu);
5242#endif
5243 }
5244 Log6(("PGMPhysIemGCPhys2PtrNoLock: GCPhys=%RGp *ppb=%p *pfTlb=%#RX64 PageCopy=%R[pgmpage] RW\n", GCPhys, *ppb, *pfTlb, pPageCopy));
5245 RT_NOREF(pRam, pVM, pVCpu);
5246 return VINF_SUCCESS;
5247}
5248
5249
5250/**
5251 * Converts a GC physical address to a HC ring-3 pointer, with some
5252 * additional checks.
5253 *
5254 * @returns VBox status code (no informational statuses).
5255 *
5256 * @param pVM The cross context VM structure.
5257 * @param pVCpu The cross context virtual CPU structure of the
5258 * calling EMT.
5259 * @param GCPhys The GC physical address to convert. This API masks
5260 * the A20 line when necessary.
5261 * @param puTlbPhysRev Where to read the physical TLB revision. Needs to
5262 * be done while holding the PGM lock.
5263 * @param ppb Where to store the pointer corresponding to GCPhys
5264 * on success.
5265 * @param pfTlb The TLB flags and revision. We only add stuff.
5266 *
5267 * @remarks This is more or less a copy of PGMR3PhysTlbGCPhys2Ptr and
5268 * PGMPhysIemGCPhys2Ptr.
5269 *
5270 * @thread EMT(pVCpu).
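 *
 * @note  Illustrative usage sketch of how a caller-maintained physical TLB
 *        entry might consume the outputs; uTlbPhysRev is assumed to be the
 *        caller's copy of the current physical TLB revision:
 * @code
 *      uint8_t *pbMapping = NULL;
 *      uint64_t fTlb      = 0;
 *      int rc = PGMPhysIemGCPhys2PtrNoLock(pVM, pVCpu, GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK,
 *                                          &uTlbPhysRev, &pbMapping, &fTlb);
 *      AssertRCReturn(rc, rc);
 *      bool const fCanRead = !(fTlb & PGMIEMGCPHYS2PTR_F_NO_READ) && pbMapping != NULL;
 * @endcode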
5271 */
5272VMM_INT_DECL(int) PGMPhysIemGCPhys2PtrNoLock(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint64_t const volatile *puTlbPhysRev,
5273 R3R0PTRTYPE(uint8_t *) *ppb, uint64_t *pfTlb)
5274{
5275 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhys);
5276 Assert(!(GCPhys & X86_PAGE_OFFSET_MASK));
5277
5278 PGMRAMRANGE volatile *pRam;
5279 PGMPAGE volatile *pPage;
5280 int rc = pgmPhysGetPageAndRangeExLockless(pVM, pVCpu, GCPhys, &pPage, &pRam);
5281 if (RT_SUCCESS(rc))
5282 {
5283 /*
5284 * Wrt update races, we will try to pretend we beat the update we're
5285 * racing. We do this by sampling the physical TLB revision first, so
5286 * that the TLB entry / whatever purpose the caller has with the info
5287 * will become invalid immediately if it's updated.
5288 *
5289 * This means the caller will (probably) make use of the returned info
5290 * only once and then requery it the next time it is used, getting the
5291 * updated info. This would then be just as if the first query got the
5292 * PGM lock before the updater.
5293 */
5294 /** @todo make PGMPAGE updates more atomic, possibly flagging complex
5295 * updates by adding a u1UpdateInProgress field (or revision).
5296 * This would be especially important when updating the page ID... */
5297 uint64_t uTlbPhysRev = *puTlbPhysRev;
5298 PGMPAGE PageCopy = { { pPage->au64[0], pPage->au64[1] } };
5299 if ( uTlbPhysRev == *puTlbPhysRev
5300 && PageCopy.au64[0] == pPage->au64[0]
5301 && PageCopy.au64[1] == pPage->au64[1])
5302 ASMCompilerBarrier(); /* likely */
5303 else
5304 {
5305 PGM_LOCK_VOID(pVM);
5306 uTlbPhysRev = *puTlbPhysRev;
5307 PageCopy.au64[0] = pPage->au64[0];
5308 PageCopy.au64[1] = pPage->au64[1];
5309 PGM_UNLOCK(pVM);
5310 }
5311
5312 /*
5313 * Try optimize for the regular case first: Writable RAM.
5314 */
5315 switch (PGM_PAGE_GET_HNDL_PHYS_STATE(&PageCopy))
5316 {
5317 case PGM_PAGE_HNDL_PHYS_STATE_DISABLED:
5318 if (!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(&PageCopy))
5319 { /* likely */ }
5320 else
5321 return pgmPhyIemGCphys2PtrNoLockReturnNoNothing(uTlbPhysRev, ppb, pfTlb, GCPhys, &PageCopy);
5322 RT_FALL_THRU();
5323 case PGM_PAGE_HNDL_PHYS_STATE_NONE:
5324 Assert(!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(&PageCopy));
5325 switch (PGM_PAGE_GET_STATE_NA(&PageCopy))
5326 {
5327 case PGM_PAGE_STATE_ALLOCATED:
5328 return pgmPhyIemGCphys2PtrNoLockReturnReadWrite(pVM, pVCpu, uTlbPhysRev, GCPhys, &PageCopy,
5329 (PPGMRAMRANGE)pRam, (PPGMPAGE)pPage, ppb, pfTlb);
5330
5331 case PGM_PAGE_STATE_ZERO:
5332 case PGM_PAGE_STATE_WRITE_MONITORED:
5333 case PGM_PAGE_STATE_SHARED:
5334 return pgmPhyIemGCphys2PtrNoLockReturnReadOnly(pVM, pVCpu, uTlbPhysRev, GCPhys, &PageCopy,
5335 (PPGMRAMRANGE)pRam, (PPGMPAGE)pPage, ppb, pfTlb);
5336
5337 default: AssertFailed(); RT_FALL_THROUGH();
5338 case PGM_PAGE_STATE_BALLOONED:
5339 return pgmPhyIemGCphys2PtrNoLockReturnNoNothing(uTlbPhysRev, ppb, pfTlb, GCPhys, &PageCopy);
5340 }
5341 break;
5342
5343 case PGM_PAGE_HNDL_PHYS_STATE_WRITE:
5344 Assert(!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(&PageCopy));
5345 switch (PGM_PAGE_GET_STATE_NA(&PageCopy))
5346 {
5347 case PGM_PAGE_STATE_ALLOCATED:
5348 Assert(!PGM_PAGE_IS_CODE_PAGE(&PageCopy));
5349 RT_FALL_THRU();
5350 case PGM_PAGE_STATE_ZERO:
5351 case PGM_PAGE_STATE_WRITE_MONITORED:
5352 case PGM_PAGE_STATE_SHARED:
5353 return pgmPhyIemGCphys2PtrNoLockReturnReadOnly(pVM, pVCpu, uTlbPhysRev, GCPhys, &PageCopy,
5354 (PPGMRAMRANGE)pRam, (PPGMPAGE)pPage, ppb, pfTlb);
5355
5356 default: AssertFailed(); RT_FALL_THROUGH();
5357 case PGM_PAGE_STATE_BALLOONED:
5358 return pgmPhyIemGCphys2PtrNoLockReturnNoNothing(uTlbPhysRev, ppb, pfTlb, GCPhys, &PageCopy);
5359 }
5360 break;
5361
5362 case PGM_PAGE_HNDL_PHYS_STATE_ALL:
5363 Assert(!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(&PageCopy));
5364 return pgmPhyIemGCphys2PtrNoLockReturnNoNothing(uTlbPhysRev, ppb, pfTlb, GCPhys, &PageCopy);
5365 }
5366 }
5367 else
5368 {
5369 *pfTlb |= *puTlbPhysRev | PGMIEMGCPHYS2PTR_F_NO_WRITE | PGMIEMGCPHYS2PTR_F_NO_READ
5370 | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 | PGMIEMGCPHYS2PTR_F_UNASSIGNED;
5371 *ppb = NULL;
5372 Log6(("PGMPhysIemGCPhys2PtrNoLock: GCPhys=%RGp *ppb=%p *pfTlb=%#RX64 (rc=%Rrc)\n", GCPhys, *ppb, *pfTlb, rc));
5373 }
5374
5375 return VINF_SUCCESS;
5376}
5377
5378
5379/**
5380 * Converts a GC physical address to a HC ring-3 pointer, with some
5381 * additional checks.
5382 *
5383 * @returns VBox status code (no informational statuses).
5384 * @retval VINF_SUCCESS on success.
5385 * @retval VERR_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
5386 * access handler of some kind.
5387 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
5388 * accesses or is odd in any way.
5389 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
5390 *
5391 * @param pVM The cross context VM structure.
5392 * @param pVCpu The cross context virtual CPU structure of the
5393 * calling EMT.
5394 * @param GCPhys The GC physical address to convert. This API masks
5395 * the A20 line when necessary.
5396 * @param fWritable Whether write access is required.
5397 * @param fByPassHandlers Whether to bypass access handlers.
5398 * @param ppv Where to store the pointer corresponding to GCPhys
5399 * on success.
5400 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs on success.
5401 *
5402 * @remarks This is more or less a copy of PGMR3PhysTlbGCPhys2Ptr.
5403 * @thread EMT(pVCpu).
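 *
 * @note  Illustrative usage sketch; pvSrc and cbToCopy are hypothetical and
 *        must not cross the page boundary. The mapping is released again via
 *        PGMPhysReleasePageMappingLock:
 * @code
 *      void          *pvPage = NULL;
 *      PGMPAGEMAPLOCK Lock;
 *      bool const     fWritable       = true;
 *      bool const     fByPassHandlers = false;
 *      int rc = PGMPhysIemGCPhys2Ptr(pVM, pVCpu, GCPhys, fWritable, fByPassHandlers, &pvPage, &Lock);
 *      if (RT_SUCCESS(rc))
 *      {
 *          memcpy(pvPage, pvSrc, cbToCopy);
 *          PGMPhysReleasePageMappingLock(pVM, &Lock);
 *      }
 * @endcode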
5404 */
5405VMM_INT_DECL(int) PGMPhysIemGCPhys2Ptr(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers,
5406 void **ppv, PPGMPAGEMAPLOCK pLock)
5407{
5408 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhys);
5409 RT_NOREF(pVCpu);
5410
5411 PGM_LOCK_VOID(pVM);
5412
5413 PPGMRAMRANGE pRam;
5414 PPGMPAGE pPage;
5415 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
5416 if (RT_SUCCESS(rc))
5417 {
5418 if (PGM_PAGE_IS_BALLOONED(pPage))
5419 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
5420 else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
5421 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
5422 else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
5423 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
5424 rc = VINF_SUCCESS;
5425 else
5426 {
5427 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
5428 {
5429 Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
5430 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
5431 }
5432 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
5433 {
5434 Assert(!fByPassHandlers);
5435 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
5436 }
5437 }
5438 if (RT_SUCCESS(rc))
5439 {
5440 int rc2;
5441
5442 /* Make sure what we return is writable. */
5443 if (fWritable)
5444 switch (PGM_PAGE_GET_STATE(pPage))
5445 {
5446 case PGM_PAGE_STATE_ALLOCATED:
5447 break;
5448 case PGM_PAGE_STATE_BALLOONED:
5449 AssertFailed();
5450 break;
5451 case PGM_PAGE_STATE_ZERO:
5452 case PGM_PAGE_STATE_SHARED:
5453 case PGM_PAGE_STATE_WRITE_MONITORED:
5454 rc2 = pgmPhysPageMakeWritable(pVM, pPage, GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK);
5455 AssertLogRelRCReturn(rc2, rc2);
5456 break;
5457 }
5458
5459 /* Get a ring-3 mapping of the address. */
5460 PPGMPAGEMAPTLBE pTlbe;
5461 rc2 = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
5462 AssertLogRelRCReturn(rc2, rc2);
5463
5464 /* Lock it and calculate the address. */
5465 if (fWritable)
5466 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
5467 else
5468 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
5469 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & GUEST_PAGE_OFFSET_MASK));
5470
5471 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage] *ppv=%p\n", GCPhys, rc, pPage, *ppv));
5472 }
5473 else
5474 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage]\n", GCPhys, rc, pPage));
5475
5476 /* else: handler catching all access, no pointer returned. */
5477 }
5478 else
5479 rc = VERR_PGM_PHYS_TLB_UNASSIGNED;
5480
5481 PGM_UNLOCK(pVM);
5482 return rc;
5483}
5484
5485
5486/**
5487 * Checks if the given GCPhys page requires special handling for the given access
5488 * because it's MMIO or otherwise monitored.
5489 *
5490 * @returns VBox status code (no informational statuses).
5491 * @retval VINF_SUCCESS on success.
5492 * @retval VERR_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
5493 * access handler of some kind.
5494 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
5495 * accesses or is odd in any way.
5496 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
5497 *
5498 * @param pVM The cross context VM structure.
5499 * @param GCPhys The GC physical address to convert. Since this is
5500 * only used for filling the REM TLB, the A20 mask must
5501 * be applied before calling this API.
5502 * @param fWritable Whether write access is required.
5503 * @param fByPassHandlers Whether to bypass access handlers.
5504 *
5505 * @remarks This is a watered down version of PGMPhysIemGCPhys2Ptr and really just
5506 * a stop gap thing that should be removed once there is a better TLB
5507 * for virtual address accesses.
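 *
 * @note  Illustrative usage sketch; the A20 mask is assumed to have been
 *        applied to GCPhys by the caller already:
 * @code
 *      bool const fWritable       = true;
 *      bool const fByPassHandlers = false;
 *      int rc = PGMPhysIemQueryAccess(pVM, GCPhys, fWritable, fByPassHandlers);
 *      if (rc == VERR_PGM_PHYS_TLB_CATCH_WRITE)
 *          Log(("Writes to %RGp must go through PGMPhysWrite\n", GCPhys));
 * @endcode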
5508 */
5509VMM_INT_DECL(int) PGMPhysIemQueryAccess(PVMCC pVM, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers)
5510{
5511 PGM_LOCK_VOID(pVM);
5512 PGM_A20_ASSERT_MASKED(VMMGetCpu(pVM), GCPhys);
5513
5514 PPGMRAMRANGE pRam;
5515 PPGMPAGE pPage;
5516 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
5517 if (RT_SUCCESS(rc))
5518 {
5519 if (PGM_PAGE_IS_BALLOONED(pPage))
5520 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
5521 else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
5522 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
5523 else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
5524 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
5525 rc = VINF_SUCCESS;
5526 else
5527 {
5528 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
5529 {
5530 Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
5531 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
5532 }
5533 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
5534 {
5535 Assert(!fByPassHandlers);
5536 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
5537 }
5538 }
5539 }
5540
5541 PGM_UNLOCK(pVM);
5542 return rc;
5543}
5544
5545#ifdef VBOX_WITH_NATIVE_NEM
5546
5547/**
5548 * Interface used by NEM to check what to do on a memory access exit.
5549 *
5550 * @returns VBox status code.
5551 * @param pVM The cross context VM structure.
5552 * @param pVCpu The cross context per virtual CPU structure.
5553 * Optional.
5554 * @param GCPhys The guest physical address.
5555 * @param fMakeWritable Whether to try to make the page writable or not. If it
5556 * cannot be made writable, NEM_PAGE_PROT_WRITE won't
5557 * be returned and the return code will be unaffected.
5558 * @param pInfo Where to return the page information. This is
5559 * initialized even on failure.
5560 * @param pfnChecker Page in-sync checker callback. Optional.
5561 * @param pvUser User argument to pass to pfnChecker.
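 *
 * @note  Illustrative usage sketch without a checker callback; the
 *        NEM_PAGE_PROT_WRITE test shows one way the protection info might be
 *        consumed:
 * @code
 *      PGMPHYSNEMPAGEINFO Info;
 *      bool const fMakeWritable = true;
 *      int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhys, fMakeWritable, &Info, NULL, NULL);
 *      bool const fCanMapWritable = RT_SUCCESS(rc) && (Info.fNemProt & NEM_PAGE_PROT_WRITE);
 * @endcode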
5562 */
5563VMM_INT_DECL(int) PGMPhysNemPageInfoChecker(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, bool fMakeWritable,
5564 PPGMPHYSNEMPAGEINFO pInfo, PFNPGMPHYSNEMCHECKPAGE pfnChecker, void *pvUser)
5565{
5566 PGM_LOCK_VOID(pVM);
5567
5568 PPGMPAGE pPage;
5569 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
5570 if (RT_SUCCESS(rc))
5571 {
5572 /* Try make it writable if requested. */
5573 pInfo->u2OldNemState = PGM_PAGE_GET_NEM_STATE(pPage);
5574 if (fMakeWritable)
5575 switch (PGM_PAGE_GET_STATE(pPage))
5576 {
5577 case PGM_PAGE_STATE_SHARED:
5578 case PGM_PAGE_STATE_WRITE_MONITORED:
5579 case PGM_PAGE_STATE_ZERO:
5580 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
5581 if (rc == VERR_PGM_PHYS_PAGE_RESERVED)
5582 rc = VINF_SUCCESS;
5583 break;
5584 }
5585
5586 /* Fill in the info. */
5587 pInfo->HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
5588 pInfo->u2NemState = PGM_PAGE_GET_NEM_STATE(pPage);
5589 pInfo->fHasHandlers = PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) ? 1 : 0;
5590 PGMPAGETYPE const enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
5591 pInfo->enmType = enmType;
5592 pInfo->fNemProt = pgmPhysPageCalcNemProtection(pPage, enmType);
5593 switch (PGM_PAGE_GET_STATE(pPage))
5594 {
5595 case PGM_PAGE_STATE_ALLOCATED:
5596 pInfo->fZeroPage = 0;
5597 break;
5598
5599 case PGM_PAGE_STATE_ZERO:
5600 pInfo->fZeroPage = 1;
5601 break;
5602
5603 case PGM_PAGE_STATE_WRITE_MONITORED:
5604 pInfo->fZeroPage = 0;
5605 break;
5606
5607 case PGM_PAGE_STATE_SHARED:
5608 pInfo->fZeroPage = 0;
5609 break;
5610
5611 case PGM_PAGE_STATE_BALLOONED:
5612 pInfo->fZeroPage = 1;
5613 break;
5614
5615 default:
5616 pInfo->fZeroPage = 1;
5617 AssertFailedStmt(rc = VERR_PGM_PHYS_PAGE_GET_IPE);
5618 }
5619
5620 /* Call the checker and update NEM state. */
5621 if (pfnChecker)
5622 {
5623 rc = pfnChecker(pVM, pVCpu, GCPhys, pInfo, pvUser);
5624 PGM_PAGE_SET_NEM_STATE(pPage, pInfo->u2NemState);
5625 }
5626
5627 /* Done. */
5628 PGM_UNLOCK(pVM);
5629 }
5630 else
5631 {
5632 PGM_UNLOCK(pVM);
5633
5634 pInfo->HCPhys = NIL_RTHCPHYS;
5635 pInfo->fNemProt = NEM_PAGE_PROT_NONE;
5636 pInfo->u2NemState = 0;
5637 pInfo->fHasHandlers = 0;
5638 pInfo->fZeroPage = 0;
5639 pInfo->enmType = PGMPAGETYPE_INVALID;
5640 }
5641
5642 return rc;
5643}
5644
5645
5646/**
5647 * NEM helper that performs @a pfnCallback on pages with NEM state @a uMinState
5648 * or higher.
5649 *
5650 * @returns VBox status code from callback.
5651 * @param pVM The cross context VM structure.
5652 * @param pVCpu The cross context per CPU structure. This is
5653 * optional as it's only for passing to the callback.
5654 * @param uMinState The minimum NEM state value to call on.
5655 * @param pfnCallback The callback function.
5656 * @param pvUser User argument for the callback.
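 *
 * @note  Illustrative usage sketch; nemHCStateResetCallback is a hypothetical
 *        callback matching PFNPGMPHYSNEMENUMCALLBACK and the minimum state
 *        value used is backend specific:
 * @code
 *      uint8_t const uMinState = 2;    // hypothetical, backend-specific state value
 *      int rc = PGMPhysNemEnumPagesByState(pVM, pVCpu, uMinState, nemHCStateResetCallback, NULL);
 *      AssertLogRelRC(rc);
 * @endcode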
5657 */
5658VMM_INT_DECL(int) PGMPhysNemEnumPagesByState(PVMCC pVM, PVMCPUCC pVCpu, uint8_t uMinState,
5659 PFNPGMPHYSNEMENUMCALLBACK pfnCallback, void *pvUser)
5660{
5661 /*
5662 * Just brute force this problem.
5663 */
5664 PGM_LOCK_VOID(pVM);
5665 int rc = VINF_SUCCESS;
5666 uint32_t const cLookupEntries = RT_MIN(pVM->pgm.s.RamRangeUnion.cLookupEntries, RT_ELEMENTS(pVM->pgm.s.aRamRangeLookup));
5667 for (uint32_t idxLookup = 0; idxLookup < cLookupEntries && RT_SUCCESS(rc); idxLookup++)
5668 {
5669 uint32_t const idRamRange = PGMRAMRANGELOOKUPENTRY_GET_ID(pVM->pgm.s.aRamRangeLookup[idxLookup]);
5670 AssertContinue(idRamRange < RT_ELEMENTS(pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges));
5671 PPGMRAMRANGE const pRam = pVM->CTX_EXPR(pgm, pgmr0, pgm).s.apRamRanges[idRamRange];
5672 AssertContinue(pRam);
5673 Assert(pRam->GCPhys == PGMRAMRANGELOOKUPENTRY_GET_FIRST(pVM->pgm.s.aRamRangeLookup[idxLookup]));
5674
5675#ifdef IN_RING0
5676 uint32_t const cPages = RT_MIN(pRam->cb >> X86_PAGE_SHIFT, pVM->pgmr0.s.acRamRangePages[idRamRange]);
5677#else
5678 uint32_t const cPages = pRam->cb >> X86_PAGE_SHIFT;
5679#endif
5680 for (uint32_t iPage = 0; iPage < cPages; iPage++)
5681 {
5682 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(&pRam->aPages[iPage]);
5683 if (u2State < uMinState)
5684 { /* likely */ }
5685 else
5686 {
5687 rc = pfnCallback(pVM, pVCpu, pRam->GCPhys + ((RTGCPHYS)iPage << X86_PAGE_SHIFT), &u2State, pvUser);
5688 if (RT_SUCCESS(rc))
5689 PGM_PAGE_SET_NEM_STATE(&pRam->aPages[iPage], u2State);
5690 else
5691 break;
5692 }
5693 }
5694 }
5695 PGM_UNLOCK(pVM);
5696
5697 return rc;
5698}
5699
5700
5701/**
5702 * Helper for setting the NEM state for a range of pages.
5703 *
5704 * @param paPages Array of pages to modify.
5705 * @param cPages How many pages to modify.
5706 * @param u2State The new state value.
5707 */
5708DECLHIDDEN(void) pgmPhysSetNemStateForPages(PPGMPAGE paPages, RTGCPHYS cPages, uint8_t u2State)
5709{
5710 PPGMPAGE pPage = paPages;
5711 while (cPages-- > 0)
5712 {
5713 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
5714 pPage++;
5715 }
5716}
5717
5718#endif /* VBOX_WITH_NATIVE_NEM */
5719