1 | /* $Id: MMHyper.cpp 80319 2019-08-16 09:19:31Z vboxsync $ */
|
---|
2 | /** @file
|
---|
3 | * MM - Memory Manager - Hypervisor Memory Area.
|
---|
4 | */
|
---|
5 |
|
---|
6 | /*
|
---|
7 | * Copyright (C) 2006-2019 Oracle Corporation
|
---|
8 | *
|
---|
9 | * This file is part of VirtualBox Open Source Edition (OSE), as
|
---|
10 | * available from http://www.alldomusa.eu.org. This file is free software;
|
---|
11 | * you can redistribute it and/or modify it under the terms of the GNU
|
---|
12 | * General Public License (GPL) as published by the Free Software
|
---|
13 | * Foundation, in version 2 as it comes in the "COPYING" file of the
|
---|
14 | * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
|
---|
15 | * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
|
---|
16 | */
|
---|
17 |
|
---|
18 |
|
---|
19 | /*********************************************************************************************************************************
|
---|
20 | * Header Files *
|
---|
21 | *********************************************************************************************************************************/
|
---|
22 | #define VBOX_BUGREF_9217_PART_I
|
---|
23 | #define LOG_GROUP LOG_GROUP_MM_HYPER
|
---|
24 | #include <VBox/vmm/pgm.h>
|
---|
25 | #include <VBox/vmm/mm.h>
|
---|
26 | #include <VBox/vmm/hm.h>
|
---|
27 | #include <VBox/vmm/dbgf.h>
|
---|
28 | #include "MMInternal.h"
|
---|
29 | #include <VBox/vmm/vm.h>
|
---|
30 | #include <VBox/vmm/gvm.h>
|
---|
31 | #include <VBox/err.h>
|
---|
32 | #include <VBox/param.h>
|
---|
33 | #include <VBox/log.h>
|
---|
34 | #include <iprt/alloc.h>
|
---|
35 | #include <iprt/assert.h>
|
---|
36 | #include <iprt/string.h>
|
---|
37 |
|
---|
38 |
|
---|
39 | /*********************************************************************************************************************************
|
---|
40 | * Internal Functions *
|
---|
41 | *********************************************************************************************************************************/
|
---|
42 | #ifndef PGM_WITHOUT_MAPPINGS
|
---|
43 | static DECLCALLBACK(bool) mmR3HyperRelocateCallback(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew, PGMRELOCATECALL enmMode,
|
---|
44 | void *pvUser);
|
---|
45 | #endif
|
---|
46 | static int mmR3HyperMap(PVM pVM, const size_t cb, const char *pszDesc, PRTGCPTR pGCPtr, PMMLOOKUPHYPER *ppLookup);
|
---|
47 | static int mmR3HyperHeapCreate(PVM pVM, const size_t cb, PMMHYPERHEAP *ppHeap, PRTR0PTR pR0PtrHeap);
|
---|
48 | static int mmR3HyperHeapMap(PVM pVM, PMMHYPERHEAP pHeap, PRTGCPTR ppHeapGC);
|
---|
49 | static DECLCALLBACK(void) mmR3HyperInfoHma(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
|
---|
50 |
|
---|
51 |
|
---|
52 | /**
|
---|
53 | * Determin the default heap size.
|
---|
54 | *
|
---|
55 | * @returns The heap size in bytes.
|
---|
56 | * @param pVM The cross context VM structure.
|
---|
57 | */
|
---|
58 | static uint32_t mmR3HyperComputeHeapSize(PVM pVM)
|
---|
59 | {
|
---|
60 | /** @todo Redo after moving allocations off the hyper heap. */
|
---|
61 |
|
---|
62 | /*
|
---|
63 | * Gather parameters.
|
---|
64 | */
|
---|
65 | bool fCanUseLargerHeap = true;
|
---|
66 | //bool fCanUseLargerHeap;
|
---|
67 | //int rc = CFGMR3QueryBoolDef(CFGMR3GetChild(CFGMR3GetRoot(pVM), "MM"), "CanUseLargerHeap", &fCanUseLargerHeap, false);
|
---|
68 | //AssertStmt(RT_SUCCESS(rc), fCanUseLargerHeap = false);
|
---|
69 |
|
---|
70 | uint64_t cbRam;
|
---|
71 | int rc = CFGMR3QueryU64(CFGMR3GetRoot(pVM), "RamSize", &cbRam);
|
---|
72 | AssertStmt(RT_SUCCESS(rc), cbRam = _1G);
|
---|
73 |
|
---|
74 | /*
|
---|
75 | * We need to keep saved state compatibility if raw-mode is an option,
|
---|
76 | * so lets filter out that case first.
|
---|
77 | */
|
---|
78 | if ( !fCanUseLargerHeap
|
---|
79 | && VM_IS_RAW_MODE_ENABLED(pVM)
|
---|
80 | && cbRam < 16*_1G64)
|
---|
81 | return 1280 * _1K;
|
---|
82 |
|
---|
83 | /*
|
---|
84 | * Calculate the heap size.
|
---|
85 | */
|
---|
86 | uint32_t cbHeap = _1M;
|
---|
87 |
|
---|
88 | /* The newer chipset may have more devices attached, putting additional
|
---|
89 | pressure on the heap. */
|
---|
90 | if (fCanUseLargerHeap)
|
---|
91 | cbHeap += _1M;
|
---|
92 |
|
---|
93 | /* More CPUs means some extra memory usage. */
|
---|
94 | if (pVM->cCpus > 1)
|
---|
95 | cbHeap += pVM->cCpus * _64K;
|
---|
96 |
|
---|
97 | /* Lots of memory means extra memory consumption as well (pool). */
|
---|
98 | if (cbRam > 16*_1G64)
|
---|
99 | cbHeap += _2M; /** @todo figure out extactly how much */
|
---|
100 |
|
---|
101 | return RT_ALIGN(cbHeap, _256K);
|
---|
102 | }
|
---|
103 |
|
---|
104 |
|
---|
/**
 * Initializes the hypervisor related MM stuff without
 * calling down to PGM.
 *
 * PGM is not initialized at this point, PGM relies on
 * the heap to initialize.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
int mmR3HyperInit(PVM pVM)
{
    LogFlow(("mmR3HyperInit:\n"));

    /*
     * Decide Hypervisor mapping in the guest context
     * And setup various hypervisor area and heap parameters.
     */
    /* The HMA location/size come from fixed build-time constants; the address
       must be PD-aligned and below the 0xff000000 area (asserted below). */
    pVM->mm.s.pvHyperAreaGC = (RTGCPTR)MM_HYPER_AREA_ADDRESS;
    pVM->mm.s.cbHyperArea = MM_HYPER_AREA_MAX_SIZE;
    AssertRelease(RT_ALIGN_T(pVM->mm.s.pvHyperAreaGC, 1 << X86_PD_SHIFT, RTGCPTR) == pVM->mm.s.pvHyperAreaGC);
    Assert(pVM->mm.s.pvHyperAreaGC < 0xff000000);

    /** @todo @bugref{1865}, @bugref{3202}: Change the cbHyperHeap default
     * depending on whether VT-x/AMD-V is enabled or not! Don't waste
     * precious kernel space on heap for the PATM.
     */
    /* The heap size is configurable ("MM/cbHyperHeap"), defaulting to the
       computed value, and is rounded up to whole pages. */
    PCFGMNODE pMM = CFGMR3GetChild(CFGMR3GetRoot(pVM), "MM");
    uint32_t cbHyperHeap;
    int rc = CFGMR3QueryU32Def(pMM, "cbHyperHeap", &cbHyperHeap, mmR3HyperComputeHeapSize(pVM));
    AssertLogRelRCReturn(rc, rc);

    cbHyperHeap = RT_ALIGN_32(cbHyperHeap, PAGE_SIZE);
    LogRel(("MM: cbHyperHeap=%#x (%u)\n", cbHyperHeap, cbHyperHeap));

    /*
     * Allocate the hypervisor heap.
     *
     * (This must be done before we start adding memory to the
     * hypervisor static area because lookup records are allocated from it.)
     */
    rc = mmR3HyperHeapCreate(pVM, cbHyperHeap, &pVM->mm.s.pHyperHeapR3, &pVM->mm.s.pHyperHeapR0);
    if (RT_SUCCESS(rc))
    {
        /*
         * Make a small head fence to fend off accidental sequential access.
         */
        MMR3HyperReserveFence(pVM);

        /*
         * Map the VM structure into the hypervisor space.
         * Note! Keeping the mappings here for now in case someone is using
         *       MMHyperR3ToR0 or similar.
         */
        AssertCompileSizeAlignment(VM, PAGE_SIZE);
        AssertCompileSizeAlignment(VMCPU, PAGE_SIZE);
#ifdef VBOX_BUGREF_9217
        /* New layout: the VM structure and each VMCPU live in separate
           allocations inside the ring-0 GVM; map them one by one at the
           page offsets they occupy within GVM. */
        AssertCompileSizeAlignment(GVM, PAGE_SIZE);
        AssertCompileSizeAlignment(GVMCPU, PAGE_SIZE);
        AssertRelease(pVM->cbSelf == sizeof(VM));
        AssertRelease(pVM->cbVCpu == sizeof(VMCPU));
        RTGCPTR GCPtr;
        rc = MMR3HyperMapPages(pVM, pVM, pVM->pVMR0ForCall, sizeof(VM) >> PAGE_SHIFT, pVM->paVMPagesR3, "VM", &GCPtr);
        uint32_t offPages = RT_UOFFSETOF_DYN(GVM, aCpus) >> PAGE_SHIFT; /* (Using the _DYN variant avoids -Winvalid-offset) */
        for (uint32_t idCpu = 0; idCpu < pVM->cCpus && RT_SUCCESS(rc); idCpu++, offPages += sizeof(GVMCPU) >> PAGE_SHIFT)
        {
            PVMCPU pVCpu = pVM->apCpusR3[idCpu];
            RTGCPTR GCPtrIgn;
            rc = MMR3HyperMapPages(pVM, pVCpu, pVM->pVMR0ForCall + offPages * PAGE_SIZE,
                                   sizeof(VMCPU) >> PAGE_SHIFT, &pVM->paVMPagesR3[offPages], "VMCPU", &GCPtrIgn);
        }
#else
        /* Old layout: VM and all VMCPUs are one contiguous allocation. */
        AssertRelease(pVM->cbSelf >= sizeof(VMCPU));
        RTGCPTR GCPtr;
        rc = MMR3HyperMapPages(pVM, pVM, pVM->pVMR0,
                               RT_ALIGN_Z(pVM->cbSelf, PAGE_SIZE) >> PAGE_SHIFT, pVM->paVMPagesR3, "VM",
                               &GCPtr);
#endif
        if (RT_SUCCESS(rc))
        {
            /* Publish the raw-mode context address of the VM structure. */
            pVM->pVMRC = (RTRCPTR)GCPtr;
            for (VMCPUID i = 0; i < pVM->cCpus; i++)
                pVM->apCpusR3[i]->pVMRC = pVM->pVMRC;

            /* Reserve a page for fencing. */
            MMR3HyperReserveFence(pVM);

            /*
             * Map the heap into the hypervisor space.
             */
            rc = mmR3HyperHeapMap(pVM, pVM->mm.s.pHyperHeapR3, &GCPtr);
            if (RT_SUCCESS(rc))
            {
                pVM->mm.s.pHyperHeapRC = (RTRCPTR)GCPtr;
                Assert(pVM->mm.s.pHyperHeapRC == GCPtr); /* no truncation when casting to RTRCPTR */

                /*
                 * Register info handlers.
                 */
                DBGFR3InfoRegisterInternal(pVM, "hma", "Show the layout of the Hypervisor Memory Area.", mmR3HyperInfoHma);

                LogFlow(("mmR3HyperInit: returns VINF_SUCCESS\n"));
                return VINF_SUCCESS;
            }
            /* Caller will do proper cleanup. */
        }
    }

    LogFlow(("mmR3HyperInit: returns %Rrc\n", rc));
    return rc;
}
|
---|
215 |
|
---|
216 |
|
---|
217 | /**
|
---|
218 | * Cleans up the hypervisor heap.
|
---|
219 | *
|
---|
220 | * @returns VBox status code.
|
---|
221 | */
|
---|
222 | int mmR3HyperTerm(PVM pVM)
|
---|
223 | {
|
---|
224 | if (pVM->mm.s.pHyperHeapR3)
|
---|
225 | PDMR3CritSectDelete(&pVM->mm.s.pHyperHeapR3->Lock);
|
---|
226 |
|
---|
227 | return VINF_SUCCESS;
|
---|
228 | }
|
---|
229 |
|
---|
230 |
|
---|
/**
 * Finalizes the HMA mapping.
 *
 * This is called later during init, most (all) HMA allocations should be done
 * by the time this function is called.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR3DECL(int) MMR3HyperInitFinalize(PVM pVM)
{
    LogFlow(("MMR3HyperInitFinalize:\n"));

    /*
     * Initialize the hyper heap critical section.
     */
    int rc = PDMR3CritSectInit(pVM, &pVM->mm.s.pHyperHeapR3->Lock, RT_SRC_POS, "MM-HYPER");
    AssertRC(rc);

#ifndef PGM_WITHOUT_MAPPINGS
    /*
     * Adjust and create the HMA mapping.
     */
    /* Shrink the HMA in 4 MB steps while keeping at least 64 KB of slack
       beyond the statically allocated portion. */
    while ((RTINT)pVM->mm.s.offHyperNextStatic + 64*_1K < (RTINT)pVM->mm.s.cbHyperArea - _4M)
        pVM->mm.s.cbHyperArea -= _4M;
    rc = PGMR3MapPT(pVM, pVM->mm.s.pvHyperAreaGC, pVM->mm.s.cbHyperArea, 0 /*fFlags*/,
                    mmR3HyperRelocateCallback, NULL, "Hypervisor Memory Area");
    if (RT_FAILURE(rc))
        return rc;
#endif
    /* From this point on the MMR3HyperMap* APIs update the page tables
       directly instead of deferring the work to this function. */
    pVM->mm.s.fPGMInitialized = true;

#ifndef PGM_WITHOUT_MAPPINGS
    /*
     * Do all the delayed mappings.
     */
    /* Walk the lookup record chain (offset-linked inside the hyper heap)
       and instantiate the page table entries for each record type. */
    PMMLOOKUPHYPER pLookup = (PMMLOOKUPHYPER)((uintptr_t)pVM->mm.s.pHyperHeapR3 + pVM->mm.s.offLookupHyper);
    for (;;)
    {
        RTGCPTR GCPtr = pVM->mm.s.pvHyperAreaGC + pLookup->off;
        uint32_t cPages = pLookup->cb >> PAGE_SHIFT;
        switch (pLookup->enmType)
        {
            case MMLOOKUPHYPERTYPE_LOCKED:
            {
                /* Locked R3 memory: one PGMMap call per page from the
                   pre-recorded host physical addresses. */
                PCRTHCPHYS paHCPhysPages = pLookup->u.Locked.paHCPhysPages;
                for (uint32_t i = 0; i < cPages; i++)
                {
                    rc = PGMMap(pVM, GCPtr + (i << PAGE_SHIFT), paHCPhysPages[i], PAGE_SIZE, 0);
                    AssertRCReturn(rc, rc);
                }
                break;
            }

            case MMLOOKUPHYPERTYPE_HCPHYS:
                /* Contiguous host physical memory: single ranged mapping. */
                rc = PGMMap(pVM, GCPtr, pLookup->u.HCPhys.HCPhys, pLookup->cb, 0);
                break;

            case MMLOOKUPHYPERTYPE_GCPHYS:
            {
                /* Guest physical memory: resolve each page to its host
                   physical address before mapping it. */
                const RTGCPHYS GCPhys = pLookup->u.GCPhys.GCPhys;
                const uint32_t cb = pLookup->cb;
                for (uint32_t off = 0; off < cb; off += PAGE_SIZE)
                {
                    RTHCPHYS HCPhys;
                    rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys + off, &HCPhys);
                    if (RT_FAILURE(rc))
                        break;
                    rc = PGMMap(pVM, GCPtr + off, HCPhys, PAGE_SIZE, 0);
                    if (RT_FAILURE(rc))
                        break;
                }
                break;
            }

            case MMLOOKUPHYPERTYPE_MMIO2:
            {
                /* MMIO2 region slice: resolve page by page via the owning
                   device's region. */
                const RTGCPHYS offEnd = pLookup->u.MMIO2.off + pLookup->cb;
                for (RTGCPHYS offCur = pLookup->u.MMIO2.off; offCur < offEnd; offCur += PAGE_SIZE)
                {
                    RTHCPHYS HCPhys;
                    rc = PGMR3PhysMMIO2GetHCPhys(pVM, pLookup->u.MMIO2.pDevIns, pLookup->u.MMIO2.iSubDev,
                                                 pLookup->u.MMIO2.iRegion, offCur, &HCPhys);
                    if (RT_FAILURE(rc))
                        break;
                    rc = PGMMap(pVM, GCPtr + (offCur - pLookup->u.MMIO2.off), HCPhys, PAGE_SIZE, 0);
                    if (RT_FAILURE(rc))
                        break;
                }
                break;
            }

            case MMLOOKUPHYPERTYPE_DYNAMIC:
                /* do nothing here since these are either fences or managed by someone else using PGM. */
                break;

            default:
                AssertMsgFailed(("enmType=%d\n", pLookup->enmType));
                break;
        }

        if (RT_FAILURE(rc))
        {
            AssertMsgFailed(("rc=%Rrc cb=%d off=%#RX32 enmType=%d pszDesc=%s\n",
                             rc, pLookup->cb, pLookup->off, pLookup->enmType, pLookup->pszDesc));
            return rc;
        }

        /* next */
        if (pLookup->offNext == (int32_t)NIL_OFFSET)
            break;
        pLookup = (PMMLOOKUPHYPER)((uintptr_t)pLookup + pLookup->offNext);
    }
#endif /* !PGM_WITHOUT_MAPPINGS */

    LogFlow(("MMR3HyperInitFinalize: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
|
---|
348 |
|
---|
349 |
|
---|
#ifndef PGM_WITHOUT_MAPPINGS
/**
 * Callback function which will be called when PGM is trying to find a new
 * location for the mapping.
 *
 * The callback is called in two modes, 1) the check mode and 2) the relocate mode.
 * In 1) the callback should say if it objects to a suggested new location. If it
 * accepts the new location, it is called again for doing its relocation.
 *
 *
 * @returns true if the location is ok.
 * @returns false if another location should be found.
 * @param   pVM         The cross context VM structure.
 * @param   GCPtrOld    The old virtual address.
 * @param   GCPtrNew    The new virtual address.
 * @param   enmMode     Used to indicate the callback mode.
 * @param   pvUser      User argument. Ignored.
 * @remark  The return value is not a failure indicator, it's an acceptance
 *          indicator. Relocation can not fail!
 */
static DECLCALLBACK(bool) mmR3HyperRelocateCallback(PVM pVM, RTGCPTR GCPtrOld, RTGCPTR GCPtrNew,
                                                    PGMRELOCATECALL enmMode, void *pvUser)
{
    NOREF(pvUser);
    switch (enmMode)
    {
        /*
         * Verify location - all locations are good for us.
         */
        case PGMRELOCATECALL_SUGGEST:
            return true;

        /*
         * Execute the relocation.
         */
        case PGMRELOCATECALL_RELOCATE:
        {
            /*
             * Accepted!
             */
            AssertMsg(GCPtrOld == pVM->mm.s.pvHyperAreaGC,
                      ("GCPtrOld=%RGv pVM->mm.s.pvHyperAreaGC=%RGv\n", GCPtrOld, pVM->mm.s.pvHyperAreaGC));
            Log(("Relocating the hypervisor from %RGv to %RGv\n", GCPtrOld, GCPtrNew));

            /*
             * Relocate the VM structure and ourselves.
             */
            /* All raw-mode context pointers are adjusted by the signed delta
               between the old and new HMA base addresses. */
            RTGCINTPTR offDelta = GCPtrNew - GCPtrOld;
            pVM->pVMRC += offDelta;
            /* NOTE(review): mmR3HyperInit accesses the CPUs via apCpusR3[],
               while this uses aCpus[] — confirm both are valid for every
               build configuration of this revision. */
            for (VMCPUID i = 0; i < pVM->cCpus; i++)
                pVM->aCpus[i].pVMRC = pVM->pVMRC;

            pVM->mm.s.pvHyperAreaGC += offDelta;
            Assert(pVM->mm.s.pvHyperAreaGC < _4G);
            pVM->mm.s.pHyperHeapRC += offDelta;
            pVM->mm.s.pHyperHeapR3->pbHeapRC += offDelta;
            pVM->mm.s.pHyperHeapR3->pVMRC = pVM->pVMRC;

            /*
             * Relocate the rest.
             */
            VMR3Relocate(pVM, offDelta);
            return true;
        }

        default:
            AssertMsgFailed(("Invalid relocation mode %d\n", enmMode));
    }

    return false;
}
#endif /* !PGM_WITHOUT_MAPPINGS */
|
---|
422 |
|
---|
423 |
|
---|
424 | /**
|
---|
425 | * Service a VMMCALLRING3_MMHYPER_LOCK call.
|
---|
426 | *
|
---|
427 | * @returns VBox status code.
|
---|
428 | * @param pVM The cross context VM structure.
|
---|
429 | */
|
---|
430 | VMMR3DECL(int) MMR3LockCall(PVM pVM)
|
---|
431 | {
|
---|
432 | PMMHYPERHEAP pHeap = pVM->mm.s.CTX_SUFF(pHyperHeap);
|
---|
433 |
|
---|
434 | int rc = PDMR3CritSectEnterEx(&pHeap->Lock, true /* fHostCall */);
|
---|
435 | AssertRC(rc);
|
---|
436 | return rc;
|
---|
437 | }
|
---|
438 |
|
---|
439 |
|
---|
440 | #ifndef PGM_WITHOUT_MAPPINGS
|
---|
441 |
|
---|
442 | /**
|
---|
443 | * Maps contiguous HC physical memory into the hypervisor region in the GC.
|
---|
444 | *
|
---|
445 | * @return VBox status code.
|
---|
446 | *
|
---|
447 | * @param pVM The cross context VM structure.
|
---|
448 | * @param pvR3 Ring-3 address of the memory. Must be page aligned!
|
---|
449 | * @param pvR0 Optional ring-0 address of the memory.
|
---|
450 | * @param HCPhys Host context physical address of the memory to be
|
---|
451 | * mapped. Must be page aligned!
|
---|
452 | * @param cb Size of the memory. Will be rounded up to nearest page.
|
---|
453 | * @param pszDesc Description.
|
---|
454 | * @param pGCPtr Where to store the GC address.
|
---|
455 | */
|
---|
456 | VMMR3DECL(int) MMR3HyperMapHCPhys(PVM pVM, void *pvR3, RTR0PTR pvR0, RTHCPHYS HCPhys, size_t cb,
|
---|
457 | const char *pszDesc, PRTGCPTR pGCPtr)
|
---|
458 | {
|
---|
459 | LogFlow(("MMR3HyperMapHCPhys: pvR3=%p pvR0=%p HCPhys=%RHp cb=%d pszDesc=%p:{%s} pGCPtr=%p\n",
|
---|
460 | pvR3, pvR0, HCPhys, (int)cb, pszDesc, pszDesc, pGCPtr));
|
---|
461 |
|
---|
462 | /*
|
---|
463 | * Validate input.
|
---|
464 | */
|
---|
465 | AssertReturn(RT_ALIGN_P(pvR3, PAGE_SIZE) == pvR3, VERR_INVALID_PARAMETER);
|
---|
466 | AssertReturn(RT_ALIGN_T(pvR0, PAGE_SIZE, RTR0PTR) == pvR0, VERR_INVALID_PARAMETER);
|
---|
467 | AssertReturn(RT_ALIGN_T(HCPhys, PAGE_SIZE, RTHCPHYS) == HCPhys, VERR_INVALID_PARAMETER);
|
---|
468 | AssertReturn(pszDesc && *pszDesc, VERR_INVALID_PARAMETER);
|
---|
469 |
|
---|
470 | /*
|
---|
471 | * Add the memory to the hypervisor area.
|
---|
472 | */
|
---|
473 | uint32_t cbAligned = RT_ALIGN_32(cb, PAGE_SIZE);
|
---|
474 | AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER);
|
---|
475 | RTGCPTR GCPtr;
|
---|
476 | PMMLOOKUPHYPER pLookup;
|
---|
477 | int rc = mmR3HyperMap(pVM, cbAligned, pszDesc, &GCPtr, &pLookup);
|
---|
478 | if (RT_SUCCESS(rc))
|
---|
479 | {
|
---|
480 | pLookup->enmType = MMLOOKUPHYPERTYPE_HCPHYS;
|
---|
481 | pLookup->u.HCPhys.pvR3 = pvR3;
|
---|
482 | pLookup->u.HCPhys.pvR0 = pvR0;
|
---|
483 | pLookup->u.HCPhys.HCPhys = HCPhys;
|
---|
484 |
|
---|
485 | /*
|
---|
486 | * Update the page table.
|
---|
487 | */
|
---|
488 | if (pVM->mm.s.fPGMInitialized)
|
---|
489 | rc = PGMMap(pVM, GCPtr, HCPhys, cbAligned, 0);
|
---|
490 | if (RT_SUCCESS(rc))
|
---|
491 | *pGCPtr = GCPtr;
|
---|
492 | }
|
---|
493 | return rc;
|
---|
494 | }
|
---|
495 |
|
---|
496 |
|
---|
/**
 * Maps contiguous GC physical memory into the hypervisor region in the GC.
 *
 * @return VBox status code.
 *
 * @param pVM       The cross context VM structure.
 * @param GCPhys    Guest context physical address of the memory to be mapped. Must be page aligned!
 * @param cb        Size of the memory. Will be rounded up to nearest page.
 * @param pszDesc   Mapping description.
 * @param pGCPtr    Where to store the GC address. Optional.
 */
VMMR3DECL(int) MMR3HyperMapGCPhys(PVM pVM, RTGCPHYS GCPhys, size_t cb, const char *pszDesc, PRTGCPTR pGCPtr)
{
    LogFlow(("MMR3HyperMapGCPhys: GCPhys=%RGp cb=%d pszDesc=%p:{%s} pGCPtr=%p\n", GCPhys, (int)cb, pszDesc, pszDesc, pGCPtr));

    /*
     * Validate input.
     */
    AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
    AssertReturn(pszDesc && *pszDesc, VERR_INVALID_PARAMETER);

    /*
     * Add the memory to the hypervisor area.
     */
    cb = RT_ALIGN_Z(cb, PAGE_SIZE);
    RTGCPTR GCPtr;
    PMMLOOKUPHYPER pLookup;
    int rc = mmR3HyperMap(pVM, cb, pszDesc, &GCPtr, &pLookup);
    if (RT_SUCCESS(rc))
    {
        pLookup->enmType = MMLOOKUPHYPERTYPE_GCPHYS;
        pLookup->u.GCPhys.GCPhys = GCPhys;

        /*
         * Update the page table.
         */
        /* Resolve and map page by page.  On failure the loop stops with rc
           set; already-mapped pages and the lookup record are intentionally
           left in place (no rollback). */
        for (unsigned off = 0; off < cb; off += PAGE_SIZE)
        {
            RTHCPHYS HCPhys;
            rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys + off, &HCPhys);
            AssertRC(rc);
            if (RT_FAILURE(rc))
            {
                AssertMsgFailed(("rc=%Rrc GCPhys=%RGp off=%#x %s\n", rc, GCPhys, off, pszDesc));
                break;
            }
            /* Before PGM init, the actual mapping is deferred to
               MMR3HyperInitFinalize which replays the lookup record. */
            if (pVM->mm.s.fPGMInitialized)
            {
                rc = PGMMap(pVM, GCPtr + off, HCPhys, PAGE_SIZE, 0);
                AssertRC(rc);
                if (RT_FAILURE(rc))
                {
                    AssertMsgFailed(("rc=%Rrc GCPhys=%RGp off=%#x %s\n", rc, GCPhys, off, pszDesc));
                    break;
                }
            }
        }

        if (RT_SUCCESS(rc) && pGCPtr)
            *pGCPtr = GCPtr;
    }
    return rc;
}
|
---|
560 |
|
---|
561 |
|
---|
/**
 * Maps a portion of an MMIO2 region into the hypervisor region.
 *
 * Callers of this API must never deregister the MMIO2 region before the
 * VM is powered off. If this becomes a requirement MMR3HyperUnmapMMIO2
 * API will be needed to perform cleanups.
 *
 * @return VBox status code.
 *
 * @param pVM       The cross context VM structure.
 * @param pDevIns   The device owning the MMIO2 memory.
 * @param iSubDev   The sub-device number.
 * @param iRegion   The region.
 * @param off       The offset into the region. Will be rounded down to closest page boundary.
 * @param cb        The number of bytes to map. Will be rounded up to the closest page boundary.
 * @param pszDesc   Mapping description.
 * @param pRCPtr    Where to store the RC address.
 */
VMMR3DECL(int) MMR3HyperMapMMIO2(PVM pVM, PPDMDEVINS pDevIns, uint32_t iSubDev, uint32_t iRegion, RTGCPHYS off, RTGCPHYS cb,
                                 const char *pszDesc, PRTRCPTR pRCPtr)
{
    LogFlow(("MMR3HyperMapMMIO2: pDevIns=%p iSubDev=%#x iRegion=%#x off=%RGp cb=%RGp pszDesc=%p:{%s} pRCPtr=%p\n",
             pDevIns, iSubDev, iRegion, off, cb, pszDesc, pszDesc, pRCPtr));
    int rc;

    /*
     * Validate input.
     */
    AssertReturn(pszDesc && *pszDesc, VERR_INVALID_PARAMETER);
    AssertReturn(off + cb > off, VERR_INVALID_PARAMETER);
    /* Remember the sub-page offset so it can be reapplied to the result,
       then page-align the request in both directions. */
    uint32_t const offPage = off & PAGE_OFFSET_MASK;
    off &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
    cb += offPage;
    cb = RT_ALIGN_Z(cb, PAGE_SIZE);
    const RTGCPHYS offEnd = off + cb;
    AssertReturn(offEnd > off, VERR_INVALID_PARAMETER);
    /* Probe the entire range up front so we fail before reserving anything. */
    for (RTGCPHYS offCur = off; offCur < offEnd; offCur += PAGE_SIZE)
    {
        RTHCPHYS HCPhys;
        rc = PGMR3PhysMMIO2GetHCPhys(pVM, pDevIns, iSubDev, iRegion, offCur, &HCPhys);
        AssertMsgRCReturn(rc, ("rc=%Rrc - iSubDev=%#x iRegion=%#x off=%RGp\n", rc, iSubDev, iRegion, off), rc);
    }

    /*
     * Add the memory to the hypervisor area.
     */
    RTGCPTR GCPtr;
    PMMLOOKUPHYPER pLookup;
    rc = mmR3HyperMap(pVM, cb, pszDesc, &GCPtr, &pLookup);
    if (RT_SUCCESS(rc))
    {
        pLookup->enmType = MMLOOKUPHYPERTYPE_MMIO2;
        pLookup->u.MMIO2.pDevIns = pDevIns;
        pLookup->u.MMIO2.iSubDev = iSubDev;
        pLookup->u.MMIO2.iRegion = iRegion;
        pLookup->u.MMIO2.off = off;

        /*
         * Update the page table.
         */
        /* If PGM is not initialized yet, MMR3HyperInitFinalize replays the
           lookup record later instead. */
        if (pVM->mm.s.fPGMInitialized)
        {
            for (RTGCPHYS offCur = off; offCur < offEnd; offCur += PAGE_SIZE)
            {
                RTHCPHYS HCPhys;
                rc = PGMR3PhysMMIO2GetHCPhys(pVM, pDevIns, iSubDev, iRegion, offCur, &HCPhys);
                AssertRCReturn(rc, rc);
                rc = PGMMap(pVM, GCPtr + (offCur - off), HCPhys, PAGE_SIZE, 0);
                if (RT_FAILURE(rc))
                {
                    AssertMsgFailed(("rc=%Rrc offCur=%RGp %s\n", rc, offCur, pszDesc));
                    break;
                }
            }
        }

        if (RT_SUCCESS(rc))
        {
            /* Reapply the sub-page offset and verify the address survives
               the narrowing to an RC pointer. */
            GCPtr |= offPage;
            *pRCPtr = GCPtr;
            AssertLogRelReturn(*pRCPtr == GCPtr, VERR_INTERNAL_ERROR);
        }
    }
    return rc;
}
|
---|
647 |
|
---|
648 | #endif /* !PGM_WITHOUT_MAPPINGS */
|
---|
649 |
|
---|
650 | /**
|
---|
651 | * Maps locked R3 virtual memory into the hypervisor region in the GC.
|
---|
652 | *
|
---|
653 | * @return VBox status code.
|
---|
654 | *
|
---|
655 | * @param pVM The cross context VM structure.
|
---|
656 | * @param pvR3 The ring-3 address of the memory, must be page aligned.
|
---|
657 | * @param pvR0 The ring-0 address of the memory, must be page aligned. (optional)
|
---|
658 | * @param cPages The number of pages.
|
---|
659 | * @param paPages The page descriptors.
|
---|
660 | * @param pszDesc Mapping description.
|
---|
661 | * @param pGCPtr Where to store the GC address corresponding to pvR3.
|
---|
662 | */
|
---|
663 | VMMR3DECL(int) MMR3HyperMapPages(PVM pVM, void *pvR3, RTR0PTR pvR0, size_t cPages, PCSUPPAGE paPages,
|
---|
664 | const char *pszDesc, PRTGCPTR pGCPtr)
|
---|
665 | {
|
---|
666 | LogFlow(("MMR3HyperMapPages: pvR3=%p pvR0=%p cPages=%zu paPages=%p pszDesc=%p:{%s} pGCPtr=%p\n",
|
---|
667 | pvR3, pvR0, cPages, paPages, pszDesc, pszDesc, pGCPtr));
|
---|
668 |
|
---|
669 | /*
|
---|
670 | * Validate input.
|
---|
671 | */
|
---|
672 | AssertPtrReturn(pvR3, VERR_INVALID_POINTER);
|
---|
673 | AssertPtrReturn(paPages, VERR_INVALID_POINTER);
|
---|
674 | AssertReturn(cPages > 0, VERR_PAGE_COUNT_OUT_OF_RANGE);
|
---|
675 | AssertReturn(cPages <= VBOX_MAX_ALLOC_PAGE_COUNT, VERR_PAGE_COUNT_OUT_OF_RANGE);
|
---|
676 | AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
|
---|
677 | AssertReturn(*pszDesc, VERR_INVALID_PARAMETER);
|
---|
678 | AssertPtrReturn(pGCPtr, VERR_INVALID_PARAMETER);
|
---|
679 |
|
---|
680 | /*
|
---|
681 | * Add the memory to the hypervisor area.
|
---|
682 | */
|
---|
683 | RTGCPTR GCPtr;
|
---|
684 | PMMLOOKUPHYPER pLookup;
|
---|
685 | int rc = mmR3HyperMap(pVM, cPages << PAGE_SHIFT, pszDesc, &GCPtr, &pLookup);
|
---|
686 | if (RT_SUCCESS(rc))
|
---|
687 | {
|
---|
688 | /*
|
---|
689 | * Copy the physical page addresses and tell PGM about them.
|
---|
690 | */
|
---|
691 | PRTHCPHYS paHCPhysPages = (PRTHCPHYS)MMR3HeapAlloc(pVM, MM_TAG_MM, sizeof(RTHCPHYS) * cPages);
|
---|
692 | if (paHCPhysPages)
|
---|
693 | {
|
---|
694 | for (size_t i = 0; i < cPages; i++)
|
---|
695 | {
|
---|
696 | AssertReleaseMsgReturn( paPages[i].Phys != 0
|
---|
697 | && paPages[i].Phys != NIL_RTHCPHYS
|
---|
698 | && !(paPages[i].Phys & PAGE_OFFSET_MASK),
|
---|
699 | ("i=%#zx Phys=%RHp %s\n", i, paPages[i].Phys, pszDesc),
|
---|
700 | VERR_INTERNAL_ERROR);
|
---|
701 | paHCPhysPages[i] = paPages[i].Phys;
|
---|
702 | }
|
---|
703 |
|
---|
704 | #ifndef PGM_WITHOUT_MAPPINGS
|
---|
705 | if (pVM->mm.s.fPGMInitialized)
|
---|
706 | {
|
---|
707 | for (size_t i = 0; i < cPages; i++)
|
---|
708 | {
|
---|
709 | rc = PGMMap(pVM, GCPtr + (i << PAGE_SHIFT), paHCPhysPages[i], PAGE_SIZE, 0);
|
---|
710 | AssertRCBreak(rc);
|
---|
711 | }
|
---|
712 | }
|
---|
713 | #endif
|
---|
714 | if (RT_SUCCESS(rc))
|
---|
715 | {
|
---|
716 | pLookup->enmType = MMLOOKUPHYPERTYPE_LOCKED;
|
---|
717 | pLookup->u.Locked.pvR3 = pvR3;
|
---|
718 | pLookup->u.Locked.pvR0 = pvR0;
|
---|
719 | pLookup->u.Locked.paHCPhysPages = paHCPhysPages;
|
---|
720 |
|
---|
721 | /* done. */
|
---|
722 | *pGCPtr = GCPtr;
|
---|
723 | return rc;
|
---|
724 | }
|
---|
725 | /* Don't care about failure clean, we're screwed if this fails anyway. */
|
---|
726 | }
|
---|
727 | }
|
---|
728 |
|
---|
729 | return rc;
|
---|
730 | }
|
---|
731 |
|
---|
732 |
|
---|
733 | #ifndef PGM_WITHOUT_MAPPINGS
|
---|
734 | /**
|
---|
735 | * Reserves a hypervisor memory area.
|
---|
736 | * Most frequent usage is fence pages and dynamically mappings like the guest PD and PDPT.
|
---|
737 | *
|
---|
738 | * @return VBox status code.
|
---|
739 | *
|
---|
740 | * @param pVM The cross context VM structure.
|
---|
741 | * @param cb Size of the memory. Will be rounded up to nearest page.
|
---|
742 | * @param pszDesc Mapping description.
|
---|
743 | * @param pGCPtr Where to store the assigned GC address. Optional.
|
---|
744 | */
|
---|
745 | VMMR3DECL(int) MMR3HyperReserve(PVM pVM, unsigned cb, const char *pszDesc, PRTGCPTR pGCPtr)
|
---|
746 | {
|
---|
747 | LogFlow(("MMR3HyperMapHCRam: cb=%d pszDesc=%p:{%s} pGCPtr=%p\n", (int)cb, pszDesc, pszDesc, pGCPtr));
|
---|
748 |
|
---|
749 | /*
|
---|
750 | * Validate input.
|
---|
751 | */
|
---|
752 | if ( cb <= 0
|
---|
753 | || !pszDesc
|
---|
754 | || !*pszDesc)
|
---|
755 | {
|
---|
756 | AssertMsgFailed(("Invalid parameter\n"));
|
---|
757 | return VERR_INVALID_PARAMETER;
|
---|
758 | }
|
---|
759 |
|
---|
760 | /*
|
---|
761 | * Add the memory to the hypervisor area.
|
---|
762 | */
|
---|
763 | RTGCPTR GCPtr;
|
---|
764 | PMMLOOKUPHYPER pLookup;
|
---|
765 | int rc = mmR3HyperMap(pVM, cb, pszDesc, &GCPtr, &pLookup);
|
---|
766 | if (RT_SUCCESS(rc))
|
---|
767 | {
|
---|
768 | pLookup->enmType = MMLOOKUPHYPERTYPE_DYNAMIC;
|
---|
769 | if (pGCPtr)
|
---|
770 | *pGCPtr = GCPtr;
|
---|
771 | return VINF_SUCCESS;
|
---|
772 | }
|
---|
773 | return rc;
|
---|
774 | }
|
---|
775 | #endif /* !PGM_WITHOUT_MAPPINGS */
|
---|
776 |
|
---|
777 |
|
---|
778 | /**
|
---|
779 | * Reserves an electric fence page.
|
---|
780 | *
|
---|
781 | * @returns VBox status code.
|
---|
782 | * @param pVM The cross context VM structure.
|
---|
783 | */
|
---|
784 | VMMR3DECL(int) MMR3HyperReserveFence(PVM pVM)
|
---|
785 | {
|
---|
786 | #ifndef PGM_WITHOUT_MAPPINGS
|
---|
787 | return MMR3HyperReserve(pVM, cb, "fence", NULL);
|
---|
788 | #else
|
---|
789 | RT_NOREF(pVM);
|
---|
790 | return VINF_SUCCESS;
|
---|
791 | #endif
|
---|
792 | }
|
---|
793 |
|
---|
794 |
|
---|
/**
 * Adds memory to the hypervisor memory arena.
 *
 * @return VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   cb          Size of the memory. Will be rounded up to nearest page.
 * @param   pszDesc     The description of the memory.  Stored by pointer, so
 *                      the string must outlive the mapping.
 * @param   pGCPtr      Where to store the GC address.
 * @param   ppLookup    Where to store the pointer to the lookup record.
 * @remark  We assume the threading structure of VBox imposes natural
 *          serialization of most functions, this one included.
 */
static int mmR3HyperMap(PVM pVM, const size_t cb, const char *pszDesc, PRTGCPTR pGCPtr, PMMLOOKUPHYPER *ppLookup)
{
    /*
     * Validate input.
     */
    const uint32_t cbAligned = RT_ALIGN_32(cb, PAGE_SIZE);
    AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER); /* catches truncation of the size_t input into 32 bits */
    if (pVM->mm.s.offHyperNextStatic + cbAligned >= pVM->mm.s.cbHyperArea) /* don't use the last page, it's a fence. */
    {
        AssertMsgFailed(("Out of static mapping space in the HMA! offHyperAreaGC=%x cbAligned=%x cbHyperArea=%x\n",
                         pVM->mm.s.offHyperNextStatic, cbAligned, pVM->mm.s.cbHyperArea));
        return VERR_NO_MEMORY;
    }

    /*
     * Allocate lookup record.
     */
    PMMLOOKUPHYPER pLookup;
    int rc = MMHyperAlloc(pVM, sizeof(*pLookup), 1, MM_TAG_MM, (void **)&pLookup);
    if (RT_SUCCESS(rc))
    {
        /*
         * Initialize it and insert it.
         * Lookup records form a singly linked list chained by byte offsets
         * (offNext is relative to the record itself, not a pointer), with the
         * list head offset kept in pVM->mm.s.offLookupHyper.
         */
        pLookup->offNext = pVM->mm.s.offLookupHyper;
        pLookup->cb      = cbAligned;
        pLookup->off     = pVM->mm.s.offHyperNextStatic;
        pVM->mm.s.offLookupHyper = (uint8_t *)pLookup - (uint8_t *)pVM->mm.s.pHyperHeapR3;
        if (pLookup->offNext != (int32_t)NIL_OFFSET)
            pLookup->offNext -= pVM->mm.s.offLookupHyper;   /* rebase the old head offset relative to this record */
        pLookup->enmType = MMLOOKUPHYPERTYPE_INVALID;       /* the caller sets the real type afterwards */
        memset(&pLookup->u, 0xff, sizeof(pLookup->u));      /* poison the union so stale reads stand out */
        pLookup->pszDesc = pszDesc;

        /* Mapping: hand out the next static chunk of the hypervisor area. */
        *pGCPtr = pVM->mm.s.pvHyperAreaGC + pVM->mm.s.offHyperNextStatic;
        pVM->mm.s.offHyperNextStatic += cbAligned;

        /* Return pointer. */
        *ppLookup = pLookup;
    }

    AssertRC(rc);
    /* NOTE(review): on failure *pGCPtr is logged while still uninitialized —
       harmless for a log statement, but worth confirming. */
    LogFlow(("mmR3HyperMap: returns %Rrc *pGCPtr=%RGv\n", rc, *pGCPtr));
    return rc;
}
853 |
|
---|
854 |
|
---|
/**
 * Allocates a new heap.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   cb          The size of the new heap.  Rounded up to whole pages.
 * @param   ppHeap      Where to store the heap pointer on successful return.
 * @param   pR0PtrHeap  Where to store the ring-0 address of the heap on
 *                      success.
 */
static int mmR3HyperHeapCreate(PVM pVM, const size_t cb, PMMHYPERHEAP *ppHeap, PRTR0PTR pR0PtrHeap)
{
    /*
     * Allocate the hypervisor heap.
     */
    const uint32_t cbAligned = RT_ALIGN_32(cb, PAGE_SIZE);
    AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER); /* catches truncation of the size_t input into 32 bits */
    uint32_t const cPages = cbAligned >> PAGE_SHIFT;
    /* Page descriptor array; kept in pHeap->paPages until mmR3HyperHeapMap frees it. */
    PSUPPAGE paPages = (PSUPPAGE)MMR3HeapAlloc(pVM, MM_TAG_MM, cPages * sizeof(paPages[0]));
    if (!paPages)
        return VERR_NO_MEMORY;
    void *pv;
    RTR0PTR pvR0 = NIL_RTR0PTR;
    int rc = SUPR3PageAllocEx(cPages,
                              0 /*fFlags*/,
                              &pv,
                              &pvR0,
                              paPages);
    if (RT_SUCCESS(rc))
    {
        Assert(pvR0 != NIL_RTR0PTR && !(PAGE_OFFSET_MASK & pvR0));
        memset(pv, 0, cbAligned);

        /*
         * Initialize the heap and first free chunk.
         * The header sits at the start of the allocation; the usable area
         * begins MMYPERHEAP_HDR_SIZE bytes in, in both ring-3 and ring-0.
         */
        PMMHYPERHEAP pHeap = (PMMHYPERHEAP)pv;
        pHeap->u32Magic = MMHYPERHEAP_MAGIC;
        pHeap->pbHeapR3 = (uint8_t *)pHeap + MMYPERHEAP_HDR_SIZE;
        pHeap->pbHeapR0 = pvR0 + MMYPERHEAP_HDR_SIZE;
        //pHeap->pbHeapRC = 0; // set by mmR3HyperHeapMap()
        pHeap->pVMR3 = pVM;
#ifdef VBOX_BUGREF_9217
        pHeap->pVMR0 = pVM->pVMR0ForCall;
#else
        pHeap->pVMR0 = pVM->pVMR0;
#endif
        pHeap->pVMRC = pVM->pVMRC;
        pHeap->cbHeap = cbAligned - MMYPERHEAP_HDR_SIZE;
        pHeap->cbFree = pHeap->cbHeap - sizeof(MMHYPERCHUNK);
        //pHeap->offFreeHead = 0;
        //pHeap->offFreeTail = 0;
        pHeap->offPageAligned = pHeap->cbHeap;
        //pHeap->HyperHeapStatTree = 0;
        pHeap->paPages = paPages;

        /* The whole usable area starts out as one big free chunk. */
        PMMHYPERCHUNKFREE pFree = (PMMHYPERCHUNKFREE)pHeap->pbHeapR3;
        pFree->cb = pHeap->cbFree;
        //pFree->core.offNext = 0;
        MMHYPERCHUNK_SET_TYPE(&pFree->core, MMHYPERCHUNK_FLAGS_FREE);
        pFree->core.offHeap = -(int32_t)MMYPERHEAP_HDR_SIZE;    /* negative offset back to the heap header */
        //pFree->offNext = 0;
        //pFree->offPrev = 0;

        STAMR3Register(pVM, &pHeap->cbHeap, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, "/MM/HyperHeap/cbHeap", STAMUNIT_BYTES, "The heap size.");
        STAMR3Register(pVM, &pHeap->cbFree, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, "/MM/HyperHeap/cbFree", STAMUNIT_BYTES, "The free space.");

        *ppHeap = pHeap;
        *pR0PtrHeap = pvR0;
        return VINF_SUCCESS;
    }
    AssertMsgFailed(("SUPR3PageAllocEx(%d,,,,) -> %Rrc\n", cbAligned >> PAGE_SHIFT, rc));

    /* NOTE(review): paPages is not freed on this failure path; presumably the
       per-VM MM heap reclaims it at VM destruction — confirm. */
    *ppHeap = NULL;
    return rc;
}
931 |
|
---|
932 |
|
---|
/**
 * Maps an already created hyper heap (header + usable area) into the HMA and
 * reserves the trailing fence page.  (The previous one-liner said "Allocates
 * a new heap", but allocation happens in mmR3HyperHeapCreate.)
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pHeap       Heap set up by mmR3HyperHeapCreate (pbHeapR0 and
 *                      paPages must be valid).
 * @param   ppHeapGC    Where to store the HMA (GC) address of the mapping.
 */
static int mmR3HyperHeapMap(PVM pVM, PMMHYPERHEAP pHeap, PRTGCPTR ppHeapGC)
{
    /* The mapping covers header + heap area and must be an exact page multiple. */
    Assert(RT_ALIGN_Z(pHeap->cbHeap + MMYPERHEAP_HDR_SIZE, PAGE_SIZE) == pHeap->cbHeap + MMYPERHEAP_HDR_SIZE);
    Assert(pHeap->pbHeapR0);
    Assert(pHeap->paPages);
    int rc = MMR3HyperMapPages(pVM,
                               pHeap,
                               pHeap->pbHeapR0 - MMYPERHEAP_HDR_SIZE,   /* back up to the start of the allocation (the header) */
                               (pHeap->cbHeap + MMYPERHEAP_HDR_SIZE) >> PAGE_SHIFT,
                               pHeap->paPages,
                               "Heap", ppHeapGC);
    if (RT_SUCCESS(rc))
    {
        pHeap->pVMRC = pVM->pVMRC;
        pHeap->pbHeapRC = *ppHeapGC + MMYPERHEAP_HDR_SIZE;  /* GC address of the usable area, past the header */
        /* Reserve a page for fencing. */
        MMR3HyperReserveFence(pVM);

        /* We won't need these any more. */
        MMR3HeapFree(pHeap->paPages);
        pHeap->paPages = NULL;
    }
    return rc;
}
960 |
|
---|
961 |
|
---|
962 | /**
|
---|
963 | * Allocates memory in the Hypervisor (GC VMM) area which never will
|
---|
964 | * be freed and doesn't have any offset based relation to other heap blocks.
|
---|
965 | *
|
---|
966 | * The latter means that two blocks allocated by this API will not have the
|
---|
967 | * same relative position to each other in GC and HC. In short, never use
|
---|
968 | * this API for allocating nodes for an offset based AVL tree!
|
---|
969 | *
|
---|
970 | * The returned memory is of course zeroed.
|
---|
971 | *
|
---|
972 | * @returns VBox status code.
|
---|
973 | * @param pVM The cross context VM structure.
|
---|
974 | * @param cb Number of bytes to allocate.
|
---|
975 | * @param uAlignment Required memory alignment in bytes.
|
---|
976 | * Values are 0,8,16,32 and PAGE_SIZE.
|
---|
977 | * 0 -> default alignment, i.e. 8 bytes.
|
---|
978 | * @param enmTag The statistics tag.
|
---|
979 | * @param ppv Where to store the address to the allocated
|
---|
980 | * memory.
|
---|
981 | * @remark This is assumed not to be used at times when serialization is required.
|
---|
982 | */
|
---|
983 | VMMR3DECL(int) MMR3HyperAllocOnceNoRel(PVM pVM, size_t cb, unsigned uAlignment, MMTAG enmTag, void **ppv)
|
---|
984 | {
|
---|
985 | return MMR3HyperAllocOnceNoRelEx(pVM, cb, uAlignment, enmTag, 0/*fFlags*/, ppv);
|
---|
986 | }
|
---|
987 |
|
---|
988 |
|
---|
989 | /**
|
---|
990 | * Allocates memory in the Hypervisor (GC VMM) area which never will
|
---|
991 | * be freed and doesn't have any offset based relation to other heap blocks.
|
---|
992 | *
|
---|
993 | * The latter means that two blocks allocated by this API will not have the
|
---|
994 | * same relative position to each other in GC and HC. In short, never use
|
---|
995 | * this API for allocating nodes for an offset based AVL tree!
|
---|
996 | *
|
---|
997 | * The returned memory is of course zeroed.
|
---|
998 | *
|
---|
999 | * @returns VBox status code.
|
---|
1000 | * @param pVM The cross context VM structure.
|
---|
1001 | * @param cb Number of bytes to allocate.
|
---|
1002 | * @param uAlignment Required memory alignment in bytes.
|
---|
1003 | * Values are 0,8,16,32 and PAGE_SIZE.
|
---|
1004 | * 0 -> default alignment, i.e. 8 bytes.
|
---|
1005 | * @param enmTag The statistics tag.
|
---|
1006 | * @param fFlags Flags, see MMHYPER_AONR_FLAGS_KERNEL_MAPPING.
|
---|
1007 | * @param ppv Where to store the address to the allocated memory.
|
---|
1008 | * @remark This is assumed not to be used at times when serialization is required.
|
---|
1009 | */
|
---|
1010 | VMMR3DECL(int) MMR3HyperAllocOnceNoRelEx(PVM pVM, size_t cb, unsigned uAlignment, MMTAG enmTag, uint32_t fFlags, void **ppv)
|
---|
1011 | {
|
---|
1012 | AssertMsg(cb >= 8, ("Hey! Do you really mean to allocate less than 8 bytes?! cb=%d\n", cb));
|
---|
1013 | Assert(!(fFlags & ~(MMHYPER_AONR_FLAGS_KERNEL_MAPPING)));
|
---|
1014 |
|
---|
1015 | /*
|
---|
1016 | * Choose between allocating a new chunk of HMA memory
|
---|
1017 | * and the heap. We will only do BIG allocations from HMA and
|
---|
1018 | * only at creation time.
|
---|
1019 | */
|
---|
1020 | if ( ( cb < _64K
|
---|
1021 | && ( uAlignment != PAGE_SIZE
|
---|
1022 | || cb < 48*_1K)
|
---|
1023 | && !(fFlags & MMHYPER_AONR_FLAGS_KERNEL_MAPPING)
|
---|
1024 | )
|
---|
1025 | || VMR3GetState(pVM) != VMSTATE_CREATING
|
---|
1026 | )
|
---|
1027 | {
|
---|
1028 | Assert(!(fFlags & MMHYPER_AONR_FLAGS_KERNEL_MAPPING));
|
---|
1029 | int rc = MMHyperAlloc(pVM, cb, uAlignment, enmTag, ppv);
|
---|
1030 | if ( rc != VERR_MM_HYPER_NO_MEMORY
|
---|
1031 | || cb <= 8*_1K)
|
---|
1032 | {
|
---|
1033 | Log2(("MMR3HyperAllocOnceNoRel: cb=%#zx uAlignment=%#x returns %Rrc and *ppv=%p\n",
|
---|
1034 | cb, uAlignment, rc, *ppv));
|
---|
1035 | return rc;
|
---|
1036 | }
|
---|
1037 | }
|
---|
1038 |
|
---|
1039 | #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
|
---|
1040 | /*
|
---|
1041 | * Set MMHYPER_AONR_FLAGS_KERNEL_MAPPING if we're in going to execute in ring-0.
|
---|
1042 | */
|
---|
1043 | if (VM_IS_HM_OR_NEM_ENABLED(pVM))
|
---|
1044 | fFlags |= MMHYPER_AONR_FLAGS_KERNEL_MAPPING;
|
---|
1045 | #endif
|
---|
1046 |
|
---|
1047 | /*
|
---|
1048 | * Validate alignment.
|
---|
1049 | */
|
---|
1050 | switch (uAlignment)
|
---|
1051 | {
|
---|
1052 | case 0:
|
---|
1053 | case 8:
|
---|
1054 | case 16:
|
---|
1055 | case 32:
|
---|
1056 | case PAGE_SIZE:
|
---|
1057 | break;
|
---|
1058 | default:
|
---|
1059 | AssertMsgFailed(("Invalid alignment %u\n", uAlignment));
|
---|
1060 | return VERR_INVALID_PARAMETER;
|
---|
1061 | }
|
---|
1062 |
|
---|
1063 | /*
|
---|
1064 | * Allocate the pages and map them into HMA space.
|
---|
1065 | */
|
---|
1066 | uint32_t const cbAligned = RT_ALIGN_32(cb, PAGE_SIZE);
|
---|
1067 | AssertReturn(cbAligned >= cb, VERR_INVALID_PARAMETER);
|
---|
1068 | uint32_t const cPages = cbAligned >> PAGE_SHIFT;
|
---|
1069 | PSUPPAGE paPages = (PSUPPAGE)RTMemTmpAlloc(cPages * sizeof(paPages[0]));
|
---|
1070 | if (!paPages)
|
---|
1071 | return VERR_NO_TMP_MEMORY;
|
---|
1072 | void *pvPages;
|
---|
1073 | RTR0PTR pvR0 = NIL_RTR0PTR;
|
---|
1074 | int rc = SUPR3PageAllocEx(cPages,
|
---|
1075 | 0 /*fFlags*/,
|
---|
1076 | &pvPages,
|
---|
1077 | &pvR0,
|
---|
1078 | paPages);
|
---|
1079 | if (RT_SUCCESS(rc))
|
---|
1080 | {
|
---|
1081 | Assert(pvR0 != NIL_RTR0PTR);
|
---|
1082 | memset(pvPages, 0, cbAligned);
|
---|
1083 |
|
---|
1084 | RTGCPTR GCPtr;
|
---|
1085 | rc = MMR3HyperMapPages(pVM,
|
---|
1086 | pvPages,
|
---|
1087 | pvR0,
|
---|
1088 | cPages,
|
---|
1089 | paPages,
|
---|
1090 | MMR3HeapAPrintf(pVM, MM_TAG_MM, "alloc once (%s)", mmGetTagName(enmTag)),
|
---|
1091 | &GCPtr);
|
---|
1092 | /* not needed anymore */
|
---|
1093 | RTMemTmpFree(paPages);
|
---|
1094 | if (RT_SUCCESS(rc))
|
---|
1095 | {
|
---|
1096 | *ppv = pvPages;
|
---|
1097 | Log2(("MMR3HyperAllocOnceNoRel: cbAligned=%#x uAlignment=%#x returns VINF_SUCCESS and *ppv=%p\n",
|
---|
1098 | cbAligned, uAlignment, *ppv));
|
---|
1099 | MMR3HyperReserveFence(pVM);
|
---|
1100 | return rc;
|
---|
1101 | }
|
---|
1102 | AssertMsgFailed(("Failed to allocate %zd bytes! %Rrc\n", cbAligned, rc));
|
---|
1103 | SUPR3PageFreeEx(pvPages, cPages);
|
---|
1104 |
|
---|
1105 |
|
---|
1106 | /*
|
---|
1107 | * HACK ALERT! Try allocate it off the heap so that we don't freak
|
---|
1108 | * out during vga/vmmdev mmio2 allocation with certain ram sizes.
|
---|
1109 | */
|
---|
1110 | /** @todo make a proper fix for this so we will never end up in this kind of situation! */
|
---|
1111 | Log(("MMR3HyperAllocOnceNoRel: MMR3HyperMapHCRam failed with rc=%Rrc, try MMHyperAlloc(,%#x,,) instead\n", rc, cb));
|
---|
1112 | int rc2 = MMHyperAlloc(pVM, cb, uAlignment, enmTag, ppv);
|
---|
1113 | if (RT_SUCCESS(rc2))
|
---|
1114 | {
|
---|
1115 | Log2(("MMR3HyperAllocOnceNoRel: cb=%#x uAlignment=%#x returns %Rrc and *ppv=%p\n",
|
---|
1116 | cb, uAlignment, rc, *ppv));
|
---|
1117 | return rc;
|
---|
1118 | }
|
---|
1119 | }
|
---|
1120 | else
|
---|
1121 | AssertMsgFailed(("Failed to allocate %zd bytes! %Rrc\n", cbAligned, rc));
|
---|
1122 |
|
---|
1123 | if (rc == VERR_NO_MEMORY)
|
---|
1124 | rc = VERR_MM_HYPER_NO_MEMORY;
|
---|
1125 | LogRel(("MMR3HyperAllocOnceNoRel: cb=%#zx uAlignment=%#x returns %Rrc\n", cb, uAlignment, rc));
|
---|
1126 | return rc;
|
---|
1127 | }
|
---|
1128 |
|
---|
1129 |
|
---|
/**
 * Looks up a ring-3 pointer to HMA.
 *
 * @returns The lookup record on success, NULL on failure.
 * @param   pVM     The cross context VM structure.
 * @param   pvR3    The ring-3 address to look up.
 */
DECLINLINE(PMMLOOKUPHYPER) mmR3HyperLookupR3(PVM pVM, void *pvR3)
{
    /* The lookup records form a singly linked list chained by relative byte
       offsets (offNext), starting offLookupHyper bytes into the hyper heap. */
    PMMLOOKUPHYPER pLookup = (PMMLOOKUPHYPER)((uint8_t *)pVM->mm.s.pHyperHeapR3 + pVM->mm.s.offLookupHyper);
    for (;;)
    {
        switch (pLookup->enmType)
        {
            case MMLOOKUPHYPERTYPE_LOCKED:
            {
                /* Unsigned subtraction: addresses below the range start wrap
                   to huge values, so one compare does the full range check. */
                unsigned off = (uint8_t *)pvR3 - (uint8_t *)pLookup->u.Locked.pvR3;
                if (off < pLookup->cb)
                    return pLookup;
                break;
            }

            case MMLOOKUPHYPERTYPE_HCPHYS:
            {
                unsigned off = (uint8_t *)pvR3 - (uint8_t *)pLookup->u.HCPhys.pvR3;
                if (off < pLookup->cb)
                    return pLookup;
                break;
            }

            case MMLOOKUPHYPERTYPE_GCPHYS:
            case MMLOOKUPHYPERTYPE_MMIO2:
            case MMLOOKUPHYPERTYPE_DYNAMIC:
                /* Not matched by ring-3 address here. */
                /** @todo ? */
                break;

            default:
                AssertMsgFailed(("enmType=%d\n", pLookup->enmType));
                return NULL;
        }

        /* next */
        if ((unsigned)pLookup->offNext == NIL_OFFSET)   /* NIL_OFFSET terminates the chain */
            return NULL;
        pLookup = (PMMLOOKUPHYPER)((uint8_t *)pLookup + pLookup->offNext);
    }
}
1177 |
|
---|
1178 |
|
---|
/**
 * Set / unset guard status on one or more hyper heap pages.
 *
 * @returns VBox status code (first failure).
 * @param   pVM         The cross context VM structure.
 * @param   pvStart     The hyper heap page address. Must be page
 *                      aligned.
 * @param   cb          The number of bytes. Must be page aligned.
 * @param   fSet        Whether to set or unset guard page status.
 */
VMMR3DECL(int) MMR3HyperSetGuard(PVM pVM, void *pvStart, size_t cb, bool fSet)
{
    /*
     * Validate input.
     */
    AssertReturn(!((uintptr_t)pvStart & PAGE_OFFSET_MASK), VERR_INVALID_POINTER);
    AssertReturn(!(cb & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(cb <= UINT32_MAX, VERR_INVALID_PARAMETER);
    PMMLOOKUPHYPER pLookup = mmR3HyperLookupR3(pVM, pvStart);
    AssertReturn(pLookup, VERR_INVALID_PARAMETER);
    AssertReturn(pLookup->enmType == MMLOOKUPHYPERTYPE_LOCKED, VERR_INVALID_PARAMETER); /* only LOCKED records supported */

    /*
     * Get down to business.
     * Note! We quietly ignore errors from the support library since the
     *       protection stuff isn't possible to implement on all platforms.
     */
    uint8_t *pbR3 = (uint8_t *)pLookup->u.Locked.pvR3;
    /* Only pass a distinct ring-0 address; NIL when it equals the ring-3 one. */
    RTR0PTR R0Ptr = pLookup->u.Locked.pvR0 != (uintptr_t)pLookup->u.Locked.pvR3
                  ? pLookup->u.Locked.pvR0
                  : NIL_RTR0PTR;
    uint32_t off = (uint32_t)((uint8_t *)pvStart - pbR3);   /* byte offset into the locked mapping */
    int rc;
    if (fSet)
    {
        /* Guard on: clear all PTE flags in the GC mapping and revoke host access. */
#ifndef PGM_WITHOUT_MAPPINGS
        rc = PGMMapSetPage(pVM, MMHyperR3ToRC(pVM, pvStart), cb, 0);
#else
        rc = VINF_SUCCESS;
#endif
        SUPR3PageProtect(pbR3, R0Ptr, off, (uint32_t)cb, RTMEM_PROT_NONE);  /* status intentionally ignored, see note */
    }
    else
    {
        /* Guard off: present/accessed/dirty/writable in GC, read+write on the host. */
#ifndef PGM_WITHOUT_MAPPINGS
        rc = PGMMapSetPage(pVM, MMHyperR3ToRC(pVM, pvStart), cb, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
#else
        rc = VINF_SUCCESS;
#endif
        SUPR3PageProtect(pbR3, R0Ptr, off, (uint32_t)cb, RTMEM_PROT_READ | RTMEM_PROT_WRITE); /* status ignored, see note */
    }
    return rc;
}
1232 |
|
---|
1233 |
|
---|
/**
 * Convert hypervisor HC virtual address to HC physical address.
 *
 * @returns HC physical address, NIL_RTHCPHYS if @a pvR3 is not inside any
 *          convertible HMA lookup record.
 * @param   pVM     The cross context VM structure.
 * @param   pvR3    Host context virtual address.
 */
VMMR3DECL(RTHCPHYS) MMR3HyperHCVirt2HCPhys(PVM pVM, void *pvR3)
{
    /* Walk the offset-chained lookup list (same scheme as mmR3HyperLookupR3). */
    PMMLOOKUPHYPER pLookup = (PMMLOOKUPHYPER)((uint8_t *)pVM->mm.s.pHyperHeapR3 + pVM->mm.s.offLookupHyper);
    for (;;)
    {
        switch (pLookup->enmType)
        {
            case MMLOOKUPHYPERTYPE_LOCKED:
            {
                /* Unsigned wraparound makes one compare a full range check. */
                unsigned off = (uint8_t *)pvR3 - (uint8_t *)pLookup->u.Locked.pvR3;
                if (off < pLookup->cb)
                    /* Per-page physical address combined with the in-page offset. */
                    return pLookup->u.Locked.paHCPhysPages[off >> PAGE_SHIFT] | (off & PAGE_OFFSET_MASK);
                break;
            }

            case MMLOOKUPHYPERTYPE_HCPHYS:
            {
                unsigned off = (uint8_t *)pvR3 - (uint8_t *)pLookup->u.HCPhys.pvR3;
                if (off < pLookup->cb)
                    return pLookup->u.HCPhys.HCPhys + off;  /* physically contiguous range */
                break;
            }

            case MMLOOKUPHYPERTYPE_GCPHYS:
            case MMLOOKUPHYPERTYPE_MMIO2:
            case MMLOOKUPHYPERTYPE_DYNAMIC:
                /* cannot (or don't want to) convert these kinds of records. */
                break;

            default:
                AssertMsgFailed(("enmType=%d\n", pLookup->enmType));
                break;
        }

        /* next */
        if ((unsigned)pLookup->offNext == NIL_OFFSET)   /* NIL_OFFSET terminates the chain */
            break;
        pLookup = (PMMLOOKUPHYPER)((uint8_t *)pLookup + pLookup->offNext);
    }

    AssertMsgFailed(("pvR3=%p is not inside the hypervisor memory area!\n", pvR3));
    return NIL_RTHCPHYS;
}
1284 |
|
---|
1285 | #ifndef PGM_WITHOUT_MAPPINGS
|
---|
1286 |
|
---|
1287 | /**
|
---|
1288 | * Implements the hcphys-not-found return case of MMR3HyperQueryInfoFromHCPhys.
|
---|
1289 | *
|
---|
1290 | * @returns VINF_SUCCESS, VINF_BUFFER_OVERFLOW.
|
---|
1291 | * @param pVM The cross context VM structure.
|
---|
1292 | * @param HCPhys The host physical address to look for.
|
---|
1293 | * @param pLookup The HMA lookup entry corresponding to HCPhys.
|
---|
1294 | * @param pszWhat Where to return the description.
|
---|
1295 | * @param cbWhat Size of the return buffer.
|
---|
1296 | * @param pcbAlloc Where to return the size of whatever it is.
|
---|
1297 | */
|
---|
1298 | static int mmR3HyperQueryInfoFromHCPhysFound(PVM pVM, RTHCPHYS HCPhys, PMMLOOKUPHYPER pLookup,
|
---|
1299 | char *pszWhat, size_t cbWhat, uint32_t *pcbAlloc)
|
---|
1300 | {
|
---|
1301 | NOREF(pVM); NOREF(HCPhys);
|
---|
1302 | *pcbAlloc = pLookup->cb;
|
---|
1303 | int rc = RTStrCopy(pszWhat, cbWhat, pLookup->pszDesc);
|
---|
1304 | return rc == VERR_BUFFER_OVERFLOW ? VINF_BUFFER_OVERFLOW : rc;
|
---|
1305 | }
|
---|
1306 |
|
---|
1307 |
|
---|
1308 | /**
|
---|
1309 | * Scans the HMA for the physical page and reports back a description if found.
|
---|
1310 | *
|
---|
1311 | * @returns VINF_SUCCESS, VINF_BUFFER_OVERFLOW, VERR_NOT_FOUND.
|
---|
1312 | * @param pVM The cross context VM structure.
|
---|
1313 | * @param HCPhys The host physical address to look for.
|
---|
1314 | * @param pszWhat Where to return the description.
|
---|
1315 | * @param cbWhat Size of the return buffer.
|
---|
1316 | * @param pcbAlloc Where to return the size of whatever it is.
|
---|
1317 | */
|
---|
1318 | VMMR3_INT_DECL(int) MMR3HyperQueryInfoFromHCPhys(PVM pVM, RTHCPHYS HCPhys, char *pszWhat, size_t cbWhat, uint32_t *pcbAlloc)
|
---|
1319 | {
|
---|
1320 | RTHCPHYS HCPhysPage = HCPhys & ~(RTHCPHYS)PAGE_OFFSET_MASK;
|
---|
1321 | PMMLOOKUPHYPER pLookup = (PMMLOOKUPHYPER)((uint8_t *)pVM->mm.s.pHyperHeapR3 + pVM->mm.s.offLookupHyper);
|
---|
1322 | for (;;)
|
---|
1323 | {
|
---|
1324 | switch (pLookup->enmType)
|
---|
1325 | {
|
---|
1326 | case MMLOOKUPHYPERTYPE_LOCKED:
|
---|
1327 | {
|
---|
1328 | uint32_t i = pLookup->cb >> PAGE_SHIFT;
|
---|
1329 | while (i-- > 0)
|
---|
1330 | if (pLookup->u.Locked.paHCPhysPages[i] == HCPhysPage)
|
---|
1331 | return mmR3HyperQueryInfoFromHCPhysFound(pVM, HCPhys, pLookup, pszWhat, cbWhat, pcbAlloc);
|
---|
1332 | break;
|
---|
1333 | }
|
---|
1334 |
|
---|
1335 | case MMLOOKUPHYPERTYPE_HCPHYS:
|
---|
1336 | {
|
---|
1337 | if (pLookup->u.HCPhys.HCPhys - HCPhysPage < pLookup->cb)
|
---|
1338 | return mmR3HyperQueryInfoFromHCPhysFound(pVM, HCPhys, pLookup, pszWhat, cbWhat, pcbAlloc);
|
---|
1339 | break;
|
---|
1340 | }
|
---|
1341 |
|
---|
1342 | case MMLOOKUPHYPERTYPE_MMIO2:
|
---|
1343 | case MMLOOKUPHYPERTYPE_GCPHYS:
|
---|
1344 | case MMLOOKUPHYPERTYPE_DYNAMIC:
|
---|
1345 | {
|
---|
1346 | /* brute force. */
|
---|
1347 | uint32_t i = pLookup->cb >> PAGE_SHIFT;
|
---|
1348 | while (i-- > 0)
|
---|
1349 | {
|
---|
1350 | RTGCPTR GCPtr = pLookup->off + pVM->mm.s.pvHyperAreaGC;
|
---|
1351 | RTHCPHYS HCPhysCur;
|
---|
1352 | int rc = PGMMapGetPage(pVM, GCPtr, NULL, &HCPhysCur);
|
---|
1353 | if (RT_SUCCESS(rc) && HCPhysCur == HCPhysPage)
|
---|
1354 | return mmR3HyperQueryInfoFromHCPhysFound(pVM, HCPhys, pLookup, pszWhat, cbWhat, pcbAlloc);
|
---|
1355 | }
|
---|
1356 | break;
|
---|
1357 | }
|
---|
1358 | default:
|
---|
1359 | AssertMsgFailed(("enmType=%d\n", pLookup->enmType));
|
---|
1360 | break;
|
---|
1361 | }
|
---|
1362 |
|
---|
1363 | /* next */
|
---|
1364 | if ((unsigned)pLookup->offNext == NIL_OFFSET)
|
---|
1365 | break;
|
---|
1366 | pLookup = (PMMLOOKUPHYPER)((uint8_t *)pLookup + pLookup->offNext);
|
---|
1367 | }
|
---|
1368 | return VERR_NOT_FOUND;
|
---|
1369 | }
|
---|
1370 |
|
---|
1371 |
|
---|
1372 | /**
|
---|
1373 | * Read hypervisor memory from GC virtual address.
|
---|
1374 | *
|
---|
1375 | * @returns VBox status code.
|
---|
1376 | * @param pVM The cross context VM structure.
|
---|
1377 | * @param pvDst Destination address (HC of course).
|
---|
1378 | * @param GCPtr GC virtual address.
|
---|
1379 | * @param cb Number of bytes to read.
|
---|
1380 | *
|
---|
1381 | * @remarks For DBGF only.
|
---|
1382 | */
|
---|
1383 | VMMR3DECL(int) MMR3HyperReadGCVirt(PVM pVM, void *pvDst, RTGCPTR GCPtr, size_t cb)
|
---|
1384 | {
|
---|
1385 | if (GCPtr - pVM->mm.s.pvHyperAreaGC >= pVM->mm.s.cbHyperArea)
|
---|
1386 | return VERR_INVALID_POINTER;
|
---|
1387 | return PGMR3MapRead(pVM, pvDst, GCPtr, cb);
|
---|
1388 | }
|
---|
1389 |
|
---|
1390 | #endif /* !PGM_WITHOUT_MAPPINGS */
|
---|
1391 |
|
---|
/**
 * Info handler for 'hma', it dumps the list of lookup records for the hypervisor memory area.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pHlp        Callback functions for doing output.
 * @param   pszArgs     Argument string. Optional and specific to the handler.
 */
static DECLCALLBACK(void) mmR3HyperInfoHma(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
{
    NOREF(pszArgs);

    pHlp->pfnPrintf(pHlp, "Hypervisor Memory Area (HMA) Layout: Base %RGv, 0x%08x bytes\n",
                    pVM->mm.s.pvHyperAreaGC, pVM->mm.s.cbHyperArea);

    /* Walk the offset-chained lookup list and print one line per record:
       GC range, then type-specific backing info, then the description.
       The %*s paddings keep the description column aligned across types. */
    PMMLOOKUPHYPER pLookup = (PMMLOOKUPHYPER)((uint8_t *)pVM->mm.s.pHyperHeapR3 + pVM->mm.s.offLookupHyper);
    for (;;)
    {
        switch (pLookup->enmType)
        {
            case MMLOOKUPHYPERTYPE_LOCKED:
                /* Backing: ring-3 and ring-0 virtual addresses. */
                pHlp->pfnPrintf(pHlp, "%RGv-%RGv %RHv %RHv LOCKED  %-*s %s\n",
                                pLookup->off + pVM->mm.s.pvHyperAreaGC,
                                pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
                                pLookup->u.Locked.pvR3,
                                pLookup->u.Locked.pvR0,
                                sizeof(RTHCPTR) * 2, "",
                                pLookup->pszDesc);
                break;

            case MMLOOKUPHYPERTYPE_HCPHYS:
                /* Backing: ring-3/ring-0 addresses plus the host physical address. */
                pHlp->pfnPrintf(pHlp, "%RGv-%RGv %RHv %RHv HCPHYS  %RHp %s\n",
                                pLookup->off + pVM->mm.s.pvHyperAreaGC,
                                pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
                                pLookup->u.HCPhys.pvR3,
                                pLookup->u.HCPhys.pvR0,
                                pLookup->u.HCPhys.HCPhys,
                                pLookup->pszDesc);
                break;

            case MMLOOKUPHYPERTYPE_GCPHYS:
                /* Backing: guest physical address only. */
                pHlp->pfnPrintf(pHlp, "%RGv-%RGv %*s GCPHYS  %RGp%*s %s\n",
                                pLookup->off + pVM->mm.s.pvHyperAreaGC,
                                pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
                                sizeof(RTHCPTR) * 2 * 2 + 1, "",
                                pLookup->u.GCPhys.GCPhys, RT_ABS((int)(sizeof(RTHCPHYS) - sizeof(RTGCPHYS))) * 2, "",
                                pLookup->pszDesc);
                break;

            case MMLOOKUPHYPERTYPE_MMIO2:
                /* Backing: offset into the MMIO2 region. */
                pHlp->pfnPrintf(pHlp, "%RGv-%RGv %*s MMIO2   %RGp%*s %s\n",
                                pLookup->off + pVM->mm.s.pvHyperAreaGC,
                                pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
                                sizeof(RTHCPTR) * 2 * 2 + 1, "",
                                pLookup->u.MMIO2.off, RT_ABS((int)(sizeof(RTHCPHYS) - sizeof(RTGCPHYS))) * 2, "",
                                pLookup->pszDesc);
                break;

            case MMLOOKUPHYPERTYPE_DYNAMIC:
                /* No backing info to show for dynamic/reserved ranges. */
                pHlp->pfnPrintf(pHlp, "%RGv-%RGv %*s DYNAMIC %*s %s\n",
                                pLookup->off + pVM->mm.s.pvHyperAreaGC,
                                pLookup->off + pVM->mm.s.pvHyperAreaGC + pLookup->cb,
                                sizeof(RTHCPTR) * 2 * 2 + 1, "",
                                sizeof(RTHCPTR) * 2, "",
                                pLookup->pszDesc);
                break;

            default:
                AssertMsgFailed(("enmType=%d\n", pLookup->enmType));
                break;
        }

        /* next */
        if ((unsigned)pLookup->offNext == NIL_OFFSET)   /* NIL_OFFSET terminates the chain */
            break;
        pLookup = (PMMLOOKUPHYPER)((uint8_t *)pLookup + pLookup->offNext);
    }
}
1469 |
|
---|
1470 |
|
---|
1471 | /**
|
---|
1472 | * Re-allocates memory from the hyper heap.
|
---|
1473 | *
|
---|
1474 | * @returns VBox status code.
|
---|
1475 | * @param pVM The cross context VM structure.
|
---|
1476 | * @param pvOld The existing block of memory in the hyper heap to
|
---|
1477 | * re-allocate (can be NULL).
|
---|
1478 | * @param cbOld Size of the existing block.
|
---|
1479 | * @param uAlignmentNew Required memory alignment in bytes. Values are
|
---|
1480 | * 0,8,16,32 and PAGE_SIZE. 0 -> default alignment,
|
---|
1481 | * i.e. 8 bytes.
|
---|
1482 | * @param enmTagNew The statistics tag.
|
---|
1483 | * @param cbNew The required size of the new block.
|
---|
1484 | * @param ppv Where to store the address to the re-allocated
|
---|
1485 | * block.
|
---|
1486 | *
|
---|
1487 | * @remarks This does not work like normal realloc() on failure, the memory
|
---|
1488 | * pointed to by @a pvOld is lost if there isn't sufficient space on
|
---|
1489 | * the hyper heap for the re-allocation to succeed.
|
---|
1490 | */
|
---|
1491 | VMMR3DECL(int) MMR3HyperRealloc(PVM pVM, void *pvOld, size_t cbOld, unsigned uAlignmentNew, MMTAG enmTagNew, size_t cbNew,
|
---|
1492 | void **ppv)
|
---|
1493 | {
|
---|
1494 | if (!pvOld)
|
---|
1495 | return MMHyperAlloc(pVM, cbNew, uAlignmentNew, enmTagNew, ppv);
|
---|
1496 |
|
---|
1497 | if (!cbNew && pvOld)
|
---|
1498 | return MMHyperFree(pVM, pvOld);
|
---|
1499 |
|
---|
1500 | if (cbOld == cbNew)
|
---|
1501 | return VINF_SUCCESS;
|
---|
1502 |
|
---|
1503 | size_t cbData = RT_MIN(cbNew, cbOld);
|
---|
1504 | void *pvTmp = RTMemTmpAlloc(cbData);
|
---|
1505 | if (RT_UNLIKELY(!pvTmp))
|
---|
1506 | {
|
---|
1507 | MMHyperFree(pVM, pvOld);
|
---|
1508 | return VERR_NO_TMP_MEMORY;
|
---|
1509 | }
|
---|
1510 | memcpy(pvTmp, pvOld, cbData);
|
---|
1511 |
|
---|
1512 | int rc = MMHyperFree(pVM, pvOld);
|
---|
1513 | if (RT_SUCCESS(rc))
|
---|
1514 | {
|
---|
1515 | rc = MMHyperAlloc(pVM, cbNew, uAlignmentNew, enmTagNew, ppv);
|
---|
1516 | if (RT_SUCCESS(rc))
|
---|
1517 | {
|
---|
1518 | Assert(cbData <= cbNew);
|
---|
1519 | memcpy(*ppv, pvTmp, cbData);
|
---|
1520 | }
|
---|
1521 | }
|
---|
1522 | else
|
---|
1523 | AssertMsgFailed(("Failed to free hyper heap block pvOld=%p cbOld=%u\n", pvOld, cbOld));
|
---|
1524 |
|
---|
1525 | RTMemTmpFree(pvTmp);
|
---|
1526 | return rc;
|
---|
1527 | }
|
---|
1528 |
|
---|