VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/MMUkHeap.cpp@80191

Last change on this file since 80191 was 80191, checked in by vboxsync, 6 years ago

VMM/r3: Refactored VMCPU enumeration in preparation that aCpus will be replaced with a pointer array. Removed two raw-mode offset members from the CPUM and CPUMCPU sub-structures. bugref:9217 bugref:9517

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 15.7 KB
 
/* $Id: MMUkHeap.cpp 80191 2019-08-08 00:36:57Z vboxsync $ */
/** @file
 * MM - Memory Manager - Ring-3 Heap with kernel accessible mapping.
 */

/*
 * Copyright (C) 2006-2019 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define VBOX_BUGREF_9217_PART_I
#define LOG_GROUP LOG_GROUP_MM_HEAP
#include <VBox/vmm/mm.h>
#include <VBox/vmm/stam.h>
#include "MMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/uvm.h>
#include <iprt/errcore.h>
#include <VBox/param.h>
#include <VBox/log.h>

#include <iprt/assert.h>
#include <iprt/string.h>
#include <iprt/heap.h>


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
static void *mmR3UkHeapAlloc(PMMUKHEAP pHeap, MMTAG enmTag, size_t cb, bool fZero, PRTR0PTR pR0Ptr);




/**
 * Create a User-kernel heap.
 *
 * This does not require SUPLib to be initialized as we'll lazily allocate the
 * kernel accessible memory on the first alloc call.
 *
 * @returns VBox status code.
 * @param   pUVM    Pointer to the user mode VM structure.
 * @param   ppHeap  Where to store the heap pointer.
 */
int mmR3UkHeapCreateU(PUVM pUVM, PMMUKHEAP *ppHeap)
{
    PMMUKHEAP pHeap = (PMMUKHEAP)MMR3HeapAllocZU(pUVM, MM_TAG_MM, sizeof(MMUKHEAP));
    if (pHeap)
    {
        int rc = RTCritSectInit(&pHeap->Lock);
        if (RT_SUCCESS(rc))
        {
            /*
             * Initialize the global stat record.
             */
            pHeap->pUVM = pUVM;
#ifdef MMUKHEAP_WITH_STATISTICS
            PMMUKHEAPSTAT pStat = &pHeap->Stat;
            STAMR3RegisterU(pUVM, &pStat->cAllocations, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, "/MM/UkHeap/cAllocations", STAMUNIT_CALLS, "Number of MMR3UkHeapAlloc() calls.");
            STAMR3RegisterU(pUVM, &pStat->cReallocations, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, "/MM/UkHeap/cReallocations", STAMUNIT_CALLS, "Number of MMR3UkHeapRealloc() calls.");
            STAMR3RegisterU(pUVM, &pStat->cFrees, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, "/MM/UkHeap/cFrees", STAMUNIT_CALLS, "Number of MMR3UkHeapFree() calls.");
            STAMR3RegisterU(pUVM, &pStat->cFailures, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, "/MM/UkHeap/cFailures", STAMUNIT_COUNT, "Number of failures.");
            STAMR3RegisterU(pUVM, &pStat->cbCurAllocated, sizeof(pStat->cbCurAllocated) == sizeof(uint32_t) ? STAMTYPE_U32 : STAMTYPE_U64,
                            STAMVISIBILITY_ALWAYS, "/MM/UkHeap/cbCurAllocated", STAMUNIT_BYTES, "Number of bytes currently allocated.");
            STAMR3RegisterU(pUVM, &pStat->cbAllocated, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, "/MM/UkHeap/cbAllocated", STAMUNIT_BYTES, "Total number of bytes allocated.");
            STAMR3RegisterU(pUVM, &pStat->cbFreed, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, "/MM/UkHeap/cbFreed", STAMUNIT_BYTES, "Total number of bytes freed.");
#endif
            *ppHeap = pHeap;
            return VINF_SUCCESS;
        }
        AssertRC(rc);
        MMR3HeapFree(pHeap);
    }
    AssertMsgFailed(("failed to allocate heap structure\n"));
    return VERR_NO_MEMORY;
}
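
/*
 * Note added for this excerpt (not part of the original sources): the heap
 * pointer returned through ppHeap above is what the public MMR3UkHeap* entry
 * points below reach via pVM->pUVM->mm.s.pUkHeap.  No kernel-accessible memory
 * is allocated at creation time; the first allocation request lets
 * mmR3UkHeapAddSubHeap() call SUPR3PageAllocEx() to set up the dual
 * ring-3/ring-0 mapping lazily, as the function comment above describes.
 */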


/**
 * Destroy a User-kernel heap.
 *
 * @param   pHeap   Heap handle.
 */
void mmR3UkHeapDestroy(PMMUKHEAP pHeap)
{
    /*
     * Start by deleting the lock, that'll trap anyone
     * attempting to use the heap.
     */
    RTCritSectDelete(&pHeap->Lock);

    /*
     * Walk the sub-heaps and free them.
     */
    while (pHeap->pSubHeapHead)
    {
        PMMUKHEAPSUB pSubHeap = pHeap->pSubHeapHead;
        pHeap->pSubHeapHead = pSubHeap->pNext;
        SUPR3PageFreeEx(pSubHeap->pv, pSubHeap->cb >> PAGE_SHIFT);
        //MMR3HeapFree(pSubHeap); - rely on the automatic cleanup.
    }
    //MMR3HeapFree(pHeap->stats);
    //MMR3HeapFree(pHeap);
}


/**
 * Allocate memory associating it with the VM for collective cleanup.
 *
 * The memory will be allocated from the default heap but a header
 * is added in which we keep track of which VM it belongs to and chain
 * all the allocations together so they can be freed in one go.
 *
 * This interface is typically used for memory blocks which will not be
 * freed during the life of the VM.
 *
 * @returns Pointer to allocated memory.
 * @param   pVM     The cross context VM structure.
 * @param   enmTag  Statistics tag. Statistics are collected on a per tag
 *                  basis in addition to a global one. Thus we can easily
 *                  identify how memory is used by the VM.
 * @param   cbSize  Size of the block.
 * @param   pR0Ptr  Where to return the ring-0 address of the memory.
 */
VMMR3DECL(void *) MMR3UkHeapAlloc(PVM pVM, MMTAG enmTag, size_t cbSize, PRTR0PTR pR0Ptr)
{
    return mmR3UkHeapAlloc(pVM->pUVM->mm.s.pUkHeap, enmTag, cbSize, false, pR0Ptr);
}


/**
 * Same as MMR3UkHeapAlloc().
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   enmTag  Statistics tag. Statistics are collected on a per tag
 *                  basis in addition to a global one. Thus we can easily
 *                  identify how memory is used by the VM.
 * @param   cbSize  Size of the block.
 * @param   ppv     Where to store the pointer to the allocated memory on success.
 * @param   pR0Ptr  Where to return the ring-0 address of the memory.
 */
VMMR3DECL(int) MMR3UkHeapAllocEx(PVM pVM, MMTAG enmTag, size_t cbSize, void **ppv, PRTR0PTR pR0Ptr)
{
    void *pv = mmR3UkHeapAlloc(pVM->pUVM->mm.s.pUkHeap, enmTag, cbSize, false, pR0Ptr);
    if (pv)
    {
        *ppv = pv;
        return VINF_SUCCESS;
    }
    return VERR_NO_MEMORY;
}


/**
 * Same as MMR3UkHeapAlloc() only the memory is zeroed.
 *
 * @returns Pointer to allocated memory.
 * @param   pVM     The cross context VM structure.
 * @param   enmTag  Statistics tag. Statistics are collected on a per tag
 *                  basis in addition to a global one. Thus we can easily
 *                  identify how memory is used by the VM.
 * @param   cbSize  Size of the block.
 * @param   pR0Ptr  Where to return the ring-0 address of the memory.
 */
VMMR3DECL(void *) MMR3UkHeapAllocZ(PVM pVM, MMTAG enmTag, size_t cbSize, PRTR0PTR pR0Ptr)
{
    return mmR3UkHeapAlloc(pVM->pUVM->mm.s.pUkHeap, enmTag, cbSize, true, pR0Ptr);
}


/**
 * Same as MMR3UkHeapAllocZ().
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   enmTag  Statistics tag. Statistics are collected on a per tag
 *                  basis in addition to a global one. Thus we can easily
 *                  identify how memory is used by the VM.
 * @param   cbSize  Size of the block.
 * @param   ppv     Where to store the pointer to the allocated memory on success.
 * @param   pR0Ptr  Where to return the ring-0 address of the memory.
 */
VMMR3DECL(int) MMR3UkHeapAllocZEx(PVM pVM, MMTAG enmTag, size_t cbSize, void **ppv, PRTR0PTR pR0Ptr)
{
    void *pv = mmR3UkHeapAlloc(pVM->pUVM->mm.s.pUkHeap, enmTag, cbSize, true, pR0Ptr);
    if (pv)
    {
        *ppv = pv;
        return VINF_SUCCESS;
    }
    return VERR_NO_MEMORY;
}
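
/*
 * Illustrative usage sketch (added for this excerpt, not part of the original
 * file): how a caller might obtain a block addressable from both ring-3 and
 * ring-0.  The variable names and the _4K size are hypothetical; only the
 * MMR3UkHeapAllocZEx()/MMR3UkHeapFree() calls and the MM_TAG_MM tag come from
 * this file's API.
 *
 *     void   *pvShared    = NULL;
 *     RTR0PTR R0PtrShared = NIL_RTR0PTR;
 *     int rc = MMR3UkHeapAllocZEx(pVM, MM_TAG_MM, _4K, &pvShared, &R0PtrShared);
 *     if (RT_SUCCESS(rc))
 *     {
 *         // pvShared is the ring-3 mapping; R0PtrShared is the ring-0 address
 *         // of the same bytes, suitable for handing to ring-0 code.
 *         MMR3UkHeapFree(pVM, pvShared, MM_TAG_MM);
 *     }
 */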


/***
 * Worker for mmR3UkHeapAlloc that creates and adds a new sub-heap.
 *
 * @returns Pointer to the new sub-heap.
 * @param   pHeap       The heap
 * @param   cbSubHeap   The size of the sub-heap.
 */
static PMMUKHEAPSUB mmR3UkHeapAddSubHeap(PMMUKHEAP pHeap, size_t cbSubHeap)
{
    PMMUKHEAPSUB pSubHeap = (PMMUKHEAPSUB)MMR3HeapAllocU(pHeap->pUVM, MM_TAG_MM/*_UK_HEAP*/, sizeof(*pSubHeap));
    if (pSubHeap)
    {
        pSubHeap->cb = cbSubHeap;
        int rc = SUPR3PageAllocEx(pSubHeap->cb >> PAGE_SHIFT, 0, &pSubHeap->pv, &pSubHeap->pvR0, NULL);
        if (RT_SUCCESS(rc))
        {
            rc = RTHeapSimpleInit(&pSubHeap->hSimple, pSubHeap->pv, pSubHeap->cb);
            if (RT_SUCCESS(rc))
            {
                pSubHeap->pNext = pHeap->pSubHeapHead;
                pHeap->pSubHeapHead = pSubHeap;
                return pSubHeap;
            }

            /* bail out */
            SUPR3PageFreeEx(pSubHeap->pv, pSubHeap->cb >> PAGE_SHIFT);
        }
        MMR3HeapFree(pSubHeap);
    }
    return NULL;
}
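
/*
 * Note added for this excerpt (not part of the original file): SUPR3PageAllocEx()
 * hands back two views of the same page-aligned allocation -- the ring-3 mapping
 * in pSubHeap->pv and the ring-0 mapping in pSubHeap->pvR0 -- which is what makes
 * blocks carved out of a sub-heap "kernel accessible".  The cb >> PAGE_SHIFT
 * expression converts the byte size into a page count; e.g. a 256 KiB sub-heap
 * with 4 KiB pages is requested as 64 pages.
 */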


/**
 * Allocate memory from the heap.
 *
 * @returns Pointer to allocated memory.
 * @param   pHeap   Heap handle.
 * @param   enmTag  Statistics tag. Statistics are collected on a per tag
 *                  basis in addition to a global one. Thus we can easily
 *                  identify how memory is used by the VM.
 * @param   cb      Size of the block.
 * @param   fZero   Whether or not to zero the memory block.
 * @param   pR0Ptr  Where to return the ring-0 pointer.
 */
static void *mmR3UkHeapAlloc(PMMUKHEAP pHeap, MMTAG enmTag, size_t cb, bool fZero, PRTR0PTR pR0Ptr)
{
    if (pR0Ptr)
        *pR0Ptr = NIL_RTR0PTR;
    RTCritSectEnter(&pHeap->Lock);

#ifdef MMUKHEAP_WITH_STATISTICS
    /*
     * Find/alloc statistics nodes.
     */
    pHeap->Stat.cAllocations++;
    PMMUKHEAPSTAT pStat = (PMMUKHEAPSTAT)RTAvlULGet(&pHeap->pStatTree, (AVLULKEY)enmTag);
    if (pStat)
        pStat->cAllocations++;
    else
    {
        pStat = (PMMUKHEAPSTAT)MMR3HeapAllocZU(pHeap->pUVM, MM_TAG_MM, sizeof(MMUKHEAPSTAT));
        if (!pStat)
        {
            pHeap->Stat.cFailures++;
            AssertMsgFailed(("Failed to allocate heap stat record.\n"));
            RTCritSectLeave(&pHeap->Lock);
            return NULL;
        }
        pStat->Core.Key = (AVLULKEY)enmTag;
        RTAvlULInsert(&pHeap->pStatTree, &pStat->Core);

        pStat->cAllocations++;

        /* register the statistics */
        PUVM pUVM = pHeap->pUVM;
        const char *pszTag = mmGetTagName(enmTag);
        STAMR3RegisterFU(pUVM, &pStat->cbCurAllocated, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Number of bytes currently allocated.", "/MM/UkHeap/%s", pszTag);
        STAMR3RegisterFU(pUVM, &pStat->cAllocations, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS, "Number of MMR3UkHeapAlloc() calls.", "/MM/UkHeap/%s/cAllocations", pszTag);
        STAMR3RegisterFU(pUVM, &pStat->cReallocations, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS, "Number of MMR3UkHeapRealloc() calls.", "/MM/UkHeap/%s/cReallocations", pszTag);
        STAMR3RegisterFU(pUVM, &pStat->cFrees, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS, "Number of MMR3UkHeapFree() calls.", "/MM/UkHeap/%s/cFrees", pszTag);
        STAMR3RegisterFU(pUVM, &pStat->cFailures, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Number of failures.", "/MM/UkHeap/%s/cFailures", pszTag);
        STAMR3RegisterFU(pUVM, &pStat->cbAllocated, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Total number of bytes allocated.", "/MM/UkHeap/%s/cbAllocated", pszTag);
        STAMR3RegisterFU(pUVM, &pStat->cbFreed, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Total number of bytes freed.", "/MM/UkHeap/%s/cbFreed", pszTag);
    }
#else
    RT_NOREF_PV(enmTag);
#endif

    /*
     * Validate input.
     */
    if (cb == 0)
    {
#ifdef MMUKHEAP_WITH_STATISTICS
        pStat->cFailures++;
        pHeap->Stat.cFailures++;
#endif
        RTCritSectLeave(&pHeap->Lock);
        return NULL;
    }

    /*
     * Allocate heap block.
     */
    cb = RT_ALIGN_Z(cb, MMUKHEAP_SIZE_ALIGNMENT);
    void *pv = NULL;
    PMMUKHEAPSUB pSubHeapPrev = NULL;
    PMMUKHEAPSUB pSubHeap = pHeap->pSubHeapHead;
    while (pSubHeap)
    {
        if (fZero)
            pv = RTHeapSimpleAllocZ(pSubHeap->hSimple, cb, MMUKHEAP_SIZE_ALIGNMENT);
        else
            pv = RTHeapSimpleAlloc(pSubHeap->hSimple, cb, MMUKHEAP_SIZE_ALIGNMENT);
        if (pv)
        {
            /* Move the sub-heap with free memory to the head. */
            if (pSubHeapPrev)
            {
                pSubHeapPrev->pNext = pSubHeap->pNext;
                pSubHeap->pNext = pHeap->pSubHeapHead;
                pHeap->pSubHeapHead = pSubHeap;
            }
            break;
        }
        pSubHeapPrev = pSubHeap;
        pSubHeap = pSubHeap->pNext;
    }
    if (RT_UNLIKELY(!pv))
    {
        /*
         * Add another sub-heap.
         */
        pSubHeap = mmR3UkHeapAddSubHeap(pHeap, RT_MAX(RT_ALIGN_Z(cb, PAGE_SIZE) + PAGE_SIZE * 16, _256K));
        if (pSubHeap)
        {
            if (fZero)
                pv = RTHeapSimpleAllocZ(pSubHeap->hSimple, cb, MMUKHEAP_SIZE_ALIGNMENT);
            else
                pv = RTHeapSimpleAlloc(pSubHeap->hSimple, cb, MMUKHEAP_SIZE_ALIGNMENT);
        }
        if (RT_UNLIKELY(!pv))
        {
            AssertMsgFailed(("Failed to allocate heap block %d, enmTag=%x(%.4s).\n", cb, enmTag, &enmTag));
#ifdef MMUKHEAP_WITH_STATISTICS
            pStat->cFailures++;
            pHeap->Stat.cFailures++;
#endif
            RTCritSectLeave(&pHeap->Lock);
            return NULL;
        }
    }

    /*
     * Update statistics
     */
#ifdef MMUKHEAP_WITH_STATISTICS
    size_t cbActual = RTHeapSimpleSize(pSubHeap->hSimple, pv);
    pStat->cbAllocated += cbActual;
    pStat->cbCurAllocated += cbActual;
    pHeap->Stat.cbAllocated += cbActual;
    pHeap->Stat.cbCurAllocated += cbActual;
#endif

    if (pR0Ptr)
        *pR0Ptr = (uintptr_t)pv - (uintptr_t)pSubHeap->pv + pSubHeap->pvR0;
    RTCritSectLeave(&pHeap->Lock);
    return pv;
}
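
/*
 * Worked example (added for this excerpt, not part of the original file),
 * assuming 4 KiB pages:
 *
 *   - Sub-heap sizing: a request that does not fit in any existing sub-heap
 *     triggers a new one of RT_MAX(RT_ALIGN_Z(cb, PAGE_SIZE) + PAGE_SIZE * 16, _256K)
 *     bytes.  An 8 KiB request therefore yields max(8 KiB + 64 KiB, 256 KiB) = 256 KiB,
 *     while a 1 MiB request yields 1 MiB + 64 KiB.
 *
 *   - Ring-0 address: the returned *pR0Ptr is the block's offset within the
 *     sub-heap's ring-3 mapping applied to the ring-0 base, i.e.
 *     pSubHeap->pvR0 + ((uintptr_t)pv - (uintptr_t)pSubHeap->pv).
 */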


/**
 * Releases memory allocated with MMR3UkHeapAlloc() and MMR3UkHeapAllocZ()
 *
 * @param   pVM     The cross context VM structure.
 * @param   pv      Pointer to the memory block to free.
 * @param   enmTag  The allocation accounting tag.
 */
VMMR3DECL(void) MMR3UkHeapFree(PVM pVM, void *pv, MMTAG enmTag)
{
    /* Ignore NULL pointers. */
    if (!pv)
        return;

    PMMUKHEAP pHeap = pVM->pUVM->mm.s.pUkHeap;
    RTCritSectEnter(&pHeap->Lock);

    /*
     * Find the sub-heap and block
     */
#ifdef MMUKHEAP_WITH_STATISTICS
    size_t cbActual = 0;
#endif
    PMMUKHEAPSUB pSubHeap = pHeap->pSubHeapHead;
    while (pSubHeap)
    {
        if ((uintptr_t)pv - (uintptr_t)pSubHeap->pv < pSubHeap->cb)
        {
#ifdef MMUKHEAP_WITH_STATISTICS
            cbActual = RTHeapSimpleSize(pSubHeap->hSimple, pv);
            PMMUKHEAPSTAT pStat = (PMMUKHEAPSTAT)RTAvlULGet(&pHeap->pStatTree, (AVLULKEY)enmTag);
            if (pStat)
            {
                pStat->cFrees++;
                pStat->cbCurAllocated -= cbActual;
                pStat->cbFreed += cbActual;
            }
            pHeap->Stat.cFrees++;
            pHeap->Stat.cbFreed += cbActual;
            pHeap->Stat.cbCurAllocated -= cbActual;
#else
            RT_NOREF_PV(enmTag);
#endif
            RTHeapSimpleFree(pSubHeap->hSimple, pv);

            RTCritSectLeave(&pHeap->Lock);
            return;
        }
        pSubHeap = pSubHeap->pNext;
    }
    AssertMsgFailed(("pv=%p\n", pv));
    RTCritSectLeave(&pHeap->Lock);
}