VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/lib/VBoxGuestR0LibPhysHeap.cpp@ 98103

Last change on this file since 98103 was 98103, checked in by vboxsync, 2 years ago

Copyright year updates by scm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 42.9 KB
 
1/* $Id: VBoxGuestR0LibPhysHeap.cpp 98103 2023-01-17 14:15:46Z vboxsync $ */
2/** @file
3 * VBoxGuestLibR0 - Physical memory heap.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * Permission is hereby granted, free of charge, to any person
10 * obtaining a copy of this software and associated documentation
11 * files (the "Software"), to deal in the Software without
12 * restriction, including without limitation the rights to use,
13 * copy, modify, merge, publish, distribute, sublicense, and/or sell
14 * copies of the Software, and to permit persons to whom the
15 * Software is furnished to do so, subject to the following
16 * conditions:
17 *
18 * The above copyright notice and this permission notice shall be
19 * included in all copies or substantial portions of the Software.
20 *
21 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
23 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
24 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
25 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
26 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
27 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
28 * OTHER DEALINGS IN THE SOFTWARE.
29 */
30
31/** @page pg_vbglr0_phys_heap VBoxGuestLibR0 - Physical memory heap.
32 *
33 * Traditional heap implementation keeping all blocks in an ordered list and
34 * keeping free blocks on additional list via pointers in the user area. This
35 * is similar to @ref grp_rt_heap_simple "RTHeapSimple" and
36 * @ref grp_rt_heap_offset "RTHeapOffset" in IPRT, except that this code handles
37 * multiple chunks and has a physical address associated with each chunk and
38 * block. The alignment is fixed (VBGL_PH_ALLOC_ALIGN).
39 *
40 * When allocating memory, a free block is found that satisfies the request,
41 * extending the heap with another chunk if needed. The block is split if it's
42 * too large, and the tail end is put on the free list.
43 *
44 * When freeing memory, the block being freed is put back on the free list and
45 * we use the block list to check whether it can be merged with adjacent blocks.
46 *
47 * @note The original code managed the blocks in two separate lists for free and
48 * allocated blocks, which had the disadvantage of only allowing merging with
49 * the block after the block being freed. On the plus side, it had the
50 * potential for slightly better locality when examining the free list,
51 * since the next pointer and block size members were closer to one
52 * another.
53 */
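/*
 * A minimal usage sketch of the public API defined in this file; the 64 byte
 * request size is just an example value:
 *
 *     void *pvReq = VbglR0PhysHeapAlloc(64);
 *     if (pvReq)
 *     {
 *         uint32_t const PhysAddr = VbglR0PhysHeapGetPhysAddr(pvReq);
 *         // ... fill in the request and hand PhysAddr to the host ...
 *         VbglR0PhysHeapFree(pvReq);
 *     }
 */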
54
55
56/*********************************************************************************************************************************
57* Header Files *
58*********************************************************************************************************************************/
59#include "VBoxGuestR0LibInternal.h"
60
61#include <iprt/assert.h>
62#include <iprt/err.h>
63#include <iprt/mem.h>
64#include <iprt/memobj.h>
65#include <iprt/semaphore.h>
66
67
68/*********************************************************************************************************************************
69* Defined Constants And Macros *
70*********************************************************************************************************************************/
71/** Enables heap dumping. */
72#if defined(DOXYGEN_RUNNING) || 0
73# define VBGL_PH_DUMPHEAP
74#endif
75
76#ifdef VBGL_PH_DUMPHEAP
77# define VBGL_PH_DPRINTF(a) RTAssertMsg2Weak a
78#else
79# define VBGL_PH_DPRINTF(a) do { } while (0)
80#endif
81
82/** Heap chunk signature */
83#define VBGL_PH_CHUNKSIGNATURE UINT32_C(0xADDCCCCC)
84/** Heap chunk allocation unit */
85#define VBGL_PH_CHUNKSIZE (0x10000)
86
87/** Heap block signature */
88#define VBGL_PH_BLOCKSIGNATURE UINT32_C(0xADDBBBBB)
89
90/** The allocation block alignment.
91 *
92 * This cannot be larger than the size of VBGLPHYSHEAPBLOCK.
93 */
94#define VBGL_PH_ALLOC_ALIGN (sizeof(void *))
95
96/** Max number of free nodes to search before just using the best fit.
97 *
98 * This is used to limit the free list walking during allocation and just get
99 * on with the job. A low number should reduce the cache thrashing at the
100 * possible cost of heap fragmentation.
101 *
102 * Picked 16 after comparing the tstVbglR0PhysHeap-1 results w/ uRandSeed=42 for
103 * different max values.
104 */
105#define VBGL_PH_MAX_FREE_SEARCH 16
106
107/** Threshold to stop the block search if a free block is at least this much too big.
108 *
109 * May cause more fragmentation (depending on usage pattern), but should speed up
110 * allocation and hopefully reduce cache thrashing.
111 *
112 * Since we merge adjacent free blocks when we can, free blocks should typically
113 * be a lot larger than what's requested. So, it is probably a good idea to
114 * just chop up a large block rather than keep searching for a perfect-ish
115 * match.
116 *
117 * Undefine this to disable this trick.
118 */
119#if defined(DOXYGEN_RUNNING) || 1
120# define VBGL_PH_STOP_SEARCH_AT_EXCESS _4K
121#endif
122
123/** Threshold at which to split out a tail free block when allocating.
124 *
125 * The value gives the amount of user space, i.e. excluding the header.
126 *
127 * Using 32 bytes based on VMMDev.h request sizes. The smallest requests are 24
128 * bytes, i.e. only the header; there are at least 4 of these. There are at least 10 with
129 * size 28 bytes and at least 11 with size 32 bytes. So, 32 bytes would fit
130 * some 25 requests out of about 60, which is reasonable.
131 */
132#define VBGL_PH_MIN_SPLIT_FREE_BLOCK 32
133
134
135/** The smallest amount of user data that can be allocated.
136 *
137 * This is to ensure that the block can be converted into a
138 * VBGLPHYSHEAPFREEBLOCK structure when freed. This must be smaller than or equal
139 * to VBGL_PH_MIN_SPLIT_FREE_BLOCK.
140 */
141#define VBGL_PH_SMALLEST_ALLOC_SIZE 16
142
143/** The maximum allocation request size. */
144#define VBGL_PH_LARGEST_ALLOC_SIZE RT_ALIGN_32( _128M \
145 - sizeof(VBGLPHYSHEAPBLOCK) \
146 - sizeof(VBGLPHYSHEAPCHUNK) \
147 - VBGL_PH_ALLOC_ALIGN, \
148 VBGL_PH_ALLOC_ALIGN)
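/* On a 64-bit guest this works out to 0x8000000 - 32 - 64 - 8 = 0x7ffff98 bytes,
   i.e. just short of 128 MiB of user data per allocation. */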
149
150/**
151 * Whether to use the RTR0MemObjAllocCont API or RTMemContAlloc for
152 * allocating chunks.
153 *
154 * This can be enabled on hosts where RTMemContAlloc is more complicated than
155 * RTR0MemObjAllocCont. This can also be done if we wish to save code space, as
156 * the latter is typically always dragged into the link on guests where the
157 * linker cannot eliminate functions within objects. The only drawback is that
158 * RTR0MemObjAllocCont requires another heap allocation for the handle.
159 */
160#if defined(DOXYGEN_RUNNING) || (!defined(IN_TESTCASE) && 0)
161# define VBGL_PH_USE_MEMOBJ
162#endif
163
164
165/*********************************************************************************************************************************
166* Structures and Typedefs *
167*********************************************************************************************************************************/
168/**
169 * A heap block (within a chunk).
170 *
171 * This is used to track a part of a heap chunk that's either free or
172 * allocated. The VBGLPHYSHEAPBLOCK::fAllocated member indicates which it is.
173 */
174struct VBGLPHYSHEAPBLOCK
175{
176 /** Magic value (VBGL_PH_BLOCKSIGNATURE). */
177 uint32_t u32Signature;
178
179 /** Size of user data in the block. Does not include this block header. */
180 uint32_t cbUser : 31;
181 /** The top bit indicates whether it's allocated or free. */
182 uint32_t fAllocated : 1;
183
184 /** Pointer to the next block on the list. */
185 VBGLPHYSHEAPBLOCK *pNext;
186 /** Pointer to the previous block on the list. */
187 VBGLPHYSHEAPBLOCK *pPrev;
188 /** Pointer back to the chunk. */
189 VBGLPHYSHEAPCHUNK *pChunk;
190};
191
192/**
193 * A free block.
194 */
195struct VBGLPHYSHEAPFREEBLOCK
196{
197 /** Core block data. */
198 VBGLPHYSHEAPBLOCK Core;
199 /** Pointer to the next free list entry. */
200 VBGLPHYSHEAPFREEBLOCK *pNextFree;
201 /** Pointer to the previous free list entry. */
202 VBGLPHYSHEAPFREEBLOCK *pPrevFree;
203};
204AssertCompile(VBGL_PH_SMALLEST_ALLOC_SIZE >= sizeof(VBGLPHYSHEAPFREEBLOCK) - sizeof(VBGLPHYSHEAPBLOCK));
205AssertCompile(VBGL_PH_MIN_SPLIT_FREE_BLOCK >= sizeof(VBGLPHYSHEAPFREEBLOCK) - sizeof(VBGLPHYSHEAPBLOCK));
206AssertCompile(VBGL_PH_MIN_SPLIT_FREE_BLOCK >= VBGL_PH_SMALLEST_ALLOC_SIZE);
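/* Sketch of the numbers behind the asserts above, assuming a 64-bit guest: the
   two free-list pointers add 16 bytes on top of the core header (8 bytes on a
   32-bit guest), so VBGL_PH_SMALLEST_ALLOC_SIZE (16) and
   VBGL_PH_MIN_SPLIT_FREE_BLOCK (32) both leave room for them in the user area
   of a freed block. */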
207
208/**
209 * A chunk of memory used by the heap for sub-allocations.
210 *
211 * There is a list of these.
212 */
213struct VBGLPHYSHEAPCHUNK
214{
215 /** Magic value (VBGL_PH_CHUNKSIGNATURE). */
216 uint32_t u32Signature;
217
218 /** Size of the chunk. Includes the chunk header. */
219 uint32_t cbChunk;
220
221 /** Physical address of the chunk (contiguous). */
222 uint32_t physAddr;
223
224#if !defined(VBGL_PH_USE_MEMOBJ) || ARCH_BITS != 32
225 uint32_t uPadding1;
226#endif
227
228 /** Number of blocks of any kind. */
229 int32_t cBlocks;
230 /** Number of free blocks. */
231 int32_t cFreeBlocks;
232
233 /** Pointer to the next chunk. */
234 VBGLPHYSHEAPCHUNK *pNext;
235 /** Pointer to the previous chunk. */
236 VBGLPHYSHEAPCHUNK *pPrev;
237
238#if defined(VBGL_PH_USE_MEMOBJ)
239 /** The allocation handle. */
240 RTR0MEMOBJ hMemObj;
241#endif
242
243#if ARCH_BITS == 64
244 /** Pad the size up to 64 bytes. */
245# ifdef VBGL_PH_USE_MEMOBJ
246 uintptr_t auPadding2[2];
247# else
248 uintptr_t auPadding2[3];
249# endif
250#endif
251};
252#if ARCH_BITS == 64
253AssertCompileSize(VBGLPHYSHEAPCHUNK, 64);
254#endif
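/* Layout sketch for the 64 byte assert above, assuming a 64-bit guest without
   VBGL_PH_USE_MEMOBJ: six 32-bit fields (24 bytes) + pNext/pPrev (16 bytes)
   + auPadding2[3] (24 bytes) = 64 bytes; with VBGL_PH_USE_MEMOBJ the hMemObj
   handle takes the place of one padding slot, keeping the total at 64. */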
255
256
257/**
258 * Debug function that dumps the heap.
259 */
260#ifndef VBGL_PH_DUMPHEAP
261# define dumpheap(pszWhere) do { } while (0)
262#else
263static void dumpheap(const char *pszWhere)
264{
265 VBGL_PH_DPRINTF(("VBGL_PH dump at '%s'\n", pszWhere));
266
267 VBGL_PH_DPRINTF(("Chunks:\n"));
268 for (VBGLPHYSHEAPCHUNK *pChunk = g_vbgldata.pChunkHead; pChunk; pChunk = pChunk->pNext)
269 VBGL_PH_DPRINTF(("%p: pNext = %p, pPrev = %p, sign = %08X, size = %8d, cBlocks = %8d, cFreeBlocks=%8d, phys = %08X\n",
270 pChunk, pChunk->pNext, pChunk->pPrev, pChunk->u32Signature, pChunk->cbChunk,
271 pChunk->cBlocks, pChunk->cFreeBlocks, pChunk->physAddr));
272
273 VBGL_PH_DPRINTF(("Allocated blocks:\n"));
274 for (VBGLPHYSHEAPBLOCK *pBlock = g_vbgldata.pBlockHead; pBlock; pBlock = pBlock->pNext)
275 VBGL_PH_DPRINTF(("%p: pNext = %p, pPrev = %p, size = %05x, sign = %08X, %s, pChunk = %p\n",
276 pBlock, pBlock->pNext, pBlock->pPrev, pBlock->cbUser,
277 pBlock->u32Signature, pBlock->fAllocated ? "allocated" : " free", pBlock->pChunk));
278
279 VBGL_PH_DPRINTF(("Free blocks:\n"));
280 for (VBGLPHYSHEAPFREEBLOCK *pBlock = g_vbgldata.pFreeHead; pBlock; pBlock = pBlock->pNextFree)
281 VBGL_PH_DPRINTF(("%p: pNextFree = %p, pPrevFree = %p, size = %05x, sign = %08X, pChunk = %p%s\n",
282 pBlock, pBlock->pNextFree, pBlock->pPrevFree, pBlock->Core.cbUser,
283 pBlock->Core.u32Signature, pBlock->Core.pChunk,
284 !pBlock->Core.fAllocated ? "" : " !!allocated-block-on-freelist!!"));
285
286 VBGL_PH_DPRINTF(("VBGL_PH dump at '%s' done\n", pszWhere));
287}
288#endif
289
290
291/**
292 * Initialize a free block
293 */
294static void vbglPhysHeapInitFreeBlock(VBGLPHYSHEAPFREEBLOCK *pBlock, VBGLPHYSHEAPCHUNK *pChunk, uint32_t cbUser)
295{
296 Assert(pBlock != NULL);
297 Assert(pChunk != NULL);
298
299 pBlock->Core.u32Signature = VBGL_PH_BLOCKSIGNATURE;
300 pBlock->Core.cbUser = cbUser;
301 pBlock->Core.fAllocated = false;
302 pBlock->Core.pNext = NULL;
303 pBlock->Core.pPrev = NULL;
304 pBlock->Core.pChunk = pChunk;
305 pBlock->pNextFree = NULL;
306 pBlock->pPrevFree = NULL;
307}
308
309
310/**
311 * Updates block statistics when a block is added.
312 */
313DECLINLINE(void) vbglPhysHeapStatsBlockAdded(VBGLPHYSHEAPBLOCK *pBlock)
314{
315 g_vbgldata.cBlocks += 1;
316 pBlock->pChunk->cBlocks += 1;
317 AssertMsg((uint32_t)pBlock->pChunk->cBlocks <= pBlock->pChunk->cbChunk / sizeof(VBGLPHYSHEAPFREEBLOCK),
318 ("pChunk=%p: cbChunk=%#x cBlocks=%d\n", pBlock->pChunk, pBlock->pChunk->cbChunk, pBlock->pChunk->cBlocks));
319}
320
321
322/**
323 * Links @a pBlock onto the head of the block list.
324 *
325 * This also updates the per-chunk block counts.
326 */
327static void vbglPhysHeapInsertBlock(VBGLPHYSHEAPBLOCK *pBlock)
328{
329 AssertMsg(pBlock->pNext == NULL, ("pBlock->pNext = %p\n", pBlock->pNext));
330 AssertMsg(pBlock->pPrev == NULL, ("pBlock->pPrev = %p\n", pBlock->pPrev));
331
332 /* inserting to head of list */
333 VBGLPHYSHEAPBLOCK *pOldHead = g_vbgldata.pBlockHead;
334
335 pBlock->pNext = pOldHead;
336 pBlock->pPrev = NULL;
337
338 if (pOldHead)
339 pOldHead->pPrev = pBlock;
340 g_vbgldata.pBlockHead = pBlock;
341
342 /* Update the stats: */
343 vbglPhysHeapStatsBlockAdded(pBlock);
344}
345
346
347/**
348 * Links @a pBlock onto the block list after @a pInsertAfter.
349 *
350 * This also updates the per-chunk block counts.
351 */
352static void vbglPhysHeapInsertBlockAfter(VBGLPHYSHEAPBLOCK *pBlock, VBGLPHYSHEAPBLOCK *pInsertAfter)
353{
354 AssertMsg(pBlock->pNext == NULL, ("pBlock->pNext = %p\n", pBlock->pNext));
355 AssertMsg(pBlock->pPrev == NULL, ("pBlock->pPrev = %p\n", pBlock->pPrev));
356
357 pBlock->pNext = pInsertAfter->pNext;
358 pBlock->pPrev = pInsertAfter;
359
360 if (pInsertAfter->pNext)
361 pInsertAfter->pNext->pPrev = pBlock;
362
363 pInsertAfter->pNext = pBlock;
364
365 /* Update the stats: */
366 vbglPhysHeapStatsBlockAdded(pBlock);
367}
368
369
370/**
371 * Unlinks @a pBlock from the block list.
372 *
373 * This also updates the per-chunk block counts.
374 */
375static void vbglPhysHeapUnlinkBlock(VBGLPHYSHEAPBLOCK *pBlock)
376{
377 VBGLPHYSHEAPBLOCK *pOtherBlock = pBlock->pNext;
378 if (pOtherBlock)
379 pOtherBlock->pPrev = pBlock->pPrev;
380 /* else: this is the tail of the list, but we do not maintain tail pointers for block lists, so nothing to do. */
381
382 pOtherBlock = pBlock->pPrev;
383 if (pOtherBlock)
384 pOtherBlock->pNext = pBlock->pNext;
385 else
386 {
387 Assert(g_vbgldata.pBlockHead == pBlock);
388 g_vbgldata.pBlockHead = pBlock->pNext;
389 }
390
391 pBlock->pNext = NULL;
392 pBlock->pPrev = NULL;
393
394 /* Update the stats: */
395 g_vbgldata.cBlocks -= 1;
396 pBlock->pChunk->cBlocks -= 1;
397 AssertMsg(pBlock->pChunk->cBlocks >= 0,
398 ("pChunk=%p: cbChunk=%#x cBlocks=%d\n", pBlock->pChunk, pBlock->pChunk->cbChunk, pBlock->pChunk->cBlocks));
399 Assert(g_vbgldata.cBlocks >= 0);
400}
401
402
403
404/**
405 * Updates statistics after adding a free block.
406 */
407DECLINLINE(void) vbglPhysHeapStatsFreeBlockAdded(VBGLPHYSHEAPFREEBLOCK *pBlock)
408{
409 g_vbgldata.cFreeBlocks += 1;
410 pBlock->Core.pChunk->cFreeBlocks += 1;
411}
412
413
414/**
415 * Links @a pBlock onto the head of the free chain.
416 *
417 * This is used during block freeing and when adding a new chunk.
418 *
419 * This also updates the per-chunk block counts.
420 */
421static void vbglPhysHeapInsertFreeBlock(VBGLPHYSHEAPFREEBLOCK *pBlock)
422{
423 Assert(!pBlock->Core.fAllocated);
424 AssertMsg(pBlock->pNextFree == NULL, ("pBlock->pNextFree = %p\n", pBlock->pNextFree));
425 AssertMsg(pBlock->pPrevFree == NULL, ("pBlock->pPrevFree = %p\n", pBlock->pPrevFree));
426
427 /* inserting to head of list */
428 VBGLPHYSHEAPFREEBLOCK *pOldHead = g_vbgldata.pFreeHead;
429
430 pBlock->pNextFree = pOldHead;
431 pBlock->pPrevFree = NULL;
432
433 if (pOldHead)
434 pOldHead->pPrevFree = pBlock;
435 g_vbgldata.pFreeHead = pBlock;
436
437 /* Update the stats: */
438 vbglPhysHeapStatsFreeBlockAdded(pBlock);
439}
440
441
442/**
443 * Links @a pBlock after @a pInsertAfter.
444 *
445 * This is used when splitting a free block during allocation to preserve the
446 * place in the free list.
447 *
448 * This also updates the per-chunk block counts.
449 */
450static void vbglPhysHeapInsertFreeBlockAfter(VBGLPHYSHEAPFREEBLOCK *pBlock, VBGLPHYSHEAPFREEBLOCK *pInsertAfter)
451{
452 Assert(!pBlock->Core.fAllocated);
453 AssertMsg(pBlock->pNextFree == NULL, ("pBlock->pNextFree = %p\n", pBlock->pNextFree));
454 AssertMsg(pBlock->pPrevFree == NULL, ("pBlock->pPrevFree = %p\n", pBlock->pPrevFree));
455
456 /* inserting after the given node */
457 pBlock->pNextFree = pInsertAfter->pNextFree;
458 pBlock->pPrevFree = pInsertAfter;
459
460 if (pInsertAfter->pNextFree)
461 pInsertAfter->pNextFree->pPrevFree = pBlock;
462
463 pInsertAfter->pNextFree = pBlock;
464
465 /* Update the stats: */
466 vbglPhysHeapStatsFreeBlockAdded(pBlock);
467}
468
469
470/**
471 * Unlinks @a pBlock from the free list.
472 *
473 * This also updates the per-chunk block counts.
474 */
475static void vbglPhysHeapUnlinkFreeBlock(VBGLPHYSHEAPFREEBLOCK *pBlock)
476{
477 Assert(!pBlock->Core.fAllocated);
478
479 VBGLPHYSHEAPFREEBLOCK *pOtherBlock = pBlock->pNextFree;
480 if (pOtherBlock)
481 pOtherBlock->pPrevFree = pBlock->pPrevFree;
482 /* else: this is the tail of the list, but we do not maintain tail pointers for block lists, so nothing to do. */
483
484 pOtherBlock = pBlock->pPrevFree;
485 if (pOtherBlock)
486 pOtherBlock->pNextFree = pBlock->pNextFree;
487 else
488 {
489 Assert(g_vbgldata.pFreeHead == pBlock);
490 g_vbgldata.pFreeHead = pBlock->pNextFree;
491 }
492
493 pBlock->pNextFree = NULL;
494 pBlock->pPrevFree = NULL;
495
496 /* Update the stats: */
497 g_vbgldata.cFreeBlocks -= 1;
498 pBlock->Core.pChunk->cFreeBlocks -= 1;
499 AssertMsg(pBlock->Core.pChunk->cFreeBlocks >= 0,
500 ("pChunk=%p: cbChunk=%#x cFreeBlocks=%d\n",
501 pBlock->Core.pChunk, pBlock->Core.pChunk->cbChunk, pBlock->Core.pChunk->cFreeBlocks));
502 Assert(g_vbgldata.cFreeBlocks >= 0);
503}
504
505
506/**
507 * Allocate another chunk and add it to the heap.
508 *
509 * @returns Pointer to the free block in the new chunk on success, NULL on
510 * allocation failure.
511 * @param cbMinBlock The size of the user block we need this chunk for.
512 */
513static VBGLPHYSHEAPFREEBLOCK *vbglPhysHeapChunkAlloc(uint32_t cbMinBlock)
514{
515 RTCCPHYS PhysAddr = NIL_RTHCPHYS;
516 VBGLPHYSHEAPCHUNK *pChunk;
517 uint32_t cbChunk;
518#ifdef VBGL_PH_USE_MEMOBJ
519 RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ;
520 int rc;
521#endif
522
523 VBGL_PH_DPRINTF(("Allocating new chunk for %#x byte allocation\n", cbMinBlock));
524 AssertReturn(cbMinBlock <= VBGL_PH_LARGEST_ALLOC_SIZE, NULL); /* paranoia */
525
526 /*
527 * Compute the size of the new chunk, rounding up to the next chunk size,
528 * which must be a power of 2.
529 *
530 * Note! Using VBGLPHYSHEAPFREEBLOCK here means the minimum block size is
531 * 8 or 16 bytes too high, but safer this way since cbMinBlock is
532 * zero during the init code call.
533 */
534 Assert(RT_IS_POWER_OF_TWO(VBGL_PH_CHUNKSIZE));
535 cbChunk = cbMinBlock + sizeof(VBGLPHYSHEAPCHUNK) + sizeof(VBGLPHYSHEAPFREEBLOCK);
536 cbChunk = RT_ALIGN_32(cbChunk, VBGL_PH_CHUNKSIZE);
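    /* Worked example, assuming a 64-bit guest (64 byte chunk header, 48 byte
       free block header): cbMinBlock = 0x1000 gives 0x1000 + 64 + 48 = 0x1070,
       which rounds up to a 0x10000 byte chunk; anything above 0xff90 bytes
       rounds up to 0x20000, and so on. */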
537
538 /*
539 * This function allocates physically contiguous memory below 4 GB. This 4GB
540 * limitation stems from using a 32-bit OUT instruction to pass a block
541 * physical address to the host.
542 */
543#ifdef VBGL_PH_USE_MEMOBJ
544 rc = RTR0MemObjAllocCont(&hMemObj, cbChunk, false /*fExecutable*/);
545 pChunk = (VBGLPHYSHEAPCHUNK *)(RT_SUCCESS(rc) ? RTR0MemObjAddress(hMemObj) : NULL);
546#else
547 pChunk = (VBGLPHYSHEAPCHUNK *)RTMemContAlloc(&PhysAddr, cbChunk);
548#endif
549 if (!pChunk)
550 {
551 /* If the allocation fails, reduce the size and try again. */
552 uint32_t cbMinChunk = RT_MAX(cbMinBlock, PAGE_SIZE / 2) + sizeof(VBGLPHYSHEAPCHUNK) + sizeof(VBGLPHYSHEAPFREEBLOCK);
553 cbMinChunk = RT_ALIGN_32(cbMinChunk, PAGE_SIZE);
554 if (cbChunk > cbMinChunk)
555 do
556 {
557 cbChunk >>= 2;
558 cbChunk = RT_ALIGN_32(cbChunk, PAGE_SIZE);
559#ifdef VBGL_PH_USE_MEMOBJ
560 rc = RTR0MemObjAllocCont(&hMemObj, cbChunk, false /*fExecutable*/);
561 pChunk = (VBGLPHYSHEAPCHUNK *)(RT_SUCCESS(rc) ? RTR0MemObjAddress(hMemObj) : NULL);
562#else
563 pChunk = (VBGLPHYSHEAPCHUNK *)RTMemContAlloc(&PhysAddr, cbChunk);
564#endif
565 } while (!pChunk && cbChunk > cbMinChunk);
566 }
567 if (pChunk)
568 {
569 VBGLPHYSHEAPCHUNK *pOldHeadChunk;
570 VBGLPHYSHEAPFREEBLOCK *pBlock;
571 AssertRelease(PhysAddr < _4G && PhysAddr + cbChunk <= _4G);
572
573 /*
574 * Init the new chunk.
575 */
576 pChunk->u32Signature = VBGL_PH_CHUNKSIGNATURE;
577 pChunk->cbChunk = cbChunk;
578 pChunk->physAddr = (uint32_t)PhysAddr;
579 pChunk->cBlocks = 0;
580 pChunk->cFreeBlocks = 0;
581 pChunk->pNext = NULL;
582 pChunk->pPrev = NULL;
583#ifdef VBGL_PH_USE_MEMOBJ
584 pChunk->hMemObj = hMemObj;
585#endif
586
587 /* Initialize the padding too: */
588#if !defined(VBGL_PH_USE_MEMOBJ) || ARCH_BITS != 32
589 pChunk->uPadding1 = UINT32_C(0xADDCAAA1);
590#endif
591#if ARCH_BITS == 64
592 pChunk->auPadding2[0] = UINT64_C(0xADDCAAA3ADDCAAA2);
593 pChunk->auPadding2[1] = UINT64_C(0xADDCAAA5ADDCAAA4);
594# ifndef VBGL_PH_USE_MEMOBJ
595 pChunk->auPadding2[2] = UINT64_C(0xADDCAAA7ADDCAAA6);
596# endif
597#endif
598
599 /*
600 * Initialize the free block, which now occupies the entire chunk.
601 */
602 pBlock = (VBGLPHYSHEAPFREEBLOCK *)(pChunk + 1);
603 vbglPhysHeapInitFreeBlock(pBlock, pChunk, cbChunk - sizeof(VBGLPHYSHEAPCHUNK) - sizeof(VBGLPHYSHEAPBLOCK));
604 vbglPhysHeapInsertBlock(&pBlock->Core);
605 vbglPhysHeapInsertFreeBlock(pBlock);
606
607 /*
608 * Add the chunk to the list.
609 */
610 pOldHeadChunk = g_vbgldata.pChunkHead;
611 pChunk->pNext = pOldHeadChunk;
612 if (pOldHeadChunk)
613 pOldHeadChunk->pPrev = pChunk;
614 g_vbgldata.pChunkHead = pChunk;
615
616 VBGL_PH_DPRINTF(("Allocated chunk %p LB %#x, block %p LB %#x\n", pChunk, cbChunk, pBlock, pBlock->Core.cbUser));
617 return pBlock;
618 }
619 LogRel(("vbglPhysHeapChunkAlloc: failed to alloc %u (%#x) contiguous bytes.\n", cbChunk, cbChunk));
620 return NULL;
621}
622
623
624/**
625 * Deletes a chunk: Unlinking all its blocks and freeing its memory.
626 */
627static void vbglPhysHeapChunkDelete(VBGLPHYSHEAPCHUNK *pChunk)
628{
629 uintptr_t uEnd, uCur;
630 Assert(pChunk != NULL);
631 AssertMsg(pChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE, ("pChunk->u32Signature = %08X\n", pChunk->u32Signature));
632
633 VBGL_PH_DPRINTF(("Deleting chunk %p size %x\n", pChunk, pChunk->cbChunk));
634
635 /*
636 * First scan the chunk and unlink all blocks from the lists.
637 *
638 * Note! We could do this by finding the first and last block list entries
639 * and just dropping the whole chain relating to this chunk, rather than
640 * doing it one by one. But doing it one by one is simpler and will
641 * continue to work if the block list ends in an unsorted state.
642 */
643 uEnd = (uintptr_t)pChunk + pChunk->cbChunk;
644 uCur = (uintptr_t)(pChunk + 1);
645
646 while (uCur < uEnd)
647 {
648 VBGLPHYSHEAPBLOCK *pBlock = (VBGLPHYSHEAPBLOCK *)uCur;
649 Assert(pBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE);
650 Assert(pBlock->pChunk == pChunk);
651
652 uCur += pBlock->cbUser + sizeof(VBGLPHYSHEAPBLOCK);
653 Assert(uCur == (uintptr_t)pBlock->pNext || uCur >= uEnd);
654
655 if (!pBlock->fAllocated)
656 vbglPhysHeapUnlinkFreeBlock((VBGLPHYSHEAPFREEBLOCK *)pBlock);
657 vbglPhysHeapUnlinkBlock(pBlock);
658 }
659
660 AssertMsg(uCur == uEnd, ("uCur = %p, uEnd = %p, pChunk->cbChunk = %08X\n", uCur, uEnd, pChunk->cbChunk));
661
662 /*
663 * Unlink the chunk from the chunk list.
664 */
665 if (pChunk->pNext)
666 pChunk->pNext->pPrev = pChunk->pPrev;
667 /* else: we do not maintain tail pointer. */
668
669 if (pChunk->pPrev)
670 pChunk->pPrev->pNext = pChunk->pNext;
671 else
672 {
673 Assert(g_vbgldata.pChunkHead == pChunk);
674 g_vbgldata.pChunkHead = pChunk->pNext;
675 }
676
677 /*
678 * Finally, free the chunk memory.
679 */
680#ifdef VBGL_PH_USE_MEMOBJ
681 RTR0MemObjFree(pChunk->hMemObj, true /*fFreeMappings*/);
682#else
683 RTMemContFree(pChunk, pChunk->cbChunk);
684#endif
685}
686
687
688DECLR0VBGL(void *) VbglR0PhysHeapAlloc(uint32_t cb)
689{
690 VBGLPHYSHEAPFREEBLOCK *pBlock;
691 VBGLPHYSHEAPFREEBLOCK *pIter;
692 int32_t cLeft;
693#ifdef VBGL_PH_STOP_SEARCH_AT_EXCESS
694 uint32_t cbAlwaysSplit;
695#endif
696 int rc;
697
698 /*
699 * Make sure we don't allocate anything too small to turn into a free node
700 * and align the size to prevent pointer misalignment and whatnot.
701 */
702 cb = RT_MAX(cb, VBGL_PH_SMALLEST_ALLOC_SIZE);
703 cb = RT_ALIGN_32(cb, VBGL_PH_ALLOC_ALIGN);
704 AssertCompile(VBGL_PH_ALLOC_ALIGN <= sizeof(pBlock->Core));
705
706 rc = RTSemFastMutexRequest(g_vbgldata.hMtxHeap);
707 AssertRCReturn(rc, NULL);
708
709 dumpheap("pre alloc");
710
711 /*
712 * Search the free list. We do this in linear fashion as we don't expect
713 * there to be many blocks in the heap.
714 */
715#ifdef VBGL_PH_STOP_SEARCH_AT_EXCESS
716 cbAlwaysSplit = cb + VBGL_PH_STOP_SEARCH_AT_EXCESS;
717#endif
718 cLeft = VBGL_PH_MAX_FREE_SEARCH;
719 pBlock = NULL;
720 if (cb <= PAGE_SIZE / 4 * 3)
721 {
722 /* Smaller than 3/4 page: Prefer a free block that can keep the request within a single page,
723 so HGCM processing in VMMDev can use page locks instead of several reads and writes. */
724 VBGLPHYSHEAPFREEBLOCK *pFallback = NULL;
725 for (pIter = g_vbgldata.pFreeHead; pIter != NULL; pIter = pIter->pNextFree, cLeft--)
726 {
727 AssertBreak(pIter->Core.u32Signature == VBGL_PH_BLOCKSIGNATURE);
728 if (pIter->Core.cbUser >= cb)
729 {
730 if (pIter->Core.cbUser == cb)
731 {
732 if (PAGE_SIZE - ((uintptr_t)(pIter + 1) & PAGE_OFFSET_MASK) >= cb)
733 {
734 pBlock = pIter;
735 break;
736 }
737 pFallback = pIter;
738 }
739 else
740 {
741 if (!pFallback || pIter->Core.cbUser < pFallback->Core.cbUser)
742 pFallback = pIter;
743 if (PAGE_SIZE - ((uintptr_t)(pIter + 1) & PAGE_OFFSET_MASK) >= cb)
744 {
745 if (!pBlock || pIter->Core.cbUser < pBlock->Core.cbUser)
746 pBlock = pIter;
747#ifdef VBGL_PH_STOP_SEARCH_AT_EXCESS
748 else if (pIter->Core.cbUser >= cbAlwaysSplit)
749 {
750 pBlock = pIter;
751 break;
752 }
753#endif
754 }
755 }
756
757 if (cLeft > 0)
758 { /* likely */ }
759 else
760 break;
761 }
762 }
763
764 if (!pBlock)
765 pBlock = pFallback;
766 }
767 else
768 {
769 /* Larger than 3/4 page: Find the closest free list match. */
770 for (pIter = g_vbgldata.pFreeHead; pIter != NULL; pIter = pIter->pNextFree, cLeft--)
771 {
772 AssertBreak(pIter->Core.u32Signature == VBGL_PH_BLOCKSIGNATURE);
773 if (pIter->Core.cbUser >= cb)
774 {
775 if (pIter->Core.cbUser == cb)
776 {
777 /* Exact match - we're done! */
778 pBlock = pIter;
779 break;
780 }
781
782#ifdef VBGL_PH_STOP_SEARCH_AT_EXCESS
783 if (pIter->Core.cbUser >= cbAlwaysSplit)
784 {
785 /* Really big block - no point in continuing the search! */
786 pBlock = pIter;
787 break;
788 }
789#endif
790 /* Looking for a free block with nearest size. */
791 if (!pBlock || pIter->Core.cbUser < pBlock->Core.cbUser)
792 pBlock = pIter;
793
794 if (cLeft > 0)
795 { /* likely */ }
796 else
797 break;
798 }
799 }
800 }
801
802 if (!pBlock)
803 {
804 /* No free blocks; allocate a new chunk. The only free block of the
805 new chunk will be returned. */
806 pBlock = vbglPhysHeapChunkAlloc(cb);
807 }
808
809 if (pBlock)
810 {
811 /* We have a free block, either found or allocated. */
812 AssertMsg(pBlock->Core.u32Signature == VBGL_PH_BLOCKSIGNATURE,
813 ("pBlock = %p, pBlock->u32Signature = %08X\n", pBlock, pBlock->Core.u32Signature));
814 AssertMsg(!pBlock->Core.fAllocated, ("pBlock = %p\n", pBlock));
815
816 /*
817 * If the block is too large, split off a free block with the unused space.
818 *
819 * We do this before unlinking the block so we can preserve the location
820 * in the free list.
821 *
822 * Note! We cannot split off and return the tail end here, because that may
823 * violate the same page requirements for requests smaller than 3/4 page.
824 */
825 AssertCompile(VBGL_PH_MIN_SPLIT_FREE_BLOCK >= sizeof(*pBlock) - sizeof(pBlock->Core));
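    /* Worked example of the split threshold, assuming a 64-bit guest where
       sizeof(VBGLPHYSHEAPBLOCK) is 32 bytes: for cb = 64, a free block with
       cbUser = 512 passes the check (512 >= 64 + 32 + 64) and is split into a
       64 byte allocation plus a new free block with 512 - 64 - 32 = 416 bytes
       of user space. */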
826 if (pBlock->Core.cbUser >= sizeof(VBGLPHYSHEAPBLOCK) * 2 + VBGL_PH_MIN_SPLIT_FREE_BLOCK + cb)
827 {
828 pIter = (VBGLPHYSHEAPFREEBLOCK *)((uintptr_t)(&pBlock->Core + 1) + cb);
829 vbglPhysHeapInitFreeBlock(pIter, pBlock->Core.pChunk, pBlock->Core.cbUser - cb - sizeof(VBGLPHYSHEAPBLOCK));
830
831 pBlock->Core.cbUser = cb;
832
833 /* Insert the new 'pIter' block after the 'pBlock' in the block list
834 and in the free list. */
835 vbglPhysHeapInsertBlockAfter(&pIter->Core, &pBlock->Core);
836 vbglPhysHeapInsertFreeBlockAfter(pIter, pBlock);
837 }
838
839 /*
840 * Unlink the block from the free list and mark it as allocated.
841 */
842 vbglPhysHeapUnlinkFreeBlock(pBlock);
843 pBlock->Core.fAllocated = true;
844
845 dumpheap("post alloc");
846
847 /*
848 * Return success.
849 */
850 rc = RTSemFastMutexRelease(g_vbgldata.hMtxHeap);
851
852 VBGL_PH_DPRINTF(("VbglR0PhysHeapAlloc: returns %p size %x\n", pBlock + 1, pBlock->Core.cbUser));
853 return &pBlock->Core + 1;
854 }
855
856 /*
857 * Return failure.
858 */
859 rc = RTSemFastMutexRelease(g_vbgldata.hMtxHeap);
860 AssertRC(rc);
861
862 VBGL_PH_DPRINTF(("VbglR0PhysHeapAlloc: returns NULL (requested %#x bytes)\n", cb));
863 return NULL;
864}
865
866
867DECLR0VBGL(uint32_t) VbglR0PhysHeapGetPhysAddr(void *pv)
868{
869 /*
870 * Validate the incoming pointer.
871 */
872 if (pv != NULL)
873 {
874 VBGLPHYSHEAPBLOCK *pBlock = (VBGLPHYSHEAPBLOCK *)pv - 1;
875 if ( pBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE
876 && pBlock->fAllocated)
877 {
878 /*
879 * Calculate and return its physical address.
880 */
881 VBGLPHYSHEAPCHUNK *pChunk = pBlock->pChunk;
882 return pChunk->physAddr + (uint32_t)((uintptr_t)pv - (uintptr_t)pChunk);
883 }
884
885 AssertMsgFailed(("Use after free or corrupt pointer variable: pv=%p pBlock=%p: u32Signature=%#x cb=%#x fAllocated=%d\n",
886 pv, pBlock, pBlock->u32Signature, pBlock->cbUser, pBlock->fAllocated));
887 }
888 else
889 AssertMsgFailed(("Unexpected NULL pointer\n"));
890 return 0;
891}
892
893
894DECLR0VBGL(void) VbglR0PhysHeapFree(void *pv)
895{
896 if (pv != NULL)
897 {
898 VBGLPHYSHEAPFREEBLOCK *pBlock;
899
900 int rc = RTSemFastMutexRequest(g_vbgldata.hMtxHeap);
901 AssertRCReturnVoid(rc);
902
903 dumpheap("pre free");
904
905 /*
906 * Validate the block header.
907 */
908 pBlock = (VBGLPHYSHEAPFREEBLOCK *)((VBGLPHYSHEAPBLOCK *)pv - 1);
909 if ( pBlock->Core.u32Signature == VBGL_PH_BLOCKSIGNATURE
910 && pBlock->Core.fAllocated
911 && pBlock->Core.cbUser >= VBGL_PH_SMALLEST_ALLOC_SIZE)
912 {
913 VBGLPHYSHEAPCHUNK *pChunk;
914 VBGLPHYSHEAPBLOCK *pNeighbour;
915
916 /*
917 * Change the block status to freed.
918 */
919 VBGL_PH_DPRINTF(("VbglR0PhysHeapFree: %p size %#x\n", pv, pBlock->Core.cbUser));
920
921 pBlock->Core.fAllocated = false;
922 pBlock->pNextFree = pBlock->pPrevFree = NULL;
923 vbglPhysHeapInsertFreeBlock(pBlock);
924
925 dumpheap("post insert");
926
927 /*
928 * Check if the block after this one is also free and we can merge it into this one.
929 */
930 pChunk = pBlock->Core.pChunk;
931
932 pNeighbour = pBlock->Core.pNext;
933 if ( pNeighbour
934 && !pNeighbour->fAllocated
935 && pNeighbour->pChunk == pChunk)
936 {
937 Assert((uintptr_t)pBlock + sizeof(pBlock->Core) + pBlock->Core.cbUser == (uintptr_t)pNeighbour);
938
939 /* Adjust size of current memory block */
940 pBlock->Core.cbUser += pNeighbour->cbUser + sizeof(VBGLPHYSHEAPBLOCK);
941
942 /* Unlink the following node and invalidate it. */
943 vbglPhysHeapUnlinkFreeBlock((VBGLPHYSHEAPFREEBLOCK *)pNeighbour);
944 vbglPhysHeapUnlinkBlock(pNeighbour);
945
946 pNeighbour->u32Signature = ~VBGL_PH_BLOCKSIGNATURE;
947 pNeighbour->cbUser = UINT32_MAX / 4;
948
949 dumpheap("post merge after");
950 }
951
952 /*
953 * Same check for the block before us. This invalidates pBlock.
954 */
955 pNeighbour = pBlock->Core.pPrev;
956 if ( pNeighbour
957 && !pNeighbour->fAllocated
958 && pNeighbour->pChunk == pChunk)
959 {
960 Assert((uintptr_t)pNeighbour + sizeof(*pNeighbour) + pNeighbour->cbUser == (uintptr_t)pBlock);
961
962 /* Adjust size of the block before us */
963 pNeighbour->cbUser += pBlock->Core.cbUser + sizeof(VBGLPHYSHEAPBLOCK);
964
965 /* Unlink this node and invalidate it. */
966 vbglPhysHeapUnlinkFreeBlock(pBlock);
967 vbglPhysHeapUnlinkBlock(&pBlock->Core);
968
969 pBlock->Core.u32Signature = ~VBGL_PH_BLOCKSIGNATURE;
970 pBlock->Core.cbUser = UINT32_MAX / 8;
971
972 pBlock = NULL; /* invalid */
973
974 dumpheap("post merge before");
975 }
976
977 /*
978 * If this chunk is now completely unused, delete it if there are
979 * more completely free ones.
980 */
981 if ( pChunk->cFreeBlocks == pChunk->cBlocks
982 && (pChunk->pPrev || pChunk->pNext))
983 {
984 VBGLPHYSHEAPCHUNK *pCurChunk;
985 uint32_t cUnusedChunks = 0;
986 for (pCurChunk = g_vbgldata.pChunkHead; pCurChunk; pCurChunk = pCurChunk->pNext)
987 {
988 AssertBreak(pCurChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE);
989 if (pCurChunk->cFreeBlocks == pCurChunk->cBlocks)
990 {
991 cUnusedChunks++;
992 if (cUnusedChunks > 1)
993 {
994 /* Delete current chunk, it will also unlink all free blocks
995 * remaining in the chunk from the free list, so the pBlock
996 * will also be invalid after this.
997 */
998 vbglPhysHeapChunkDelete(pChunk);
999 pBlock = NULL; /* invalid */
1000 pChunk = NULL;
1001 pNeighbour = NULL;
1002 break;
1003 }
1004 }
1005 }
1006 }
1007
1008 dumpheap("post free");
1009 }
1010 else
1011 AssertMsgFailed(("pBlock: %p: u32Signature=%#x cb=%#x fAllocated=%d - double free?\n",
1012 pBlock, pBlock->Core.u32Signature, pBlock->Core.cbUser, pBlock->Core.fAllocated));
1013
1014 rc = RTSemFastMutexRelease(g_vbgldata.hMtxHeap);
1015 AssertRC(rc);
1016 }
1017}
1018
1019#ifdef IN_TESTCASE /* For the testcase only */
1020
1021/**
1022 * Returns the total size of all free heap blocks.
1023 *
1024 * This is the amount of memory you can theoretically allocate if you do
1025 * allocations exactly matching the free blocks.
1026 *
1027 * @returns The size of the free blocks.
1028 * @returns 0 if heap was safely detected as being bad.
1029 */
1030DECLVBGL(size_t) VbglR0PhysHeapGetFreeSize(void)
1031{
1032 int rc = RTSemFastMutexRequest(g_vbgldata.hMtxHeap);
1033 AssertRCReturn(rc, 0);
1034
1035 size_t cbTotal = 0;
1036 for (VBGLPHYSHEAPFREEBLOCK *pCurBlock = g_vbgldata.pFreeHead; pCurBlock; pCurBlock = pCurBlock->pNextFree)
1037 {
1038 Assert(pCurBlock->Core.u32Signature == VBGL_PH_BLOCKSIGNATURE);
1039 Assert(!pCurBlock->Core.fAllocated);
1040 cbTotal += pCurBlock->Core.cbUser;
1041 }
1042
1043 RTSemFastMutexRelease(g_vbgldata.hMtxHeap);
1044 return cbTotal;
1045}
1046
1047
1048/**
1049 * Checks the heap, caller responsible for locking.
1050 *
1051 * @returns VINF_SUCCESS if okay, error status if not.
1052 * @param pErrInfo Where to return more error details, optional.
1053 */
1054static int vbglR0PhysHeapCheckLocked(PRTERRINFO pErrInfo)
1055{
1056 /*
1057 * Scan the blocks in each chunk, walking the block list in parallel.
1058 */
1059 const VBGLPHYSHEAPBLOCK *pPrevBlockListEntry = NULL;
1060 const VBGLPHYSHEAPBLOCK *pCurBlockListEntry = g_vbgldata.pBlockHead;
1061 unsigned acTotalBlocks[2] = { 0, 0 };
1062 for (VBGLPHYSHEAPCHUNK *pCurChunk = g_vbgldata.pChunkHead, *pPrevChunk = NULL; pCurChunk; pCurChunk = pCurChunk->pNext)
1063 {
1064 AssertReturn(pCurChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE,
1065 RTErrInfoSetF(pErrInfo, VERR_INVALID_MAGIC, "pCurChunk=%p: magic=%#x", pCurChunk, pCurChunk->u32Signature));
1066 AssertReturn(pCurChunk->pPrev == pPrevChunk,
1067 RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_2,
1068 "pCurChunk=%p: pPrev=%p, expected %p", pCurChunk, pCurChunk->pPrev, pPrevChunk));
1069
1070 const VBGLPHYSHEAPBLOCK *pCurBlock = (const VBGLPHYSHEAPBLOCK *)(pCurChunk + 1);
1071 uintptr_t const uEnd = (uintptr_t)pCurChunk + pCurChunk->cbChunk;
1072 unsigned acBlocks[2] = { 0, 0 };
1073 while ((uintptr_t)pCurBlock < uEnd)
1074 {
1075 AssertReturn(pCurBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE,
1076 RTErrInfoSetF(pErrInfo, VERR_INVALID_MAGIC,
1077 "pCurBlock=%p: magic=%#x", pCurBlock, pCurBlock->u32Signature));
1078 AssertReturn(pCurBlock->pChunk == pCurChunk,
1079 RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_2,
1080 "pCurBlock=%p: pChunk=%p, expected %p", pCurBlock, pCurBlock->pChunk, pCurChunk));
1081 AssertReturn( pCurBlock->cbUser >= VBGL_PH_SMALLEST_ALLOC_SIZE
1082 && pCurBlock->cbUser <= VBGL_PH_LARGEST_ALLOC_SIZE
1083 && RT_ALIGN_32(pCurBlock->cbUser, VBGL_PH_ALLOC_ALIGN) == pCurBlock->cbUser,
1084 RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_3,
1085 "pCurBlock=%p: cbUser=%#x", pCurBlock, pCurBlock->cbUser));
1086 AssertReturn(pCurBlock == pCurBlockListEntry,
1087 RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_4,
1088 "pCurChunk=%p: pCurBlock=%p, pCurBlockListEntry=%p\n",
1089 pCurChunk, pCurBlock, pCurBlockListEntry));
1090 AssertReturn(pCurBlock->pPrev == pPrevBlockListEntry,
1091 RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_5,
1092 "pCurChunk=%p: pCurBlock->pPrev=%p, pPrevBlockListEntry=%p\n",
1093 pCurChunk, pCurBlock->pPrev, pPrevBlockListEntry));
1094
1095 acBlocks[pCurBlock->fAllocated] += 1;
1096
1097 /* advance */
1098 pPrevBlockListEntry = pCurBlock;
1099 pCurBlockListEntry = pCurBlock->pNext;
1100 pCurBlock = (const VBGLPHYSHEAPBLOCK *)((uintptr_t)(pCurBlock + 1) + pCurBlock->cbUser);
1101 }
1102 AssertReturn((uintptr_t)pCurBlock == uEnd,
1103 RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_4,
1104 "pCurBlock=%p uEnd=%p", pCurBlock, uEnd));
1105
1106 acTotalBlocks[1] += acBlocks[1];
1107 AssertReturn(acBlocks[0] + acBlocks[1] == (uint32_t)pCurChunk->cBlocks,
1108 RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_4,
1109 "pCurChunk=%p: cBlocks=%u, expected %u",
1110 pCurChunk, pCurChunk->cBlocks, acBlocks[0] + acBlocks[1]));
1111
1112 acTotalBlocks[0] += acBlocks[0];
1113 AssertReturn(acBlocks[0] == (uint32_t)pCurChunk->cFreeBlocks,
1114 RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_5,
1115 "pCurChunk=%p: cFreeBlocks=%u, expected %u",
1116 pCurChunk, pCurChunk->cFreeBlocks, acBlocks[0]));
1117
1118 pPrevChunk = pCurChunk;
1119 }
1120
1121 AssertReturn(acTotalBlocks[0] == (uint32_t)g_vbgldata.cFreeBlocks,
1122 RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR,
1123 "g_vbgldata.cFreeBlocks=%u, expected %u", g_vbgldata.cFreeBlocks, acTotalBlocks[0]));
1124 AssertReturn(acTotalBlocks[0] + acTotalBlocks[1] == (uint32_t)g_vbgldata.cBlocks,
1125 RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR,
1126 "g_vbgldata.cBlocks=%u, expected %u", g_vbgldata.cBlocks, acTotalBlocks[0] + acTotalBlocks[1]));
1127
1128 /*
1129 * Check that the free list contains the same number of blocks as we
1130 * encountered during the above scan.
1131 */
1132 {
1133 unsigned cFreeListBlocks = 0;
1134 for (const VBGLPHYSHEAPFREEBLOCK *pCurBlock = g_vbgldata.pFreeHead, *pPrevBlock = NULL;
1135 pCurBlock;
1136 pCurBlock = pCurBlock->pNextFree)
1137 {
1138 AssertReturn(pCurBlock->Core.u32Signature == VBGL_PH_BLOCKSIGNATURE,
1139 RTErrInfoSetF(pErrInfo, VERR_INVALID_MAGIC,
1140 "pCurBlock=%p/free: magic=%#x", pCurBlock, pCurBlock->Core.u32Signature));
1141 AssertReturn(pCurBlock->pPrevFree == pPrevBlock,
1142 RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_2,
1143 "pCurBlock=%p/free: pPrev=%p, expected %p", pCurBlock, pCurBlock->pPrevFree, pPrevBlock));
1144 AssertReturn(pCurBlock->Core.pChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE,
1145 RTErrInfoSetF(pErrInfo, VERR_INVALID_MAGIC, "pCurBlock=%p/free: chunk (%p) magic=%#x",
1146 pCurBlock, pCurBlock->Core.pChunk, pCurBlock->Core.pChunk->u32Signature));
1147 cFreeListBlocks++;
1148 pPrevBlock = pCurBlock;
1149 }
1150
1151 AssertReturn(cFreeListBlocks == acTotalBlocks[0],
1152 RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_3,
1153 "Found %u in free list, expected %u", cFreeListBlocks, acTotalBlocks[0]));
1154 }
1155 return VINF_SUCCESS;
1156}
1157
1158
1159/**
1160 * Performs a heap check.
1161 *
1162 * @returns VINF_SUCCESS on success, error status on failure.
1163 * @param pErrInfo Where to return more error details, optional.
1164 */
1165DECLVBGL(int) VbglR0PhysHeapCheck(PRTERRINFO pErrInfo)
1166{
1167 int rc = RTSemFastMutexRequest(g_vbgldata.hMtxHeap);
1168 AssertRCReturn(rc, 0);
1169
1170 rc = vbglR0PhysHeapCheckLocked(pErrInfo);
1171
1172 RTSemFastMutexRelease(g_vbgldata.hMtxHeap);
1173 return rc;
1174}
1175
1176#endif /* IN_TESTCASE */
1177
1178DECLR0VBGL(int) VbglR0PhysHeapInit(void)
1179{
1180 g_vbgldata.hMtxHeap = NIL_RTSEMFASTMUTEX;
1181
1182 /* Allocate the first chunk of the heap. */
1183 VBGLPHYSHEAPFREEBLOCK *pBlock = vbglPhysHeapChunkAlloc(0);
1184 if (pBlock)
1185 return RTSemFastMutexCreate(&g_vbgldata.hMtxHeap);
1186 return VERR_NO_CONT_MEMORY;
1187}
1188
1189DECLR0VBGL(void) VbglR0PhysHeapTerminate(void)
1190{
1191 while (g_vbgldata.pChunkHead)
1192 vbglPhysHeapChunkDelete(g_vbgldata.pChunkHead);
1193
1194 RTSemFastMutexDestroy(g_vbgldata.hMtxHeap);
1195 g_vbgldata.hMtxHeap = NIL_RTSEMFASTMUTEX;
1196}
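/* Minimal lifecycle sketch, assuming a guest driver context: VbglR0PhysHeapInit()
   is called once at driver load (allocating the first chunk and creating the fast
   mutex used above), and VbglR0PhysHeapTerminate() undoes both at unload. */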
1197