VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/lib/VBoxGuestR0LibPhysHeap.cpp@100360

Last change on this file since 100360 was 100360, checked in by vboxsync, 21 months ago

Additions/VBoxGuest/VBoxGuestR0LibPhysHeap: Make use of the RTR0MemObjContAlloc() API and support allocating memory above 4GiB, bugref:10457

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 43.0 KB
 
1/* $Id: VBoxGuestR0LibPhysHeap.cpp 100360 2023-07-04 07:09:24Z vboxsync $ */
2/** @file
3 * VBoxGuestLibR0 - Physical memory heap.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * Permission is hereby granted, free of charge, to any person
10 * obtaining a copy of this software and associated documentation
11 * files (the "Software"), to deal in the Software without
12 * restriction, including without limitation the rights to use,
13 * copy, modify, merge, publish, distribute, sublicense, and/or sell
14 * copies of the Software, and to permit persons to whom the
15 * Software is furnished to do so, subject to the following
16 * conditions:
17 *
18 * The above copyright notice and this permission notice shall be
19 * included in all copies or substantial portions of the Software.
20 *
21 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
23 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
24 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
25 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
26 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
27 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
28 * OTHER DEALINGS IN THE SOFTWARE.
29 */
30
31/** @page pg_vbglr0_phys_heap VBoxGuestLibR0 - Physical memory heap.
32 *
33 * Traditional heap implementation keeping all blocks in an ordered list and
34 * keeping free blocks on an additional list via pointers in the user area. This
35 * is similar to @ref grp_rt_heap_simple "RTHeapSimple" and
36 * @ref grp_rt_heap_offset "RTHeapOffset" in IPRT, except that this code handles
37 * multiple chunks and has a physical address associated with each chunk and
38 * block. The alignment is fixed (VBGL_PH_ALLOC_ALIGN).
39 *
40 * When allocating memory, a free block is found that satisfies the request,
41 * extending the heap with another chunk if needed. The block is split if it's
42 * too large, and the tail end is put on the free list.
43 *
44 * When freeing memory, the block being freed is put back on the free list and
45 * we use the block list to check whether it can be merged with adjacent blocks.
46 *
47 * @note The original code managed the blocks in two separate lists for free and
48 * allocated blocks, which had the disadvantage of only allowing merging with
49 * the block after the block being freed. On the plus side, it had the
50 * potential for slightly better locality when examining the free list,
51 * since the next pointer and block size members were closer to one
52 * another.
53 */
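
/*
 * A minimal usage sketch of the public API implemented in this file, to make
 * the description above concrete.  Illustrative only: the physical address
 * limit and the 64-byte request size are arbitrary example values, and error
 * handling is abbreviated.
 *
 * @code
 *     int rc = VbglR0PhysHeapInit(_4G - 1);   // example: keep chunks below 4GiB
 *     if (RT_SUCCESS(rc))
 *     {
 *         void *pvReq = VbglR0PhysHeapAlloc(64);    // 64 bytes of user data
 *         if (pvReq)
 *         {
 *             RTCCPHYS PhysAddr = VbglR0PhysHeapGetPhysAddr(pvReq);
 *             // ... fill in the request and hand PhysAddr to the host ...
 *             VbglR0PhysHeapFree(pvReq);
 *         }
 *         VbglR0PhysHeapTerminate();
 *     }
 * @endcode
 */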
54
55
56/*********************************************************************************************************************************
57* Header Files *
58*********************************************************************************************************************************/
59#include "VBoxGuestR0LibInternal.h"
60
61#include <iprt/assert.h>
62#include <iprt/err.h>
63#include <iprt/mem.h>
64#include <iprt/memobj.h>
65#include <iprt/semaphore.h>
66
67
68/*********************************************************************************************************************************
69* Defined Constants And Macros *
70*********************************************************************************************************************************/
71/** Enables heap dumping. */
72#if defined(DOXYGEN_RUNNING) || 0
73# define VBGL_PH_DUMPHEAP
74#endif
75
76#ifdef VBGL_PH_DUMPHEAP
77# define VBGL_PH_DPRINTF(a) RTAssertMsg2Weak a
78#else
79# define VBGL_PH_DPRINTF(a) do { } while (0)
80#endif
81
82/** Heap chunk signature */
83#define VBGL_PH_CHUNKSIGNATURE UINT32_C(0xADDCCCCC)
84/** Heap chunk allocation unit */
85#define VBGL_PH_CHUNKSIZE (0x10000)
86
87/** Heap block signature */
88#define VBGL_PH_BLOCKSIGNATURE UINT32_C(0xADDBBBBB)
89
90/** The allocation block alignment.
91 *
92 * This cannot be larger than sizeof(VBGLPHYSHEAPBLOCK).
93 */
94#define VBGL_PH_ALLOC_ALIGN (sizeof(void *))
95
96/** Max number of free nodes to search before just using the best fit.
97 *
98 * This is used to limit the free list walking during allocation and just get
99 * on with the job. A low number should reduce the cache thrashing at the
100 * possible cost of heap fragmentation.
101 *
102 * Picked 16 after comparing the tstVbglR0PhysHeap-1 results w/ uRandSeed=42 for
103 * different max values.
104 */
105#define VBGL_PH_MAX_FREE_SEARCH 16
106
107/** Threshold to stop the block search if a free block is at least this much too big.
108 *
109 * May cause more fragmentation (depending on usage pattern), but should speed up
110 * allocation and hopefully reduce cache thrashing.
111 *
112 * Since we merge adjacent free blocks when we can, free blocks should typically
113 * be a lot larger than what's requested. So, it is probably a good idea to
114 * just chop up a large block rather than keep searching for a perfect-ish
115 * match.
116 *
117 * Undefine this to disable this trick.
118 */
119#if defined(DOXYGEN_RUNNING) || 1
120# define VBGL_PH_STOP_SEARCH_AT_EXCESS _4K
121#endif
122
123/** Threshold at which to split out a tail free block when allocating.
124 *
125 * The value gives the amount of user space, i.e. excluding the header.
126 *
127 * Using 32 bytes based on VMMDev.h request sizes. The smallest requests are 24
128 * bytes, i.e. only the header, of which there are at least 4. There are at least 10 with
129 * size 28 bytes and at least 11 with size 32 bytes. So, 32 bytes would fit
130 * some 25 requests out of about 60, which is reasonable.
131 */
132#define VBGL_PH_MIN_SPLIT_FREE_BLOCK 32
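/* A worked example of the split threshold (a sketch assuming a 64-bit build,
 * where sizeof(VBGLPHYSHEAPBLOCK) is 32 bytes; the numbers are illustrative):
 * for a cb = 64 byte request, VbglR0PhysHeapAlloc() only splits a free block
 * whose cbUser is at least 2 * 32 + 32 + 64 = 160 bytes.  A 200 byte free
 * block is therefore split: the caller gets its 64 bytes and the new tail
 * free block keeps 200 - 64 - 32 = 104 bytes of user space. */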
133
134
135/** The smallest amount of user data that can be allocated.
136 *
137 * This is to ensure that the block can be converted into a
138 * VBGLPHYSHEAPFREEBLOCK structure when freed. This must be smaller than or
139 * equal to VBGL_PH_MIN_SPLIT_FREE_BLOCK.
140 */
141#define VBGL_PH_SMALLEST_ALLOC_SIZE 16
142
143/** The maximum allocation request size. */
144#define VBGL_PH_LARGEST_ALLOC_SIZE RT_ALIGN_32( _128M \
145 - sizeof(VBGLPHYSHEAPBLOCK) \
146 - sizeof(VBGLPHYSHEAPCHUNK) \
147 - VBGL_PH_ALLOC_ALIGN, \
148 VBGL_PH_ALLOC_ALIGN)
149
150/**
151 * Whether to use the RTR0MemObjAllocCont API or RTMemContAlloc for
152 * allocating chunks.
153 *
154 * This can be enabled on hosts where RTMemContAlloc is more complicated than
155 * RTR0MemObjAllocCont. This can also be done if we wish to save code space, as
156 * the latter is typically always dragged into the link on guests where the
157 * linker cannot eliminate functions within objects. The only drawback is that
158 * RTR0MemObjAllocCont requires another heap allocation for the handle.
159 *
160 * Update: Just enable it everywhere so we can more easily use memory above 4G.
161 */
162#define VBGL_PH_USE_MEMOBJ
163
164
165/*********************************************************************************************************************************
166* Structures and Typedefs *
167*********************************************************************************************************************************/
168/**
169 * A heap block (within a chunk).
170 *
171 * This is used to track a part of a heap chunk that's either free or
172 * allocated. The VBGLPHYSHEAPBLOCK::fAllocated member indicates which it is.
173 */
174struct VBGLPHYSHEAPBLOCK
175{
176 /** Magic value (VBGL_PH_BLOCKSIGNATURE). */
177 uint32_t u32Signature;
178
179 /** Size of user data in the block. Does not include this block header. */
180 uint32_t cbUser : 31;
181 /** The top bit indicates whether it's allocated or free. */
182 uint32_t fAllocated : 1;
183
184 /** Pointer to the next block on the list. */
185 VBGLPHYSHEAPBLOCK *pNext;
186 /** Pointer to the previous block on the list. */
187 VBGLPHYSHEAPBLOCK *pPrev;
188 /** Pointer back to the chunk. */
189 VBGLPHYSHEAPCHUNK *pChunk;
190};
191
192/**
193 * A free block.
194 */
195struct VBGLPHYSHEAPFREEBLOCK
196{
197 /** Core block data. */
198 VBGLPHYSHEAPBLOCK Core;
199 /** Pointer to the next free list entry. */
200 VBGLPHYSHEAPFREEBLOCK *pNextFree;
201 /** Pointer to the previous free list entry. */
202 VBGLPHYSHEAPFREEBLOCK *pPrevFree;
203};
204AssertCompile(VBGL_PH_SMALLEST_ALLOC_SIZE >= sizeof(VBGLPHYSHEAPFREEBLOCK) - sizeof(VBGLPHYSHEAPBLOCK));
205AssertCompile(VBGL_PH_MIN_SPLIT_FREE_BLOCK >= sizeof(VBGLPHYSHEAPFREEBLOCK) - sizeof(VBGLPHYSHEAPBLOCK));
206AssertCompile(VBGL_PH_MIN_SPLIT_FREE_BLOCK >= VBGL_PH_SMALLEST_ALLOC_SIZE);
207
208/**
209 * A chunk of memory used by the heap for sub-allocations.
210 *
211 * There is a list of these.
212 */
213struct VBGLPHYSHEAPCHUNK
214{
215 /** Magic value (VBGL_PH_CHUNKSIGNATURE). */
216 uint32_t u32Signature;
217 /** Size of the chunk. Includes the chunk header. */
218 uint32_t cbChunk;
219 /** Number of blocks of any kind. */
220 int32_t cBlocks;
221 /** Number of free blocks. */
222 int32_t cFreeBlocks;
223
224 /** Physical address of the chunk (contiguous). */
225 RTCCPHYS physAddr;
226
227 /** Pointer to the next chunk. */
228 VBGLPHYSHEAPCHUNK *pNext;
229 /** Pointer to the previous chunk. */
230 VBGLPHYSHEAPCHUNK *pPrev;
231
232#if defined(VBGL_PH_USE_MEMOBJ)
233 /** The allocation handle. */
234 RTR0MEMOBJ hMemObj;
235#endif
236#if ARCH_BITS == 64
237 /** Pad the size up to 64 bytes. */
238# ifdef VBGL_PH_USE_MEMOBJ
239 uintptr_t auPadding2[2];
240# else
241 uintptr_t auPadding2[3];
242# endif
243#endif
244};
245#if ARCH_BITS == 64
246AssertCompileSize(VBGLPHYSHEAPCHUNK, 64);
247#elif ARCH_BITS == 32
248AssertCompileSize(VBGLPHYSHEAPCHUNK, 32);
249#else
250# error "Unknown architecture!"
251#endif
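/* An illustrative sketch of how a chunk is laid out in memory, pieced together
 * from vbglPhysHeapChunkAlloc() and vbglPhysHeapChunkDelete() below; the header
 * sizes assume a 64-bit build:
 *
 *     pChunk                                               pChunk + cbChunk
 *     |                                                                   |
 *     v                                                                   v
 *     +-------------------+------------+-----------+------------+--------+
 *     | VBGLPHYSHEAPCHUNK | BLOCK hdr  | user data | BLOCK hdr  |  ...   |
 *     | (64 bytes)        | (32 bytes) | (cbUser)  | (32 bytes) |        |
 *     +-------------------+------------+-----------+------------+--------+
 *
 * The first block header starts right after the chunk header (pChunk + 1) and
 * blocks follow each other back to back, the next header sitting at
 * (uintptr_t)(pBlock + 1) + pBlock->cbUser.  The physical address of a user
 * pointer pv is pChunk->physAddr + ((uintptr_t)pv - (uintptr_t)pChunk), see
 * VbglR0PhysHeapGetPhysAddr(). */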
252
253
254/**
255 * Debug function that dumps the heap.
256 */
257#ifndef VBGL_PH_DUMPHEAP
258# define dumpheap(pszWhere) do { } while (0)
259#else
260static void dumpheap(const char *pszWhere)
261{
262 VBGL_PH_DPRINTF(("VBGL_PH dump at '%s'\n", pszWhere));
263
264 VBGL_PH_DPRINTF(("Chunks:\n"));
265 for (VBGLPHYSHEAPCHUNK *pChunk = g_vbgldata.pChunkHead; pChunk; pChunk = pChunk->pNext)
266 VBGL_PH_DPRINTF(("%p: pNext = %p, pPrev = %p, sign = %08X, size = %8d, cBlocks = %8d, cFreeBlocks=%8d, phys = %08X\n",
267 pChunk, pChunk->pNext, pChunk->pPrev, pChunk->u32Signature, pChunk->cbChunk,
268 pChunk->cBlocks, pChunk->cFreeBlocks, pChunk->physAddr));
269
270 VBGL_PH_DPRINTF(("Allocated blocks:\n"));
271 for (VBGLPHYSHEAPBLOCK *pBlock = g_vbgldata.pBlockHead; pBlock; pBlock = pBlock->pNext)
272 VBGL_PH_DPRINTF(("%p: pNext = %p, pPrev = %p, size = %05x, sign = %08X, %s, pChunk = %p\n",
273 pBlock, pBlock->pNext, pBlock->pPrev, pBlock->cbUser,
274 pBlock->u32Signature, pBlock->fAllocated ? "allocated" : " free", pBlock->pChunk));
275
276 VBGL_PH_DPRINTF(("Free blocks:\n"));
277 for (VBGLPHYSHEAPFREEBLOCK *pBlock = g_vbgldata.pFreeHead; pBlock; pBlock = pBlock->pNextFree)
278 VBGL_PH_DPRINTF(("%p: pNextFree = %p, pPrevFree = %p, size = %05x, sign = %08X, pChunk = %p%s\n",
279 pBlock, pBlock->pNextFree, pBlock->pPrevFree, pBlock->Core.cbUser,
280 pBlock->Core.u32Signature, pBlock->Core.pChunk,
281 !pBlock->Core.fAllocated ? "" : " !!allocated-block-on-freelist!!"));
282
283 VBGL_PH_DPRINTF(("VBGL_PH dump at '%s' done\n", pszWhere));
284}
285#endif
286
287
288/**
289 * Initialize a free block
290 */
291static void vbglPhysHeapInitFreeBlock(VBGLPHYSHEAPFREEBLOCK *pBlock, VBGLPHYSHEAPCHUNK *pChunk, uint32_t cbUser)
292{
293 Assert(pBlock != NULL);
294 Assert(pChunk != NULL);
295
296 pBlock->Core.u32Signature = VBGL_PH_BLOCKSIGNATURE;
297 pBlock->Core.cbUser = cbUser;
298 pBlock->Core.fAllocated = false;
299 pBlock->Core.pNext = NULL;
300 pBlock->Core.pPrev = NULL;
301 pBlock->Core.pChunk = pChunk;
302 pBlock->pNextFree = NULL;
303 pBlock->pPrevFree = NULL;
304}
305
306
307/**
308 * Updates block statistics when a block is added.
309 */
310DECLINLINE(void) vbglPhysHeapStatsBlockAdded(VBGLPHYSHEAPBLOCK *pBlock)
311{
312 g_vbgldata.cBlocks += 1;
313 pBlock->pChunk->cBlocks += 1;
314 AssertMsg((uint32_t)pBlock->pChunk->cBlocks <= pBlock->pChunk->cbChunk / sizeof(VBGLPHYSHEAPFREEBLOCK),
315 ("pChunk=%p: cbChunk=%#x cBlocks=%d\n", pBlock->pChunk, pBlock->pChunk->cbChunk, pBlock->pChunk->cBlocks));
316}
317
318
319/**
320 * Links @a pBlock onto the head of block list.
321 *
322 * This also updates the per-chunk block counts.
323 */
324static void vbglPhysHeapInsertBlock(VBGLPHYSHEAPBLOCK *pBlock)
325{
326 AssertMsg(pBlock->pNext == NULL, ("pBlock->pNext = %p\n", pBlock->pNext));
327 AssertMsg(pBlock->pPrev == NULL, ("pBlock->pPrev = %p\n", pBlock->pPrev));
328
329 /* inserting to head of list */
330 VBGLPHYSHEAPBLOCK *pOldHead = g_vbgldata.pBlockHead;
331
332 pBlock->pNext = pOldHead;
333 pBlock->pPrev = NULL;
334
335 if (pOldHead)
336 pOldHead->pPrev = pBlock;
337 g_vbgldata.pBlockHead = pBlock;
338
339 /* Update the stats: */
340 vbglPhysHeapStatsBlockAdded(pBlock);
341}
342
343
344/**
345 * Links @a pBlock onto the block list after @a pInsertAfter.
346 *
347 * This also updates the per-chunk block counts.
348 */
349static void vbglPhysHeapInsertBlockAfter(VBGLPHYSHEAPBLOCK *pBlock, VBGLPHYSHEAPBLOCK *pInsertAfter)
350{
351 AssertMsg(pBlock->pNext == NULL, ("pBlock->pNext = %p\n", pBlock->pNext));
352 AssertMsg(pBlock->pPrev == NULL, ("pBlock->pPrev = %p\n", pBlock->pPrev));
353
354 pBlock->pNext = pInsertAfter->pNext;
355 pBlock->pPrev = pInsertAfter;
356
357 if (pInsertAfter->pNext)
358 pInsertAfter->pNext->pPrev = pBlock;
359
360 pInsertAfter->pNext = pBlock;
361
362 /* Update the stats: */
363 vbglPhysHeapStatsBlockAdded(pBlock);
364}
365
366
367/**
368 * Unlinks @a pBlock from the block list.
369 *
370 * This also updates the per-chunk block counts.
371 */
372static void vbglPhysHeapUnlinkBlock(VBGLPHYSHEAPBLOCK *pBlock)
373{
374 VBGLPHYSHEAPBLOCK *pOtherBlock = pBlock->pNext;
375 if (pOtherBlock)
376 pOtherBlock->pPrev = pBlock->pPrev;
377 /* else: this is the tail of the list, but we do not maintain a tail pointer, so there is nothing to do. */
378
379 pOtherBlock = pBlock->pPrev;
380 if (pOtherBlock)
381 pOtherBlock->pNext = pBlock->pNext;
382 else
383 {
384 Assert(g_vbgldata.pBlockHead == pBlock);
385 g_vbgldata.pBlockHead = pBlock->pNext;
386 }
387
388 pBlock->pNext = NULL;
389 pBlock->pPrev = NULL;
390
391 /* Update the stats: */
392 g_vbgldata.cBlocks -= 1;
393 pBlock->pChunk->cBlocks -= 1;
394 AssertMsg(pBlock->pChunk->cBlocks >= 0,
395 ("pChunk=%p: cbChunk=%#x cBlocks=%d\n", pBlock->pChunk, pBlock->pChunk->cbChunk, pBlock->pChunk->cBlocks));
396 Assert(g_vbgldata.cBlocks >= 0);
397}
398
399
400
401/**
402 * Updates statistics after adding a free block.
403 */
404DECLINLINE(void) vbglPhysHeapStatsFreeBlockAdded(VBGLPHYSHEAPFREEBLOCK *pBlock)
405{
406 g_vbgldata.cFreeBlocks += 1;
407 pBlock->Core.pChunk->cFreeBlocks += 1;
408}
409
410
411/**
412 * Links @a pBlock onto head of the free chain.
413 *
414 * This is used during block freeing and when adding a new chunk.
415 *
416 * This also updates the per-chunk block counts.
417 */
418static void vbglPhysHeapInsertFreeBlock(VBGLPHYSHEAPFREEBLOCK *pBlock)
419{
420 Assert(!pBlock->Core.fAllocated);
421 AssertMsg(pBlock->pNextFree == NULL, ("pBlock->pNextFree = %p\n", pBlock->pNextFree));
422 AssertMsg(pBlock->pPrevFree == NULL, ("pBlock->pPrevFree = %p\n", pBlock->pPrevFree));
423
424 /* inserting to head of list */
425 VBGLPHYSHEAPFREEBLOCK *pOldHead = g_vbgldata.pFreeHead;
426
427 pBlock->pNextFree = pOldHead;
428 pBlock->pPrevFree = NULL;
429
430 if (pOldHead)
431 pOldHead->pPrevFree = pBlock;
432 g_vbgldata.pFreeHead = pBlock;
433
434 /* Update the stats: */
435 vbglPhysHeapStatsFreeBlockAdded(pBlock);
436}
437
438
439/**
440 * Links @a pBlock after @a pInsertAfter.
441 *
442 * This is used when splitting a free block during allocation to preserve the
443 * place in the free list.
444 *
445 * This also updates the per-chunk block counts.
446 */
447static void vbglPhysHeapInsertFreeBlockAfter(VBGLPHYSHEAPFREEBLOCK *pBlock, VBGLPHYSHEAPFREEBLOCK *pInsertAfter)
448{
449 Assert(!pBlock->Core.fAllocated);
450 AssertMsg(pBlock->pNextFree == NULL, ("pBlock->pNextFree = %p\n", pBlock->pNextFree));
451 AssertMsg(pBlock->pPrevFree == NULL, ("pBlock->pPrevFree = %p\n", pBlock->pPrevFree));
452
453 /* inserting after the given node */
454 pBlock->pNextFree = pInsertAfter->pNextFree;
455 pBlock->pPrevFree = pInsertAfter;
456
457 if (pInsertAfter->pNextFree)
458 pInsertAfter->pNextFree->pPrevFree = pBlock;
459
460 pInsertAfter->pNextFree = pBlock;
461
462 /* Update the stats: */
463 vbglPhysHeapStatsFreeBlockAdded(pBlock);
464}
465
466
467/**
468 * Unlinks @a pBlock from the free list.
469 *
470 * This also updates the per-chunk block counts.
471 */
472static void vbglPhysHeapUnlinkFreeBlock(VBGLPHYSHEAPFREEBLOCK *pBlock)
473{
474 Assert(!pBlock->Core.fAllocated);
475
476 VBGLPHYSHEAPFREEBLOCK *pOtherBlock = pBlock->pNextFree;
477 if (pOtherBlock)
478 pOtherBlock->pPrevFree = pBlock->pPrevFree;
479 /* else: this is the tail of the list, but we do not maintain a tail pointer, so there is nothing to do. */
480
481 pOtherBlock = pBlock->pPrevFree;
482 if (pOtherBlock)
483 pOtherBlock->pNextFree = pBlock->pNextFree;
484 else
485 {
486 Assert(g_vbgldata.pFreeHead == pBlock);
487 g_vbgldata.pFreeHead = pBlock->pNextFree;
488 }
489
490 pBlock->pNextFree = NULL;
491 pBlock->pPrevFree = NULL;
492
493 /* Update the stats: */
494 g_vbgldata.cFreeBlocks -= 1;
495 pBlock->Core.pChunk->cFreeBlocks -= 1;
496 AssertMsg(pBlock->Core.pChunk->cFreeBlocks >= 0,
497 ("pChunk=%p: cbChunk=%#x cFreeBlocks=%d\n",
498 pBlock->Core.pChunk, pBlock->Core.pChunk->cbChunk, pBlock->Core.pChunk->cFreeBlocks));
499 Assert(g_vbgldata.cFreeBlocks >= 0);
500}
501
502
503/**
504 * Allocate another chunk and add it to the heap.
505 *
506 * @returns Pointer to the free block in the new chunk on success, NULL on
507 * allocation failure.
508 * @param cbMinBlock The size of the user block we need this chunk for.
509 */
510static VBGLPHYSHEAPFREEBLOCK *vbglPhysHeapChunkAlloc(uint32_t cbMinBlock)
511{
512 RTCCPHYS PhysAddr = NIL_RTHCPHYS;
513 VBGLPHYSHEAPCHUNK *pChunk;
514 uint32_t cbChunk;
515#ifdef VBGL_PH_USE_MEMOBJ
516 RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ;
517 int rc;
518#endif
519
520 VBGL_PH_DPRINTF(("Allocating new chunk for %#x byte allocation\n", cbMinBlock));
521 AssertReturn(cbMinBlock <= VBGL_PH_LARGEST_ALLOC_SIZE, NULL); /* paranoia */
522
523 /*
524 * Compute the size of the new chunk, rounding up to the next multiple of
525 * the chunk size, which must be a power of 2.
526 *
527 * Note! Using VBGLPHYSHEAPFREEBLOCK here means the minimum block size is
528 * 8 or 16 bytes too high, but safer this way since cbMinBlock is
529 * zero during the init code call.
530 */
531 Assert(RT_IS_POWER_OF_TWO(VBGL_PH_CHUNKSIZE));
532 cbChunk = cbMinBlock + sizeof(VBGLPHYSHEAPCHUNK) + sizeof(VBGLPHYSHEAPFREEBLOCK);
533 cbChunk = RT_ALIGN_32(cbChunk, VBGL_PH_CHUNKSIZE);
534
535#ifdef VBGL_PH_USE_MEMOBJ
536 rc = RTR0MemObjAllocCont(&hMemObj, cbChunk, g_vbgldata.HCPhysMax, false /*fExecutable*/);
537 pChunk = (VBGLPHYSHEAPCHUNK *)(RT_SUCCESS(rc) ? RTR0MemObjAddress(hMemObj) : NULL);
538 PhysAddr = RT_SUCCESS(rc) ? (RTCCPHYS)RTR0MemObjGetPagePhysAddr(hMemObj, 0 /*iPage*/) : NIL_RTCCPHYS;
539#else
540 pChunk = (VBGLPHYSHEAPCHUNK *)RTMemContAlloc(&PhysAddr, cbChunk);
541#endif
542 if (!pChunk)
543 {
544 /* If the allocation fails, reduce the chunk size and try again. */
545 uint32_t cbMinChunk = RT_MAX(cbMinBlock, PAGE_SIZE / 2) + sizeof(VBGLPHYSHEAPCHUNK) + sizeof(VBGLPHYSHEAPFREEBLOCK);
546 cbMinChunk = RT_ALIGN_32(cbMinChunk, PAGE_SIZE);
547 if (cbChunk > cbMinChunk)
548 do
549 {
550 cbChunk >>= 2;
551 cbChunk = RT_ALIGN_32(cbChunk, PAGE_SIZE);
552#ifdef VBGL_PH_USE_MEMOBJ
553 rc = RTR0MemObjAllocCont(&hMemObj, cbChunk, g_vbgldata.HCPhysMax, false /*fExecutable*/);
554 pChunk = (VBGLPHYSHEAPCHUNK *)(RT_SUCCESS(rc) ? RTR0MemObjAddress(hMemObj) : NULL);
555 PhysAddr = RT_SUCCESS(rc) ? (RTCCPHYS)RTR0MemObjGetPagePhysAddr(hMemObj, 0 /*iPage*/) : NIL_RTCCPHYS;
556#else
557 pChunk = (VBGLPHYSHEAPCHUNK *)RTMemContAlloc(&PhysAddr, cbChunk);
558#endif
559 } while (!pChunk && cbChunk > cbMinChunk);
560 }
561 if (pChunk)
562 {
563 VBGLPHYSHEAPCHUNK *pOldHeadChunk;
564 VBGLPHYSHEAPFREEBLOCK *pBlock;
565 AssertRelease( g_vbgldata.HCPhysMax == NIL_RTHCPHYS
566 || (PhysAddr < _4G && PhysAddr + cbChunk <= _4G));
567
568 /*
569 * Init the new chunk.
570 */
571 pChunk->u32Signature = VBGL_PH_CHUNKSIGNATURE;
572 pChunk->cbChunk = cbChunk;
573 pChunk->physAddr = PhysAddr;
574 pChunk->cBlocks = 0;
575 pChunk->cFreeBlocks = 0;
576 pChunk->pNext = NULL;
577 pChunk->pPrev = NULL;
578#ifdef VBGL_PH_USE_MEMOBJ
579 pChunk->hMemObj = hMemObj;
580#endif
581
582 /* Initialize the padding too: */
583#if ARCH_BITS == 64
584 pChunk->auPadding2[0] = UINT64_C(0xADDCAAA3ADDCAAA2);
585 pChunk->auPadding2[1] = UINT64_C(0xADDCAAA5ADDCAAA4);
586# ifndef VBGL_PH_USE_MEMOBJ
587 pChunk->auPadding2[2] = UINT64_C(0xADDCAAA7ADDCAAA6);
588# endif
589#endif
590
591 /*
592 * Initialize the free block, which now occupies the rest of the chunk.
593 */
594 pBlock = (VBGLPHYSHEAPFREEBLOCK *)(pChunk + 1);
595 vbglPhysHeapInitFreeBlock(pBlock, pChunk, cbChunk - sizeof(VBGLPHYSHEAPCHUNK) - sizeof(VBGLPHYSHEAPBLOCK));
596 vbglPhysHeapInsertBlock(&pBlock->Core);
597 vbglPhysHeapInsertFreeBlock(pBlock);
598
599 /*
600 * Add the chunk to the list.
601 */
602 pOldHeadChunk = g_vbgldata.pChunkHead;
603 pChunk->pNext = pOldHeadChunk;
604 if (pOldHeadChunk)
605 pOldHeadChunk->pPrev = pChunk;
606 g_vbgldata.pChunkHead = pChunk;
607
608 VBGL_PH_DPRINTF(("Allocated chunk %p LB %#x, block %p LB %#x\n", pChunk, cbChunk, pBlock, pBlock->Core.cbUser));
609 return pBlock;
610 }
611 LogRel(("vbglPhysHeapChunkAlloc: failed to alloc %u (%#x) contiguous bytes.\n", cbChunk, cbChunk));
612 return NULL;
613}
614
615
616/**
617 * Deletes a chunk: Unlinking all its blocks and freeing its memory.
618 */
619static void vbglPhysHeapChunkDelete(VBGLPHYSHEAPCHUNK *pChunk)
620{
621 uintptr_t uEnd, uCur;
622 Assert(pChunk != NULL);
623 AssertMsg(pChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE, ("pChunk->u32Signature = %08X\n", pChunk->u32Signature));
624
625 VBGL_PH_DPRINTF(("Deleting chunk %p size %x\n", pChunk, pChunk->cbChunk));
626
627 /*
628 * First scan the chunk and unlink all blocks from the lists.
629 *
630 * Note! We could do this by finding the first and last block list entries
631 * and just drop the whole chain relating to this chunk, rather than
632 * doing it one by one. But doing it one by one is simpler and will
633 * continue to work if the block list ends up in an unsorted state.
634 */
635 uEnd = (uintptr_t)pChunk + pChunk->cbChunk;
636 uCur = (uintptr_t)(pChunk + 1);
637
638 while (uCur < uEnd)
639 {
640 VBGLPHYSHEAPBLOCK *pBlock = (VBGLPHYSHEAPBLOCK *)uCur;
641 Assert(pBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE);
642 Assert(pBlock->pChunk == pChunk);
643
644 uCur += pBlock->cbUser + sizeof(VBGLPHYSHEAPBLOCK);
645 Assert(uCur == (uintptr_t)pBlock->pNext || uCur >= uEnd);
646
647 if (!pBlock->fAllocated)
648 vbglPhysHeapUnlinkFreeBlock((VBGLPHYSHEAPFREEBLOCK *)pBlock);
649 vbglPhysHeapUnlinkBlock(pBlock);
650 }
651
652 AssertMsg(uCur == uEnd, ("uCur = %p, uEnd = %p, pChunk->cbChunk = %08X\n", uCur, uEnd, pChunk->cbChunk));
653
654 /*
655 * Unlink the chunk from the chunk list.
656 */
657 if (pChunk->pNext)
658 pChunk->pNext->pPrev = pChunk->pPrev;
659 /* else: we do not maintain tail pointer. */
660
661 if (pChunk->pPrev)
662 pChunk->pPrev->pNext = pChunk->pNext;
663 else
664 {
665 Assert(g_vbgldata.pChunkHead == pChunk);
666 g_vbgldata.pChunkHead = pChunk->pNext;
667 }
668
669 /*
670 * Finally, free the chunk memory.
671 */
672#ifdef VBGL_PH_USE_MEMOBJ
673 RTR0MemObjFree(pChunk->hMemObj, true /*fFreeMappings*/);
674#else
675 RTMemContFree(pChunk, pChunk->cbChunk);
676#endif
677}
678
679
680DECLR0VBGL(void *) VbglR0PhysHeapAlloc(uint32_t cb)
681{
682 VBGLPHYSHEAPFREEBLOCK *pBlock;
683 VBGLPHYSHEAPFREEBLOCK *pIter;
684 int32_t cLeft;
685#ifdef VBGL_PH_STOP_SEARCH_AT_EXCESS
686 uint32_t cbAlwaysSplit;
687#endif
688 int rc;
689
690 /*
691 * Make sure we don't allocate anything too small to turn into a free node
692 * and align the size to prevent pointer misalignment and whatnot.
693 */
694 cb = RT_MAX(cb, VBGL_PH_SMALLEST_ALLOC_SIZE);
695 cb = RT_ALIGN_32(cb, VBGL_PH_ALLOC_ALIGN);
696 AssertCompile(VBGL_PH_ALLOC_ALIGN <= sizeof(pBlock->Core));
697
698 rc = RTSemFastMutexRequest(g_vbgldata.hMtxHeap);
699 AssertRCReturn(rc, NULL);
700
701 dumpheap("pre alloc");
702
703 /*
704 * Search the free list. We do this in linear fashion as we don't expect
705 * there to be many blocks in the heap.
706 */
707#ifdef VBGL_PH_STOP_SEARCH_AT_EXCESS
708 cbAlwaysSplit = cb + VBGL_PH_STOP_SEARCH_AT_EXCESS;
709#endif
710 cLeft = VBGL_PH_MAX_FREE_SEARCH;
711 pBlock = NULL;
712 if (cb <= PAGE_SIZE / 4 * 3)
713 {
714 /* Smaller than 3/4 page: Prefer a free block that can keep the request within a single page,
715 so HGCM processing in VMMDev can use page locks instead of several reads and writes. */
716 VBGLPHYSHEAPFREEBLOCK *pFallback = NULL;
717 for (pIter = g_vbgldata.pFreeHead; pIter != NULL; pIter = pIter->pNextFree, cLeft--)
718 {
719 AssertBreak(pIter->Core.u32Signature == VBGL_PH_BLOCKSIGNATURE);
720 if (pIter->Core.cbUser >= cb)
721 {
722 if (pIter->Core.cbUser == cb)
723 {
724 if (PAGE_SIZE - ((uintptr_t)(pIter + 1) & PAGE_OFFSET_MASK) >= cb)
725 {
726 pBlock = pIter;
727 break;
728 }
729 pFallback = pIter;
730 }
731 else
732 {
733 if (!pFallback || pIter->Core.cbUser < pFallback->Core.cbUser)
734 pFallback = pIter;
735 if (PAGE_SIZE - ((uintptr_t)(pIter + 1) & PAGE_OFFSET_MASK) >= cb)
736 {
737 if (!pBlock || pIter->Core.cbUser < pBlock->Core.cbUser)
738 pBlock = pIter;
739#ifdef VBGL_PH_STOP_SEARCH_AT_EXCESS
740 else if (pIter->Core.cbUser >= cbAlwaysSplit)
741 {
742 pBlock = pIter;
743 break;
744 }
745#endif
746 }
747 }
748
749 if (cLeft > 0)
750 { /* likely */ }
751 else
752 break;
753 }
754 }
755
756 if (!pBlock)
757 pBlock = pFallback;
758 }
759 else
760 {
761 /* Larger than 3/4 page: Find the closest free list match. */
762 for (pIter = g_vbgldata.pFreeHead; pIter != NULL; pIter = pIter->pNextFree, cLeft--)
763 {
764 AssertBreak(pIter->Core.u32Signature == VBGL_PH_BLOCKSIGNATURE);
765 if (pIter->Core.cbUser >= cb)
766 {
767 if (pIter->Core.cbUser == cb)
768 {
769 /* Exact match - we're done! */
770 pBlock = pIter;
771 break;
772 }
773
774#ifdef VBGL_PH_STOP_SEARCH_AT_EXCESS
775 if (pIter->Core.cbUser >= cbAlwaysSplit)
776 {
777 /* Really big block - no point in continuing the search! */
778 pBlock = pIter;
779 break;
780 }
781#endif
782 /* Looking for the free block with the nearest size. */
783 if (!pBlock || pIter->Core.cbUser < pBlock->Core.cbUser)
784 pBlock = pIter;
785
786 if (cLeft > 0)
787 { /* likely */ }
788 else
789 break;
790 }
791 }
792 }
793
794 if (!pBlock)
795 {
796 /* No suitable free block was found; allocate a new chunk, whose only
797 free block will be returned. */
798 pBlock = vbglPhysHeapChunkAlloc(cb);
799 }
800
801 if (pBlock)
802 {
803 /* We have a free block, either found or allocated. */
804 AssertMsg(pBlock->Core.u32Signature == VBGL_PH_BLOCKSIGNATURE,
805 ("pBlock = %p, pBlock->u32Signature = %08X\n", pBlock, pBlock->Core.u32Signature));
806 AssertMsg(!pBlock->Core.fAllocated, ("pBlock = %p\n", pBlock));
807
808 /*
809 * If the block is too large, split off a free block with the unused space.
810 *
811 * We do this before unlinking the block so we can preserve the location
812 * in the free list.
813 *
814 * Note! We cannot split off and return the tail end here, because that may
815 * violate the single-page preference for requests smaller than 3/4 page.
816 */
817 AssertCompile(VBGL_PH_MIN_SPLIT_FREE_BLOCK >= sizeof(*pBlock) - sizeof(pBlock->Core));
818 if (pBlock->Core.cbUser >= sizeof(VBGLPHYSHEAPBLOCK) * 2 + VBGL_PH_MIN_SPLIT_FREE_BLOCK + cb)
819 {
820 pIter = (VBGLPHYSHEAPFREEBLOCK *)((uintptr_t)(&pBlock->Core + 1) + cb);
821 vbglPhysHeapInitFreeBlock(pIter, pBlock->Core.pChunk, pBlock->Core.cbUser - cb - sizeof(VBGLPHYSHEAPBLOCK));
822
823 pBlock->Core.cbUser = cb;
824
825 /* Insert the new 'pIter' block after the 'pBlock' in the block list
826 and in the free list. */
827 vbglPhysHeapInsertBlockAfter(&pIter->Core, &pBlock->Core);
828 vbglPhysHeapInsertFreeBlockAfter(pIter, pBlock);
829 }
830
831 /*
832 * Unlink the block from the free list and mark it as allocated.
833 */
834 vbglPhysHeapUnlinkFreeBlock(pBlock);
835 pBlock->Core.fAllocated = true;
836
837 dumpheap("post alloc");
838
839 /*
840 * Return success.
841 */
842 rc = RTSemFastMutexRelease(g_vbgldata.hMtxHeap);
843
844 VBGL_PH_DPRINTF(("VbglR0PhysHeapAlloc: returns %p size %x\n", pBlock + 1, pBlock->Core.cbUser));
845 return &pBlock->Core + 1;
846 }
847
848 /*
849 * Return failure.
850 */
851 rc = RTSemFastMutexRelease(g_vbgldata.hMtxHeap);
852 AssertRC(rc);
853
854 VBGL_PH_DPRINTF(("VbglR0PhysHeapAlloc: returns NULL (requested %#x bytes)\n", cb));
855 return NULL;
856}
857
858
859DECLR0VBGL(RTCCPHYS) VbglR0PhysHeapGetPhysAddr(void *pv)
860{
861 /*
862 * Validate the incoming pointer.
863 */
864 if (pv != NULL)
865 {
866 VBGLPHYSHEAPBLOCK *pBlock = (VBGLPHYSHEAPBLOCK *)pv - 1;
867 if ( pBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE
868 && pBlock->fAllocated)
869 {
870 /*
871 * Calculate and return its physical address.
872 */
873 VBGLPHYSHEAPCHUNK *pChunk = pBlock->pChunk;
874 return pChunk->physAddr + (uint32_t)((uintptr_t)pv - (uintptr_t)pChunk);
875 }
876
877 AssertMsgFailed(("Use after free or corrupt pointer variable: pv=%p pBlock=%p: u32Signature=%#x cb=%#x fAllocated=%d\n",
878 pv, pBlock, pBlock->u32Signature, pBlock->cbUser, pBlock->fAllocated));
879 }
880 else
881 AssertMsgFailed(("Unexpected NULL pointer\n"));
882 return 0;
883}
884
885
886DECLR0VBGL(void) VbglR0PhysHeapFree(void *pv)
887{
888 if (pv != NULL)
889 {
890 VBGLPHYSHEAPFREEBLOCK *pBlock;
891
892 int rc = RTSemFastMutexRequest(g_vbgldata.hMtxHeap);
893 AssertRCReturnVoid(rc);
894
895 dumpheap("pre free");
896
897 /*
898 * Validate the block header.
899 */
900 pBlock = (VBGLPHYSHEAPFREEBLOCK *)((VBGLPHYSHEAPBLOCK *)pv - 1);
901 if ( pBlock->Core.u32Signature == VBGL_PH_BLOCKSIGNATURE
902 && pBlock->Core.fAllocated
903 && pBlock->Core.cbUser >= VBGL_PH_SMALLEST_ALLOC_SIZE)
904 {
905 VBGLPHYSHEAPCHUNK *pChunk;
906 VBGLPHYSHEAPBLOCK *pNeighbour;
907
908 /*
909 * Change the block status to freed.
910 */
911 VBGL_PH_DPRINTF(("VbglR0PhysHeapFree: %p size %#x\n", pv, pBlock->Core.cbUser));
912
913 pBlock->Core.fAllocated = false;
914 pBlock->pNextFree = pBlock->pPrevFree = NULL;
915 vbglPhysHeapInsertFreeBlock(pBlock);
916
917 dumpheap("post insert");
918
919 /*
920 * Check if the block after this one is also free and we can merge it into this one.
921 */
922 pChunk = pBlock->Core.pChunk;
923
924 pNeighbour = pBlock->Core.pNext;
925 if ( pNeighbour
926 && !pNeighbour->fAllocated
927 && pNeighbour->pChunk == pChunk)
928 {
929 Assert((uintptr_t)pBlock + sizeof(pBlock->Core) + pBlock->Core.cbUser == (uintptr_t)pNeighbour);
930
931 /* Adjust size of current memory block */
932 pBlock->Core.cbUser += pNeighbour->cbUser + sizeof(VBGLPHYSHEAPBLOCK);
933
934 /* Unlink the following node and invalidate it. */
935 vbglPhysHeapUnlinkFreeBlock((VBGLPHYSHEAPFREEBLOCK *)pNeighbour);
936 vbglPhysHeapUnlinkBlock(pNeighbour);
937
938 pNeighbour->u32Signature = ~VBGL_PH_BLOCKSIGNATURE;
939 pNeighbour->cbUser = UINT32_MAX / 4;
940
941 dumpheap("post merge after");
942 }
943
944 /*
945 * Same check for the block before us. This invalidates pBlock.
946 */
947 pNeighbour = pBlock->Core.pPrev;
948 if ( pNeighbour
949 && !pNeighbour->fAllocated
950 && pNeighbour->pChunk == pChunk)
951 {
952 Assert((uintptr_t)pNeighbour + sizeof(*pNeighbour) + pNeighbour->cbUser == (uintptr_t)pBlock);
953
954 /* Adjust size of the block before us */
955 pNeighbour->cbUser += pBlock->Core.cbUser + sizeof(VBGLPHYSHEAPBLOCK);
956
957 /* Unlink this node and invalidate it. */
958 vbglPhysHeapUnlinkFreeBlock(pBlock);
959 vbglPhysHeapUnlinkBlock(&pBlock->Core);
960
961 pBlock->Core.u32Signature = ~VBGL_PH_BLOCKSIGNATURE;
962 pBlock->Core.cbUser = UINT32_MAX / 8;
963
964 pBlock = NULL; /* invalid */
965
966 dumpheap("post merge before");
967 }
968
969 /*
970 * If this chunk is now completely unused, delete it if there are
971 * more completely free ones.
972 */
973 if ( pChunk->cFreeBlocks == pChunk->cBlocks
974 && (pChunk->pPrev || pChunk->pNext))
975 {
976 VBGLPHYSHEAPCHUNK *pCurChunk;
977 uint32_t cUnusedChunks = 0;
978 for (pCurChunk = g_vbgldata.pChunkHead; pCurChunk; pCurChunk = pCurChunk->pNext)
979 {
980 AssertBreak(pCurChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE);
981 if (pCurChunk->cFreeBlocks == pCurChunk->cBlocks)
982 {
983 cUnusedChunks++;
984 if (cUnusedChunks > 1)
985 {
986 /* Delete the current chunk. This also unlinks all free blocks
987 * remaining in the chunk from the free list, so pBlock
988 * will be invalid after this.
989 */
990 vbglPhysHeapChunkDelete(pChunk);
991 pBlock = NULL; /* invalid */
992 pChunk = NULL;
993 pNeighbour = NULL;
994 break;
995 }
996 }
997 }
998 }
999
1000 dumpheap("post free");
1001 }
1002 else
1003 AssertMsgFailed(("pBlock: %p: u32Signature=%#x cb=%#x fAllocated=%d - double free?\n",
1004 pBlock, pBlock->Core.u32Signature, pBlock->Core.cbUser, pBlock->Core.fAllocated));
1005
1006 rc = RTSemFastMutexRelease(g_vbgldata.hMtxHeap);
1007 AssertRC(rc);
1008 }
1009}
1010
1011#ifdef IN_TESTCASE /* For the testcase only */
1012
1013/**
1014 * Returns the sum of all free heap blocks.
1015 *
1016 * This is the amount of memory you can theoretically allocate if you do
1017 * allocations exactly matching the free blocks.
1018 *
1019 * @returns The size of the free blocks.
1020 * @returns 0 if the heap was safely detected as being bad.
1021 */
1022DECLVBGL(size_t) VbglR0PhysHeapGetFreeSize(void)
1023{
1024 int rc = RTSemFastMutexRequest(g_vbgldata.hMtxHeap);
1025 AssertRCReturn(rc, 0);
1026
1027 size_t cbTotal = 0;
1028 for (VBGLPHYSHEAPFREEBLOCK *pCurBlock = g_vbgldata.pFreeHead; pCurBlock; pCurBlock = pCurBlock->pNextFree)
1029 {
1030 Assert(pCurBlock->Core.u32Signature == VBGL_PH_BLOCKSIGNATURE);
1031 Assert(!pCurBlock->Core.fAllocated);
1032 cbTotal += pCurBlock->Core.cbUser;
1033 }
1034
1035 RTSemFastMutexRelease(g_vbgldata.hMtxHeap);
1036 return cbTotal;
1037}
1038
1039
1040/**
1041 * Checks the heap, caller responsible for locking.
1042 *
1043 * @returns VINF_SUCCESS if okay, error status if not.
1044 * @param pErrInfo Where to return more error details, optional.
1045 */
1046static int vbglR0PhysHeapCheckLocked(PRTERRINFO pErrInfo)
1047{
1048 /*
1049 * Scan the blocks in each chunk, walking the block list in parallel.
1050 */
1051 const VBGLPHYSHEAPBLOCK *pPrevBlockListEntry = NULL;
1052 const VBGLPHYSHEAPBLOCK *pCurBlockListEntry = g_vbgldata.pBlockHead;
1053 unsigned acTotalBlocks[2] = { 0, 0 };
1054 for (VBGLPHYSHEAPCHUNK *pCurChunk = g_vbgldata.pChunkHead, *pPrevChunk = NULL; pCurChunk; pCurChunk = pCurChunk->pNext)
1055 {
1056 AssertReturn(pCurChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE,
1057 RTErrInfoSetF(pErrInfo, VERR_INVALID_MAGIC, "pCurChunk=%p: magic=%#x", pCurChunk, pCurChunk->u32Signature));
1058 AssertReturn(pCurChunk->pPrev == pPrevChunk,
1059 RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_2,
1060 "pCurChunk=%p: pPrev=%p, expected %p", pCurChunk, pCurChunk->pPrev, pPrevChunk));
1061
1062 const VBGLPHYSHEAPBLOCK *pCurBlock = (const VBGLPHYSHEAPBLOCK *)(pCurChunk + 1);
1063 uintptr_t const uEnd = (uintptr_t)pCurChunk + pCurChunk->cbChunk;
1064 unsigned acBlocks[2] = { 0, 0 };
1065 while ((uintptr_t)pCurBlock < uEnd)
1066 {
1067 AssertReturn(pCurBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE,
1068 RTErrInfoSetF(pErrInfo, VERR_INVALID_MAGIC,
1069 "pCurBlock=%p: magic=%#x", pCurBlock, pCurBlock->u32Signature));
1070 AssertReturn(pCurBlock->pChunk == pCurChunk,
1071 RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_2,
1072 "pCurBlock=%p: pChunk=%p, expected %p", pCurBlock, pCurBlock->pChunk, pCurChunk));
1073 AssertReturn( pCurBlock->cbUser >= VBGL_PH_SMALLEST_ALLOC_SIZE
1074 && pCurBlock->cbUser <= VBGL_PH_LARGEST_ALLOC_SIZE
1075 && RT_ALIGN_32(pCurBlock->cbUser, VBGL_PH_ALLOC_ALIGN) == pCurBlock->cbUser,
1076 RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_3,
1077 "pCurBlock=%p: cbUser=%#x", pCurBlock, pCurBlock->cbUser));
1078 AssertReturn(pCurBlock == pCurBlockListEntry,
1079 RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_4,
1080 "pCurChunk=%p: pCurBlock=%p, pCurBlockListEntry=%p\n",
1081 pCurChunk, pCurBlock, pCurBlockListEntry));
1082 AssertReturn(pCurBlock->pPrev == pPrevBlockListEntry,
1083 RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_5,
1084 "pCurChunk=%p: pCurBlock->pPrev=%p, pPrevBlockListEntry=%p\n",
1085 pCurChunk, pCurBlock->pPrev, pPrevBlockListEntry));
1086
1087 acBlocks[pCurBlock->fAllocated] += 1;
1088
1089 /* advance */
1090 pPrevBlockListEntry = pCurBlock;
1091 pCurBlockListEntry = pCurBlock->pNext;
1092 pCurBlock = (const VBGLPHYSHEAPBLOCK *)((uintptr_t)(pCurBlock + 1) + pCurBlock->cbUser);
1093 }
1094 AssertReturn((uintptr_t)pCurBlock == uEnd,
1095 RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_4,
1096 "pCurBlock=%p uEnd=%p", pCurBlock, uEnd));
1097
1098 acTotalBlocks[1] += acBlocks[1];
1099 AssertReturn(acBlocks[0] + acBlocks[1] == (uint32_t)pCurChunk->cBlocks,
1100 RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_4,
1101 "pCurChunk=%p: cBlocks=%u, expected %u",
1102 pCurChunk, pCurChunk->cBlocks, acBlocks[0] + acBlocks[1]));
1103
1104 acTotalBlocks[0] += acBlocks[0];
1105 AssertReturn(acBlocks[0] == (uint32_t)pCurChunk->cFreeBlocks,
1106 RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_5,
1107 "pCurChunk=%p: cFreeBlocks=%u, expected %u",
1108 pCurChunk, pCurChunk->cFreeBlocks, acBlocks[0]));
1109
1110 pPrevChunk = pCurChunk;
1111 }
1112
1113 AssertReturn(acTotalBlocks[0] == (uint32_t)g_vbgldata.cFreeBlocks,
1114 RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR,
1115 "g_vbgldata.cFreeBlocks=%u, expected %u", g_vbgldata.cFreeBlocks, acTotalBlocks[0]));
1116 AssertReturn(acTotalBlocks[0] + acTotalBlocks[1] == (uint32_t)g_vbgldata.cBlocks,
1117 RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR,
1118 "g_vbgldata.cBlocks=%u, expected %u", g_vbgldata.cBlocks, acTotalBlocks[0] + acTotalBlocks[1]));
1119
1120 /*
1121 * Check that the free list contains the same number of blocks as we
1122 * encountered during the above scan.
1123 */
1124 {
1125 unsigned cFreeListBlocks = 0;
1126 for (const VBGLPHYSHEAPFREEBLOCK *pCurBlock = g_vbgldata.pFreeHead, *pPrevBlock = NULL;
1127 pCurBlock;
1128 pCurBlock = pCurBlock->pNextFree)
1129 {
1130 AssertReturn(pCurBlock->Core.u32Signature == VBGL_PH_BLOCKSIGNATURE,
1131 RTErrInfoSetF(pErrInfo, VERR_INVALID_MAGIC,
1132 "pCurBlock=%p/free: magic=%#x", pCurBlock, pCurBlock->Core.u32Signature));
1133 AssertReturn(pCurBlock->pPrevFree == pPrevBlock,
1134 RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_2,
1135 "pCurBlock=%p/free: pPrev=%p, expected %p", pCurBlock, pCurBlock->pPrevFree, pPrevBlock));
1136 AssertReturn(pCurBlock->Core.pChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE,
1137 RTErrInfoSetF(pErrInfo, VERR_INVALID_MAGIC, "pCurBlock=%p/free: chunk (%p) magic=%#x",
1138 pCurBlock, pCurBlock->Core.pChunk, pCurBlock->Core.pChunk->u32Signature));
1139 cFreeListBlocks++;
1140 pPrevBlock = pCurBlock;
1141 }
1142
1143 AssertReturn(cFreeListBlocks == acTotalBlocks[0],
1144 RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_3,
1145 "Found %u in free list, expected %u", cFreeListBlocks, acTotalBlocks[0]));
1146 }
1147 return VINF_SUCCESS;
1148}
1149
1150
1151/**
1152 * Performs a heap check.
1153 *
1154 * @returns VINF_SUCCESS if okay, error status if not.
1155 * @param pErrInfo Where to return more error details, optional.
1156 */
1157DECLVBGL(int) VbglR0PhysHeapCheck(PRTERRINFO pErrInfo)
1158{
1159 int rc = RTSemFastMutexRequest(g_vbgldata.hMtxHeap);
1160 AssertRCReturn(rc, 0);
1161
1162 rc = vbglR0PhysHeapCheckLocked(pErrInfo);
1163
1164 RTSemFastMutexRelease(g_vbgldata.hMtxHeap);
1165 return rc;
1166}
1167
1168#endif /* IN_TESTCASE */
1169
1170DECLR0VBGL(int) VbglR0PhysHeapInit(RTHCPHYS HCPhysMax)
1171{
1172 g_vbgldata.HCPhysMax = HCPhysMax;
1173 g_vbgldata.hMtxHeap = NIL_RTSEMFASTMUTEX;
1174
1175 /* Allocate the first chunk of the heap. */
1176 VBGLPHYSHEAPFREEBLOCK *pBlock = vbglPhysHeapChunkAlloc(0);
1177 if (pBlock)
1178 return RTSemFastMutexCreate(&g_vbgldata.hMtxHeap);
1179 return VERR_NO_CONT_MEMORY;
1180}
1181
1182DECLR0VBGL(void) VbglR0PhysHeapTerminate(void)
1183{
1184 while (g_vbgldata.pChunkHead)
1185 vbglPhysHeapChunkDelete(g_vbgldata.pChunkHead);
1186
1187 RTSemFastMutexDestroy(g_vbgldata.hMtxHeap);
1188 g_vbgldata.hMtxHeap = NIL_RTSEMFASTMUTEX;
1189}
1190