VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/lib/VBoxGuestR0LibPhysHeap.cpp@ 100267

Last change on this file since 100267 was 100267, checked in by vboxsync, 21 months ago

Additions: Make the R0 physical heap configurable to allow for allocations >= 4GiB if supported by the VBox device (the MMIO request path is available), add support for the MMIO request path required for ARM, bugref:10457

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 43.4 KB
 
/* $Id: VBoxGuestR0LibPhysHeap.cpp 100267 2023-06-23 14:57:53Z vboxsync $ */
/** @file
 * VBoxGuestLibR0 - Physical memory heap.
 */

/*
 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/** @page pg_vbglr0_phys_heap VBoxGuestLibR0 - Physical memory heap.
 *
 * Traditional heap implementation keeping all blocks in an ordered list and
 * keeping free blocks on an additional list via pointers in the user area.
 * This is similar to @ref grp_rt_heap_simple "RTHeapSimple" and
 * @ref grp_rt_heap_offset "RTHeapOffset" in IPRT, except that this code
 * handles multiple chunks and has a physical address associated with each
 * chunk and block. The alignment is fixed (VBGL_PH_ALLOC_ALIGN).
 *
 * When allocating memory, a free block is found that satisfies the request,
 * extending the heap with another chunk if needed. The block is split if it's
 * too large, and the tail end is put on the free list.
 *
 * When freeing memory, the block being freed is put back on the free list and
 * we use the block list to check whether it can be merged with adjacent blocks.
 *
 * @note The original code managed the blocks in two separate lists for free
 *       and allocated blocks, which had the disadvantage of only allowing
 *       merging with the block following the block being freed. On the plus
 *       side, it had the potential for slightly better locality when
 *       examining the free list, since the next pointer and block size
 *       members were closer to one another.
 */
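
/*
 * Usage sketch (illustrative only; assumes the heap has been set up via
 * VbglR0PhysHeapInit(), see the end of this file):
 *
 *      void *pv = VbglR0PhysHeapAlloc(64);
 *      if (pv)
 *      {
 *          RTCCPHYS PhysAddr = VbglR0PhysHeapGetPhysAddr(pv);
 *          // ... pass PhysAddr to the host device, e.g. via an OUT
 *          //     instruction or the MMIO request path, then:
 *          VbglR0PhysHeapFree(pv);
 *      }
 */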


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#include "VBoxGuestR0LibInternal.h"

#include <iprt/assert.h>
#include <iprt/err.h>
#include <iprt/mem.h>
#include <iprt/memobj.h>
#include <iprt/semaphore.h>


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/** Enables heap dumping. */
#if defined(DOXYGEN_RUNNING) || 0
# define VBGL_PH_DUMPHEAP
#endif

#ifdef VBGL_PH_DUMPHEAP
# define VBGL_PH_DPRINTF(a) RTAssertMsg2Weak a
#else
# define VBGL_PH_DPRINTF(a) do { } while (0)
#endif

/** Heap chunk signature */
#define VBGL_PH_CHUNKSIGNATURE UINT32_C(0xADDCCCCC)
/** Heap chunk allocation unit */
#define VBGL_PH_CHUNKSIZE (0x10000)

/** Heap block signature */
#define VBGL_PH_BLOCKSIGNATURE UINT32_C(0xADDBBBBB)

/** The allocation block alignment.
 *
 * This cannot be larger than the size of VBGLPHYSHEAPBLOCK.
 */
#define VBGL_PH_ALLOC_ALIGN (sizeof(void *))

/** Max number of free nodes to search before just using the best fit.
 *
 * This is used to limit the free list walking during allocation and just get
 * on with the job. A low number should reduce cache thrashing at the
 * possible cost of heap fragmentation.
 *
 * Picked 16 after comparing the tstVbglR0PhysHeap-1 results w/ uRandSeed=42
 * for different max values.
 */
#define VBGL_PH_MAX_FREE_SEARCH 16

/** Threshold to stop the block search if a free block is at least this much too big.
 *
 * May cause more fragmentation (depending on usage pattern), but should speed
 * up allocation and hopefully reduce cache thrashing.
 *
 * Since we merge adjacent free blocks when we can, free blocks should
 * typically be a lot larger than what's requested. So, it is probably a good
 * idea to just chop up a large block rather than keep searching for a
 * perfect-ish match.
 *
 * Undefine this to disable this trick.
 */
#if defined(DOXYGEN_RUNNING) || 1
# define VBGL_PH_STOP_SEARCH_AT_EXCESS _4K
#endif

/** Threshold at which to split out a tail free block when allocating.
 *
 * The value gives the amount of user space, i.e. excluding the header.
 *
 * Using 32 bytes based on VMMDev.h request sizes. The smallest requests are
 * 24 bytes, i.e. only the header, and there are at least 4 of these. There
 * are at least 10 with size 28 bytes and at least 11 with size 32 bytes. So,
 * 32 bytes would fit some 25 requests out of about 60, which is reasonable.
 */
#define VBGL_PH_MIN_SPLIT_FREE_BLOCK 32


/** The smallest amount of user data that can be allocated.
 *
 * This is to ensure that the block can be converted into a
 * VBGLPHYSHEAPFREEBLOCK structure when freed. This must be smaller than or
 * equal to VBGL_PH_MIN_SPLIT_FREE_BLOCK.
 */
#define VBGL_PH_SMALLEST_ALLOC_SIZE 16

/** The maximum allocation request size. */
#define VBGL_PH_LARGEST_ALLOC_SIZE RT_ALIGN_32(  _128M \
                                               - sizeof(VBGLPHYSHEAPBLOCK) \
                                               - sizeof(VBGLPHYSHEAPCHUNK) \
                                               - VBGL_PH_ALLOC_ALIGN, \
                                               VBGL_PH_ALLOC_ALIGN)

/**
 * Whether to use the RTR0MemObjAllocCont API or RTMemContAlloc for
 * allocating chunks.
 *
 * This can be enabled on hosts where RTMemContAlloc is more complicated than
 * RTR0MemObjAllocCont. This can also be done if we wish to save code space,
 * as the latter is typically always dragged into the link on guests where
 * the linker cannot eliminate functions within objects. The only drawback is
 * that RTR0MemObjAllocCont requires another heap allocation for the handle.
 */
#if defined(DOXYGEN_RUNNING) || (!defined(IN_TESTCASE) && 0)
# define VBGL_PH_USE_MEMOBJ
#endif


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
/**
 * A heap block (within a chunk).
 *
 * This is used to track a part of a heap chunk that's either free or
 * allocated. The VBGLPHYSHEAPBLOCK::fAllocated member indicates which it is.
 */
struct VBGLPHYSHEAPBLOCK
{
    /** Magic value (VBGL_PH_BLOCKSIGNATURE). */
    uint32_t u32Signature;

    /** Size of user data in the block. Does not include this block header. */
    uint32_t cbUser : 31;
    /** The top bit indicates whether it's allocated or free. */
    uint32_t fAllocated : 1;

    /** Pointer to the next block on the list. */
    VBGLPHYSHEAPBLOCK *pNext;
    /** Pointer to the previous block on the list. */
    VBGLPHYSHEAPBLOCK *pPrev;
    /** Pointer back to the chunk. */
    VBGLPHYSHEAPCHUNK *pChunk;
};

/**
 * A free block.
 */
struct VBGLPHYSHEAPFREEBLOCK
{
    /** Core block data. */
    VBGLPHYSHEAPBLOCK Core;
    /** Pointer to the next free list entry. */
    VBGLPHYSHEAPFREEBLOCK *pNextFree;
    /** Pointer to the previous free list entry. */
    VBGLPHYSHEAPFREEBLOCK *pPrevFree;
};
AssertCompile(VBGL_PH_SMALLEST_ALLOC_SIZE >= sizeof(VBGLPHYSHEAPFREEBLOCK) - sizeof(VBGLPHYSHEAPBLOCK));
AssertCompile(VBGL_PH_MIN_SPLIT_FREE_BLOCK >= sizeof(VBGLPHYSHEAPFREEBLOCK) - sizeof(VBGLPHYSHEAPBLOCK));
AssertCompile(VBGL_PH_MIN_SPLIT_FREE_BLOCK >= VBGL_PH_SMALLEST_ALLOC_SIZE);

/**
 * A chunk of memory used by the heap for sub-allocations.
 *
 * There is a list of these.
 */
struct VBGLPHYSHEAPCHUNK
{
    /** Magic value (VBGL_PH_CHUNKSIGNATURE). */
    uint32_t u32Signature;
    /** Size of the chunk. Includes the chunk header. */
    uint32_t cbChunk;
    /** Number of blocks of any kind. */
    int32_t cBlocks;
    /** Number of free blocks. */
    int32_t cFreeBlocks;

    /** Physical address of the chunk (contiguous). */
    RTCCPHYS physAddr;

    /** Pointer to the next chunk. */
    VBGLPHYSHEAPCHUNK *pNext;
    /** Pointer to the previous chunk. */
    VBGLPHYSHEAPCHUNK *pPrev;

#if defined(VBGL_PH_USE_MEMOBJ)
    /** The allocation handle. */
    RTR0MEMOBJ hMemObj;
#elif ARCH_BITS == 64
    /** Pad the size up to 64 bytes. */
# ifdef VBGL_PH_USE_MEMOBJ
    uintptr_t auPadding2[2];
# else
    uintptr_t auPadding2[3];
# endif
#endif
};
#if ARCH_BITS == 64
AssertCompileSize(VBGLPHYSHEAPCHUNK, 64);
#elif ARCH_BITS == 32
AssertCompileSize(VBGLPHYSHEAPCHUNK, 32);
#else
# error "Unknown architecture!"
#endif
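
/*
 * Note: Within a chunk the blocks are laid out back to back: each block
 * header is immediately followed by its cbUser bytes of user data, with the
 * next block header right after that (vbglPhysHeapChunkDelete relies on this
 * when scanning a chunk). Roughly:
 *
 *      [VBGLPHYSHEAPCHUNK][VBGLPHYSHEAPBLOCK][user data][VBGLPHYSHEAPBLOCK][user data]...
 */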


/**
 * Debug function that dumps the heap.
 */
#ifndef VBGL_PH_DUMPHEAP
# define dumpheap(pszWhere) do { } while (0)
#else
static void dumpheap(const char *pszWhere)
{
    VBGL_PH_DPRINTF(("VBGL_PH dump at '%s'\n", pszWhere));

    VBGL_PH_DPRINTF(("Chunks:\n"));
    for (VBGLPHYSHEAPCHUNK *pChunk = g_vbgldata.pChunkHead; pChunk; pChunk = pChunk->pNext)
        VBGL_PH_DPRINTF(("%p: pNext = %p, pPrev = %p, sign = %08X, size = %8d, cBlocks = %8d, cFreeBlocks=%8d, phys = %08X\n",
                         pChunk, pChunk->pNext, pChunk->pPrev, pChunk->u32Signature, pChunk->cbChunk,
                         pChunk->cBlocks, pChunk->cFreeBlocks, pChunk->physAddr));

    VBGL_PH_DPRINTF(("Allocated blocks:\n"));
    for (VBGLPHYSHEAPBLOCK *pBlock = g_vbgldata.pBlockHead; pBlock; pBlock = pBlock->pNext)
        VBGL_PH_DPRINTF(("%p: pNext = %p, pPrev = %p, size = %05x, sign = %08X, %s, pChunk = %p\n",
                         pBlock, pBlock->pNext, pBlock->pPrev, pBlock->cbUser,
                         pBlock->u32Signature, pBlock->fAllocated ? "allocated" : " free", pBlock->pChunk));

    VBGL_PH_DPRINTF(("Free blocks:\n"));
    for (VBGLPHYSHEAPFREEBLOCK *pBlock = g_vbgldata.pFreeHead; pBlock; pBlock = pBlock->pNextFree)
        VBGL_PH_DPRINTF(("%p: pNextFree = %p, pPrevFree = %p, size = %05x, sign = %08X, pChunk = %p%s\n",
                         pBlock, pBlock->pNextFree, pBlock->pPrevFree, pBlock->Core.cbUser,
                         pBlock->Core.u32Signature, pBlock->Core.pChunk,
                         !pBlock->Core.fAllocated ? "" : " !!allocated-block-on-freelist!!"));

    VBGL_PH_DPRINTF(("VBGL_PH dump at '%s' done\n", pszWhere));
}
#endif


/**
 * Initializes a free block.
 */
static void vbglPhysHeapInitFreeBlock(VBGLPHYSHEAPFREEBLOCK *pBlock, VBGLPHYSHEAPCHUNK *pChunk, uint32_t cbUser)
{
    Assert(pBlock != NULL);
    Assert(pChunk != NULL);

    pBlock->Core.u32Signature = VBGL_PH_BLOCKSIGNATURE;
    pBlock->Core.cbUser = cbUser;
    pBlock->Core.fAllocated = false;
    pBlock->Core.pNext = NULL;
    pBlock->Core.pPrev = NULL;
    pBlock->Core.pChunk = pChunk;
    pBlock->pNextFree = NULL;
    pBlock->pPrevFree = NULL;
}


/**
 * Updates block statistics when a block is added.
 */
DECLINLINE(void) vbglPhysHeapStatsBlockAdded(VBGLPHYSHEAPBLOCK *pBlock)
{
    g_vbgldata.cBlocks += 1;
    pBlock->pChunk->cBlocks += 1;
    AssertMsg((uint32_t)pBlock->pChunk->cBlocks <= pBlock->pChunk->cbChunk / sizeof(VBGLPHYSHEAPFREEBLOCK),
              ("pChunk=%p: cbChunk=%#x cBlocks=%d\n", pBlock->pChunk, pBlock->pChunk->cbChunk, pBlock->pChunk->cBlocks));
}


/**
 * Links @a pBlock onto the head of the block list.
 *
 * This also updates the per-chunk block counts.
 */
static void vbglPhysHeapInsertBlock(VBGLPHYSHEAPBLOCK *pBlock)
{
    AssertMsg(pBlock->pNext == NULL, ("pBlock->pNext = %p\n", pBlock->pNext));
    AssertMsg(pBlock->pPrev == NULL, ("pBlock->pPrev = %p\n", pBlock->pPrev));

    /* inserting to head of list */
    VBGLPHYSHEAPBLOCK *pOldHead = g_vbgldata.pBlockHead;

    pBlock->pNext = pOldHead;
    pBlock->pPrev = NULL;

    if (pOldHead)
        pOldHead->pPrev = pBlock;
    g_vbgldata.pBlockHead = pBlock;

    /* Update the stats: */
    vbglPhysHeapStatsBlockAdded(pBlock);
}


/**
 * Links @a pBlock onto the block list after @a pInsertAfter.
 *
 * This also updates the per-chunk block counts.
 */
static void vbglPhysHeapInsertBlockAfter(VBGLPHYSHEAPBLOCK *pBlock, VBGLPHYSHEAPBLOCK *pInsertAfter)
{
    AssertMsg(pBlock->pNext == NULL, ("pBlock->pNext = %p\n", pBlock->pNext));
    AssertMsg(pBlock->pPrev == NULL, ("pBlock->pPrev = %p\n", pBlock->pPrev));

    pBlock->pNext = pInsertAfter->pNext;
    pBlock->pPrev = pInsertAfter;

    if (pInsertAfter->pNext)
        pInsertAfter->pNext->pPrev = pBlock;

    pInsertAfter->pNext = pBlock;

    /* Update the stats: */
    vbglPhysHeapStatsBlockAdded(pBlock);
}


/**
 * Unlinks @a pBlock from the block list.
 *
 * This also updates the per-chunk block counts.
 */
static void vbglPhysHeapUnlinkBlock(VBGLPHYSHEAPBLOCK *pBlock)
{
    VBGLPHYSHEAPBLOCK *pOtherBlock = pBlock->pNext;
    if (pOtherBlock)
        pOtherBlock->pPrev = pBlock->pPrev;
    /* else: this is the tail of the list, but we do not maintain tail pointers for block lists, so nothing to do. */

    pOtherBlock = pBlock->pPrev;
    if (pOtherBlock)
        pOtherBlock->pNext = pBlock->pNext;
    else
    {
        Assert(g_vbgldata.pBlockHead == pBlock);
        g_vbgldata.pBlockHead = pBlock->pNext;
    }

    pBlock->pNext = NULL;
    pBlock->pPrev = NULL;

    /* Update the stats: */
    g_vbgldata.cBlocks -= 1;
    pBlock->pChunk->cBlocks -= 1;
    AssertMsg(pBlock->pChunk->cBlocks >= 0,
              ("pChunk=%p: cbChunk=%#x cBlocks=%d\n", pBlock->pChunk, pBlock->pChunk->cbChunk, pBlock->pChunk->cBlocks));
    Assert(g_vbgldata.cBlocks >= 0);
}


/**
 * Updates statistics after adding a free block.
 */
DECLINLINE(void) vbglPhysHeapStatsFreeBlockAdded(VBGLPHYSHEAPFREEBLOCK *pBlock)
{
    g_vbgldata.cFreeBlocks += 1;
    pBlock->Core.pChunk->cFreeBlocks += 1;
}


/**
 * Links @a pBlock onto the head of the free chain.
 *
 * This is used during block freeing and when adding a new chunk.
 *
 * This also updates the per-chunk block counts.
 */
static void vbglPhysHeapInsertFreeBlock(VBGLPHYSHEAPFREEBLOCK *pBlock)
{
    Assert(!pBlock->Core.fAllocated);
    AssertMsg(pBlock->pNextFree == NULL, ("pBlock->pNextFree = %p\n", pBlock->pNextFree));
    AssertMsg(pBlock->pPrevFree == NULL, ("pBlock->pPrevFree = %p\n", pBlock->pPrevFree));

    /* inserting to head of list */
    VBGLPHYSHEAPFREEBLOCK *pOldHead = g_vbgldata.pFreeHead;

    pBlock->pNextFree = pOldHead;
    pBlock->pPrevFree = NULL;

    if (pOldHead)
        pOldHead->pPrevFree = pBlock;
    g_vbgldata.pFreeHead = pBlock;

    /* Update the stats: */
    vbglPhysHeapStatsFreeBlockAdded(pBlock);
}


/**
 * Links @a pBlock after @a pInsertAfter.
 *
 * This is used when splitting a free block during allocation to preserve the
 * place in the free list.
 *
 * This also updates the per-chunk block counts.
 */
static void vbglPhysHeapInsertFreeBlockAfter(VBGLPHYSHEAPFREEBLOCK *pBlock, VBGLPHYSHEAPFREEBLOCK *pInsertAfter)
{
    Assert(!pBlock->Core.fAllocated);
    AssertMsg(pBlock->pNextFree == NULL, ("pBlock->pNextFree = %p\n", pBlock->pNextFree));
    AssertMsg(pBlock->pPrevFree == NULL, ("pBlock->pPrevFree = %p\n", pBlock->pPrevFree));

    /* inserting after the given node */
    pBlock->pNextFree = pInsertAfter->pNextFree;
    pBlock->pPrevFree = pInsertAfter;

    if (pInsertAfter->pNextFree)
        pInsertAfter->pNextFree->pPrevFree = pBlock;

    pInsertAfter->pNextFree = pBlock;

    /* Update the stats: */
    vbglPhysHeapStatsFreeBlockAdded(pBlock);
}


/**
 * Unlinks @a pBlock from the free list.
 *
 * This also updates the per-chunk block counts.
 */
static void vbglPhysHeapUnlinkFreeBlock(VBGLPHYSHEAPFREEBLOCK *pBlock)
{
    Assert(!pBlock->Core.fAllocated);

    VBGLPHYSHEAPFREEBLOCK *pOtherBlock = pBlock->pNextFree;
    if (pOtherBlock)
        pOtherBlock->pPrevFree = pBlock->pPrevFree;
    /* else: this is the tail of the list, but we do not maintain tail pointers for block lists, so nothing to do. */

    pOtherBlock = pBlock->pPrevFree;
    if (pOtherBlock)
        pOtherBlock->pNextFree = pBlock->pNextFree;
    else
    {
        Assert(g_vbgldata.pFreeHead == pBlock);
        g_vbgldata.pFreeHead = pBlock->pNextFree;
    }

    pBlock->pNextFree = NULL;
    pBlock->pPrevFree = NULL;

    /* Update the stats: */
    g_vbgldata.cFreeBlocks -= 1;
    pBlock->Core.pChunk->cFreeBlocks -= 1;
    AssertMsg(pBlock->Core.pChunk->cFreeBlocks >= 0,
              ("pChunk=%p: cbChunk=%#x cFreeBlocks=%d\n",
               pBlock->Core.pChunk, pBlock->Core.pChunk->cbChunk, pBlock->Core.pChunk->cFreeBlocks));
    Assert(g_vbgldata.cFreeBlocks >= 0);
}


/**
 * Allocates another chunk and adds it to the heap.
 *
 * @returns Pointer to the free block in the new chunk on success, NULL on
 *          allocation failure.
 * @param   cbMinBlock  The size of the user block we need this chunk for.
 */
static VBGLPHYSHEAPFREEBLOCK *vbglPhysHeapChunkAlloc(uint32_t cbMinBlock)
{
    RTCCPHYS PhysAddr = NIL_RTHCPHYS;
    VBGLPHYSHEAPCHUNK *pChunk;
    uint32_t cbChunk;
#ifdef VBGL_PH_USE_MEMOBJ
    RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ;
    int rc;
#endif

    VBGL_PH_DPRINTF(("Allocating new chunk for %#x byte allocation\n", cbMinBlock));
    AssertReturn(cbMinBlock <= VBGL_PH_LARGEST_ALLOC_SIZE, NULL); /* paranoia */

    /*
     * Compute the size of the new chunk, rounding up to the next chunk size,
     * which must be a power of 2.
     *
     * Note! Using VBGLPHYSHEAPFREEBLOCK here means the minimum block size is
     *       8 or 16 bytes too high, but safer this way since cbMinBlock is
     *       zero during the init code call.
     */
    Assert(RT_IS_POWER_OF_TWO(VBGL_PH_CHUNKSIZE));
    cbChunk = cbMinBlock + sizeof(VBGLPHYSHEAPCHUNK) + sizeof(VBGLPHYSHEAPFREEBLOCK);
    cbChunk = RT_ALIGN_32(cbChunk, VBGL_PH_CHUNKSIZE);
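    /* For instance, on a 64-bit guest (64 byte chunk header, 48 byte free
       block structure), a cbMinBlock of 0x2000 gives 0x2000 + 64 + 48 =
       0x2070 bytes, which rounds up to a single 64KB (0x10000) chunk. */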

    if (g_vbgldata.fAlloc32BitAddr)
    {
        /*
         * This function allocates physically contiguous memory below 4 GB. This
         * 4GB limitation stems from using a 32-bit OUT instruction to pass a
         * block's physical address to the host.
         */
#ifdef VBGL_PH_USE_MEMOBJ
        rc = RTR0MemObjAllocCont(&hMemObj, cbChunk, false /*fExecutable*/);
        pChunk = (VBGLPHYSHEAPCHUNK *)(RT_SUCCESS(rc) ? RTR0MemObjAddress(hMemObj) : NULL);
#else
        pChunk = (VBGLPHYSHEAPCHUNK *)RTMemContAlloc(&PhysAddr, cbChunk);
#endif
    }
    else
    {
        /** @todo Provide appropriate memory API. */
#ifdef VBGL_PH_USE_MEMOBJ
        rc = RTR0MemObjAllocCont(&hMemObj, cbChunk, false /*fExecutable*/);
        pChunk = (VBGLPHYSHEAPCHUNK *)(RT_SUCCESS(rc) ? RTR0MemObjAddress(hMemObj) : NULL);
#else
        pChunk = (VBGLPHYSHEAPCHUNK *)RTMemContAlloc(&PhysAddr, cbChunk);
#endif
    }
    if (!pChunk)
    {
        /* If the allocation fails, shrink the size and try again until we
           reach the minimum chunk size. */
        uint32_t cbMinChunk = RT_MAX(cbMinBlock, PAGE_SIZE / 2) + sizeof(VBGLPHYSHEAPCHUNK) + sizeof(VBGLPHYSHEAPFREEBLOCK);
        cbMinChunk = RT_ALIGN_32(cbMinChunk, PAGE_SIZE);
        if (cbChunk > cbMinChunk)
            do
            {
                cbChunk >>= 2;
                cbChunk = RT_ALIGN_32(cbChunk, PAGE_SIZE);
#ifdef VBGL_PH_USE_MEMOBJ
                rc = RTR0MemObjAllocCont(&hMemObj, cbChunk, false /*fExecutable*/);
                pChunk = (VBGLPHYSHEAPCHUNK *)(RT_SUCCESS(rc) ? RTR0MemObjAddress(hMemObj) : NULL);
#else
                pChunk = (VBGLPHYSHEAPCHUNK *)RTMemContAlloc(&PhysAddr, cbChunk);
#endif
            } while (!pChunk && cbChunk > cbMinChunk);
    }
    if (pChunk)
    {
        VBGLPHYSHEAPCHUNK *pOldHeadChunk;
        VBGLPHYSHEAPFREEBLOCK *pBlock;
        AssertRelease(   !g_vbgldata.fAlloc32BitAddr
                      || (PhysAddr < _4G && PhysAddr + cbChunk <= _4G));

        /*
         * Init the new chunk.
         */
        pChunk->u32Signature = VBGL_PH_CHUNKSIGNATURE;
        pChunk->cbChunk      = cbChunk;
        pChunk->physAddr     = PhysAddr;
        pChunk->cBlocks      = 0;
        pChunk->cFreeBlocks  = 0;
        pChunk->pNext        = NULL;
        pChunk->pPrev        = NULL;
#ifdef VBGL_PH_USE_MEMOBJ
        pChunk->hMemObj      = hMemObj;
#endif

        /* Initialize the padding too: */
#if ARCH_BITS == 64
        pChunk->auPadding2[0] = UINT64_C(0xADDCAAA3ADDCAAA2);
        pChunk->auPadding2[1] = UINT64_C(0xADDCAAA5ADDCAAA4);
# ifndef VBGL_PH_USE_MEMOBJ
        pChunk->auPadding2[2] = UINT64_C(0xADDCAAA7ADDCAAA6);
# endif
#endif

        /*
         * Initialize the free block, which now occupies the entire chunk.
         */
        pBlock = (VBGLPHYSHEAPFREEBLOCK *)(pChunk + 1);
        vbglPhysHeapInitFreeBlock(pBlock, pChunk, cbChunk - sizeof(VBGLPHYSHEAPCHUNK) - sizeof(VBGLPHYSHEAPBLOCK));
        vbglPhysHeapInsertBlock(&pBlock->Core);
        vbglPhysHeapInsertFreeBlock(pBlock);

        /*
         * Add the chunk to the list.
         */
        pOldHeadChunk = g_vbgldata.pChunkHead;
        pChunk->pNext = pOldHeadChunk;
        if (pOldHeadChunk)
            pOldHeadChunk->pPrev = pChunk;
        g_vbgldata.pChunkHead = pChunk;

        VBGL_PH_DPRINTF(("Allocated chunk %p LB %#x, block %p LB %#x\n", pChunk, cbChunk, pBlock, pBlock->Core.cbUser));
        return pBlock;
    }
    LogRel(("vbglPhysHeapChunkAlloc: failed to alloc %u (%#x) contiguous bytes.\n", cbChunk, cbChunk));
    return NULL;
}


/**
 * Deletes a chunk: unlinks all its blocks and frees its memory.
 */
static void vbglPhysHeapChunkDelete(VBGLPHYSHEAPCHUNK *pChunk)
{
    uintptr_t uEnd, uCur;
    Assert(pChunk != NULL);
    AssertMsg(pChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE, ("pChunk->u32Signature = %08X\n", pChunk->u32Signature));

    VBGL_PH_DPRINTF(("Deleting chunk %p size %x\n", pChunk, pChunk->cbChunk));

    /*
     * First scan the chunk and unlink all blocks from the lists.
     *
     * Note! We could do this by finding the first and last block list entries
     *       and just drop the whole chain relating to this chunk, rather than
     *       doing it one by one. But doing it one by one is simpler and will
     *       continue to work if the block list ends up in an unsorted state.
     */
    uEnd = (uintptr_t)pChunk + pChunk->cbChunk;
    uCur = (uintptr_t)(pChunk + 1);

    while (uCur < uEnd)
    {
        VBGLPHYSHEAPBLOCK *pBlock = (VBGLPHYSHEAPBLOCK *)uCur;
        Assert(pBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE);
        Assert(pBlock->pChunk == pChunk);

        uCur += pBlock->cbUser + sizeof(VBGLPHYSHEAPBLOCK);
        Assert(uCur == (uintptr_t)pBlock->pNext || uCur >= uEnd);

        if (!pBlock->fAllocated)
            vbglPhysHeapUnlinkFreeBlock((VBGLPHYSHEAPFREEBLOCK *)pBlock);
        vbglPhysHeapUnlinkBlock(pBlock);
    }

    AssertMsg(uCur == uEnd, ("uCur = %p, uEnd = %p, pChunk->cbChunk = %08X\n", uCur, uEnd, pChunk->cbChunk));

    /*
     * Unlink the chunk from the chunk list.
     */
    if (pChunk->pNext)
        pChunk->pNext->pPrev = pChunk->pPrev;
    /* else: we do not maintain a tail pointer. */

    if (pChunk->pPrev)
        pChunk->pPrev->pNext = pChunk->pNext;
    else
    {
        Assert(g_vbgldata.pChunkHead == pChunk);
        g_vbgldata.pChunkHead = pChunk->pNext;
    }

    /*
     * Finally, free the chunk memory.
     */
#ifdef VBGL_PH_USE_MEMOBJ
    RTR0MemObjFree(pChunk->hMemObj, true /*fFreeMappings*/);
#else
    RTMemContFree(pChunk, pChunk->cbChunk);
#endif
}


DECLR0VBGL(void *) VbglR0PhysHeapAlloc(uint32_t cb)
{
    VBGLPHYSHEAPFREEBLOCK *pBlock;
    VBGLPHYSHEAPFREEBLOCK *pIter;
    int32_t cLeft;
#ifdef VBGL_PH_STOP_SEARCH_AT_EXCESS
    uint32_t cbAlwaysSplit;
#endif
    int rc;

    /*
     * Make sure we don't allocate anything too small to turn into a free node
     * and align the size to prevent pointer misalignment and whatnot.
     */
    cb = RT_MAX(cb, VBGL_PH_SMALLEST_ALLOC_SIZE);
    cb = RT_ALIGN_32(cb, VBGL_PH_ALLOC_ALIGN);
    AssertCompile(VBGL_PH_ALLOC_ALIGN <= sizeof(pBlock->Core));

    rc = RTSemFastMutexRequest(g_vbgldata.hMtxHeap);
    AssertRCReturn(rc, NULL);

    dumpheap("pre alloc");

    /*
     * Search the free list. We do this in linear fashion as we don't expect
     * there to be many blocks in the heap.
     */
#ifdef VBGL_PH_STOP_SEARCH_AT_EXCESS
    cbAlwaysSplit = cb + VBGL_PH_STOP_SEARCH_AT_EXCESS;
#endif
    cLeft = VBGL_PH_MAX_FREE_SEARCH;
    pBlock = NULL;
    if (cb <= PAGE_SIZE / 4 * 3)
    {
        /* Smaller than 3/4 of a page: Prefer a free block that can keep the request within a single page,
           so HGCM processing in VMMDev can use page locks instead of several reads and writes. */
        VBGLPHYSHEAPFREEBLOCK *pFallback = NULL;
        for (pIter = g_vbgldata.pFreeHead; pIter != NULL; pIter = pIter->pNextFree, cLeft--)
        {
            AssertBreak(pIter->Core.u32Signature == VBGL_PH_BLOCKSIGNATURE);
            if (pIter->Core.cbUser >= cb)
            {
                if (pIter->Core.cbUser == cb)
                {
                    if (PAGE_SIZE - ((uintptr_t)(pIter + 1) & PAGE_OFFSET_MASK) >= cb)
                    {
                        pBlock = pIter;
                        break;
                    }
                    pFallback = pIter;
                }
                else
                {
                    if (!pFallback || pIter->Core.cbUser < pFallback->Core.cbUser)
                        pFallback = pIter;
                    if (PAGE_SIZE - ((uintptr_t)(pIter + 1) & PAGE_OFFSET_MASK) >= cb)
                    {
                        if (!pBlock || pIter->Core.cbUser < pBlock->Core.cbUser)
                            pBlock = pIter;
#ifdef VBGL_PH_STOP_SEARCH_AT_EXCESS
                        else if (pIter->Core.cbUser >= cbAlwaysSplit)
                        {
                            pBlock = pIter;
                            break;
                        }
#endif
                    }
                }

                if (cLeft > 0)
                { /* likely */ }
                else
                    break;
            }
        }

        if (!pBlock)
            pBlock = pFallback;
    }
    else
    {
        /* Larger than 3/4 of a page: Find the closest free list match. */
        for (pIter = g_vbgldata.pFreeHead; pIter != NULL; pIter = pIter->pNextFree, cLeft--)
        {
            AssertBreak(pIter->Core.u32Signature == VBGL_PH_BLOCKSIGNATURE);
            if (pIter->Core.cbUser >= cb)
            {
                if (pIter->Core.cbUser == cb)
                {
                    /* Exact match - we're done! */
                    pBlock = pIter;
                    break;
                }

#ifdef VBGL_PH_STOP_SEARCH_AT_EXCESS
                if (pIter->Core.cbUser >= cbAlwaysSplit)
                {
                    /* Really big block - no point in continuing the search! */
                    pBlock = pIter;
                    break;
                }
#endif
                /* Looking for a free block with the nearest size. */
                if (!pBlock || pIter->Core.cbUser < pBlock->Core.cbUser)
                    pBlock = pIter;

                if (cLeft > 0)
                { /* likely */ }
                else
                    break;
            }
        }
    }

    if (!pBlock)
    {
        /* No free blocks, allocate a new chunk; the only free block of the
           chunk will be returned. */
        pBlock = vbglPhysHeapChunkAlloc(cb);
    }

    if (pBlock)
    {
        /* We have a free block, either found or allocated. */
        AssertMsg(pBlock->Core.u32Signature == VBGL_PH_BLOCKSIGNATURE,
                  ("pBlock = %p, pBlock->u32Signature = %08X\n", pBlock, pBlock->Core.u32Signature));
        AssertMsg(!pBlock->Core.fAllocated, ("pBlock = %p\n", pBlock));

        /*
         * If the block is too large, split off a free block with the unused space.
         *
         * We do this before unlinking the block so we can preserve the location
         * in the free list.
         *
         * Note! We cannot split off and return the tail end here, because that may
         *       violate the same-page requirements for requests smaller than 3/4 page.
         */
        AssertCompile(VBGL_PH_MIN_SPLIT_FREE_BLOCK >= sizeof(*pBlock) - sizeof(pBlock->Core));
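        /* For instance, on a 64-bit guest (32 byte block headers) a 160 byte
           free block asked for 48 bytes is split (160 >= 2*32 + 32 + 48),
           leaving a tail free block with 160 - 48 - 32 = 80 bytes of user
           space. */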
        if (pBlock->Core.cbUser >= sizeof(VBGLPHYSHEAPBLOCK) * 2 + VBGL_PH_MIN_SPLIT_FREE_BLOCK + cb)
        {
            pIter = (VBGLPHYSHEAPFREEBLOCK *)((uintptr_t)(&pBlock->Core + 1) + cb);
            vbglPhysHeapInitFreeBlock(pIter, pBlock->Core.pChunk, pBlock->Core.cbUser - cb - sizeof(VBGLPHYSHEAPBLOCK));

            pBlock->Core.cbUser = cb;

            /* Insert the new 'pIter' block after the 'pBlock' in the block list
               and in the free list. */
            vbglPhysHeapInsertBlockAfter(&pIter->Core, &pBlock->Core);
            vbglPhysHeapInsertFreeBlockAfter(pIter, pBlock);
        }

        /*
         * Unlink the block from the free list and mark it as allocated.
         */
        vbglPhysHeapUnlinkFreeBlock(pBlock);
        pBlock->Core.fAllocated = true;

        dumpheap("post alloc");

        /*
         * Return success.
         */
        rc = RTSemFastMutexRelease(g_vbgldata.hMtxHeap);

        VBGL_PH_DPRINTF(("VbglR0PhysHeapAlloc: returns %p size %x\n", pBlock + 1, pBlock->Core.cbUser));
        return &pBlock->Core + 1;
    }

    /*
     * Return failure.
     */
    rc = RTSemFastMutexRelease(g_vbgldata.hMtxHeap);
    AssertRC(rc);

    VBGL_PH_DPRINTF(("VbglR0PhysHeapAlloc: returns NULL (requested %#x bytes)\n", cb));
    return NULL;
}


DECLR0VBGL(RTCCPHYS) VbglR0PhysHeapGetPhysAddr(void *pv)
{
    /*
     * Validate the incoming pointer.
     */
    if (pv != NULL)
    {
        VBGLPHYSHEAPBLOCK *pBlock = (VBGLPHYSHEAPBLOCK *)pv - 1;
        if (   pBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE
            && pBlock->fAllocated)
        {
            /*
             * Calculate and return its physical address.
             */
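            /* Each chunk is physically contiguous, so this is simply the
               chunk's physical address plus the pointer's byte offset into
               the chunk; e.g. a block at offset 0x90 into a chunk at
               physical 0x00a10000 yields 0x00a10090. */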
            VBGLPHYSHEAPCHUNK *pChunk = pBlock->pChunk;
            return pChunk->physAddr + (uint32_t)((uintptr_t)pv - (uintptr_t)pChunk);
        }

        AssertMsgFailed(("Use after free or corrupt pointer variable: pv=%p pBlock=%p: u32Signature=%#x cb=%#x fAllocated=%d\n",
                         pv, pBlock, pBlock->u32Signature, pBlock->cbUser, pBlock->fAllocated));
    }
    else
        AssertMsgFailed(("Unexpected NULL pointer\n"));
    return 0;
}


DECLR0VBGL(void) VbglR0PhysHeapFree(void *pv)
{
    if (pv != NULL)
    {
        VBGLPHYSHEAPFREEBLOCK *pBlock;

        int rc = RTSemFastMutexRequest(g_vbgldata.hMtxHeap);
        AssertRCReturnVoid(rc);

        dumpheap("pre free");

        /*
         * Validate the block header.
         */
        pBlock = (VBGLPHYSHEAPFREEBLOCK *)((VBGLPHYSHEAPBLOCK *)pv - 1);
        if (   pBlock->Core.u32Signature == VBGL_PH_BLOCKSIGNATURE
            && pBlock->Core.fAllocated
            && pBlock->Core.cbUser >= VBGL_PH_SMALLEST_ALLOC_SIZE)
        {
            VBGLPHYSHEAPCHUNK *pChunk;
            VBGLPHYSHEAPBLOCK *pNeighbour;

            /*
             * Change the block status to freed.
             */
            VBGL_PH_DPRINTF(("VbglR0PhysHeapFree: %p size %#x\n", pv, pBlock->Core.cbUser));

            pBlock->Core.fAllocated = false;
            pBlock->pNextFree = pBlock->pPrevFree = NULL;
            vbglPhysHeapInsertFreeBlock(pBlock);

            dumpheap("post insert");

            /*
             * Check if the block after this one is also free and we can merge it into this one.
             */
            pChunk = pBlock->Core.pChunk;

            pNeighbour = pBlock->Core.pNext;
            if (   pNeighbour
                && !pNeighbour->fAllocated
                && pNeighbour->pChunk == pChunk)
            {
                Assert((uintptr_t)pBlock + sizeof(pBlock->Core) + pBlock->Core.cbUser == (uintptr_t)pNeighbour);

                /* Adjust the size of the current memory block. */
                pBlock->Core.cbUser += pNeighbour->cbUser + sizeof(VBGLPHYSHEAPBLOCK);

                /* Unlink the following node and invalidate it. */
                vbglPhysHeapUnlinkFreeBlock((VBGLPHYSHEAPFREEBLOCK *)pNeighbour);
                vbglPhysHeapUnlinkBlock(pNeighbour);

                pNeighbour->u32Signature = ~VBGL_PH_BLOCKSIGNATURE;
                pNeighbour->cbUser = UINT32_MAX / 4;

                dumpheap("post merge after");
            }

            /*
             * Same check for the block before us. This invalidates pBlock.
             */
            pNeighbour = pBlock->Core.pPrev;
            if (   pNeighbour
                && !pNeighbour->fAllocated
                && pNeighbour->pChunk == pChunk)
            {
                Assert((uintptr_t)pNeighbour + sizeof(*pNeighbour) + pNeighbour->cbUser == (uintptr_t)pBlock);

                /* Adjust the size of the block before us. */
                pNeighbour->cbUser += pBlock->Core.cbUser + sizeof(VBGLPHYSHEAPBLOCK);

                /* Unlink this node and invalidate it. */
                vbglPhysHeapUnlinkFreeBlock(pBlock);
                vbglPhysHeapUnlinkBlock(&pBlock->Core);

                pBlock->Core.u32Signature = ~VBGL_PH_BLOCKSIGNATURE;
                pBlock->Core.cbUser = UINT32_MAX / 8;

                pBlock = NULL; /* invalid */

                dumpheap("post merge before");
            }

            /*
             * If this chunk is now completely unused, delete it if there is
             * at least one other completely unused chunk.
             */
            if (   pChunk->cFreeBlocks == pChunk->cBlocks
                && (pChunk->pPrev || pChunk->pNext))
            {
                VBGLPHYSHEAPCHUNK *pCurChunk;
                uint32_t cUnusedChunks = 0;
                for (pCurChunk = g_vbgldata.pChunkHead; pCurChunk; pCurChunk = pCurChunk->pNext)
                {
                    AssertBreak(pCurChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE);
                    if (pCurChunk->cFreeBlocks == pCurChunk->cBlocks)
                    {
                        cUnusedChunks++;
                        if (cUnusedChunks > 1)
                        {
                            /* Delete the current chunk. This also unlinks all
                               free blocks remaining in the chunk from the free
                               list, so pBlock will be invalid after this too. */
                            vbglPhysHeapChunkDelete(pChunk);
                            pBlock = NULL;     /* invalid */
                            pChunk = NULL;
                            pNeighbour = NULL;
                            break;
                        }
                    }
                }
            }

            dumpheap("post free");
        }
        else
            AssertMsgFailed(("pBlock: %p: u32Signature=%#x cb=%#x fAllocated=%d - double free?\n",
                             pBlock, pBlock->Core.u32Signature, pBlock->Core.cbUser, pBlock->Core.fAllocated));

        rc = RTSemFastMutexRelease(g_vbgldata.hMtxHeap);
        AssertRC(rc);
    }
}

#ifdef IN_TESTCASE /* For the testcase only */

/**
 * Returns the sum of all free heap blocks.
 *
 * This is the amount of memory you can theoretically allocate if you do
 * allocations exactly matching the free blocks.
 *
 * @returns The size of the free blocks.
 * @returns 0 if the heap was safely detected as being bad.
 */
DECLVBGL(size_t) VbglR0PhysHeapGetFreeSize(void)
{
    int rc = RTSemFastMutexRequest(g_vbgldata.hMtxHeap);
    AssertRCReturn(rc, 0);

    size_t cbTotal = 0;
    for (VBGLPHYSHEAPFREEBLOCK *pCurBlock = g_vbgldata.pFreeHead; pCurBlock; pCurBlock = pCurBlock->pNextFree)
    {
        Assert(pCurBlock->Core.u32Signature == VBGL_PH_BLOCKSIGNATURE);
        Assert(!pCurBlock->Core.fAllocated);
        cbTotal += pCurBlock->Core.cbUser;
    }

    RTSemFastMutexRelease(g_vbgldata.hMtxHeap);
    return cbTotal;
}


/**
 * Checks the heap; the caller is responsible for locking.
 *
 * @returns VINF_SUCCESS if okay, error status if not.
 * @param   pErrInfo    Where to return more error details, optional.
 */
static int vbglR0PhysHeapCheckLocked(PRTERRINFO pErrInfo)
{
    /*
     * Scan the blocks in each chunk, walking the block list in parallel.
     */
    const VBGLPHYSHEAPBLOCK *pPrevBlockListEntry = NULL;
    const VBGLPHYSHEAPBLOCK *pCurBlockListEntry  = g_vbgldata.pBlockHead;
    unsigned acTotalBlocks[2] = { 0, 0 };
    for (VBGLPHYSHEAPCHUNK *pCurChunk = g_vbgldata.pChunkHead, *pPrevChunk = NULL; pCurChunk; pCurChunk = pCurChunk->pNext)
    {
        AssertReturn(pCurChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE,
                     RTErrInfoSetF(pErrInfo, VERR_INVALID_MAGIC, "pCurChunk=%p: magic=%#x", pCurChunk, pCurChunk->u32Signature));
        AssertReturn(pCurChunk->pPrev == pPrevChunk,
                     RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_2,
                                   "pCurChunk=%p: pPrev=%p, expected %p", pCurChunk, pCurChunk->pPrev, pPrevChunk));

        const VBGLPHYSHEAPBLOCK *pCurBlock = (const VBGLPHYSHEAPBLOCK *)(pCurChunk + 1);
        uintptr_t const uEnd = (uintptr_t)pCurChunk + pCurChunk->cbChunk;
        unsigned acBlocks[2] = { 0, 0 };
        while ((uintptr_t)pCurBlock < uEnd)
        {
            AssertReturn(pCurBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE,
                         RTErrInfoSetF(pErrInfo, VERR_INVALID_MAGIC,
                                       "pCurBlock=%p: magic=%#x", pCurBlock, pCurBlock->u32Signature));
            AssertReturn(pCurBlock->pChunk == pCurChunk,
                         RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_2,
                                       "pCurBlock=%p: pChunk=%p, expected %p", pCurBlock, pCurBlock->pChunk, pCurChunk));
            AssertReturn(   pCurBlock->cbUser >= VBGL_PH_SMALLEST_ALLOC_SIZE
                         && pCurBlock->cbUser <= VBGL_PH_LARGEST_ALLOC_SIZE
                         && RT_ALIGN_32(pCurBlock->cbUser, VBGL_PH_ALLOC_ALIGN) == pCurBlock->cbUser,
                         RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_3,
                                       "pCurBlock=%p: cbUser=%#x", pCurBlock, pCurBlock->cbUser));
            AssertReturn(pCurBlock == pCurBlockListEntry,
                         RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_4,
                                       "pCurChunk=%p: pCurBlock=%p, pCurBlockListEntry=%p\n",
                                       pCurChunk, pCurBlock, pCurBlockListEntry));
            AssertReturn(pCurBlock->pPrev == pPrevBlockListEntry,
                         RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_5,
                                       "pCurChunk=%p: pCurBlock->pPrev=%p, pPrevBlockListEntry=%p\n",
                                       pCurChunk, pCurBlock->pPrev, pPrevBlockListEntry));

            acBlocks[pCurBlock->fAllocated] += 1;

            /* advance */
            pPrevBlockListEntry = pCurBlock;
            pCurBlockListEntry  = pCurBlock->pNext;
            pCurBlock = (const VBGLPHYSHEAPBLOCK *)((uintptr_t)(pCurBlock + 1) + pCurBlock->cbUser);
        }
        AssertReturn((uintptr_t)pCurBlock == uEnd,
                     RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_4,
                                   "pCurBlock=%p uEnd=%p", pCurBlock, uEnd));

        acTotalBlocks[1] += acBlocks[1];
        AssertReturn(acBlocks[0] + acBlocks[1] == (uint32_t)pCurChunk->cBlocks,
                     RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_4,
                                   "pCurChunk=%p: cBlocks=%u, expected %u",
                                   pCurChunk, pCurChunk->cBlocks, acBlocks[0] + acBlocks[1]));

        acTotalBlocks[0] += acBlocks[0];
        AssertReturn(acBlocks[0] == (uint32_t)pCurChunk->cFreeBlocks,
                     RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_5,
                                   "pCurChunk=%p: cFreeBlocks=%u, expected %u",
                                   pCurChunk, pCurChunk->cFreeBlocks, acBlocks[0]));

        pPrevChunk = pCurChunk;
    }

    AssertReturn(acTotalBlocks[0] == (uint32_t)g_vbgldata.cFreeBlocks,
                 RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR,
                               "g_vbgldata.cFreeBlocks=%u, expected %u", g_vbgldata.cFreeBlocks, acTotalBlocks[0]));
    AssertReturn(acTotalBlocks[0] + acTotalBlocks[1] == (uint32_t)g_vbgldata.cBlocks,
                 RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR,
                               "g_vbgldata.cBlocks=%u, expected %u", g_vbgldata.cBlocks, acTotalBlocks[0] + acTotalBlocks[1]));

    /*
     * Check that the free list contains the same number of blocks as we
     * encountered during the above scan.
     */
    {
        unsigned cFreeListBlocks = 0;
        for (const VBGLPHYSHEAPFREEBLOCK *pCurBlock = g_vbgldata.pFreeHead, *pPrevBlock = NULL;
             pCurBlock;
             pCurBlock = pCurBlock->pNextFree)
        {
            AssertReturn(pCurBlock->Core.u32Signature == VBGL_PH_BLOCKSIGNATURE,
                         RTErrInfoSetF(pErrInfo, VERR_INVALID_MAGIC,
                                       "pCurBlock=%p/free: magic=%#x", pCurBlock, pCurBlock->Core.u32Signature));
            AssertReturn(pCurBlock->pPrevFree == pPrevBlock,
                         RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_2,
                                       "pCurBlock=%p/free: pPrev=%p, expected %p", pCurBlock, pCurBlock->pPrevFree, pPrevBlock));
            AssertReturn(pCurBlock->Core.pChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE,
                         RTErrInfoSetF(pErrInfo, VERR_INVALID_MAGIC, "pCurBlock=%p/free: chunk (%p) magic=%#x",
                                       pCurBlock, pCurBlock->Core.pChunk, pCurBlock->Core.pChunk->u32Signature));
            cFreeListBlocks++;
            pPrevBlock = pCurBlock;
        }

        AssertReturn(cFreeListBlocks == acTotalBlocks[0],
                     RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_3,
                                   "Found %u blocks on the free list, expected %u", cFreeListBlocks, acTotalBlocks[0]));
    }
    return VINF_SUCCESS;
}


/**
 * Performs a heap check.
 *
 * @returns VINF_SUCCESS on success, suitable error status on failure.
 * @param   pErrInfo    Where to return more error details, optional.
 */
DECLVBGL(int) VbglR0PhysHeapCheck(PRTERRINFO pErrInfo)
{
    int rc = RTSemFastMutexRequest(g_vbgldata.hMtxHeap);
    AssertRCReturn(rc, rc);

    rc = vbglR0PhysHeapCheckLocked(pErrInfo);

    RTSemFastMutexRelease(g_vbgldata.hMtxHeap);
    return rc;
}

#endif /* IN_TESTCASE */

DECLR0VBGL(int) VbglR0PhysHeapInit(bool fAlloc32BitAddr)
{
    g_vbgldata.fAlloc32BitAddr = fAlloc32BitAddr;
    g_vbgldata.hMtxHeap = NIL_RTSEMFASTMUTEX;

    /* Allocate the first chunk of the heap. */
    VBGLPHYSHEAPFREEBLOCK *pBlock = vbglPhysHeapChunkAlloc(0);
    if (pBlock)
        return RTSemFastMutexCreate(&g_vbgldata.hMtxHeap);
    return VERR_NO_CONT_MEMORY;
}

DECLR0VBGL(void) VbglR0PhysHeapTerminate(void)
{
    while (g_vbgldata.pChunkHead)
        vbglPhysHeapChunkDelete(g_vbgldata.pChunkHead);

    RTSemFastMutexDestroy(g_vbgldata.hMtxHeap);
    g_vbgldata.hMtxHeap = NIL_RTSEMFASTMUTEX;
}
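
/*
 * Illustrative bring-up/tear-down sequence for a guest driver (the choice of
 * fAlloc32BitAddr depends on whether the VBox device offers the MMIO request
 * path, see the r100267 change description):
 *
 *      int rc = VbglR0PhysHeapInit(true);  // true: allocate below 4 GiB
 *      if (RT_SUCCESS(rc))
 *      {
 *          // ... VbglR0PhysHeapAlloc / VbglR0PhysHeapFree ...
 *          VbglR0PhysHeapTerminate();
 *      }
 */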