VirtualBox

source: vbox/trunk/src/VBox/Runtime/r3/posix/rtmempage-exec-mmap-heap-posix.cpp@99775

Last change on this file since 99775 was 99775, checked in by vboxsync, 23 months ago

*: Mark functions as static if not used outside of a given compilation unit. Enables the compiler to optimize inlining, reduces the symbol tables, exposes unused functions, and in some rare cases exposes mismatches between function declarations and definitions, but most importantly reduces the number of parfait reports for the extern-function-no-forward-declaration category. This should not result in any functional changes, bugref:3409
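For illustration only (this snippet is not part of the commit; the file name demo.cpp and the helper rtDemoHelper are made-up), the change described above amounts to the following pattern:

/* Before: external linkage.  The symbol is exported from the object file and,
 * lacking a forward declaration in a header, is flagged by static analysis. */
int rtDemoHelper(int a, int b)
{
    return a + b;
}

/* After: internal linkage.  The compiler may inline the function freely, can
 * drop it entirely if unused, and no symbol table entry is needed. */
static int rtDemoHelper(int a, int b)
{
    return a + b;
}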

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 25.3 KB
 
/* $Id: rtmempage-exec-mmap-heap-posix.cpp 99775 2023-05-12 12:21:58Z vboxsync $ */
/** @file
 * IPRT - RTMemPage*, POSIX with heap.
 */

/*
 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.alldomusa.eu.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
 * in the VirtualBox distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 *
 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#include "internal/iprt.h"
#include <iprt/mem.h>

#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/avl.h>
#include <iprt/critsect.h>
#include <iprt/errcore.h>
#include <iprt/once.h>
#include <iprt/param.h>
#include <iprt/string.h>
#include "internal/mem.h"
#include "../alloc-ef.h"

#include <stdlib.h>
#include <errno.h>
#include <sys/mman.h>
#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
# define MAP_ANONYMOUS MAP_ANON
#endif

/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/** Threshold at which we switch to simply calling mmap. */
#define RTMEMPAGEPOSIX_MMAP_THRESHOLD   _128K
/** The size of a heap block (power of two) - in bytes. */
#define RTMEMPAGEPOSIX_BLOCK_SIZE       _2M
AssertCompile(RTMEMPAGEPOSIX_BLOCK_SIZE == (RTMEMPAGEPOSIX_BLOCK_SIZE / PAGE_SIZE) * PAGE_SIZE);
/** The number of pages per heap block. */
#define RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT (RTMEMPAGEPOSIX_BLOCK_SIZE / PAGE_SIZE)


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
/** Pointer to a page heap block. */
typedef struct RTHEAPPAGEBLOCK *PRTHEAPPAGEBLOCK;

/**
 * A simple page heap.
 */
typedef struct RTHEAPPAGE
{
    /** Magic number (RTHEAPPAGE_MAGIC). */
    uint32_t            u32Magic;
    /** The number of pages in the heap (in BlockTree). */
    uint32_t            cHeapPages;
    /** The number of currently free pages. */
    uint32_t            cFreePages;
    /** Number of successful allocation calls. */
    uint32_t            cAllocCalls;
    /** Number of successful free calls. */
    uint32_t            cFreeCalls;
    /** The free call number at which we last tried to minimize the heap. */
    uint32_t            uLastMinimizeCall;
    /** Tree of heap blocks. */
    AVLRPVTREE          BlockTree;
    /** Allocation hint no 1 (last freed). */
    PRTHEAPPAGEBLOCK    pHint1;
    /** Allocation hint no 2 (last alloc). */
    PRTHEAPPAGEBLOCK    pHint2;
    /** Critical section protecting the heap. */
    RTCRITSECT          CritSect;
    /** Set if the memory must be allocated with execute access. */
    bool                fExec;
} RTHEAPPAGE;
#define RTHEAPPAGE_MAGIC    UINT32_C(0xfeedface)
/** Pointer to a page heap. */
typedef RTHEAPPAGE *PRTHEAPPAGE;


/**
 * Describes a page heap block.
 */
typedef struct RTHEAPPAGEBLOCK
{
    /** The AVL tree node core (void pointer range). */
    AVLRPVNODECORE      Core;
    /** Allocation bitmap.  Set bits mark allocated pages. */
    uint32_t            bmAlloc[RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT / 32];
    /** Allocation boundary bitmap.  Set bits mark the start of
     *  allocations. */
    uint32_t            bmFirst[RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT / 32];
    /** The number of free pages. */
    uint32_t            cFreePages;
    /** Pointer back to the heap. */
    PRTHEAPPAGE         pHeap;
} RTHEAPPAGEBLOCK;


/**
 * Argument package for rtHeapPageAllocCallback.
 */
typedef struct RTHEAPPAGEALLOCARGS
{
    /** The number of pages to allocate. */
    size_t              cPages;
    /** Non-null on success. */
    void               *pvAlloc;
    /** RTMEMPAGEALLOC_F_XXX. */
    uint32_t            fFlags;
} RTHEAPPAGEALLOCARGS;


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
/** Initialize once structure. */
static RTONCE       g_MemPagePosixInitOnce = RTONCE_INITIALIZER;
/** The page heap. */
static RTHEAPPAGE   g_MemPagePosixHeap;
/** The exec page heap. */
static RTHEAPPAGE   g_MemExecPosixHeap;


#ifdef RT_OS_OS2
/*
 * A quick mmap/munmap mockup to avoid duplicating lots of good code.
 */
# define INCL_BASE
# include <os2.h>
# undef MAP_PRIVATE
# define MAP_PRIVATE    0
# undef MAP_ANONYMOUS
# define MAP_ANONYMOUS  0
# undef MAP_FAILED
# define MAP_FAILED     (void *)-1
# undef mmap
# define mmap iprt_mmap
# undef munmap
# define munmap iprt_munmap

static void *mmap(void *pvWhere, size_t cb, int fProt, int fFlags, int fd, off_t off)
{
    NOREF(pvWhere); NOREF(fd); NOREF(off);
    void   *pv     = NULL;
    ULONG   fAlloc = OBJ_ANY | PAG_COMMIT;
    if (fProt & PROT_EXEC)
        fAlloc |= PAG_EXECUTE;
    if (fProt & PROT_READ)
        fAlloc |= PAG_READ;
    if (fProt & PROT_WRITE)
        fAlloc |= PAG_WRITE;
    APIRET rc = DosAllocMem(&pv, cb, fAlloc);
    if (rc == NO_ERROR)
        return pv;
    errno = ENOMEM;
    return MAP_FAILED;
}

static int munmap(void *pv, size_t cb)
{
    APIRET rc = DosFreeMem(pv);
    if (rc == NO_ERROR)
        return 0;
    errno = EINVAL;
    return -1;
}

#endif

/**
 * Initializes the heap.
 *
 * @returns IPRT status code.
 * @param   pHeap           The page heap to initialize.
 * @param   fExec           Whether the heap memory should be marked as
 *                          executable or not.
 */
static int RTHeapPageInit(PRTHEAPPAGE pHeap, bool fExec)
{
    int rc = RTCritSectInitEx(&pHeap->CritSect,
                              RTCRITSECT_FLAGS_NO_LOCK_VAL | RTCRITSECT_FLAGS_NO_NESTING | RTCRITSECT_FLAGS_BOOTSTRAP_HACK,
                              NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, NULL);
    if (RT_SUCCESS(rc))
    {
        pHeap->cHeapPages        = 0;
        pHeap->cFreePages        = 0;
        pHeap->cAllocCalls       = 0;
        pHeap->cFreeCalls        = 0;
        pHeap->uLastMinimizeCall = 0;
        pHeap->BlockTree         = NULL;
        pHeap->fExec             = fExec;
        pHeap->u32Magic          = RTHEAPPAGE_MAGIC;
    }
    return rc;
}


/**
 * Deletes the heap and all the memory it tracks.
 *
 * @returns IPRT status code.
 * @param   pHeap           The page heap to delete.
 */
static int RTHeapPageDelete(PRTHEAPPAGE pHeap)
{
    NOREF(pHeap);
    return VERR_NOT_IMPLEMENTED;
}


/**
 * Applies flags to an allocation.
 *
 * @param   pv              The allocation.
 * @param   cb              The size of the allocation (page aligned).
 * @param   fFlags          RTMEMPAGEALLOC_F_XXX.
 */
DECLINLINE(void) rtMemPagePosixApplyFlags(void *pv, size_t cb, uint32_t fFlags)
{
#ifndef RT_OS_OS2
    if (fFlags & RTMEMPAGEALLOC_F_ADVISE_LOCKED)
    {
        int rc = mlock(pv, cb);
# ifndef RT_OS_SOLARIS /* mlock(3C) on Solaris requires the priv_lock_memory privilege */
        AssertMsg(rc == 0, ("mlock %p LB %#zx -> %d errno=%d\n", pv, cb, rc, errno));
# endif
        NOREF(rc);
    }

# ifdef MADV_DONTDUMP
    if (fFlags & RTMEMPAGEALLOC_F_ADVISE_NO_DUMP)
    {
        int rc = madvise(pv, cb, MADV_DONTDUMP);
        AssertMsg(rc == 0, ("madvice %p LB %#zx MADV_DONTDUMP -> %d errno=%d\n", pv, cb, rc, errno));
        NOREF(rc);
    }
# endif
#endif

    if (fFlags & RTMEMPAGEALLOC_F_ZERO)
        RT_BZERO(pv, cb);
}


/**
 * Avoids some gotos in rtHeapPageAllocFromBlock.
 *
 * @returns VINF_SUCCESS.
 * @param   pBlock          The block.
 * @param   iPage           The page to start allocating at.
 * @param   cPages          The number of pages.
 * @param   fFlags          RTMEMPAGEALLOC_F_XXX.
 * @param   ppv             Where to return the allocation address.
 */
DECLINLINE(int) rtHeapPageAllocFromBlockSuccess(PRTHEAPPAGEBLOCK pBlock, uint32_t iPage, size_t cPages, uint32_t fFlags, void **ppv)
{
    PRTHEAPPAGE pHeap = pBlock->pHeap;

    ASMBitSet(&pBlock->bmFirst[0], iPage);
    pBlock->cFreePages -= cPages;
    pHeap->cFreePages  -= cPages;
    if (!pHeap->pHint2 || pHeap->pHint2->cFreePages < pBlock->cFreePages)
        pHeap->pHint2 = pBlock;
    pHeap->cAllocCalls++;

    void *pv = (uint8_t *)pBlock->Core.Key + (iPage << PAGE_SHIFT);
    *ppv = pv;

    if (fFlags)
        rtMemPagePosixApplyFlags(pv, cPages << PAGE_SHIFT, fFlags);

    return VINF_SUCCESS;
}


/**
 * Checks if a page range is free in the specified block.
 *
 * @returns @c true if the range is free, @c false if not.
 * @param   pBlock          The block.
 * @param   iFirst          The first page to check.
 * @param   cPages          The number of pages to check.
 */
DECLINLINE(bool) rtHeapPageIsPageRangeFree(PRTHEAPPAGEBLOCK pBlock, uint32_t iFirst, uint32_t cPages)
{
    uint32_t i = iFirst + cPages;
    while (i-- > iFirst)
    {
        if (ASMBitTest(&pBlock->bmAlloc[0], i))
            return false;
        Assert(!ASMBitTest(&pBlock->bmFirst[0], i));
    }
    return true;
}


/**
 * Tries to allocate a chunk of pages from a heap block.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_NO_MEMORY if the allocation failed.
 * @param   pBlock          The block to allocate from.
 * @param   cPages          The size of the allocation.
 * @param   fFlags          RTMEMPAGEALLOC_F_XXX.
 * @param   ppv             Where to return the allocation address on success.
 */
DECLINLINE(int) rtHeapPageAllocFromBlock(PRTHEAPPAGEBLOCK pBlock, size_t cPages, uint32_t fFlags, void **ppv)
{
    if (pBlock->cFreePages >= cPages)
    {
        int iPage = ASMBitFirstClear(&pBlock->bmAlloc[0], RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT);
        Assert(iPage >= 0);

        /* special case: single page. */
        if (cPages == 1)
        {
            ASMBitSet(&pBlock->bmAlloc[0], iPage);
            return rtHeapPageAllocFromBlockSuccess(pBlock, iPage, cPages, fFlags, ppv);
        }

        while (   iPage >= 0
               && (unsigned)iPage <= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT - cPages)
        {
            if (rtHeapPageIsPageRangeFree(pBlock, iPage + 1, cPages - 1))
            {
                ASMBitSetRange(&pBlock->bmAlloc[0], iPage, iPage + cPages);
                return rtHeapPageAllocFromBlockSuccess(pBlock, iPage, cPages, fFlags, ppv);
            }

            /* next */
            iPage = ASMBitNextSet(&pBlock->bmAlloc[0], RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT, iPage);
            if (iPage < 0 || iPage >= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT - 1)
                break;
            iPage = ASMBitNextClear(&pBlock->bmAlloc[0], RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT, iPage);
        }
    }

    return VERR_NO_MEMORY;
}


/**
 * RTAvlrPVDoWithAll callback.
 *
 * @returns 0 to continue the enum, non-zero to quit it.
 * @param   pNode           The node.
 * @param   pvUser          The user argument.
 */
static DECLCALLBACK(int) rtHeapPageAllocCallback(PAVLRPVNODECORE pNode, void *pvUser)
{
    PRTHEAPPAGEBLOCK      pBlock = RT_FROM_MEMBER(pNode, RTHEAPPAGEBLOCK, Core);
    RTHEAPPAGEALLOCARGS  *pArgs  = (RTHEAPPAGEALLOCARGS *)pvUser;
    int rc = rtHeapPageAllocFromBlock(pBlock, pArgs->cPages, pArgs->fFlags, &pArgs->pvAlloc);
    return RT_SUCCESS(rc) ? 1 : 0;
}


/**
 * Worker for RTHeapPageAlloc.
 *
 * @returns IPRT status code
 * @param   pHeap           The heap - locked.
 * @param   cPages          The page count.
 * @param   pszTag          The tag.
 * @param   fFlags          RTMEMPAGEALLOC_F_XXX.
 * @param   ppv             Where to return the address of the allocation
 *                          on success.
 */
static int rtHeapPageAllocLocked(PRTHEAPPAGE pHeap, size_t cPages, const char *pszTag, uint32_t fFlags, void **ppv)
{
    int rc;
    NOREF(pszTag);

    /*
     * Use the hints first.
     */
    if (pHeap->pHint1)
    {
        rc = rtHeapPageAllocFromBlock(pHeap->pHint1, cPages, fFlags, ppv);
        if (rc != VERR_NO_MEMORY)
            return rc;
    }
    if (pHeap->pHint2)
    {
        rc = rtHeapPageAllocFromBlock(pHeap->pHint2, cPages, fFlags, ppv);
        if (rc != VERR_NO_MEMORY)
            return rc;
    }

    /*
     * Search the heap for a block with enough free space.
     *
     * N.B. This search algorithm is not optimal at all. What (hopefully) saves
     *      it are the two hints above.
     */
    if (pHeap->cFreePages >= cPages)
    {
        RTHEAPPAGEALLOCARGS Args;
        Args.cPages  = cPages;
        Args.pvAlloc = NULL;
        Args.fFlags  = fFlags;
        RTAvlrPVDoWithAll(&pHeap->BlockTree, true /*fFromLeft*/, rtHeapPageAllocCallback, &Args);
        if (Args.pvAlloc)
        {
            *ppv = Args.pvAlloc;
            return VINF_SUCCESS;
        }
    }

    /*
     * Didn't find anything, so expand the heap with a new block.
     */
    RTCritSectLeave(&pHeap->CritSect);
    void *pvPages;
    pvPages = mmap(NULL, RTMEMPAGEPOSIX_BLOCK_SIZE,
                   PROT_READ | PROT_WRITE | (pHeap->fExec ? PROT_EXEC : 0),
                   MAP_PRIVATE | MAP_ANONYMOUS,
                   -1, 0);
    if (pvPages == MAP_FAILED)
    {
        RTCritSectEnter(&pHeap->CritSect);
        return RTErrConvertFromErrno(errno);
    }

    /** @todo Eliminate this rtMemBaseAlloc dependency! */
    PRTHEAPPAGEBLOCK pBlock;
#ifdef RTALLOC_REPLACE_MALLOC
    if (g_pfnOrgMalloc)
        pBlock = (PRTHEAPPAGEBLOCK)g_pfnOrgMalloc(sizeof(*pBlock));
    else
#endif
        pBlock = (PRTHEAPPAGEBLOCK)rtMemBaseAlloc(sizeof(*pBlock));
    if (!pBlock)
    {
        munmap(pvPages, RTMEMPAGEPOSIX_BLOCK_SIZE);
        RTCritSectEnter(&pHeap->CritSect);
        return VERR_NO_MEMORY;
    }

    RT_ZERO(*pBlock);
    pBlock->Core.Key     = pvPages;
    pBlock->Core.KeyLast = (uint8_t *)pvPages + RTMEMPAGEPOSIX_BLOCK_SIZE - 1;
    pBlock->cFreePages   = RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;
    pBlock->pHeap        = pHeap;

    RTCritSectEnter(&pHeap->CritSect);

    bool fRc = RTAvlrPVInsert(&pHeap->BlockTree, &pBlock->Core); Assert(fRc); NOREF(fRc);
    pHeap->cFreePages += RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;
    pHeap->cHeapPages += RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;

    /*
     * Grab memory from the new block (cannot fail).
     */
    rc = rtHeapPageAllocFromBlock(pBlock, cPages, fFlags, ppv);
    Assert(rc == VINF_SUCCESS);

    return rc;
}


/**
 * Allocates one or more pages off the heap.
 *
 * @returns IPRT status code.
 * @param   pHeap           The page heap.
 * @param   cPages          The number of pages to allocate.
 * @param   pszTag          The allocation tag.
 * @param   fFlags          RTMEMPAGEALLOC_F_XXX.
 * @param   ppv             Where to return the pointer to the pages.
 */
static int RTHeapPageAlloc(PRTHEAPPAGE pHeap, size_t cPages, const char *pszTag, uint32_t fFlags, void **ppv)
{
    /*
     * Validate input.
     */
    AssertPtr(ppv);
    *ppv = NULL;
    AssertPtrReturn(pHeap, VERR_INVALID_HANDLE);
    AssertReturn(pHeap->u32Magic == RTHEAPPAGE_MAGIC, VERR_INVALID_HANDLE);
    AssertMsgReturn(cPages < RTMEMPAGEPOSIX_BLOCK_SIZE, ("%#zx\n", cPages), VERR_OUT_OF_RANGE);

    /*
     * Grab the lock and call a worker with many returns.
     */
    int rc = RTCritSectEnter(&pHeap->CritSect);
    if (RT_SUCCESS(rc))
    {
        rc = rtHeapPageAllocLocked(pHeap, cPages, pszTag, fFlags, ppv);
        RTCritSectLeave(&pHeap->CritSect);
    }

    return rc;
}


/**
 * RTAvlrPVDoWithAll callback.
 *
 * @returns 0 to continue the enum, non-zero to quit it.
 * @param   pNode           The node.
 * @param   pvUser          Pointer to a block pointer variable. For returning
 *                          the address of the block to be freed.
 */
static DECLCALLBACK(int) rtHeapPageFindUnusedBlockCallback(PAVLRPVNODECORE pNode, void *pvUser)
{
    PRTHEAPPAGEBLOCK pBlock = RT_FROM_MEMBER(pNode, RTHEAPPAGEBLOCK, Core);
    if (pBlock->cFreePages == RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT)
    {
        *(PRTHEAPPAGEBLOCK *)pvUser = pBlock;
        return 1;
    }
    return 0;
}


/**
 * Frees one or more pages previously allocated from the heap.
 *
 * @returns IPRT status code.
 * @param   pHeap           The page heap.
 * @param   pv              Pointer to what RTHeapPageAlloc returned.
 * @param   cPages          The number of pages that was allocated.
 */
static int RTHeapPageFree(PRTHEAPPAGE pHeap, void *pv, size_t cPages)
{
    /*
     * Validate input.
     */
    if (!pv)
        return VINF_SUCCESS;
    AssertPtrReturn(pHeap, VERR_INVALID_HANDLE);
    AssertReturn(pHeap->u32Magic == RTHEAPPAGE_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Grab the lock and look up the page.
     */
    int rc = RTCritSectEnter(&pHeap->CritSect);
    if (RT_SUCCESS(rc))
    {
        PRTHEAPPAGEBLOCK pBlock = (PRTHEAPPAGEBLOCK)RTAvlrPVRangeGet(&pHeap->BlockTree, pv);
        if (pBlock)
        {
            /*
             * Validate the specified address range.
             */
            uint32_t const iPage = (uint32_t)(((uintptr_t)pv - (uintptr_t)pBlock->Core.Key) >> PAGE_SHIFT);
            /* Check the range is within the block. */
            bool fOk = iPage + cPages <= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;
            /* Check that it's the start of an allocation. */
            fOk = fOk && ASMBitTest(&pBlock->bmFirst[0], iPage);
            /* Check that the range ends at an allocation boundary. */
            fOk = fOk && (   iPage + cPages == RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT
                          || ASMBitTest(&pBlock->bmFirst[0], iPage + cPages)
                          || !ASMBitTest(&pBlock->bmAlloc[0], iPage + cPages));
            /* Check the other pages. */
            uint32_t const iLastPage = iPage + cPages - 1;
            for (uint32_t i = iPage + 1; i < iLastPage && fOk; i++)
                fOk = ASMBitTest(&pBlock->bmAlloc[0], i)
                   && !ASMBitTest(&pBlock->bmFirst[0], i);
            if (fOk)
            {
                /*
                 * Free the memory.
                 */
                ASMBitClearRange(&pBlock->bmAlloc[0], iPage, iPage + cPages);
                ASMBitClear(&pBlock->bmFirst[0], iPage);
                pBlock->cFreePages += cPages;
                pHeap->cFreePages  += cPages;
                pHeap->cFreeCalls++;
                if (!pHeap->pHint1 || pHeap->pHint1->cFreePages < pBlock->cFreePages)
                    pHeap->pHint1 = pBlock;

                /** @todo Add bitmaps for tracking madvise and mlock so we can undo those. */

                /*
                 * Shrink the heap. Not very efficient because of the AVL tree.
                 */
                if (   pHeap->cFreePages >= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT * 3
                    && pHeap->cFreePages >= pHeap->cHeapPages / 2 /* 50% free */
                    && pHeap->cFreeCalls - pHeap->uLastMinimizeCall > RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT
                   )
                {
                    uint32_t cFreePageTarget = pHeap->cHeapPages / 4; /* 25% free */
                    while (pHeap->cFreePages > cFreePageTarget)
                    {
                        pHeap->uLastMinimizeCall = pHeap->cFreeCalls;

                        pBlock = NULL;
                        RTAvlrPVDoWithAll(&pHeap->BlockTree, false /*fFromLeft*/,
                                          rtHeapPageFindUnusedBlockCallback, &pBlock);
                        if (!pBlock)
                            break;

                        void *pv2 = RTAvlrPVRemove(&pHeap->BlockTree, pBlock->Core.Key); Assert(pv2); NOREF(pv2);
                        pHeap->cHeapPages -= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;
                        pHeap->cFreePages -= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;
                        pHeap->pHint1      = NULL;
                        pHeap->pHint2      = NULL;
                        RTCritSectLeave(&pHeap->CritSect);

                        munmap(pBlock->Core.Key, RTMEMPAGEPOSIX_BLOCK_SIZE);
                        pBlock->Core.Key = pBlock->Core.KeyLast = NULL;
                        pBlock->cFreePages = 0;
#ifdef RTALLOC_REPLACE_MALLOC
                        if (g_pfnOrgFree)
                            g_pfnOrgFree(pBlock);
                        else
#endif
                            rtMemBaseFree(pBlock);

                        RTCritSectEnter(&pHeap->CritSect);
                    }
                }
            }
            else
                rc = VERR_INVALID_POINTER;
        }
        else
            rc = VERR_INVALID_POINTER;

        RTCritSectLeave(&pHeap->CritSect);
    }

    return rc;
}


/**
 * Initializes the heap.
 *
 * @returns IPRT status code
 * @param   pvUser          Unused.
 */
static DECLCALLBACK(int) rtMemPagePosixInitOnce(void *pvUser)
{
    NOREF(pvUser);
    int rc = RTHeapPageInit(&g_MemPagePosixHeap, false /*fExec*/);
    if (RT_SUCCESS(rc))
    {
        rc = RTHeapPageInit(&g_MemExecPosixHeap, true /*fExec*/);
        if (RT_SUCCESS(rc))
            return rc;
        RTHeapPageDelete(&g_MemPagePosixHeap);
    }
    return rc;
}


/**
 * Allocates memory from the specified heap.
 *
 * @returns Address of the allocated memory.
 * @param   cb              The number of bytes to allocate.
 * @param   pszTag          The tag.
 * @param   fFlags          RTMEMPAGEALLOC_F_XXX.
 * @param   pHeap           The heap to use.
 */
static void *rtMemPagePosixAlloc(size_t cb, const char *pszTag, uint32_t fFlags, PRTHEAPPAGE pHeap)
{
    /*
     * Validate & adjust the input.
     */
    Assert(cb > 0);
    NOREF(pszTag);
    cb = RT_ALIGN_Z(cb, PAGE_SIZE);

    /*
     * If the allocation is relatively large, we use mmap/munmap directly.
     */
    void *pv;
    if (cb >= RTMEMPAGEPOSIX_MMAP_THRESHOLD)
    {
        pv = mmap(NULL, cb,
                  PROT_READ | PROT_WRITE | (pHeap == &g_MemExecPosixHeap ? PROT_EXEC : 0),
                  MAP_PRIVATE | MAP_ANONYMOUS,
                  -1, 0);
        if (pv != MAP_FAILED)
        {
            AssertPtr(pv);

            if (fFlags)
                rtMemPagePosixApplyFlags(pv, cb, fFlags);
        }
        else
            pv = NULL;
    }
    else
    {
        int rc = RTOnce(&g_MemPagePosixInitOnce, rtMemPagePosixInitOnce, NULL);
        if (RT_SUCCESS(rc))
            rc = RTHeapPageAlloc(pHeap, cb >> PAGE_SHIFT, pszTag, fFlags, &pv);
        if (RT_FAILURE(rc))
            pv = NULL;
    }

    return pv;
}



/**
 * Free memory allocated by rtMemPagePosixAlloc.
 *
 * @param   pv              The address of the memory to free.
 * @param   cb              The size.
 * @param   pHeap           The heap.
 */
static void rtMemPagePosixFree(void *pv, size_t cb, PRTHEAPPAGE pHeap)
{
    /*
     * Validate & adjust the input.
     */
    if (!pv)
        return;
    AssertPtr(pv);
    Assert(cb > 0);
    Assert(!((uintptr_t)pv & PAGE_OFFSET_MASK));
    cb = RT_ALIGN_Z(cb, PAGE_SIZE);

    /*
     * If the allocation is relatively large, we use mmap/munmap directly.
     */
    if (cb >= RTMEMPAGEPOSIX_MMAP_THRESHOLD)
    {
        int rc = munmap(pv, cb);
        AssertMsg(rc == 0, ("rc=%d pv=%p cb=%#zx\n", rc, pv, cb)); NOREF(rc);
    }
    else
    {
        int rc = RTHeapPageFree(pHeap, pv, cb >> PAGE_SHIFT);
        AssertRC(rc);
    }
}




RTDECL(void *) RTMemPageAllocTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
{
    return rtMemPagePosixAlloc(cb, pszTag, 0, &g_MemPagePosixHeap);
}


RTDECL(void *) RTMemPageAllocZTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
{
    return rtMemPagePosixAlloc(cb, pszTag, RTMEMPAGEALLOC_F_ZERO, &g_MemPagePosixHeap);
}


RTDECL(void *) RTMemPageAllocExTag(size_t cb, uint32_t fFlags, const char *pszTag) RT_NO_THROW_DEF
{
    AssertReturn(!(fFlags & ~RTMEMPAGEALLOC_F_VALID_MASK), NULL);
    return rtMemPagePosixAlloc(cb, pszTag, fFlags, &g_MemPagePosixHeap);
}


RTDECL(void) RTMemPageFree(void *pv, size_t cb) RT_NO_THROW_DEF
{
    return rtMemPagePosixFree(pv, cb, &g_MemPagePosixHeap);
}

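For context, a minimal usage sketch of the RTMemPage* API implemented above is given below. It is not part of the repository file; the demoPageAlloc function and the "demo/pages" tag are made-up names for illustration.

#include <iprt/mem.h>
#include <iprt/param.h>

static void demoPageAlloc(void)
{
    /* Allocate two zeroed, page-aligned pages off the default page heap. */
    void *pv = RTMemPageAllocZTag(2 * PAGE_SIZE, "demo/pages");
    if (pv)
    {
        /* ... use the two pages ... */

        /* The caller passes the size back; the heap validates it against its
           allocation bitmaps rather than storing a length per allocation. */
        RTMemPageFree(pv, 2 * PAGE_SIZE);
    }
}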