VirtualBox

source: vbox/trunk/include/VBox/gmm.h@ 27102

最後變更 在這個檔案從27102是 27102,由 vboxsync 提交於 15 年 前

Reset the memory balloon during VM reset

  • 屬性 svn:eol-style 設為 native
  • 屬性 svn:keywords 設為 Id
檔案大小: 16.6 KB
 
1/** @file
2 * GMM - The Global Memory Manager. (VMM)
3 */
4
5/*
6 * Copyright (C) 2007 Sun Microsystems, Inc.
7 *
8 * This file is part of VirtualBox Open Source Edition (OSE), as
9 * available from http://www.alldomusa.eu.org. This file is free software;
10 * you can redistribute it and/or modify it under the terms of the GNU
11 * General Public License (GPL) as published by the Free Software
12 * Foundation, in version 2 as it comes in the "COPYING" file of the
13 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
14 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
15 *
16 * The contents of this file may alternatively be used under the terms
17 * of the Common Development and Distribution License Version 1.0
18 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
19 * VirtualBox OSE distribution, in which case the provisions of the
20 * CDDL are applicable instead of those of the GPL.
21 *
22 * You may elect to license modified versions of this file under the
23 * terms and conditions of either the GPL or the CDDL or both.
24 *
25 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
26 * Clara, CA 95054 USA or visit http://www.sun.com if you need
27 * additional information or have any questions.
28 */
29
30#ifndef ___VBox_gmm_h
31#define ___VBox_gmm_h
32
33#include <VBox/types.h>
34#include <VBox/gvmm.h>
35#include <VBox/sup.h>
36
37RT_C_DECLS_BEGIN
38
39/** @defgroup grp_gmm GMM - The Global Memory Manager
40 * @{
41 */
42
/** @def IN_GMM_R0
 * Used to indicate whether we're inside the same link module as the ring 0
 * part of the Global Memory Manager or not.
 */
#ifdef DOXYGEN_RUNNING
# define IN_GMM_R0
#endif
/** @def GMMR0DECL
 * Ring 0 GMM export or import declaration.
 * Expands to DECLEXPORT when building the GMM R0 module itself, DECLIMPORT otherwise.
 * @param type The return type of the function declaration.
 */
#ifdef IN_GMM_R0
# define GMMR0DECL(type) DECLEXPORT(type) VBOXCALL
#else
# define GMMR0DECL(type) DECLIMPORT(type) VBOXCALL
#endif

/** @def IN_GMM_R3
 * Used to indicate whether we're inside the same link module as the ring 3
 * part of the Global Memory Manager or not.
 */
#ifdef DOXYGEN_RUNNING
# define IN_GMM_R3
#endif
/** @def GMMR3DECL
 * Ring 3 GMM export or import declaration.
 * Expands to DECLEXPORT when building the GMM R3 module itself, DECLIMPORT otherwise.
 * @param type The return type of the function declaration.
 */
#ifdef IN_GMM_R3
# define GMMR3DECL(type) DECLEXPORT(type) VBOXCALL
#else
# define GMMR3DECL(type) DECLIMPORT(type) VBOXCALL
#endif
76
77
/** The chunk shift. (2^21 = 2 MB) */
#define GMM_CHUNK_SHIFT 21
/** The allocation chunk size. */
#define GMM_CHUNK_SIZE (1U << GMM_CHUNK_SHIFT)
/** The allocation chunk size in pages. */
#define GMM_CHUNK_NUM_PAGES (1U << (GMM_CHUNK_SHIFT - PAGE_SHIFT))
/** The shift factor for converting a page id into a chunk id. */
#define GMM_CHUNKID_SHIFT (GMM_CHUNK_SHIFT - PAGE_SHIFT)
/** The last valid Chunk ID value.
 * @note References GMM_PAGEID_LAST which is defined below; this is fine for
 *       object-like macros since expansion happens at the point of use. */
#define GMM_CHUNKID_LAST (GMM_PAGEID_LAST >> GMM_CHUNKID_SHIFT)
/** The last valid Page ID value.
 * The current limit is 2^28 - 1, or almost 1TB if you like.
 * The constraints are currently dictated by PGMPAGE. */
#define GMM_PAGEID_LAST (RT_BIT_32(28) - 1)
/** Mask out the page index from the Page ID. */
#define GMM_PAGEID_IDX_MASK ((1U << GMM_CHUNKID_SHIFT) - 1)
/** The NIL Chunk ID value. */
#define NIL_GMM_CHUNKID 0
/** The NIL Page ID value. */
#define NIL_GMM_PAGEID 0
98
#if 0 /* wrong - these are guest page pfns and not page ids! Kept disabled for reference. */
/** Special Page ID used by unassigned pages. */
#define GMM_PAGEID_UNASSIGNED 0x0fffffffU
/** Special Page ID used by unsharable pages.
 * Like MMIO2, shadow and heap. This is for later, obviously. */
#define GMM_PAGEID_UNSHARABLE 0x0ffffffeU
/** The end of the valid Page IDs. This is the first special one. */
#define GMM_PAGEID_END 0x0ffffff0U
#endif
108
109
/** @def GMM_GCPHYS_LAST
 * The last of the valid guest physical address as it applies to GMM pages.
 *
 * This must reflect the constraints imposed by the RTGCPHYS type and
 * the guest page frame number used internally in GMMPAGE.
 *
 * @note Note this corresponds to GMM_PAGE_PFN_LAST. */
#if HC_ARCH_BITS == 64
# define GMM_GCPHYS_LAST UINT64_C(0x00000fffffff0000) /* 2^44 (16TB) - 0x10000 */
#else
# define GMM_GCPHYS_LAST UINT64_C(0x0000000fffff0000) /* 2^36 (64GB) - 0x10000 */
#endif
122
/**
 * Over-commitment policy.
 */
typedef enum GMMOCPOLICY
{
    /** The usual invalid 0 value. */
    GMMOCPOLICY_INVALID = 0,
    /** No over-commitment, fully backed.
     * The GMM guarantees that it will be able to allocate all of the
     * guest RAM for a VM with OC policy. */
    GMMOCPOLICY_NO_OC,
    /** to-be-determined. */
    GMMOCPOLICY_TBD,
    /** The end of the valid policy range. */
    GMMOCPOLICY_END,
    /** The usual 32-bit hack forcing the enum to a fixed size. */
    GMMOCPOLICY_32BIT_HACK = 0x7fffffff
} GMMOCPOLICY;
141
/**
 * VM / Memory priority.
 */
typedef enum GMMPRIORITY
{
    /** The usual invalid 0 value. */
    GMMPRIORITY_INVALID = 0,
    /** High.
     * When ballooning, ask these VMs last.
     * When running out of memory, try not to interrupt these VMs. */
    GMMPRIORITY_HIGH,
    /** Normal.
     * When ballooning, don't wait to ask these.
     * When running out of memory, pause, save and/or kill these VMs. */
    GMMPRIORITY_NORMAL,
    /** Low.
     * When ballooning, maximize these first.
     * When running out of memory, save or kill these VMs. */
    GMMPRIORITY_LOW,
    /** The end of the valid priority range. */
    GMMPRIORITY_END,
    /** The custom 32-bit type blowup (forces the enum to 32 bits). */
    GMMPRIORITY_32BIT_HACK = 0x7fffffff
} GMMPRIORITY;
166
167
/**
 * GMM Memory Accounts.
 */
typedef enum GMMACCOUNT
{
    /** The customary invalid zero entry. */
    GMMACCOUNT_INVALID = 0,
    /** Account with the base allocations. */
    GMMACCOUNT_BASE,
    /** Account with the shadow allocations. */
    GMMACCOUNT_SHADOW,
    /** Account with the fixed allocations. */
    GMMACCOUNT_FIXED,
    /** The end of the valid values. */
    GMMACCOUNT_END,
    /** The usual 32-bit value to finish it off (forces the enum to 32 bits). */
    GMMACCOUNT_32BIT_HACK = 0x7fffffff
} GMMACCOUNT;
186
/**
 * Balloon action enum.
 * Used with GMMR0BalloonedPages / GMMBALLOONEDPAGESREQ.
 */
typedef enum
{
    /** The customary invalid zero value. */
    GMMBALLOONACTION_INVALID = 0,
    /** Inflate the balloon (take pages away from the guest). */
    GMMBALLOONACTION_INFLATE = 1,
    /** Deflate the balloon (give pages back to the guest). */
    GMMBALLOONACTION_DEFLATE = 2,
    /** Reset the balloon state, e.g. during VM reset. */
    GMMBALLOONACTION_RESET = 3,
    /** hack forcing the size of the enum to 32-bits. */
    GMMBALLOONACTION_MAKE_32BIT_HACK = 0x7fffffff
} GMMBALLOONACTION;
199
/**
 * A page descriptor for use when freeing pages.
 * See GMMR0FreePages, GMMR0BalloonedPages.
 */
typedef struct GMMFREEPAGEDESC
{
    /** The Page ID of the page to be freed. */
    uint32_t idPage;
} GMMFREEPAGEDESC;
/** Pointer to a page descriptor for freeing pages. */
typedef GMMFREEPAGEDESC *PGMMFREEPAGEDESC;
211
212
/**
 * A page descriptor for use when updating and allocating pages.
 *
 * This is a bit complicated because we want to do as much as possible
 * with the same structure.
 */
typedef struct GMMPAGEDESC
{
    /** The physical address of the page.
     *
     * @input GMMR0AllocateHandyPages expects the guest physical address
     * to update the GMMPAGE structure with. Pass GMM_GCPHYS_UNSHAREABLE
     * when appropriate and NIL_RTHCPHYS when the page wasn't used
     * for any specific guest address.
     *
     * GMMR0AllocatePage expects the guest physical address to put in
     * the GMMPAGE structure for the page it allocates for this entry.
     * Pass NIL_RTHCPHYS and GMM_GCPHYS_UNSHAREABLE as above.
     *
     * @output The host physical address of the allocated page.
     * NIL_RTHCPHYS on allocation failure.
     *
     * ASSUMES: sizeof(RTHCPHYS) >= sizeof(RTGCPHYS).
     */
    RTHCPHYS HCPhysGCPhys;

    /** The Page ID.
     *
     * @input GMMR0AllocateHandyPages expects the Page ID of the page to
     * update here. NIL_GMM_PAGEID means no page should be updated.
     *
     * GMMR0AllocatePages requires this to be initialized to
     * NIL_GMM_PAGEID currently.
     *
     * @output The ID of the page, NIL_GMM_PAGEID if the allocation failed.
     */
    uint32_t idPage;

    /** The Page ID of the shared page that was replaced by this page.
     *
     * @input GMMR0AllocateHandyPages expects this to indicate a shared
     * page that has been replaced by this page and should have its
     * reference counter decremented and perhaps be freed up. Use
     * NIL_GMM_PAGEID if no shared page was involved.
     *
     * All other APIs expect NIL_GMM_PAGEID here.
     *
     * @output All APIs set this to NIL_GMM_PAGEID.
     */
    uint32_t idSharedPage;
} GMMPAGEDESC;
AssertCompileSize(GMMPAGEDESC, 16);
/** Pointer to a page allocation. */
typedef GMMPAGEDESC *PGMMPAGEDESC;
267
/** GMMPAGEDESC::HCPhysGCPhys value that indicates that the page is unsharable.
 * @note This corresponds to GMM_PAGE_PFN_UNSHAREABLE.
 * @note Sits just above GMM_GCPHYS_LAST for the respective architecture. */
#if HC_ARCH_BITS == 64
# define GMM_GCPHYS_UNSHAREABLE UINT64_C(0x00000fffffff1000)
#else
# define GMM_GCPHYS_UNSHAREABLE UINT64_C(0x0000000fffff1000)
#endif
275
/** @name Ring-0 GMM API
 * Direct ring-0 entry points; most also have a request-buffer variant
 * (GMMR0*Req) declared below for use via VMMR0 requests.
 * @{ */
GMMR0DECL(int) GMMR0Init(void);
GMMR0DECL(void) GMMR0Term(void);
GMMR0DECL(void) GMMR0InitPerVMData(PGVM pGVM);
GMMR0DECL(void) GMMR0CleanupVM(PGVM pGVM);
GMMR0DECL(int) GMMR0InitialReservation(PVM pVM, VMCPUID idCpu, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages,
 GMMOCPOLICY enmPolicy, GMMPRIORITY enmPriority);
GMMR0DECL(int) GMMR0UpdateReservation(PVM pVM, VMCPUID idCpu, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages);
GMMR0DECL(int) GMMR0AllocateHandyPages(PVM pVM, VMCPUID idCpu, uint32_t cPagesToUpdate, uint32_t cPagesToAlloc, PGMMPAGEDESC paPages);
GMMR0DECL(int) GMMR0AllocatePages(PVM pVM, VMCPUID idCpu, uint32_t cPages, PGMMPAGEDESC paPages, GMMACCOUNT enmAccount);
GMMR0DECL(int) GMMR0AllocateLargePage(PVM pVM, VMCPUID idCpu, uint32_t cbPage, uint32_t *pIdPage, RTHCPHYS *pHCPhys);
GMMR0DECL(int) GMMR0FreePages(PVM pVM, VMCPUID idCpu, uint32_t cPages, PGMMFREEPAGEDESC paPages, GMMACCOUNT enmAccount);
GMMR0DECL(int) GMMR0FreeLargePage(PVM pVM, VMCPUID idCpu, uint32_t idPage);
GMMR0DECL(int) GMMR0BalloonedPages(PVM pVM, VMCPUID idCpu, GMMBALLOONACTION enmAction, uint32_t cBalloonedPages);
GMMR0DECL(int) GMMR0MapUnmapChunk(PVM pVM, VMCPUID idCpu, uint32_t idChunkMap, uint32_t idChunkUnmap, PRTR3PTR ppvR3);
GMMR0DECL(int) GMMR0SeedChunk(PVM pVM, VMCPUID idCpu, RTR3PTR pvR3);
/** @} */
291
292
293
/**
 * Request buffer for GMMR0InitialReservationReq / VMMR0_DO_GMM_INITIAL_RESERVATION.
 * @see GMMR0InitialReservation
 */
typedef struct GMMINITIALRESERVATIONREQ
{
    /** The header. */
    SUPVMMR0REQHDR Hdr;
    uint64_t cBasePages; /**< @see GMMR0InitialReservation */
    uint32_t cShadowPages; /**< @see GMMR0InitialReservation */
    uint32_t cFixedPages; /**< @see GMMR0InitialReservation */
    GMMOCPOLICY enmPolicy; /**< @see GMMR0InitialReservation */
    GMMPRIORITY enmPriority; /**< @see GMMR0InitialReservation */
} GMMINITIALRESERVATIONREQ;
/** Pointer to a GMMR0InitialReservationReq / VMMR0_DO_GMM_INITIAL_RESERVATION request buffer. */
typedef GMMINITIALRESERVATIONREQ *PGMMINITIALRESERVATIONREQ;

GMMR0DECL(int) GMMR0InitialReservationReq(PVM pVM, VMCPUID idCpu, PGMMINITIALRESERVATIONREQ pReq);
312
313
/**
 * Request buffer for GMMR0UpdateReservationReq / VMMR0_DO_GMM_UPDATE_RESERVATION.
 * @see GMMR0UpdateReservation
 */
typedef struct GMMUPDATERESERVATIONREQ
{
    /** The header. */
    SUPVMMR0REQHDR Hdr;
    uint64_t cBasePages; /**< @see GMMR0UpdateReservation */
    uint32_t cShadowPages; /**< @see GMMR0UpdateReservation */
    uint32_t cFixedPages; /**< @see GMMR0UpdateReservation */
} GMMUPDATERESERVATIONREQ;
/** Pointer to a GMMR0UpdateReservationReq / VMMR0_DO_GMM_UPDATE_RESERVATION request buffer. */
typedef GMMUPDATERESERVATIONREQ *PGMMUPDATERESERVATIONREQ;

GMMR0DECL(int) GMMR0UpdateReservationReq(PVM pVM, VMCPUID idCpu, PGMMUPDATERESERVATIONREQ pReq);
330
331
/**
 * Request buffer for GMMR0AllocatePagesReq / VMMR0_DO_GMM_ALLOCATE_PAGES.
 * @see GMMR0AllocatePages.
 */
typedef struct GMMALLOCATEPAGESREQ
{
    /** The header. */
    SUPVMMR0REQHDR Hdr;
    /** The account to charge the allocation to. */
    GMMACCOUNT enmAccount;
    /** The number of pages to allocate. */
    uint32_t cPages;
    /** Array of page descriptors.
     * Declared with one element; the actual size is cPages (old-style
     * variable-size trailing array idiom). */
    GMMPAGEDESC aPages[1];
} GMMALLOCATEPAGESREQ;
/** Pointer to a GMMR0AllocatePagesReq / VMMR0_DO_GMM_ALLOCATE_PAGES request buffer. */
typedef GMMALLOCATEPAGESREQ *PGMMALLOCATEPAGESREQ;

GMMR0DECL(int) GMMR0AllocatePagesReq(PVM pVM, VMCPUID idCpu, PGMMALLOCATEPAGESREQ pReq);
351
352
/**
 * Request buffer for GMMR0FreePagesReq / VMMR0_DO_GMM_FREE_PAGES.
 * @see GMMR0FreePages.
 */
typedef struct GMMFREEPAGESREQ
{
    /** The header. */
    SUPVMMR0REQHDR Hdr;
    /** The account this relates to. */
    GMMACCOUNT enmAccount;
    /** The number of pages to free. */
    uint32_t cPages;
    /** Array of free page descriptors.
     * Declared with one element; the actual size is cPages (old-style
     * variable-size trailing array idiom). */
    GMMFREEPAGEDESC aPages[1];
} GMMFREEPAGESREQ;
/** Pointer to a GMMR0FreePagesReq / VMMR0_DO_GMM_FREE_PAGES request buffer. */
typedef GMMFREEPAGESREQ *PGMMFREEPAGESREQ;

GMMR0DECL(int) GMMR0FreePagesReq(PVM pVM, VMCPUID idCpu, PGMMFREEPAGESREQ pReq);
372
/**
 * Request buffer for GMMR0BalloonedPagesReq / VMMR0_DO_GMM_BALLOONED_PAGES.
 * @see GMMR0BalloonedPages.
 */
typedef struct GMMBALLOONEDPAGESREQ
{
    /** The header. */
    SUPVMMR0REQHDR Hdr;
    /** The number of ballooned pages. */
    uint32_t cBalloonedPages;
    /** The balloon action: inflate, deflate or reset. See GMMBALLOONACTION. */
    GMMBALLOONACTION enmAction;
} GMMBALLOONEDPAGESREQ;
/** Pointer to a GMMR0BalloonedPagesReq / VMMR0_DO_GMM_BALLOONED_PAGES request buffer. */
typedef GMMBALLOONEDPAGESREQ *PGMMBALLOONEDPAGESREQ;

GMMR0DECL(int) GMMR0BalloonedPagesReq(PVM pVM, VMCPUID idCpu, PGMMBALLOONEDPAGESREQ pReq);
390
391
/**
 * Request buffer for GMMR0MapUnmapChunkReq / VMMR0_DO_GMM_MAP_UNMAP_CHUNK.
 * @see GMMR0MapUnmapChunk
 */
typedef struct GMMMAPUNMAPCHUNKREQ
{
    /** The header. */
    SUPVMMR0REQHDR Hdr;
    /** The chunk to map, NIL_GMM_CHUNKID if unmap only. (IN) */
    uint32_t idChunkMap;
    /** The chunk to unmap, NIL_GMM_CHUNKID if map only. (IN) */
    uint32_t idChunkUnmap;
    /** Where the mapping address is returned. (OUT) */
    RTR3PTR pvR3;
} GMMMAPUNMAPCHUNKREQ;
/** Pointer to a GMMR0MapUnmapChunkReq / VMMR0_DO_GMM_MAP_UNMAP_CHUNK request buffer. */
typedef GMMMAPUNMAPCHUNKREQ *PGMMMAPUNMAPCHUNKREQ;

GMMR0DECL(int) GMMR0MapUnmapChunkReq(PVM pVM, VMCPUID idCpu, PGMMMAPUNMAPCHUNKREQ pReq);
411
412
/**
 * Request buffer for GMMR0FreeLargePageReq / VMMR0_DO_GMM_FREE_LARGE_PAGE.
 * @see GMMR0FreeLargePage.
 */
typedef struct GMMFREELARGEPAGEREQ
{
    /** The header. */
    SUPVMMR0REQHDR Hdr;
    /** The Page ID. */
    uint32_t idPage;
} GMMFREELARGEPAGEREQ;
/** Pointer to a GMMR0FreeLargePageReq / VMMR0_DO_GMM_FREE_LARGE_PAGE request buffer. */
typedef GMMFREELARGEPAGEREQ *PGMMFREELARGEPAGEREQ;

GMMR0DECL(int) GMMR0FreeLargePageReq(PVM pVM, VMCPUID idCpu, PGMMFREELARGEPAGEREQ pReq);
428
429
#ifdef IN_RING3
/** @defgroup grp_gmm_r3 The Global Memory Manager Ring-3 API Wrappers
 * Ring-3 wrappers around the GMMR0* calls; the Prepare/Perform/Cleanup
 * triples manage the variable-size request buffers declared above.
 * @ingroup grp_gmm
 * @{
 */
GMMR3DECL(int) GMMR3InitialReservation(PVM pVM, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages,
 GMMOCPOLICY enmPolicy, GMMPRIORITY enmPriority);
GMMR3DECL(int) GMMR3UpdateReservation(PVM pVM, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages);
GMMR3DECL(int) GMMR3AllocatePagesPrepare(PVM pVM, PGMMALLOCATEPAGESREQ *ppReq, uint32_t cPages, GMMACCOUNT enmAccount);
GMMR3DECL(int) GMMR3AllocatePagesPerform(PVM pVM, PGMMALLOCATEPAGESREQ pReq);
GMMR3DECL(void) GMMR3AllocatePagesCleanup(PGMMALLOCATEPAGESREQ pReq);
GMMR3DECL(int) GMMR3FreePagesPrepare(PVM pVM, PGMMFREEPAGESREQ *ppReq, uint32_t cPages, GMMACCOUNT enmAccount);
GMMR3DECL(void) GMMR3FreePagesRePrep(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t cPages, GMMACCOUNT enmAccount);
GMMR3DECL(int) GMMR3FreePagesPerform(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t cActualPages);
GMMR3DECL(void) GMMR3FreePagesCleanup(PGMMFREEPAGESREQ pReq);
GMMR3DECL(void) GMMR3FreeAllocatedPages(PVM pVM, GMMALLOCATEPAGESREQ const *pAllocReq);
GMMR3DECL(int) GMMR3AllocateLargePage(PVM pVM, uint32_t cbPage);
GMMR3DECL(int) GMMR3FreeLargePage(PVM pVM, uint32_t idPage);
GMMR3DECL(int) GMMR3MapUnmapChunk(PVM pVM, uint32_t idChunkMap, uint32_t idChunkUnmap, PRTR3PTR ppvR3);
GMMR3DECL(int) GMMR3SeedChunk(PVM pVM, RTR3PTR pvR3);
GMMR3DECL(int) GMMR3BalloonedPages(PVM pVM, GMMBALLOONACTION enmAction, uint32_t cBalloonedPages);
/** @} */
#endif /* IN_RING3 */
453
454/** @} */
455
456RT_C_DECLS_END
457
458#endif
459
注意: 瀏覽 TracBrowser 來幫助您使用儲存庫瀏覽器

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette