VirtualBox

source: vbox/trunk/src/VBox/VMM/include/VMInternal.h@ 108386

Last change on this file since 108386 was 108386, checked in by vboxsync, 3 weeks ago

VMMR3/VMEmt.cpp,VMMR3/TM.cpp,VMMR3/NEMR3Native-win-armv8.cpp: Workaround for Windows/ARM hosts to allow for guests to use more than 1 vCPU, bugref:10392

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 18.5 KB
 
1/* $Id: VMInternal.h 108386 2025-02-26 09:14:34Z vboxsync $ */
2/** @file
3 * VM - Internal header file.
4 */
5
6/*
7 * Copyright (C) 2006-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.alldomusa.eu.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28#ifndef VMM_INCLUDED_SRC_include_VMInternal_h
29#define VMM_INCLUDED_SRC_include_VMInternal_h
30#ifndef RT_WITHOUT_PRAGMA_ONCE
31# pragma once
32#endif
33
34#include <VBox/cdefs.h>
35#include <VBox/vmm/vmapi.h>
36#include <iprt/assert.h>
37#include <iprt/critsect.h>
38#include <iprt/setjmp-without-sigmask.h>
39
40
41
42/** @defgroup grp_vm_int Internals
43 * @ingroup grp_vm
44 * @internal
45 * @{
46 */
47
48
49/**
50 * VM state change callback.
51 */
52typedef struct VMATSTATE
53{
54 /** Pointer to the next one. */
55 struct VMATSTATE *pNext;
56 /** Pointer to the callback. */
57 PFNVMATSTATE pfnAtState;
58 /** The user argument. */
59 void *pvUser;
60} VMATSTATE;
61/** Pointer to a VM state change callback. */
62typedef VMATSTATE *PVMATSTATE;
63
64
65/**
66 * VM error callback.
67 */
68typedef struct VMATERROR
69{
70 /** Pointer to the next one. */
71 struct VMATERROR *pNext;
72 /** Pointer to the callback. */
73 PFNVMATERROR pfnAtError;
74 /** The user argument. */
75 void *pvUser;
76} VMATERROR;
77/** Pointer to a VM error callback. */
78typedef VMATERROR *PVMATERROR;
79
80
81/**
82 * Chunk of memory allocated off the hypervisor heap in which
83 * we copy the error details.
84 */
85typedef struct VMERROR
86{
87 /** The size of the chunk. */
88 uint32_t cbAllocated;
89 /** The current offset into the chunk.
90 * We start by putting the filename and function immediately
91 * after the end of the buffer. */
92 uint32_t off;
93 /** Offset from the start of this structure to the file name. */
94 uint32_t offFile;
95 /** The line number. */
96 uint32_t iLine;
97 /** Offset from the start of this structure to the function name. */
98 uint32_t offFunction;
99 /** Offset from the start of this structure to the formatted message text. */
100 uint32_t offMessage;
101 /** The VBox status code. */
102 int32_t rc;
103} VMERROR, *PVMERROR;
104
105
106/**
107 * VM runtime error callback.
108 */
109typedef struct VMATRUNTIMEERROR
110{
111 /** Pointer to the next one. */
112 struct VMATRUNTIMEERROR *pNext;
113 /** Pointer to the callback. */
114 PFNVMATRUNTIMEERROR pfnAtRuntimeError;
115 /** The user argument. */
116 void *pvUser;
117} VMATRUNTIMEERROR;
118/** Pointer to a VM error callback. */
119typedef VMATRUNTIMEERROR *PVMATRUNTIMEERROR;
120
121
122/**
123 * Chunk of memory allocated off the hypervisor heap in which
124 * we copy the runtime error details.
125 */
126typedef struct VMRUNTIMEERROR
127{
128 /** The size of the chunk. */
129 uint32_t cbAllocated;
130 /** The current offset into the chunk.
131 * We start by putting the error ID immediately
132 * after the end of the buffer. */
133 uint32_t off;
134 /** Offset from the start of this structure to the error ID. */
135 uint32_t offErrorId;
136 /** Offset from the start of this structure to the formatted message text. */
137 uint32_t offMessage;
138 /** Error flags. */
139 uint32_t fFlags;
140} VMRUNTIMEERROR, *PVMRUNTIMEERROR;
141
142/** The halt method. */
143typedef enum
144{
145 /** The usual invalid value. */
146 VMHALTMETHOD_INVALID = 0,
147 /** Use the method used during bootstrapping. */
148 VMHALTMETHOD_BOOTSTRAP,
149 /** Use the default method. */
150 VMHALTMETHOD_DEFAULT,
151 /** The old spin/yield/block method. */
152 VMHALTMETHOD_OLD,
153 /** The first go at a block/spin method. */
154 VMHALTMETHOD_1,
155 /** The first go at a more global approach. */
156 VMHALTMETHOD_GLOBAL_1,
157#if defined(VBOX_VMM_TARGET_ARMV8) && defined(RT_OS_WINDOWS)
158 /** NEM takes over halting. */
159 VMHALTMETHOD_NEM,
160#endif
161 /** The end of valid methods. (not inclusive of course) */
162 VMHALTMETHOD_END,
163 /** The usual 32-bit max value. */
164 VMHALTMETHOD_32BIT_HACK = 0x7fffffff
165} VMHALTMETHOD;
166
167
168/**
169 * VM Internal Data (part of the VM structure).
170 *
171 * @todo Move this and all related things to VMM. The VM component was, to some
172 * extent at least, a bad ad hoc design which should all have been put in
173 * VMM. @see pg_vm.
174 */
175typedef struct VMINT
176{
177 /** VM Error Message. */
178 R3PTRTYPE(PVMERROR) pErrorR3;
179 /** VM Runtime Error Message. */
180 R3PTRTYPE(PVMRUNTIMEERROR) pRuntimeErrorR3;
181 /** The VM was/is-being teleported and has not yet been fully resumed. */
182 bool fTeleportedAndNotFullyResumedYet;
183 /** The VM should power off instead of reset. */
184 bool fPowerOffInsteadOfReset;
185 /** Reset counter (soft + hard). */
186 uint32_t cResets;
187 /** Hard reset counter. */
188 uint32_t cHardResets;
189 /** Soft reset counter. */
190 uint32_t cSoftResets;
191} VMINT;
192/** Pointer to the VM Internal Data (part of the VM structure). */
193typedef VMINT *PVMINT;
194
195
196#ifdef IN_RING3
197
198/**
199 * VM internal data kept in the UVM.
200 */
201typedef struct VMINTUSERPERVM
202{
203 /** Head of the standard request queue. Atomic. */
204 volatile PVMREQ pNormalReqs;
205 /** Head of the priority request queue. Atomic. */
206 volatile PVMREQ pPriorityReqs;
207 /** The last index used during alloc/free. */
208 volatile uint32_t iReqFree;
209 /** Number of free request packets. */
210 volatile uint32_t cReqFree;
211 /** Array of pointers to lists of free request packets. Atomic. */
212 volatile PVMREQ apReqFree[16 - (HC_ARCH_BITS == 32 ? 5 : 4)];
213
214 /** The reference count of the UVM handle. */
215 volatile uint32_t cUvmRefs;
216
217 /** Number of active EMTs. */
218 volatile uint32_t cActiveEmts;
219
220# ifdef VBOX_WITH_STATISTICS
221# if HC_ARCH_BITS == 32
222 uint32_t uPadding;
223# endif
224 /** Number of VMR3ReqAlloc returning a new packet. */
225 STAMCOUNTER StatReqAllocNew;
226 /** Number of VMR3ReqAlloc causing races. */
227 STAMCOUNTER StatReqAllocRaces;
228 /** Number of VMR3ReqAlloc returning a recycled packet. */
229 STAMCOUNTER StatReqAllocRecycled;
230 /** Number of VMR3ReqFree calls. */
231 STAMCOUNTER StatReqFree;
232 /** Number of times the request was actually freed. */
233 STAMCOUNTER StatReqFreeOverflow;
234 /** Number of requests served. */
235 STAMCOUNTER StatReqProcessed;
236 /** Number of times there are more than one request and the others needed to be
237 * pushed back onto the list. */
238 STAMCOUNTER StatReqMoreThan1;
239 /** Number of times we've raced someone when pushing the other requests back
240 * onto the list. */
241 STAMCOUNTER StatReqPushBackRaces;
242# endif
243
244 /** Pointer to the support library session.
245 * Mainly for creation and destruction. */
246 PSUPDRVSESSION pSession;
247
248 /** Force EMT to terminate. */
249 bool volatile fTerminateEMT;
250
251 /** Critical section for pAtState and enmPrevVMState. */
252 RTCRITSECT AtStateCritSect;
253 /** List of registered state change callbacks. */
254 PVMATSTATE pAtState;
255 /** List of registered state change callbacks. */
256 PVMATSTATE *ppAtStateNext;
257 /** The previous VM state.
258 * This is mainly used for the 'Resetting' state, but may come in handy later
259 * and when debugging. */
260 VMSTATE enmPrevVMState;
261
262 /** Reason for the most recent suspend operation. */
263 VMSUSPENDREASON enmSuspendReason;
264 /** Reason for the most recent operation. */
265 VMRESUMEREASON enmResumeReason;
266
267 /** Critical section for pAtError and pAtRuntimeError. */
268 RTCRITSECT AtErrorCritSect;
269
270 /** List of registered error callbacks. */
271 PVMATERROR pAtError;
272 /** List of registered error callbacks. */
273 PVMATERROR *ppAtErrorNext;
274 /** The error message count.
275 * This is incremented every time an error is raised. */
276 uint32_t volatile cErrors;
277
278 /** The runtime error message count.
279 * This is incremented every time a runtime error is raised. */
280 uint32_t volatile cRuntimeErrors;
281 /** List of registered error callbacks. */
282 PVMATRUNTIMEERROR pAtRuntimeError;
283 /** List of registered error callbacks. */
284 PVMATRUNTIMEERROR *ppAtRuntimeErrorNext;
285
286 /** @name Generic Halt data
287 * @{
288 */
289 /** The current halt method.
290 * Can be selected by CFGM option 'VM/HaltMethod'. */
291 VMHALTMETHOD enmHaltMethod;
292 /** The index into g_aHaltMethods of the current halt method. */
293 uint32_t volatile iHaltMethod;
294 /** @} */
295
296 /** @todo Do NOT add new members here or reuse the current, we need to store the config for
297 * each halt method separately because we're racing on SMP guest rigs. */
298 union
299 {
300 /**
301 * Method 1 & 2 - Block whenever possible, and when lagging behind
302 * switch to spinning with regular blocking every 5-200ms (defaults)
303 * depending on the accumulated lag. The blocking interval is adjusted
304 * with the average oversleeping of the last 64 times.
305 *
306 * The difference between 1 and 2 is that we use native absolute
307 * time APIs for the blocking instead of the millisecond based IPRT
308 * interface.
309 */
310 struct
311 {
312 /** The max interval without blocking (when spinning). */
313 uint32_t u32MinBlockIntervalCfg;
314 /** The minimum interval between blocking (when spinning). */
315 uint32_t u32MaxBlockIntervalCfg;
316 /** The value to divide the current lag by to get the raw blocking interval (when spinning). */
317 uint32_t u32LagBlockIntervalDivisorCfg;
318 /** When to start spinning (lag / nano secs). */
319 uint32_t u32StartSpinningCfg;
320 /** When to stop spinning (lag / nano secs). */
321 uint32_t u32StopSpinningCfg;
322 } Method12;
323
324 /**
325 * The GVMM manages halted and waiting EMTs.
326 */
327 struct
328 {
329 /** The threshold between spinning and blocking. */
330 uint32_t cNsSpinBlockThresholdCfg;
331 } Global1;
332 } Halt;
333
334 /** Pointer to the DBGC instance data. */
335 void *pvDBGC;
336
337 /** TLS index for the VMINTUSERPERVMCPU pointer. */
338 RTTLS idxTLS;
339
340 /** The VM name. (Set after the config constructure has been called.) */
341 char *pszName;
342 /** The VM UUID. (Set after the config constructure has been called.) */
343 RTUUID Uuid;
344} VMINTUSERPERVM;
345# ifdef VBOX_WITH_STATISTICS
346AssertCompileMemberAlignment(VMINTUSERPERVM, StatReqAllocNew, 8);
347# endif
348
349/** Pointer to the VM internal data kept in the UVM. */
350typedef VMINTUSERPERVM *PVMINTUSERPERVM;
351
352
353/**
354 * VMCPU internal data kept in the UVM.
355 *
356 * Almost a copy of VMINTUSERPERVM. Separate data properly later on.
357 */
358typedef struct VMINTUSERPERVMCPU
359{
360 /** Head of the normal request queue. Atomic. */
361 volatile PVMREQ pNormalReqs;
362 /** Head of the priority request queue. Atomic. */
363 volatile PVMREQ pPriorityReqs;
364
365 /** The handle to the EMT thread. */
366 RTTHREAD ThreadEMT;
367 /** The native of the EMT thread. */
368 RTNATIVETHREAD NativeThreadEMT;
369 /** Wait event semaphore. */
370 RTSEMEVENT EventSemWait;
371 /** Wait/Idle indicator. */
372 bool volatile fWait;
373 /** Set if we've been thru vmR3Destroy and decremented the active EMT count
374 * already. */
375 bool volatile fBeenThruVmDestroy;
376 /** Align the next bit. */
377 bool afAlignment[HC_ARCH_BITS == 32 ? 2 : 6];
378
379 /** @name Generic Halt data
380 * @{
381 */
382 /** The average time (ns) between two halts in the last second. (updated once per second) */
383 uint32_t HaltInterval;
384 /** The average halt frequency for the last second. (updated once per second) */
385 uint32_t HaltFrequency;
386 /** The number of halts in the current period. */
387 uint32_t cHalts;
388 uint32_t padding; /**< alignment padding. */
389 /** When we started counting halts in cHalts (RTTimeNanoTS). */
390 uint64_t u64HaltsStartTS;
391 /** @} */
392
393 /** Union containing data and config for the different halt algorithms. */
394 union
395 {
396 /**
397 * Method 1 & 2 - Block whenever possible, and when lagging behind
398 * switch to spinning with regular blocking every 5-200ms (defaults)
399 * depending on the accumulated lag. The blocking interval is adjusted
400 * with the average oversleeping of the last 64 times.
401 *
402 * The difference between 1 and 2 is that we use native absolute
403 * time APIs for the blocking instead of the millisecond based IPRT
404 * interface.
405 */
406 struct
407 {
408 /** How many times we've blocked while cBlockedNS and cBlockedTooLongNS has been accumulating. */
409 uint32_t cBlocks;
410 /** Align the next member. */
411 uint32_t u32Alignment;
412 /** Avg. time spend oversleeping when blocking. (Re-calculated every so often.) */
413 uint64_t cNSBlockedTooLongAvg;
414 /** Total time spend oversleeping when blocking. */
415 uint64_t cNSBlockedTooLong;
416 /** Total time spent blocking. */
417 uint64_t cNSBlocked;
418 /** The timestamp (RTTimeNanoTS) of the last block. */
419 uint64_t u64LastBlockTS;
420
421 /** When we started spinning relentlessly in order to catch up some of the oversleeping.
422 * This is 0 when we're not spinning. */
423 uint64_t u64StartSpinTS;
424 } Method12;
425
426# if 0
427 /**
428 * Method 3 & 4 - Same as method 1 & 2 respectivly, except that we
429 * sprinkle it with yields.
430 */
431 struct
432 {
433 /** How many times we've blocked while cBlockedNS and cBlockedTooLongNS has been accumulating. */
434 uint32_t cBlocks;
435 /** Avg. time spend oversleeping when blocking. (Re-calculated every so often.) */
436 uint64_t cBlockedTooLongNSAvg;
437 /** Total time spend oversleeping when blocking. */
438 uint64_t cBlockedTooLongNS;
439 /** Total time spent blocking. */
440 uint64_t cBlockedNS;
441 /** The timestamp (RTTimeNanoTS) of the last block. */
442 uint64_t u64LastBlockTS;
443
444 /** How many times we've yielded while cBlockedNS and cBlockedTooLongNS has been accumulating. */
445 uint32_t cYields;
446 /** Avg. time spend oversleeping when yielding. */
447 uint32_t cYieldTooLongNSAvg;
448 /** Total time spend oversleeping when yielding. */
449 uint64_t cYieldTooLongNS;
450 /** Total time spent yielding. */
451 uint64_t cYieldedNS;
452 /** The timestamp (RTTimeNanoTS) of the last block. */
453 uint64_t u64LastYieldTS;
454
455 /** When we started spinning relentlessly in order to catch up some of the oversleeping. */
456 uint64_t u64StartSpinTS;
457 } Method34;
458# endif
459 } Halt;
460
461 /** Profiling the halted state; yielding vs blocking.
462 * @{ */
463 STAMPROFILE StatHaltYield;
464 STAMPROFILE StatHaltBlock;
465 STAMPROFILE StatHaltBlockOverslept;
466 STAMPROFILE StatHaltBlockInsomnia;
467 STAMPROFILE StatHaltBlockOnTime;
468 STAMPROFILE StatHaltTimers;
469 STAMPROFILE StatHaltPoll;
470 /** @} */
471} VMINTUSERPERVMCPU;
472AssertCompileMemberAlignment(VMINTUSERPERVMCPU, u64HaltsStartTS, 8);
473AssertCompileMemberAlignment(VMINTUSERPERVMCPU, Halt.Method12.cNSBlockedTooLongAvg, 8);
474AssertCompileMemberAlignment(VMINTUSERPERVMCPU, StatHaltYield, 8);
475
476/** Pointer to the VM internal data kept in the UVM. */
477typedef VMINTUSERPERVMCPU *PVMINTUSERPERVMCPU;
478
479#endif /* IN_RING3 */
480
481RT_C_DECLS_BEGIN
482
483DECLCALLBACK(int) vmR3EmulationThread(RTTHREAD ThreadSelf, void *pvArg);
484int vmR3SetHaltMethodU(PUVM pUVM, VMHALTMETHOD enmHaltMethod);
485DECLCALLBACK(int) vmR3Destroy(PVM pVM);
486DECLCALLBACK(void) vmR3SetErrorUV(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list *args);
487void vmSetErrorCopy(PVM pVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list args);
488DECLCALLBACK(int) vmR3SetRuntimeError(PVM pVM, uint32_t fFlags, const char *pszErrorId, char *pszMessage);
489DECLCALLBACK(int) vmR3SetRuntimeErrorV(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list *pVa);
490void vmSetRuntimeErrorCopy(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list va);
491void vmR3SetTerminated(PVM pVM);
492
493RT_C_DECLS_END
494
495
496/** @} */
497
498#endif /* !VMM_INCLUDED_SRC_include_VMInternal_h */
499
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette