VirtualBox

source: vbox/trunk/src/VBox/VMM/VMEmt.cpp@ 6796

Last change on this file since 6796 was 6796, checked in by vboxsync, 17 years ago

Fixed init problems wrt. VM ownership by implementing the UVM structure (U = user mode) and moving problematic ring-3 stuff over there (emt+reqs, r3heap, stam, loader[VMMR0.r0]). Big change, but it works fine here... :-)

  • 屬性 svn:eol-style 設為 native
  • 屬性 svn:keywords 設為 Id
檔案大小: 37.0 KB
 
1/* $Id: VMEmt.cpp 6796 2008-02-04 18:19:58Z vboxsync $ */
2/** @file
3 * VM - Virtual Machine, The Emulation Thread.
4 */
5
6/*
7 * Copyright (C) 2006-2007 innotek GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_VM
23#include <VBox/tm.h>
24#include <VBox/dbgf.h>
25#include <VBox/em.h>
26#include <VBox/pdmapi.h>
27#include <VBox/rem.h>
28#include "VMInternal.h"
29#include <VBox/vm.h>
30#include <VBox/uvm.h>
31
32#include <VBox/err.h>
33#include <VBox/log.h>
34#include <iprt/assert.h>
35#include <iprt/asm.h>
36#include <iprt/semaphore.h>
37#include <iprt/string.h>
38#include <iprt/thread.h>
39#include <iprt/time.h>
40
41
42
43
/**
 * The emulation thread (EMT).
 *
 * Main loop of the emulation thread: services execute-in-EMT requests,
 * debugger (DBGF) and delayed-reset forced actions, and enters
 * EMR3ExecuteVM() whenever servicing a request moves the VM into the
 * RUNNING state.  Terminates on VINF_EM_TERMINATE / VM_FF_TERMINATE /
 * fTerminateEMT, or when VMR3WaitForResume() longjmp's back here.
 *
 * @returns Thread exit code.
 * @param   ThreadSelf  The handle to the executing thread.
 * @param   pvArgs      Pointer to the user mode VM structure (UVM).
 */
DECLCALLBACK(int) vmR3EmulationThread(RTTHREAD ThreadSelf, void *pvArgs)
{
    PUVM pUVM = (PUVM)pvArgs;
    AssertReleaseMsg(VALID_PTR(pUVM) && pUVM->u32Magic == UVM_MAGIC,
                     ("Invalid arguments to the emulation thread!\n"));

    /*
     * Init the native thread member.
     */
    pUVM->vm.s.NativeThreadEMT = RTThreadGetNative(ThreadSelf);

    /*
     * The request loop.
     */
    int     rc = VINF_SUCCESS;
    VMSTATE enmBefore = VMSTATE_CREATING;   /* last state seen before servicing; logged on exit */
    Log(("vmR3EmulationThread: Emulation thread starting the days work... Thread=%#x pUVM=%p\n", ThreadSelf, pUVM));
    for (;;)
    {
        /* Requested to exit the EMT thread out of sync? (currently only VMR3WaitForResume) */
        if (setjmp(pUVM->vm.s.emtJumpEnv) != 0)
        {
            rc = VINF_SUCCESS;
            break;
        }

        /*
         * During early init there is no pVM, so make a special path
         * for that to keep things clearly separate.
         */
        if (!pUVM->pVM)
        {
            /*
             * Check for termination first.
             */
            if (pUVM->vm.s.fTerminateEMT)
            {
                rc = VINF_EM_TERMINATE;
                break;
            }
            if (pUVM->vm.s.pReqs)
            {
                /*
                 * Service execute in EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM);
                Log(("vmR3EmulationThread: Req rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_CREATING));
            }
            else
            {
                /*
                 * Nothing important is pending, so wait for something.
                 */
                rc = VMR3WaitU(pUVM);
                if (VBOX_FAILURE(rc))
                    break;
            }
        }
        else
        {

            /*
             * Pending requests which needs servicing?
             *
             * We check for state changes in addition to status codes when
             * servicing requests. (Look after the ifs.)
             */
            PVM pVM = pUVM->pVM;
            enmBefore = pVM->enmVMState;
            if (    VM_FF_ISSET(pVM, VM_FF_TERMINATE)
                ||  pUVM->vm.s.fTerminateEMT)
            {
                rc = VINF_EM_TERMINATE;
                break;
            }
            if (pUVM->vm.s.pReqs)
            {
                /*
                 * Service execute in EMT request.
                 */
                rc = VMR3ReqProcessU(pUVM);
                Log(("vmR3EmulationThread: Req rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
            }
            else if (VM_FF_ISSET(pVM, VM_FF_DBGF))
            {
                /*
                 * Service the debugger request.
                 */
                rc = DBGFR3VMMForcedAction(pVM);
                Log(("vmR3EmulationThread: Dbg rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
            }
            else if (VM_FF_ISSET(pVM, VM_FF_RESET))
            {
                /*
                 * Service a delayed reset request.
                 */
                rc = VMR3Reset(pVM);
                VM_FF_CLEAR(pVM, VM_FF_RESET);
                Log(("vmR3EmulationThread: Reset rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
            }
            else
            {
                /*
                 * Nothing important is pending, so wait for something.
                 */
                rc = VMR3WaitU(pUVM);
                if (VBOX_FAILURE(rc))
                    break;
            }

            /*
             * Check for termination requests, these have extremely high priority.
             */
            if (    rc == VINF_EM_TERMINATE
                ||  VM_FF_ISSET(pVM, VM_FF_TERMINATE)
                ||  pUVM->vm.s.fTerminateEMT)
                break;

            /*
             * Some requests (both VMR3Req* and the DBGF) can potentially
             * resume or start the VM, in that case we'll get a change in
             * VM status indicating that we're now running.
             */
            if (    VBOX_SUCCESS(rc)
                &&  enmBefore != pVM->enmVMState
                &&  (pVM->enmVMState == VMSTATE_RUNNING))
            {
                rc = EMR3ExecuteVM(pVM);
                Log(("vmR3EmulationThread: EMR3ExecuteVM() -> rc=%Vrc, enmVMState=%d\n", rc, pVM->enmVMState));
                /* A guru meditation inside EM is reflected into the VM state. */
                if (EMGetState(pVM) == EMSTATE_GURU_MEDITATION)
                    vmR3SetState(pVM, VMSTATE_GURU_MEDITATION);
            }
        }
    } /* forever */


    /*
     * Exiting.
     */
    Log(("vmR3EmulationThread: Terminating emulation thread! Thread=%#x pUVM=%p rc=%Vrc enmBefore=%d enmVMState=%d\n",
         ThreadSelf, pUVM, rc, enmBefore, pUVM->pVM ? pUVM->pVM->enmVMState : VMSTATE_TERMINATED));
    if (pUVM->vm.s.fEMTDoesTheCleanup)
    {
        /* Destruction was deferred to EMT; run it before the final teardown. */
        Log(("vmR3EmulationThread: executing delayed Destroy\n"));
        Assert(pUVM->pVM);
        vmR3Destroy(pUVM->pVM);
        vmR3DestroyFinalBitFromEMT(pUVM);
    }
    else
    {
        vmR3DestroyFinalBitFromEMT(pUVM);

        /* we don't reset ThreadEMT here because it's used in waiting. */
        pUVM->vm.s.NativeThreadEMT = NIL_RTNATIVETHREAD;
    }
    Log(("vmR3EmulationThread: EMT is terminated.\n"));
    return rc;
}
209
210
/**
 * Wait for VM to be resumed. Handle events like vmR3EmulationThread does.
 * In case the VM is stopped, clean up and long jump to the main EMT loop.
 *
 * Mirrors the servicing logic of vmR3EmulationThread's main loop; the only
 * normal return is VINF_SUCCESS once the VM state becomes RUNNING again.
 * Any termination condition escapes via longjmp back to the setjmp in
 * vmR3EmulationThread, which performs the cleanup.
 *
 * @returns VINF_SUCCESS or doesn't return
 * @param   pVM     VM handle.
 * @thread  The emulation thread.
 */
VMR3DECL(int) VMR3WaitForResume(PVM pVM)
{
    /*
     * The request loop.
     */
    PUVM    pUVM = pVM->pUVM;
    VMSTATE enmBefore;
    int     rc;
    for (;;)
    {

        /*
         * Pending requests which needs servicing?
         *
         * We check for state changes in addition to status codes when
         * servicing requests. (Look after the ifs.)
         */
        enmBefore = pVM->enmVMState;
        if (    VM_FF_ISSET(pVM, VM_FF_TERMINATE)
            ||  pUVM->vm.s.fTerminateEMT)
        {
            rc = VINF_EM_TERMINATE;
            break;
        }
        else if (pUVM->vm.s.pReqs)
        {
            /*
             * Service execute in EMT request.
             */
            rc = VMR3ReqProcessU(pUVM);
            Log(("vmR3EmulationThread: Req rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
        }
        else if (VM_FF_ISSET(pVM, VM_FF_DBGF))
        {
            /*
             * Service the debugger request.
             */
            rc = DBGFR3VMMForcedAction(pVM);
            Log(("vmR3EmulationThread: Dbg rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
        }
        else if (VM_FF_ISSET(pVM, VM_FF_RESET))
        {
            /*
             * Service a delay reset request.
             */
            rc = VMR3Reset(pVM);
            VM_FF_CLEAR(pVM, VM_FF_RESET);
            Log(("vmR3EmulationThread: Reset rc=%Vrc, VM state %d -> %d\n", rc, enmBefore, pVM->enmVMState));
        }
        else
        {
            /*
             * Nothing important is pending, so wait for something.
             */
            rc = VMR3WaitU(pUVM);
            if (VBOX_FAILURE(rc))
                break;
        }

        /*
         * Check for termination requests, these are extremely high priority.
         */
        if (    rc == VINF_EM_TERMINATE
            ||  VM_FF_ISSET(pVM, VM_FF_TERMINATE)
            ||  pUVM->vm.s.fTerminateEMT)
            break;

        /*
         * Some requests (both VMR3Req* and the DBGF) can potentially
         * resume or start the VM, in that case we'll get a change in
         * VM status indicating that we're now running.
         */
        if (    VBOX_SUCCESS(rc)
            &&  enmBefore != pVM->enmVMState
            &&  pVM->enmVMState == VMSTATE_RUNNING)
        {
            /* Only valid exit reason. */
            return VINF_SUCCESS;
        }

    } /* forever */

    /* Return to the main loop in vmR3EmulationThread, which will clean up for us. */
    longjmp(pUVM->vm.s.emtJumpEnv, 1);
}
303
304
305/**
306 * Gets the name of a halt method.
307 *
308 * @returns Pointer to a read only string.
309 * @param enmMethod The method.
310 */
311static const char *vmR3GetHaltMethodName(VMHALTMETHOD enmMethod)
312{
313 switch (enmMethod)
314 {
315 case VMHALTMETHOD_BOOTSTRAP: return "bootstrap";
316 case VMHALTMETHOD_DEFAULT: return "default";
317 case VMHALTMETHOD_OLD: return "old";
318 case VMHALTMETHOD_1: return "method1";
319 //case VMHALTMETHOD_2: return "method2";
320 case VMHALTMETHOD_GLOBAL_1: return "global1";
321 default: return "unknown";
322 }
323}
324
325
326/**
327 * The old halt loop.
328 *
329 * @param pUVM Pointer to the user mode VM structure.
330 */
331static DECLCALLBACK(int) vmR3HaltOldDoHalt(PUVM pUVM, const uint32_t fMask, uint64_t /* u64Now*/)
332{
333 /*
334 * Halt loop.
335 */
336 PVM pVM = pUVM->pVM;
337 int rc = VINF_SUCCESS;
338 ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, true);
339 //unsigned cLoops = 0;
340 for (;;)
341 {
342 /*
343 * Work the timers and check if we can exit.
344 * The poll call gives us the ticks left to the next event in
345 * addition to perhaps set an FF.
346 */
347 STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltPoll, a);
348 PDMR3Poll(pVM);
349 STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltPoll, a);
350 STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltTimers, b);
351 TMR3TimerQueuesDo(pVM);
352 STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltTimers, b);
353 if (VM_FF_ISPENDING(pVM, fMask))
354 break;
355 uint64_t u64NanoTS = TMVirtualToNano(pVM, TMTimerPoll(pVM));
356 if (VM_FF_ISPENDING(pVM, fMask))
357 break;
358
359 /*
360 * Wait for a while. Someone will wake us up or interrupt the call if
361 * anything needs our attention.
362 */
363 if (u64NanoTS < 50000)
364 {
365 //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d spin\n", u64NanoTS, cLoops++);
366 /* spin */;
367 }
368 else
369 {
370 VMMR3YieldStop(pVM);
371 //uint64_t u64Start = RTTimeNanoTS();
372 if (u64NanoTS < 870000) /* this is a bit speculative... works fine on linux. */
373 {
374 //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d yield", u64NanoTS, cLoops++);
375 STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltYield, a);
376 RTThreadYield(); /* this is the best we can do here */
377 STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltYield, a);
378 }
379 else if (u64NanoTS < 2000000)
380 {
381 //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep 1ms", u64NanoTS, cLoops++);
382 STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltBlock, a);
383 rc = RTSemEventWait(pUVM->vm.s.EventSemWait, 1);
384 STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltBlock, a);
385 }
386 else
387 {
388 //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep %dms", u64NanoTS, cLoops++, (uint32_t)RT_MIN((u64NanoTS - 500000) / 1000000, 15));
389 STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltBlock, a);
390 rc = RTSemEventWait(pUVM->vm.s.EventSemWait, RT_MIN((u64NanoTS - 1000000) / 1000000, 15));
391 STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltBlock, a);
392 }
393 //uint64_t u64Slept = RTTimeNanoTS() - u64Start;
394 //RTLogPrintf(" -> rc=%Vrc in %RU64 ns / %RI64 ns delta\n", rc, u64Slept, u64NanoTS - u64Slept);
395 }
396 if (rc == VERR_TIMEOUT)
397 rc = VINF_SUCCESS;
398 else if (VBOX_FAILURE(rc))
399 {
400 AssertRC(rc != VERR_INTERRUPTED);
401 AssertMsgFailed(("RTSemEventWait->%Vrc\n", rc));
402 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
403 VM_FF_SET(pVM, VM_FF_TERMINATE);
404 rc = VERR_INTERNAL_ERROR;
405 break;
406 }
407 }
408
409 ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, false);
410 return rc;
411}
412
413
414/**
415 * Initialize the configuration of halt method 1 & 2.
416 *
417 * @return VBox status code. Failure on invalid CFGM data.
418 * @param pVM The VM handle.
419 */
420static int vmR3HaltMethod12ReadConfigU(PUVM pUVM)
421{
422 /*
423 * The defaults.
424 */
425#if 1 /* DEBUGGING STUFF - REMOVE LATER */
426 pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg = 4;
427 pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg = 2*1000000;
428 pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg = 75*1000000;
429 pUVM->vm.s.Halt.Method12.u32StartSpinningCfg = 30*1000000;
430 pUVM->vm.s.Halt.Method12.u32StopSpinningCfg = 20*1000000;
431#else
432 pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg = 4;
433 pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg = 5*1000000;
434 pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg = 200*1000000;
435 pUVM->vm.s.Halt.Method12.u32StartSpinningCfg = 20*1000000;
436 pUVM->vm.s.Halt.Method12.u32StopSpinningCfg = 2*1000000;
437#endif
438
439 /*
440 * Query overrides.
441 *
442 * I don't have time to bother with niceities such as invalid value checks
443 * here right now. sorry.
444 */
445 PCFGMNODE pCfg = CFGMR3GetChild(CFGMR3GetRoot(pUVM->pVM), "/VMM/HaltedMethod1");
446 if (pCfg)
447 {
448 uint32_t u32;
449 if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "LagBlockIntervalDivisor", &u32)))
450 pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg = u32;
451 if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "MinBlockInterval", &u32)))
452 pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg = u32;
453 if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "MaxBlockInterval", &u32)))
454 pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg = u32;
455 if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "StartSpinning", &u32)))
456 pUVM->vm.s.Halt.Method12.u32StartSpinningCfg = u32;
457 if (RT_SUCCESS(CFGMR3QueryU32(pCfg, "StopSpinning", &u32)))
458 pUVM->vm.s.Halt.Method12.u32StopSpinningCfg = u32;
459 LogRel(("HaltedMethod1 config: %d/%d/%d/%d/%d\n",
460 pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg,
461 pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg,
462 pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg,
463 pUVM->vm.s.Halt.Method12.u32StartSpinningCfg,
464 pUVM->vm.s.Halt.Method12.u32StopSpinningCfg));
465 }
466
467 return VINF_SUCCESS;
468}
469
470
471/**
472 * Initialize halt method 1.
473 *
474 * @return VBox status code.
475 * @param pUVM Pointer to the user mode VM structure.
476 */
477static DECLCALLBACK(int) vmR3HaltMethod1Init(PUVM pUVM)
478{
479 return vmR3HaltMethod12ReadConfigU(pUVM);
480}
481
482
483/**
484 * Method 1 - Block whenever possible, and when lagging behind
485 * switch to spinning for 10-30ms with occational blocking until
486 * the lag has been eliminated.
487 */
488static DECLCALLBACK(int) vmR3HaltMethod1Halt(PUVM pUVM, const uint32_t fMask, uint64_t u64Now)
489{
490 PVM pVM = pUVM->pVM;
491
492 /*
493 * To simplify things, we decide up-front whether we should switch to spinning or
494 * not. This makes some ASSUMPTIONS about the cause of the spinning (PIT/RTC/PCNet)
495 * and that it will generate interrupts or other events that will cause us to exit
496 * the halt loop.
497 */
498 bool fBlockOnce = false;
499 bool fSpinning = false;
500 uint32_t u32CatchUpPct = TMVirtualSyncGetCatchUpPct(pVM);
501 if (u32CatchUpPct /* non-zero if catching up */)
502 {
503 if (pUVM->vm.s.Halt.Method12.u64StartSpinTS)
504 {
505 fSpinning = TMVirtualSyncGetLag(pVM) >= pUVM->vm.s.Halt.Method12.u32StopSpinningCfg;
506 if (fSpinning)
507 {
508 uint64_t u64Lag = TMVirtualSyncGetLag(pVM);
509 fBlockOnce = u64Now - pUVM->vm.s.Halt.Method12.u64LastBlockTS
510 > RT_MAX(pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg,
511 RT_MIN(u64Lag / pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg,
512 pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg));
513 }
514 else
515 {
516 //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pUVM->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);
517 pUVM->vm.s.Halt.Method12.u64StartSpinTS = 0;
518 }
519 }
520 else
521 {
522 fSpinning = TMVirtualSyncGetLag(pVM) >= pUVM->vm.s.Halt.Method12.u32StartSpinningCfg;
523 if (fSpinning)
524 pUVM->vm.s.Halt.Method12.u64StartSpinTS = u64Now;
525 }
526 }
527 else if (pUVM->vm.s.Halt.Method12.u64StartSpinTS)
528 {
529 //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pUVM->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);
530 pUVM->vm.s.Halt.Method12.u64StartSpinTS = 0;
531 }
532
533 /*
534 * Halt loop.
535 */
536 int rc = VINF_SUCCESS;
537 ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, true);
538 unsigned cLoops = 0;
539 for (;; cLoops++)
540 {
541 /*
542 * Work the timers and check if we can exit.
543 */
544 STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltPoll, a);
545 PDMR3Poll(pVM);
546 STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltPoll, a);
547 STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltTimers, b);
548 TMR3TimerQueuesDo(pVM);
549 STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltTimers, b);
550 if (VM_FF_ISPENDING(pVM, fMask))
551 break;
552
553 /*
554 * Estimate time left to the next event.
555 */
556 uint64_t u64NanoTS = TMVirtualToNano(pVM, TMTimerPoll(pVM));
557 if (VM_FF_ISPENDING(pVM, fMask))
558 break;
559
560 /*
561 * Block if we're not spinning and the interval isn't all that small.
562 */
563 if ( ( !fSpinning
564 || fBlockOnce)
565#if 1 /* DEBUGGING STUFF - REMOVE LATER */
566 && u64NanoTS >= 100000) /* 0.100 ms */
567#else
568 && u64NanoTS >= 250000) /* 0.250 ms */
569#endif
570 {
571 const uint64_t Start = pUVM->vm.s.Halt.Method12.u64LastBlockTS = RTTimeNanoTS();
572 VMMR3YieldStop(pVM);
573
574 uint32_t cMilliSecs = RT_MIN(u64NanoTS / 1000000, 15);
575 if (cMilliSecs <= pUVM->vm.s.Halt.Method12.cNSBlockedTooLongAvg)
576 cMilliSecs = 1;
577 else
578 cMilliSecs -= pUVM->vm.s.Halt.Method12.cNSBlockedTooLongAvg;
579 //RTLogRelPrintf("u64NanoTS=%RI64 cLoops=%3d sleep %02dms (%7RU64) ", u64NanoTS, cLoops, cMilliSecs, u64NanoTS);
580 STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltBlock, a);
581 rc = RTSemEventWait(pUVM->vm.s.EventSemWait, cMilliSecs);
582 STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltBlock, a);
583 if (rc == VERR_TIMEOUT)
584 rc = VINF_SUCCESS;
585 else if (VBOX_FAILURE(rc))
586 {
587 AssertRC(rc != VERR_INTERRUPTED);
588 AssertMsgFailed(("RTSemEventWait->%Vrc\n", rc));
589 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
590 VM_FF_SET(pVM, VM_FF_TERMINATE);
591 rc = VERR_INTERNAL_ERROR;
592 break;
593 }
594
595 /*
596 * Calc the statistics.
597 * Update averages every 16th time, and flush parts of the history every 64th time.
598 */
599 const uint64_t Elapsed = RTTimeNanoTS() - Start;
600 pUVM->vm.s.Halt.Method12.cNSBlocked += Elapsed;
601 if (Elapsed > u64NanoTS)
602 pUVM->vm.s.Halt.Method12.cNSBlockedTooLong += Elapsed - u64NanoTS;
603 pUVM->vm.s.Halt.Method12.cBlocks++;
604 if (!(pUVM->vm.s.Halt.Method12.cBlocks & 0xf))
605 {
606 pUVM->vm.s.Halt.Method12.cNSBlockedTooLongAvg = pUVM->vm.s.Halt.Method12.cNSBlockedTooLong / pUVM->vm.s.Halt.Method12.cBlocks;
607 if (!(pUVM->vm.s.Halt.Method12.cBlocks & 0x3f))
608 {
609 pUVM->vm.s.Halt.Method12.cNSBlockedTooLong = pUVM->vm.s.Halt.Method12.cNSBlockedTooLongAvg * 0x40;
610 pUVM->vm.s.Halt.Method12.cBlocks = 0x40;
611 }
612 }
613 //RTLogRelPrintf(" -> %7RU64 ns / %7RI64 ns delta%s\n", Elapsed, Elapsed - u64NanoTS, fBlockOnce ? " (block once)" : "");
614
615 /*
616 * Clear the block once flag if we actually blocked.
617 */
618 if ( fBlockOnce
619 && Elapsed > 100000 /* 0.1 ms */)
620 fBlockOnce = false;
621 }
622 }
623 //if (fSpinning) RTLogRelPrintf("spun for %RU64 ns %u loops; lag=%RU64 pct=%d\n", RTTimeNanoTS() - u64Now, cLoops, TMVirtualSyncGetLag(pVM), u32CatchUpPct);
624
625 ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, false);
626 return rc;
627}
628
629
630/**
631 * Initialize the global 1 halt method.
632 *
633 * @return VBox status code.
634 * @param pUVM Pointer to the user mode VM structure.
635 */
636static DECLCALLBACK(int) vmR3HaltGlobal1Init(PUVM pUVM)
637{
638 return VINF_SUCCESS;
639}
640
641
/**
 * The global 1 halt method - Block in GMM (ring-0) and let it
 * try take care of the global scheduling of EMT threads.
 *
 * Works PDM polling and the timer queues, then either blocks in ring-0 via
 * VMMR0_DO_GVMM_SCHED_HALT (when the next event is far enough away) or keeps
 * iterating, calling VMMR0_DO_GVMM_SCHED_POLL once in a while.
 *
 * @returns VBox status code (VERR_INTERNAL_ERROR on unexpected ring-0 failure).
 * @param   pUVM    Pointer to the user mode VM structure.
 * @param   fMask   Forced-action mask that ends the halt.
 * @param   u64Now  Current time stamp (only referenced by disabled logging).
 */
static DECLCALLBACK(int) vmR3HaltGlobal1Halt(PUVM pUVM, const uint32_t fMask, uint64_t u64Now)
{
    PVM pVM = pUVM->pVM;

    /*
     * Halt loop.
     */
    int rc = VINF_SUCCESS;
    ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, true);
    unsigned cLoops = 0;
    for (;; cLoops++)
    {
        /*
         * Work the timers and check if we can exit.
         */
        STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltPoll, a);
        PDMR3Poll(pVM);
        STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltPoll, a);
        STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltTimers, b);
        TMR3TimerQueuesDo(pVM);
        STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltTimers, b);
        if (VM_FF_ISPENDING(pVM, fMask))
            break;

        /*
         * Estimate time left to the next event.
         */
        uint64_t u64Delta;
        uint64_t u64GipTime = TMTimerPollGIP(pVM, &u64Delta);
        if (VM_FF_ISPENDING(pVM, fMask))
            break;

        /*
         * Block if we're not spinning and the interval isn't all that small.
         */
        if (u64Delta > 50000 /* 0.050ms */)
        {
            VMMR3YieldStop(pVM);
            /* Re-check FFs after stopping the yielder, before committing to ring-0. */
            if (VM_FF_ISPENDING(pVM, fMask))
                break;

            //RTLogRelPrintf("u64NanoTS=%RI64 cLoops=%3d sleep %02dms (%7RU64) ", u64NanoTS, cLoops, cMilliSecs, u64NanoTS);
            STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltBlock, c);
            rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GVMM_SCHED_HALT, u64GipTime, NULL);
            STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltBlock, c);
            if (rc == VERR_INTERRUPTED)
                rc = VINF_SUCCESS;
            else if (VBOX_FAILURE(rc))
            {
                /* Unexpected ring-0 failure: flag termination so the EMT loop exits. */
                AssertMsgFailed(("VMMR0_DO_GVMM_SCHED_HALT->%Vrc\n", rc));
                ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
                VM_FF_SET(pVM, VM_FF_TERMINATE);
                rc = VERR_INTERNAL_ERROR;
                break;
            }
        }
        /*
         * When spinning call upon the GVMM and do some wakups once
         * in a while, it's not like we're actually busy or anything.
         */
        else if (!(cLoops & 0x1fff))
        {
            STAM_REL_PROFILE_START(&pUVM->vm.s.StatHaltYield, d);
            rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GVMM_SCHED_POLL, false /* don't yield */, NULL);
            STAM_REL_PROFILE_STOP(&pUVM->vm.s.StatHaltYield, d);
        }
    }
    //if (fSpinning) RTLogRelPrintf("spun for %RU64 ns %u loops; lag=%RU64 pct=%d\n", RTTimeNanoTS() - u64Now, cLoops, TMVirtualSyncGetLag(pVM), u32CatchUpPct);

    ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, false);
    return rc;
}
718
719
720/**
721 * The global 1 halt method - VMR3Wait() worker.
722 *
723 * @returns VBox status code.
724 * @param pUVM Pointer to the user mode VM structure.
725 */
726static DECLCALLBACK(int) vmR3HaltGlobal1Wait(PUVM pUVM)
727{
728 ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, true);
729
730 PVM pVM = pUVM->pVM;
731 int rc = VINF_SUCCESS;
732 for (;;)
733 {
734 /*
735 * Check Relevant FFs.
736 */
737 if (VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK))
738 break;
739
740 /*
741 * Wait for a while. Someone will wake us up or interrupt the call if
742 * anything needs our attention.
743 */
744 rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GVMM_SCHED_HALT, RTTimeNanoTS() + 1000000000 /* +1s */, NULL);
745 if (rc == VERR_INTERRUPTED)
746 rc = VINF_SUCCESS;
747 else if (VBOX_FAILURE(rc))
748 {
749 AssertMsgFailed(("RTSemEventWait->%Vrc\n", rc));
750 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
751 VM_FF_SET(pVM, VM_FF_TERMINATE);
752 rc = VERR_INTERNAL_ERROR;
753 break;
754 }
755
756 }
757
758 ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, false);
759 return rc;
760}
761
762
763/**
764 * The global 1 halt method - VMR3NotifyFF() worker.
765 *
766 * @param pUVM Pointer to the user mode VM structure.
767 * @param fNotifiedREM See VMR3NotifyFF().
768 */
769static DECLCALLBACK(void) vmR3HaltGlobal1NotifyFF(PUVM pUVM, bool fNotifiedREM)
770{
771 if (pUVM->vm.s.fWait)
772 {
773 int rc = SUPCallVMMR0Ex(pUVM->pVM->pVMR0, VMMR0_DO_GVMM_SCHED_WAKE_UP, 0, NULL);
774 AssertRC(rc);
775 }
776 else if (!fNotifiedREM)
777 REMR3NotifyFF(pUVM->pVM);
778}
779
780
781/**
782 * Bootstrap VMR3Wait() worker.
783 *
784 * @returns VBox status code.
785 * @param pUVM Pointer to the user mode VM structure.
786 */
787static DECLCALLBACK(int) vmR3BootstrapWait(PUVM pUVM)
788{
789 ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, true);
790
791 int rc = VINF_SUCCESS;
792 for (;;)
793 {
794 /*
795 * Check Relevant FFs.
796 */
797 if (pUVM->vm.s.pReqs)
798 break;
799 if ( pUVM->pVM
800 && VM_FF_ISPENDING(pUVM->pVM, VM_FF_EXTERNAL_SUSPENDED_MASK))
801 break;
802
803 /*
804 * Wait for a while. Someone will wake us up or interrupt the call if
805 * anything needs our attention.
806 */
807 rc = RTSemEventWait(pUVM->vm.s.EventSemWait, 1000);
808 if (rc == VERR_TIMEOUT)
809 rc = VINF_SUCCESS;
810 else if (VBOX_FAILURE(rc))
811 {
812 AssertMsgFailed(("RTSemEventWait->%Vrc\n", rc));
813 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
814 if (pUVM->pVM)
815 VM_FF_SET(pUVM->pVM, VM_FF_TERMINATE);
816 rc = VERR_INTERNAL_ERROR;
817 break;
818 }
819
820 }
821
822 ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, false);
823 return rc;
824}
825
826
827/**
828 * Bootstrap VMR3NotifyFF() worker.
829 *
830 * @param pUVM Pointer to the user mode VM structure.
831 * @param fNotifiedREM See VMR3NotifyFF().
832 */
833static DECLCALLBACK(void) vmR3BootstrapNotifyFF(PUVM pUVM, bool fNotifiedREM)
834{
835 if (pUVM->vm.s.fWait)
836 {
837 int rc = RTSemEventSignal(pUVM->vm.s.EventSemWait);
838 AssertRC(rc);
839 }
840}
841
842
843
844/**
845 * Default VMR3Wait() worker.
846 *
847 * @returns VBox status code.
848 * @param pUVM Pointer to the user mode VM structure.
849 */
850static DECLCALLBACK(int) vmR3DefaultWait(PUVM pUVM)
851{
852 ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, true);
853
854 PVM pVM = pUVM->pVM;
855 int rc = VINF_SUCCESS;
856 for (;;)
857 {
858 /*
859 * Check Relevant FFs.
860 */
861 if (VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK))
862 break;
863
864 /*
865 * Wait for a while. Someone will wake us up or interrupt the call if
866 * anything needs our attention.
867 */
868 rc = RTSemEventWait(pUVM->vm.s.EventSemWait, 1000);
869 if (rc == VERR_TIMEOUT)
870 rc = VINF_SUCCESS;
871 else if (VBOX_FAILURE(rc))
872 {
873 AssertMsgFailed(("RTSemEventWait->%Vrc\n", rc));
874 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
875 VM_FF_SET(pVM, VM_FF_TERMINATE);
876 rc = VERR_INTERNAL_ERROR;
877 break;
878 }
879
880 }
881
882 ASMAtomicUoWriteBool(&pUVM->vm.s.fWait, false);
883 return rc;
884}
885
886
887/**
888 * Default VMR3NotifyFF() worker.
889 *
890 * @param pUVM Pointer to the user mode VM structure.
891 * @param fNotifiedREM See VMR3NotifyFF().
892 */
893static DECLCALLBACK(void) vmR3DefaultNotifyFF(PUVM pUVM, bool fNotifiedREM)
894{
895 if (pUVM->vm.s.fWait)
896 {
897 int rc = RTSemEventSignal(pUVM->vm.s.EventSemWait);
898 AssertRC(rc);
899 }
900 else if (!fNotifiedREM)
901 REMR3NotifyFF(pUVM->pVM);
902}
903
904
/**
 * Array with halt method descriptors.
 * VMINT::iHaltMethod contains an index into this array.
 */
static const struct VMHALTMETHODDESC
{
    /** The halt method id. */
    VMHALTMETHOD enmHaltMethod;
    /** The init function for loading config and initialize variables. */
    DECLR3CALLBACKMEMBER(int,  pfnInit,(PUVM pUVM));
    /** The term function. */
    DECLR3CALLBACKMEMBER(void, pfnTerm,(PUVM pUVM));
    /** The halt function. */
    DECLR3CALLBACKMEMBER(int,  pfnHalt,(PUVM pUVM, const uint32_t fMask, uint64_t u64Now));
    /** The wait function. */
    DECLR3CALLBACKMEMBER(int,  pfnWait,(PUVM pUVM));
    /** The notifyFF function. */
    DECLR3CALLBACKMEMBER(void, pfnNotifyFF,(PUVM pUVM, bool fNotifiedREM));
} g_aHaltMethods[] =
{
    /* NOTE(review): BOOTSTRAP has no pfnHalt — presumably the VM is never
       halted before it exists; confirm callers of pfnHalt guard on state. */
    { VMHALTMETHOD_BOOTSTRAP, NULL,                NULL,   NULL,                vmR3BootstrapWait,   vmR3BootstrapNotifyFF },
    { VMHALTMETHOD_OLD,       NULL,                NULL,   vmR3HaltOldDoHalt,   vmR3DefaultWait,     vmR3DefaultNotifyFF },
    { VMHALTMETHOD_1,         vmR3HaltMethod1Init, NULL,   vmR3HaltMethod1Halt, vmR3DefaultWait,     vmR3DefaultNotifyFF },
  //{ VMHALTMETHOD_2,         vmR3HaltMethod2Init, vmR3HaltMethod2Term, vmR3HaltMethod2DoHalt, vmR3HaltMethod2Wait, vmR3HaltMethod2NotifyFF },
    { VMHALTMETHOD_GLOBAL_1,  vmR3HaltGlobal1Init, NULL,   vmR3HaltGlobal1Halt, vmR3HaltGlobal1Wait, vmR3HaltGlobal1NotifyFF },
};
931
932
933/**
934 * Notify the emulation thread (EMT) about pending Forced Action (FF).
935 *
936 * This function is called by thread other than EMT to make
937 * sure EMT wakes up and promptly service an FF request.
938 *
939 * @param pVM VM handle.
940 * @param fNotifiedREM Set if REM have already been notified. If clear the
941 * generic REMR3NotifyFF() method is called.
942 */
943VMR3DECL(void) VMR3NotifyFF(PVM pVM, bool fNotifiedREM)
944{
945 LogFlow(("VMR3NotifyFF:\n"));
946 PUVM pUVM = pVM->pUVM;
947 g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnNotifyFF(pUVM, fNotifiedREM);
948}
949
950
951/**
952 * Notify the emulation thread (EMT) about pending Forced Action (FF).
953 *
954 * This function is called by thread other than EMT to make
955 * sure EMT wakes up and promptly service an FF request.
956 *
957 * @param pUVM Pointer to the user mode VM structure.
958 * @param fNotifiedREM Set if REM have already been notified. If clear the
959 * generic REMR3NotifyFF() method is called.
960 */
961VMR3DECL(void) VMR3NotifyFFU(PUVM pUVM, bool fNotifiedREM)
962{
963 LogFlow(("VMR3NotifyFF:\n"));
964 g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnNotifyFF(pUVM, fNotifiedREM);
965}
966
967
/**
 * Halted VM Wait.
 * Any external event will unblock the thread.
 *
 * Suspends the yielder, records halt-frequency statistics over the last
 * second, and then delegates the actual halting to the currently selected
 * halt method's pfnHalt.
 *
 * @returns VINF_SUCCESS unless a fatal error occured. In the latter
 *          case an appropriate status code is returned.
 * @param   pVM                 VM handle.
 * @param   fIgnoreInterrupts   If set the VM_FF_INTERRUPT flags is ignored.
 * @thread  The emulation thread.
 */
VMR3DECL(int) VMR3WaitHalted(PVM pVM, bool fIgnoreInterrupts)
{
    LogFlow(("VMR3WaitHalted: fIgnoreInterrupts=%d\n", fIgnoreInterrupts));

    /*
     * Check Relevant FFs.
     */
    const uint32_t fMask = !fIgnoreInterrupts
        ? VM_FF_EXTERNAL_HALTED_MASK
        : VM_FF_EXTERNAL_HALTED_MASK & ~(VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC);
    if (VM_FF_ISPENDING(pVM, fMask))
    {
        LogFlow(("VMR3WaitHalted: returns VINF_SUCCESS (FF %#x)\n", pVM->fForcedActions));
        return VINF_SUCCESS;
    }

    /*
     * The yielder is suspended while we're halting.
     */
    VMMR3YieldSuspend(pVM);

    /*
     * Record halt averages for the last second.
     */
    PUVM pUVM = pVM->pUVM;
    uint64_t u64Now = RTTimeNanoTS();
    int64_t off = u64Now - pUVM->vm.s.u64HaltsStartTS;
    if (off > 1000000000)
    {
        /* Window elapsed (or clock jumped / first halt): recompute or reset. */
        if (off > _4G || !pUVM->vm.s.cHalts)
        {
            pUVM->vm.s.HaltInterval = 1000000000 /* 1 sec */;
            pUVM->vm.s.HaltFrequency = 1;
        }
        else
        {
            pUVM->vm.s.HaltInterval = (uint32_t)off / pUVM->vm.s.cHalts;
            pUVM->vm.s.HaltFrequency = ASMMultU64ByU32DivByU32(pUVM->vm.s.cHalts, 1000000000, (uint32_t)off);
        }
        pUVM->vm.s.u64HaltsStartTS = u64Now;
        pUVM->vm.s.cHalts = 0;
    }
    pUVM->vm.s.cHalts++;

    /*
     * Do the halt.
     */
    int rc = g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnHalt(pUVM, fMask, u64Now);

    /*
     * Resume the yielder.
     */
    VMMR3YieldResume(pVM);

    LogFlow(("VMR3WaitHalted: returns %Vrc (FF %#x)\n", rc, pVM->fForcedActions));
    return rc;
}
1035
1036
1037/**
1038 * Suspended VM Wait.
1039 * Only a handful of forced actions will cause the function to
1040 * return to the caller.
1041 *
1042 * @returns VINF_SUCCESS unless a fatal error occured. In the latter
1043 * case an appropriate status code is returned.
1044 * @param pUVM Pointer to the user mode VM structure.
1045 * @thread The emulation thread.
1046 */
1047VMR3DECL(int) VMR3WaitU(PUVM pUVM)
1048{
1049 LogFlow(("VMR3WaitU:\n"));
1050
1051 /*
1052 * Check Relevant FFs.
1053 */
1054 PVM pVM = pUVM->pVM;
1055 if ( pVM
1056 && VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_SUSPENDED_MASK))
1057 {
1058 LogFlow(("VMR3Wait: returns VINF_SUCCESS (FF %#x)\n", pVM->fForcedActions));
1059 return VINF_SUCCESS;
1060 }
1061
1062 /*
1063 * Do waiting according to the halt method (so VMR3NotifyFF
1064 * doesn't have to special case anything).
1065 */
1066 int rc = g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnWait(pUVM);
1067 LogFlow(("VMR3WaitU: returns %Vrc (FF %#x)\n", rc, pVM ? pVM->fForcedActions : 0));
1068 return rc;
1069}
1070
1071
/**
 * Changes the halt method.
 *
 * Resolves VMHALTMETHOD_DEFAULT from the CFGM "VM/HaltMethod" value (falling
 * back to GLOBAL_1), terminates the previously active method, then
 * initializes and atomically activates the new one.
 *
 * @returns VBox status code.
 * @param   pUVM            Pointer to the user mode VM structure.
 * @param   enmHaltMethod   The new halt method.
 * @thread  EMT.
 */
int vmR3SetHaltMethodU(PUVM pUVM, VMHALTMETHOD enmHaltMethod)
{
    PVM pVM = pUVM->pVM; Assert(pVM);
    VM_ASSERT_EMT(pVM);
    AssertReturn(enmHaltMethod > VMHALTMETHOD_INVALID && enmHaltMethod < VMHALTMETHOD_END, VERR_INVALID_PARAMETER);

    /*
     * Resolve default (can be overridden in the configuration).
     */
    if (enmHaltMethod == VMHALTMETHOD_DEFAULT)
    {
        uint32_t u32;
        int rc = CFGMR3QueryU32(CFGMR3GetChild(CFGMR3GetRoot(pVM), "VM"), "HaltMethod", &u32);
        if (VBOX_SUCCESS(rc))
        {
            enmHaltMethod = (VMHALTMETHOD)u32;
            if (enmHaltMethod <= VMHALTMETHOD_INVALID || enmHaltMethod >= VMHALTMETHOD_END)
                return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("Invalid VM/HaltMethod value %d"), enmHaltMethod);
        }
        else if (rc == VERR_CFGM_VALUE_NOT_FOUND || rc == VERR_CFGM_CHILD_NOT_FOUND)
            return VMSetError(pVM, rc, RT_SRC_POS, N_("Failed to Query VM/HaltMethod as uint32_t"));
        else
            enmHaltMethod = VMHALTMETHOD_GLOBAL_1;
        //enmHaltMethod = VMHALTMETHOD_1;
        //enmHaltMethod = VMHALTMETHOD_OLD;
    }
    LogRel(("VM: Halt method %s (%d)\n", vmR3GetHaltMethodName(enmHaltMethod), enmHaltMethod));

    /*
     * Find the descriptor.
     */
    unsigned i = 0;
    while (    i < RT_ELEMENTS(g_aHaltMethods)
           &&  g_aHaltMethods[i].enmHaltMethod != enmHaltMethod)
        i++;
    AssertReturn(i < RT_ELEMENTS(g_aHaltMethods), VERR_INVALID_PARAMETER);

    /*
     * Terminate the old one.
     */
    if (    pUVM->vm.s.enmHaltMethod != VMHALTMETHOD_INVALID
        &&  g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnTerm)
    {
        g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnTerm(pUVM);
        pUVM->vm.s.enmHaltMethod = VMHALTMETHOD_INVALID;
    }

    /*
     * Init the new one.
     */
    memset(&pUVM->vm.s.Halt, 0, sizeof(pUVM->vm.s.Halt));
    if (g_aHaltMethods[i].pfnInit)
    {
        int rc = g_aHaltMethods[i].pfnInit(pUVM);
        AssertRCReturn(rc, rc);
    }
    pUVM->vm.s.enmHaltMethod = enmHaltMethod;

    /* Atomic write so concurrent VMR3NotifyFF callers see a consistent index. */
    ASMAtomicWriteU32(&pUVM->vm.s.iHaltMethod, i);
    return VINF_SUCCESS;
}
1141
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette