VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/VM.cpp@ 62869

Last change on this file since 62869 was 62869, checked in by vboxsync, 9 years ago

VMM: warnings.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 168.7 KB
 
1/* $Id: VM.cpp 62869 2016-08-02 12:01:23Z vboxsync $ */
2/** @file
3 * VM - Virtual Machine
4 */
5
6/*
7 * Copyright (C) 2006-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.alldomusa.eu.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_vm VM API
19 *
20 * This is the encapsulating bit. It provides the APIs that Main and VBoxBFE
21 * use to create a VMM instance for running a guest in. It also provides
22 * facilities for queuing request for execution in EMT (serialization purposes
23 * mostly) and for reporting error back to the VMM user (Main/VBoxBFE).
24 *
25 *
26 * @section sec_vm_design Design Critique / Things To Do
27 *
28 * In hindsight this component is a big design mistake, all this stuff really
29 * belongs in the VMM component. It just seemed like a kind of ok idea at a
30 * time when the VMM bit was a kind of vague. 'VM' also happened to be the name
31 * of the per-VM instance structure (see vm.h), so it kind of made sense.
32 * However as it turned out, VMM(.cpp) is almost empty all it provides in ring-3
33 * is some minor functionally and some "routing" services.
34 *
35 * Fixing this is just a matter of some more or less straight forward
36 * refactoring, the question is just when someone will get to it. Moving the EMT
37 * would be a good start.
38 *
39 */
40
41
42/*********************************************************************************************************************************
43* Header Files *
44*********************************************************************************************************************************/
45#define LOG_GROUP LOG_GROUP_VM
46#include <VBox/vmm/cfgm.h>
47#include <VBox/vmm/vmm.h>
48#include <VBox/vmm/gvmm.h>
49#include <VBox/vmm/mm.h>
50#include <VBox/vmm/cpum.h>
51#include <VBox/vmm/selm.h>
52#include <VBox/vmm/trpm.h>
53#include <VBox/vmm/dbgf.h>
54#include <VBox/vmm/pgm.h>
55#include <VBox/vmm/pdmapi.h>
56#include <VBox/vmm/pdmdev.h>
57#include <VBox/vmm/pdmcritsect.h>
58#include <VBox/vmm/em.h>
59#include <VBox/vmm/iem.h>
60#ifdef VBOX_WITH_REM
61# include <VBox/vmm/rem.h>
62#endif
63#ifdef VBOX_WITH_NEW_APIC
64# include <VBox/vmm/apic.h>
65#endif
66#include <VBox/vmm/tm.h>
67#include <VBox/vmm/stam.h>
68#include <VBox/vmm/patm.h>
69#include <VBox/vmm/csam.h>
70#include <VBox/vmm/iom.h>
71#include <VBox/vmm/ssm.h>
72#include <VBox/vmm/ftm.h>
73#include <VBox/vmm/hm.h>
74#include <VBox/vmm/gim.h>
75#include "VMInternal.h"
76#include <VBox/vmm/vm.h>
77#include <VBox/vmm/uvm.h>
78
79#include <VBox/sup.h>
80#if defined(VBOX_WITH_DTRACE_R3) && !defined(VBOX_WITH_NATIVE_DTRACE)
81# include <VBox/VBoxTpG.h>
82#endif
83#include <VBox/dbg.h>
84#include <VBox/err.h>
85#include <VBox/param.h>
86#include <VBox/log.h>
87#include <iprt/assert.h>
88#include <iprt/alloc.h>
89#include <iprt/asm.h>
90#include <iprt/env.h>
91#include <iprt/string.h>
92#include <iprt/time.h>
93#include <iprt/semaphore.h>
94#include <iprt/thread.h>
95#include <iprt/uuid.h>
96
97
98/*********************************************************************************************************************************
99* Internal Functions *
100*********************************************************************************************************************************/
101static int vmR3CreateUVM(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods, PUVM *ppUVM);
102static int vmR3CreateU(PUVM pUVM, uint32_t cCpus, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM);
103static int vmR3ReadBaseConfig(PVM pVM, PUVM pUVM, uint32_t cCpus);
104static int vmR3InitRing3(PVM pVM, PUVM pUVM);
105static int vmR3InitRing0(PVM pVM);
106#ifdef VBOX_WITH_RAW_MODE
107static int vmR3InitRC(PVM pVM);
108#endif
109static int vmR3InitDoCompleted(PVM pVM, VMINITCOMPLETED enmWhat);
110#ifdef LOG_ENABLED
111static DECLCALLBACK(size_t) vmR3LogPrefixCallback(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser);
112#endif
113static void vmR3DestroyUVM(PUVM pUVM, uint32_t cMilliesEMTWait);
114static bool vmR3ValidateStateTransition(VMSTATE enmStateOld, VMSTATE enmStateNew);
115static void vmR3DoAtState(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld);
116static int vmR3TrySetState(PVM pVM, const char *pszWho, unsigned cTransitions, ...);
117static void vmR3SetStateLocked(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld, bool fSetRatherThanClearFF);
118static void vmR3SetState(PVM pVM, VMSTATE enmStateNew, VMSTATE enmStateOld);
119static int vmR3SetErrorU(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...) RT_IPRT_FORMAT_ATTR(6, 7);
120
121
122/**
123 * Do global VMM init.
124 *
125 * @returns VBox status code.
126 */
127VMMR3DECL(int) VMR3GlobalInit(void)
128{
129 /*
130 * Only once.
131 */
132 static bool volatile s_fDone = false;
133 if (s_fDone)
134 return VINF_SUCCESS;
135
136#if defined(VBOX_WITH_DTRACE_R3) && !defined(VBOX_WITH_NATIVE_DTRACE)
137 SUPR3TracerRegisterModule(~(uintptr_t)0, "VBoxVMM", &g_VTGObjHeader, (uintptr_t)&g_VTGObjHeader,
138 SUP_TRACER_UMOD_FLAGS_SHARED);
139#endif
140
141 /*
142 * We're done.
143 */
144 s_fDone = true;
145 return VINF_SUCCESS;
146}
147
148
/**
 * Creates a virtual machine by calling the supplied configuration constructor.
 *
 * On successful return the VM is powered, i.e. VMR3PowerOn() should be
 * called to start the execution.
 *
 * @returns 0 on success.
 * @returns VBox error code on failure.
 * @param   cCpus               Number of virtual CPUs for the new VM.
 * @param   pVmm2UserMethods    An optional method table that the VMM can use
 *                              to make the user perform various action, like
 *                              for instance state saving.
 * @param   pfnVMAtError        Pointer to callback function for setting VM
 *                              errors. This was added as an implicit call to
 *                              VMR3AtErrorRegister() since there is no way the
 *                              caller can get to the VM handle early enough to
 *                              do this on its own.
 *                              This is called in the context of an EMT.
 * @param   pvUserVM            The user argument passed to pfnVMAtError.
 * @param   pfnCFGMConstructor  Pointer to callback function for constructing the VM configuration tree.
 *                              This is called in the context of an EMT0.
 * @param   pvUserCFGM          The user argument passed to pfnCFGMConstructor.
 * @param   ppVM                Where to optionally store the 'handle' of the
 *                              created VM.
 * @param   ppUVM               Where to optionally store the user 'handle' of
 *                              the created VM, this includes one reference as
 *                              if VMR3RetainUVM() was called. The caller
 *                              *MUST* remember to pass the returned value to
 *                              VMR3ReleaseUVM() once done with the handle.
 */
VMMR3DECL(int) VMR3Create(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods,
                          PFNVMATERROR pfnVMAtError, void *pvUserVM,
                          PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM,
                          PVM *ppVM, PUVM *ppUVM)
{
    LogFlow(("VMR3Create: cCpus=%RU32 pVmm2UserMethods=%p pfnVMAtError=%p pvUserVM=%p pfnCFGMConstructor=%p pvUserCFGM=%p ppVM=%p ppUVM=%p\n",
             cCpus, pVmm2UserMethods, pfnVMAtError, pvUserVM, pfnCFGMConstructor, pvUserCFGM, ppVM, ppUVM));

    /*
     * Validate the optional VMM -> user method table before anything else
     * touches it (magic/version sandwich guards against size mismatches).
     */
    if (pVmm2UserMethods)
    {
        AssertPtrReturn(pVmm2UserMethods, VERR_INVALID_POINTER);
        AssertReturn(pVmm2UserMethods->u32Magic == VMM2USERMETHODS_MAGIC, VERR_INVALID_PARAMETER);
        AssertReturn(pVmm2UserMethods->u32Version == VMM2USERMETHODS_VERSION, VERR_INVALID_PARAMETER);
        AssertPtrNullReturn(pVmm2UserMethods->pfnSaveState, VERR_INVALID_POINTER);
        AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyEmtInit, VERR_INVALID_POINTER);
        AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyEmtTerm, VERR_INVALID_POINTER);
        AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyPdmtInit, VERR_INVALID_POINTER);
        AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyPdmtTerm, VERR_INVALID_POINTER);
        AssertPtrNullReturn(pVmm2UserMethods->pfnNotifyResetTurnedIntoPowerOff, VERR_INVALID_POINTER);
        AssertReturn(pVmm2UserMethods->u32EndMagic == VMM2USERMETHODS_MAGIC, VERR_INVALID_PARAMETER);
    }
    AssertPtrNullReturn(pfnVMAtError, VERR_INVALID_POINTER);
    AssertPtrNullReturn(pfnCFGMConstructor, VERR_INVALID_POINTER);
    AssertPtrNullReturn(ppVM, VERR_INVALID_POINTER);
    AssertPtrNullReturn(ppUVM, VERR_INVALID_POINTER);
    /* At least one of the two output handles must be requested. */
    AssertReturn(ppVM || ppUVM, VERR_INVALID_PARAMETER);

    /*
     * Because of the current hackiness of the applications
     * we'll have to initialize global stuff from here.
     * Later the applications will take care of this in a proper way.
     */
    static bool fGlobalInitDone = false;
    if (!fGlobalInitDone)
    {
        int rc = VMR3GlobalInit();
        if (RT_FAILURE(rc))
            return rc;
        fGlobalInitDone = true;
    }

    /*
     * Validate input.
     */
    AssertLogRelMsgReturn(cCpus > 0 && cCpus <= VMM_MAX_CPU_COUNT, ("%RU32\n", cCpus), VERR_TOO_MANY_CPUS);

    /*
     * Create the UVM so we can register the at-error callback
     * and consolidate a bit of cleanup code.
     */
    PUVM pUVM = NULL; /* shuts up gcc */
    int rc = vmR3CreateUVM(cCpus, pVmm2UserMethods, &pUVM);
    if (RT_FAILURE(rc))
        return rc;
    if (pfnVMAtError)
        rc = VMR3AtErrorRegister(pUVM, pfnVMAtError, pvUserVM);
    if (RT_SUCCESS(rc))
    {
        /*
         * Initialize the support library creating the session for this VM.
         */
        rc = SUPR3Init(&pUVM->vm.s.pSession);
        if (RT_SUCCESS(rc))
        {
            /*
             * Call vmR3CreateU in the EMT thread and wait for it to finish.
             *
             * Note! VMCPUID_ANY is used here because VMR3ReqQueueU would have trouble
             *       submitting a request to a specific VCPU without a pVM. So, to make
             *       sure init is running on EMT(0), vmR3EmulationThreadWithId makes sure
             *       that only EMT(0) is servicing VMCPUID_ANY requests when pVM is NULL.
             */
            PVMREQ pReq;
            rc = VMR3ReqCallU(pUVM, VMCPUID_ANY, &pReq, RT_INDEFINITE_WAIT, VMREQFLAGS_VBOX_STATUS,
                              (PFNRT)vmR3CreateU, 4, pUVM, cCpus, pfnCFGMConstructor, pvUserCFGM);
            if (RT_SUCCESS(rc))
            {
                /* The request succeeded; the creation status itself is in iStatus. */
                rc = pReq->iStatus;
                VMR3ReqFree(pReq);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Success!
                     */
                    if (ppVM)
                        *ppVM = pUVM->pVM;
                    if (ppUVM)
                    {
                        /* The extra reference handed to the caller; released via VMR3ReleaseUVM(). */
                        VMR3RetainUVM(pUVM);
                        *ppUVM = pUVM;
                    }
                    LogFlow(("VMR3Create: returns VINF_SUCCESS (pVM=%p, pUVM=%p\n", pUVM->pVM, pUVM));
                    return VINF_SUCCESS;
                }
            }
            else
                AssertMsgFailed(("VMR3ReqCallU failed rc=%Rrc\n", rc));

            /*
             * An error occurred during VM creation. Set the error message directly
             * using the initial callback, as the callback list might not exist yet.
             */
            const char *pszError;
            switch (rc)
            {
                case VERR_VMX_IN_VMX_ROOT_MODE:
#ifdef RT_OS_LINUX
                    pszError = N_("VirtualBox can't operate in VMX root mode. "
                                  "Please disable the KVM kernel extension, recompile your kernel and reboot");
#else
                    pszError = N_("VirtualBox can't operate in VMX root mode. Please close all other virtualization programs.");
#endif
                    break;

#ifndef RT_OS_DARWIN
                case VERR_HM_CONFIG_MISMATCH:
                    pszError = N_("VT-x/AMD-V is either not available on your host or disabled. "
                                  "This hardware extension is required by the VM configuration");
                    break;
#endif

                case VERR_SVM_IN_USE:
#ifdef RT_OS_LINUX
                    pszError = N_("VirtualBox can't enable the AMD-V extension. "
                                  "Please disable the KVM kernel extension, recompile your kernel and reboot");
#else
                    pszError = N_("VirtualBox can't enable the AMD-V extension. Please close all other virtualization programs.");
#endif
                    break;

#ifdef RT_OS_LINUX
                case VERR_SUPDRV_COMPONENT_NOT_FOUND:
                    pszError = N_("One of the kernel modules was not successfully loaded. Make sure "
                                  "that no kernel modules from an older version of VirtualBox exist. "
                                  "Then try to recompile and reload the kernel modules by executing "
                                  "'/sbin/vboxconfig' as root");
                    break;
#endif

                case VERR_RAW_MODE_INVALID_SMP:
                    pszError = N_("VT-x/AMD-V is either not available on your host or disabled. "
                                  "VirtualBox requires this hardware extension to emulate more than one "
                                  "guest CPU");
                    break;

                case VERR_SUPDRV_KERNEL_TOO_OLD_FOR_VTX:
#ifdef RT_OS_LINUX
                    pszError = N_("Because the host kernel is too old, VirtualBox cannot enable the VT-x "
                                  "extension. Either upgrade your kernel to Linux 2.6.13 or later or disable "
                                  "the VT-x extension in the VM settings. Note that without VT-x you have "
                                  "to reduce the number of guest CPUs to one");
#else
                    pszError = N_("Because the host kernel is too old, VirtualBox cannot enable the VT-x "
                                  "extension. Either upgrade your kernel or disable the VT-x extension in the "
                                  "VM settings. Note that without VT-x you have to reduce the number of guest "
                                  "CPUs to one");
#endif
                    break;

                case VERR_PDM_DEVICE_NOT_FOUND:
                    pszError = N_("A virtual device is configured in the VM settings but the device "
                                  "implementation is missing.\n"
                                  "A possible reason for this error is a missing extension pack. Note "
                                  "that as of VirtualBox 4.0, certain features (for example USB 2.0 "
                                  "support and remote desktop) are only available from an 'extension "
                                  "pack' which must be downloaded and installed separately");
                    break;

                case VERR_PCI_PASSTHROUGH_NO_HM:
                    pszError = N_("PCI passthrough requires VT-x/AMD-V");
                    break;

                case VERR_PCI_PASSTHROUGH_NO_NESTED_PAGING:
                    pszError = N_("PCI passthrough requires nested paging");
                    break;

                default:
                    /* Only synthesize a generic message when nobody set one already. */
                    if (VMR3GetErrorCount(pUVM) == 0)
                        pszError = RTErrGetFull(rc);
                    else
                        pszError = NULL; /* already set. */
                    break;
            }
            if (pszError)
                vmR3SetErrorU(pUVM, rc, RT_SRC_POS, pszError, rc);
        }
        else
        {
            /*
             * An error occurred at support library initialization time (before the
             * VM could be created). Set the error message directly using the
             * initial callback, as the callback list doesn't exist yet.
             */
            const char *pszError;
            switch (rc)
            {
                case VERR_VM_DRIVER_LOAD_ERROR:
#ifdef RT_OS_LINUX
                    pszError = N_("VirtualBox kernel driver not loaded. The vboxdrv kernel module "
                                  "was either not loaded or /dev/vboxdrv is not set up properly. "
                                  "Re-setup the kernel module by executing "
                                  "'/sbin/vboxconfig' as root");
#else
                    pszError = N_("VirtualBox kernel driver not loaded");
#endif
                    break;
                case VERR_VM_DRIVER_OPEN_ERROR:
                    pszError = N_("VirtualBox kernel driver cannot be opened");
                    break;
                case VERR_VM_DRIVER_NOT_ACCESSIBLE:
#ifdef VBOX_WITH_HARDENING
                    /* This should only happen if the executable wasn't hardened - bad code/build. */
                    pszError = N_("VirtualBox kernel driver not accessible, permission problem. "
                                  "Re-install VirtualBox. If you are building it yourself, you "
                                  "should make sure it installed correctly and that the setuid "
                                  "bit is set on the executables calling VMR3Create.");
#else
                    /* This should only happen when mixing builds or with the usual /dev/vboxdrv access issues. */
# if defined(RT_OS_DARWIN)
                    pszError = N_("VirtualBox KEXT is not accessible, permission problem. "
                                  "If you have built VirtualBox yourself, make sure that you do not "
                                  "have the vboxdrv KEXT from a different build or installation loaded.");
# elif defined(RT_OS_LINUX)
                    pszError = N_("VirtualBox kernel driver is not accessible, permission problem. "
                                  "If you have built VirtualBox yourself, make sure that you do "
                                  "not have the vboxdrv kernel module from a different build or "
                                  "installation loaded. Also, make sure the vboxdrv udev rule gives "
                                  "you the permission you need to access the device.");
# elif defined(RT_OS_WINDOWS)
                    pszError = N_("VirtualBox kernel driver is not accessible, permission problem.");
# else /* solaris, freebsd, ++. */
                    pszError = N_("VirtualBox kernel module is not accessible, permission problem. "
                                  "If you have built VirtualBox yourself, make sure that you do "
                                  "not have the vboxdrv kernel module from a different install loaded.");
# endif
#endif
                    break;
                case VERR_INVALID_HANDLE: /** @todo track down and fix this error. */
                case VERR_VM_DRIVER_NOT_INSTALLED:
#ifdef RT_OS_LINUX
                    pszError = N_("VirtualBox kernel driver not installed. The vboxdrv kernel module "
                                  "was either not loaded or /dev/vboxdrv was not created for some "
                                  "reason. Re-setup the kernel module by executing "
                                  "'/sbin/vboxconfig' as root");
#else
                    pszError = N_("VirtualBox kernel driver not installed");
#endif
                    break;
                case VERR_NO_MEMORY:
                    pszError = N_("VirtualBox support library out of memory");
                    break;
                case VERR_VERSION_MISMATCH:
                case VERR_VM_DRIVER_VERSION_MISMATCH:
                    pszError = N_("The VirtualBox support driver which is running is from a different "
                                  "version of VirtualBox. You can correct this by stopping all "
                                  "running instances of VirtualBox and reinstalling the software.");
                    break;
                default:
                    pszError = N_("Unknown error initializing kernel driver");
                    AssertMsgFailed(("Add error message for rc=%d (%Rrc)\n", rc, rc));
            }
            vmR3SetErrorU(pUVM, rc, RT_SRC_POS, pszError, rc);
        }
    }

    /* cleanup - all failure paths fall through to here; drops the UVM created above. */
    vmR3DestroyUVM(pUVM, 2000);
    LogFlow(("VMR3Create: returns %Rrc\n", rc));
    return rc;
}
449
450
/**
 * Creates the UVM.
 *
 * This will not initialize the support library even if vmR3DestroyUVM
 * will terminate that.
 *
 * Acquires its resources in a strict order (TLS slot, per-VCPU event
 * semaphores, critical sections, PDM/STAM/MM UVM sub-init, EMT threads)
 * and unwinds them in exact reverse order on any failure, so on error the
 * caller gets back a fully released state and just the status code.
 *
 * @returns VBox status code.
 * @param   cCpus               Number of virtual CPUs
 * @param   pVmm2UserMethods    Pointer to the optional VMM -> User method
 *                              table.
 * @param   ppUVM               Where to store the UVM pointer.
 */
static int vmR3CreateUVM(uint32_t cCpus, PCVMM2USERMETHODS pVmm2UserMethods, PUVM *ppUVM)
{
    uint32_t i;

    /*
     * Create and initialize the UVM.
     */
    /* Zeroed page allocation sized for the trailing variable-length aCpus array. */
    PUVM pUVM = (PUVM)RTMemPageAllocZ(RT_OFFSETOF(UVM, aCpus[cCpus]));
    AssertReturn(pUVM, VERR_NO_MEMORY);
    pUVM->u32Magic = UVM_MAGIC;
    pUVM->cCpus = cCpus;
    pUVM->pVmm2UserMethods = pVmm2UserMethods;

    AssertCompile(sizeof(pUVM->vm.s) <= sizeof(pUVM->vm.padding));

    pUVM->vm.s.cUvmRefs = 1;
    /* Point the singly-linked callback list tails at their heads (empty lists). */
    pUVM->vm.s.ppAtStateNext = &pUVM->vm.s.pAtState;
    pUVM->vm.s.ppAtErrorNext = &pUVM->vm.s.pAtError;
    pUVM->vm.s.ppAtRuntimeErrorNext = &pUVM->vm.s.pAtRuntimeError;

    /* Bootstrap halt method until vmR3SetHaltMethodU picks the real one later. */
    pUVM->vm.s.enmHaltMethod = VMHALTMETHOD_BOOTSTRAP;
    RTUuidClear(&pUVM->vm.s.Uuid);

    /* Initialize the VMCPU array in the UVM. */
    for (i = 0; i < cCpus; i++)
    {
        pUVM->aCpus[i].pUVM = pUVM;
        pUVM->aCpus[i].idCpu = i;
    }

    /* Allocate a TLS entry to store the VMINTUSERPERVMCPU pointer. */
    int rc = RTTlsAllocEx(&pUVM->vm.s.idxTLS, NULL);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
    {
        /* Allocate a halt method event semaphore for each VCPU.
           Pre-set all to NIL first so the cleanup loop below is safe even
           when semaphore creation fails part way through. */
        for (i = 0; i < cCpus; i++)
            pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
        for (i = 0; i < cCpus; i++)
        {
            rc = RTSemEventCreate(&pUVM->aCpus[i].vm.s.EventSemWait);
            if (RT_FAILURE(rc))
                break;
        }
        if (RT_SUCCESS(rc))
        {
            rc = RTCritSectInit(&pUVM->vm.s.AtStateCritSect);
            if (RT_SUCCESS(rc))
            {
                rc = RTCritSectInit(&pUVM->vm.s.AtErrorCritSect);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Init fundamental (sub-)components - STAM, MMR3Heap and PDMLdr.
                     */
                    rc = PDMR3InitUVM(pUVM);
                    if (RT_SUCCESS(rc))
                    {
                        rc = STAMR3InitUVM(pUVM);
                        if (RT_SUCCESS(rc))
                        {
                            rc = MMR3InitUVM(pUVM);
                            if (RT_SUCCESS(rc))
                            {
                                /*
                                 * Start the emulation threads for all VMCPUs.
                                 */
                                for (i = 0; i < cCpus; i++)
                                {
                                    rc = RTThreadCreateF(&pUVM->aCpus[i].vm.s.ThreadEMT, vmR3EmulationThread, &pUVM->aCpus[i],
                                                         _1M, RTTHREADTYPE_EMULATION, RTTHREADFLAGS_WAITABLE,
                                                         cCpus > 1 ? "EMT-%u" : "EMT", i);
                                    if (RT_FAILURE(rc))
                                        break;

                                    pUVM->aCpus[i].vm.s.NativeThreadEMT = RTThreadGetNative(pUVM->aCpus[i].vm.s.ThreadEMT);
                                }

                                if (RT_SUCCESS(rc))
                                {
                                    /* Everything acquired - hand the UVM to the caller. */
                                    *ppUVM = pUVM;
                                    return VINF_SUCCESS;
                                }

                                /* bail out. */
                                while (i-- > 0)
                                {
                                    /** @todo rainy day: terminate the EMTs. */
                                }
                                MMR3TermUVM(pUVM);
                            }
                            STAMR3TermUVM(pUVM);
                        }
                        PDMR3TermUVM(pUVM);
                    }
                    RTCritSectDelete(&pUVM->vm.s.AtErrorCritSect);
                }
                RTCritSectDelete(&pUVM->vm.s.AtStateCritSect);
            }
        }
        /* Destroy whatever semaphores got created (NIL entries are ignored). */
        for (i = 0; i < cCpus; i++)
        {
            RTSemEventDestroy(pUVM->aCpus[i].vm.s.EventSemWait);
            pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
        }
        RTTlsFree(pUVM->vm.s.idxTLS);
    }
    RTMemPageFree(pUVM, RT_OFFSETOF(UVM, aCpus[pUVM->cCpus]));
    return rc;
}
573
574
/**
 * Creates and initializes the VM.
 *
 * Runs on EMT(0) (dispatched via VMR3ReqCallU from VMR3Create). Loads the
 * ring-0 module, asks GVMM to create the shared VM structure, wires up the
 * UVM <-> VM cross links, then walks the CFGM / ring-3 / ring-0 / raw-mode
 * init sequence. On failure it tears everything down again, including
 * telling GVMM to destroy the VM.
 *
 * @returns VBox status code.
 * @param   pUVM                The user mode VM structure.
 * @param   cCpus               The CPU count given to VMR3Create.
 * @param   pfnCFGMConstructor  Optional callback for constructing the VM
 *                              configuration tree (passed to CFGMR3Init).
 * @param   pvUserCFGM          The user argument passed to pfnCFGMConstructor.
 *
 * @thread EMT
 */
static int vmR3CreateU(PUVM pUVM, uint32_t cCpus, PFNCFGMCONSTRUCTOR pfnCFGMConstructor, void *pvUserCFGM)
{
    /*
     * Load the VMMR0.r0 module so that we can call GVMMR0CreateVM.
     */
    int rc = PDMR3LdrLoadVMMR0U(pUVM);
    if (RT_FAILURE(rc))
    {
        /** @todo we need a cleaner solution for this (VERR_VMX_IN_VMX_ROOT_MODE).
         * bird: what about moving the message down here? Main picks the first message, right? */
        if (rc == VERR_VMX_IN_VMX_ROOT_MODE)
            return rc; /* proper error message set later on */
        return vmR3SetErrorU(pUVM, rc, RT_SRC_POS, N_("Failed to load VMMR0.r0"));
    }

    /*
     * Request GVMM to create a new VM for us.
     */
    GVMMCREATEVMREQ CreateVMReq;
    CreateVMReq.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
    CreateVMReq.Hdr.cbReq = sizeof(CreateVMReq);
    CreateVMReq.pSession = pUVM->vm.s.pSession;
    CreateVMReq.pVMR0 = NIL_RTR0PTR;
    CreateVMReq.pVMR3 = NULL;
    CreateVMReq.cCpus = cCpus;
    rc = SUPR3CallVMMR0Ex(NIL_RTR0PTR, NIL_VMCPUID, VMMR0_DO_GVMM_CREATE_VM, 0, &CreateVMReq.Hdr);
    if (RT_SUCCESS(rc))
    {
        /* Ring-0 filled in the ring-3 mapping of the VM structure; sanity check it. */
        PVM pVM = pUVM->pVM = CreateVMReq.pVMR3;
        AssertRelease(VALID_PTR(pVM));
        AssertRelease(pVM->pVMR0 == CreateVMReq.pVMR0);
        AssertRelease(pVM->pSession == pUVM->vm.s.pSession);
        AssertRelease(pVM->cCpus == cCpus);
        AssertRelease(pVM->uCpuExecutionCap == 100);
        AssertRelease(pVM->offVMCPU == RT_UOFFSETOF(VM, aCpus));
        AssertCompileMemberAlignment(VM, cpum, 64);
        AssertCompileMemberAlignment(VM, tm, 64);
        AssertCompileMemberAlignment(VM, aCpus, PAGE_SIZE);

        Log(("VMR3Create: Created pUVM=%p pVM=%p pVMR0=%p hSelf=%#x cCpus=%RU32\n",
             pUVM, pVM, pVM->pVMR0, pVM->hSelf, pVM->cCpus));

        /*
         * Initialize the VM structure and our internal data (VMINT).
         */
        pVM->pUVM = pUVM;

        /* Cross-link the per-CPU VM and UVM structures. */
        for (VMCPUID i = 0; i < pVM->cCpus; i++)
        {
            pVM->aCpus[i].pUVCpu = &pUVM->aCpus[i];
            pVM->aCpus[i].idCpu = i;
            pVM->aCpus[i].hNativeThread = pUVM->aCpus[i].vm.s.NativeThreadEMT;
            Assert(pVM->aCpus[i].hNativeThread != NIL_RTNATIVETHREAD);
            /* hNativeThreadR0 is initialized on EMT registration. */
            pUVM->aCpus[i].pVCpu = &pVM->aCpus[i];
            pUVM->aCpus[i].pVM = pVM;
        }


        /*
         * Init the configuration.
         */
        rc = CFGMR3Init(pVM, pfnCFGMConstructor, pvUserCFGM);
        if (RT_SUCCESS(rc))
        {
            rc = vmR3ReadBaseConfig(pVM, pUVM, cCpus);
            if (RT_SUCCESS(rc))
            {
                /*
                 * Init the ring-3 components and ring-3 per cpu data, finishing it off
                 * by a relocation round (intermediate context finalization will do this).
                 */
                rc = vmR3InitRing3(pVM, pUVM);
                if (RT_SUCCESS(rc))
                {
                    rc = PGMR3FinalizeMappings(pVM);
                    if (RT_SUCCESS(rc))
                    {

                        LogFlow(("Ring-3 init succeeded\n"));

                        /*
                         * Init the Ring-0 components.
                         */
                        rc = vmR3InitRing0(pVM);
                        if (RT_SUCCESS(rc))
                        {
                            /* Relocate again, because some switcher fixups depends on R0 init results. */
                            VMR3Relocate(pVM, 0 /* offDelta */);

#ifdef VBOX_WITH_DEBUGGER
                            /*
                             * Init the tcp debugger console if we're building
                             * with debugger support.
                             */
                            void *pvUser = NULL;
                            rc = DBGCTcpCreate(pUVM, &pvUser);
                            /* An in-use debugger address is tolerated; the VM runs without it. */
                            if (   RT_SUCCESS(rc)
                                || rc == VERR_NET_ADDRESS_IN_USE)
                            {
                                pUVM->vm.s.pvDBGC = pvUser;
#endif
                                /*
                                 * Init the Raw-Mode Context components.
                                 */
#ifdef VBOX_WITH_RAW_MODE
                                rc = vmR3InitRC(pVM);
                                if (RT_SUCCESS(rc))
#endif
                                {
                                    /*
                                     * Now we can safely set the VM halt method to default.
                                     */
                                    rc = vmR3SetHaltMethodU(pUVM, VMHALTMETHOD_DEFAULT);
                                    if (RT_SUCCESS(rc))
                                    {
                                        /*
                                         * Set the state and we're done.
                                         */
                                        vmR3SetState(pVM, VMSTATE_CREATED, VMSTATE_CREATING);

#ifdef LOG_ENABLED
                                        RTLogSetCustomPrefixCallback(NULL, vmR3LogPrefixCallback, pUVM);
#endif
                                        return VINF_SUCCESS;
                                    }
                                }
#ifdef VBOX_WITH_DEBUGGER
                                DBGCTcpTerminate(pUVM, pUVM->vm.s.pvDBGC);
                                pUVM->vm.s.pvDBGC = NULL;
                            }
#endif
                            //..
                        }
                    }
                    vmR3Destroy(pVM);
                }
            }
            //..

            /* Clean CFGM. */
            int rc2 = CFGMR3Term(pVM);
            AssertRC(rc2);
        }

        /*
         * Do automatic cleanups while the VM structure is still alive and all
         * references to it are still working.
         */
        PDMR3CritSectBothTerm(pVM);

        /*
         * Drop all references to VM and the VMCPU structures, then
         * tell GVMM to destroy the VM.
         */
        pUVM->pVM = NULL;
        for (VMCPUID i = 0; i < pUVM->cCpus; i++)
        {
            pUVM->aCpus[i].pVM = NULL;
            pUVM->aCpus[i].pVCpu = NULL;
        }
        Assert(pUVM->vm.s.enmHaltMethod == VMHALTMETHOD_BOOTSTRAP);

        if (pUVM->cCpus > 1)
        {
            /* Poke the other EMTs since they may have stale pVM and pVCpu references
               on the stack (see VMR3WaitU for instance) if they've been awakened after
               VM creation. */
            for (VMCPUID i = 1; i < pUVM->cCpus; i++)
                VMR3NotifyCpuFFU(&pUVM->aCpus[i], 0);
            RTThreadSleep(RT_MIN(100 + 25 *(pUVM->cCpus - 1), 500)); /* very sophisticated */
        }

        int rc2 = SUPR3CallVMMR0Ex(CreateVMReq.pVMR0, 0 /*idCpu*/, VMMR0_DO_GVMM_DESTROY_VM, 0, NULL);
        AssertRC(rc2);
    }
    else
        vmR3SetErrorU(pUVM, rc, RT_SRC_POS, N_("VM creation failed (GVMM)"));

    LogFlow(("vmR3CreateU: returns %Rrc\n", rc));
    return rc;
}
762
763
764/**
765 * Reads the base configuation from CFGM.
766 *
767 * @returns VBox status code.
768 * @param pVM The cross context VM structure.
769 * @param pUVM The user mode VM structure.
770 * @param cCpus The CPU count given to VMR3Create.
771 */
772static int vmR3ReadBaseConfig(PVM pVM, PUVM pUVM, uint32_t cCpus)
773{
774 int rc;
775 PCFGMNODE pRoot = CFGMR3GetRoot(pVM);
776
777 /*
778 * If executing in fake suplib mode disable RR3 and RR0 in the config.
779 */
780 const char *psz = RTEnvGet("VBOX_SUPLIB_FAKE");
781 if (psz && !strcmp(psz, "fake"))
782 {
783 CFGMR3RemoveValue(pRoot, "RawR3Enabled");
784 CFGMR3InsertInteger(pRoot, "RawR3Enabled", 0);
785 CFGMR3RemoveValue(pRoot, "RawR0Enabled");
786 CFGMR3InsertInteger(pRoot, "RawR0Enabled", 0);
787 }
788
789 /*
790 * Base EM and HM config properties.
791 */
792 Assert(pVM->fRecompileUser == false); /* ASSUMES all zeros at this point */
793#ifdef VBOX_WITH_RAW_MODE
794 bool fEnabled;
795 rc = CFGMR3QueryBoolDef(pRoot, "RawR3Enabled", &fEnabled, false); AssertRCReturn(rc, rc);
796 pVM->fRecompileUser = !fEnabled;
797 rc = CFGMR3QueryBoolDef(pRoot, "RawR0Enabled", &fEnabled, false); AssertRCReturn(rc, rc);
798 pVM->fRecompileSupervisor = !fEnabled;
799# ifdef VBOX_WITH_RAW_RING1
800 rc = CFGMR3QueryBoolDef(pRoot, "RawR1Enabled", &pVM->fRawRing1Enabled, false);
801# endif
802 rc = CFGMR3QueryBoolDef(pRoot, "PATMEnabled", &pVM->fPATMEnabled, true); AssertRCReturn(rc, rc);
803 rc = CFGMR3QueryBoolDef(pRoot, "CSAMEnabled", &pVM->fCSAMEnabled, true); AssertRCReturn(rc, rc);
804 rc = CFGMR3QueryBoolDef(pRoot, "HMEnabled", &pVM->fHMEnabled, true); AssertRCReturn(rc, rc);
805#else
806 pVM->fHMEnabled = true;
807#endif
808 Assert(!pVM->fHMEnabledFixed);
809 LogRel(("VM: fHMEnabled=%RTbool (configured) fRecompileUser=%RTbool fRecompileSupervisor=%RTbool\n"
810 "VM: fRawRing1Enabled=%RTbool CSAM=%RTbool PATM=%RTbool\n",
811 pVM->fHMEnabled, pVM->fRecompileUser, pVM->fRecompileSupervisor,
812 pVM->fRawRing1Enabled, pVM->fCSAMEnabled, pVM->fPATMEnabled));
813
814
815 /*
816 * Make sure the CPU count in the config data matches.
817 */
818 uint32_t cCPUsCfg;
819 rc = CFGMR3QueryU32Def(pRoot, "NumCPUs", &cCPUsCfg, 1);
820 AssertLogRelMsgRCReturn(rc, ("Configuration error: Querying \"NumCPUs\" as integer failed, rc=%Rrc\n", rc), rc);
821 AssertLogRelMsgReturn(cCPUsCfg == cCpus,
822 ("Configuration error: \"NumCPUs\"=%RU32 and VMR3Create::cCpus=%RU32 does not match!\n",
823 cCPUsCfg, cCpus),
824 VERR_INVALID_PARAMETER);
825
826 /*
827 * Get the CPU execution cap.
828 */
829 rc = CFGMR3QueryU32Def(pRoot, "CpuExecutionCap", &pVM->uCpuExecutionCap, 100);
830 AssertLogRelMsgRCReturn(rc, ("Configuration error: Querying \"CpuExecutionCap\" as integer failed, rc=%Rrc\n", rc), rc);
831
832 /*
833 * Get the VM name and UUID.
834 */
835 rc = CFGMR3QueryStringAllocDef(pRoot, "Name", &pUVM->vm.s.pszName, "<unknown>");
836 AssertLogRelMsgRCReturn(rc, ("Configuration error: Querying \"Name\" failed, rc=%Rrc\n", rc), rc);
837
838 rc = CFGMR3QueryBytes(pRoot, "UUID", &pUVM->vm.s.Uuid, sizeof(pUVM->vm.s.Uuid));
839 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
840 rc = VINF_SUCCESS;
841 AssertLogRelMsgRCReturn(rc, ("Configuration error: Querying \"UUID\" failed, rc=%Rrc\n", rc), rc);
842
843 rc = CFGMR3QueryBoolDef(pRoot, "PowerOffInsteadOfReset", &pVM->vm.s.fPowerOffInsteadOfReset, false);
844 AssertLogRelMsgRCReturn(rc, ("Configuration error: Querying \"PowerOffInsteadOfReset\" failed, rc=%Rrc\n", rc), rc);
845
846 return VINF_SUCCESS;
847}
848
849
850/**
851 * Register the calling EMT with GVM.
852 *
853 * @returns VBox status code.
854 * @param pVM The cross context VM structure.
855 * @param idCpu The Virtual CPU ID.
856 */
857static DECLCALLBACK(int) vmR3RegisterEMT(PVM pVM, VMCPUID idCpu)
858{
859 Assert(VMMGetCpuId(pVM) == idCpu);
860 int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, idCpu, VMMR0_DO_GVMM_REGISTER_VMCPU, 0, NULL);
861 if (RT_FAILURE(rc))
862 LogRel(("idCpu=%u rc=%Rrc\n", idCpu, rc));
863 return rc;
864}
865
866
867/**
868 * Initializes all R3 components of the VM
869 */
870static int vmR3InitRing3(PVM pVM, PUVM pUVM)
871{
872 int rc;
873
874 /*
875 * Register the other EMTs with GVM.
876 */
877 for (VMCPUID idCpu = 1; idCpu < pVM->cCpus; idCpu++)
878 {
879 rc = VMR3ReqCallWait(pVM, idCpu, (PFNRT)vmR3RegisterEMT, 2, pVM, idCpu);
880 if (RT_FAILURE(rc))
881 return rc;
882 }
883
884 /*
885 * Register statistics.
886 */
887 STAM_REG(pVM, &pVM->StatTotalInGC, STAMTYPE_PROFILE_ADV, "/PROF/VM/InGC", STAMUNIT_TICKS_PER_CALL, "Profiling the total time spent in GC.");
888 STAM_REG(pVM, &pVM->StatSwitcherToGC, STAMTYPE_PROFILE_ADV, "/PROF/VM/SwitchToGC", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
889 STAM_REG(pVM, &pVM->StatSwitcherToHC, STAMTYPE_PROFILE_ADV, "/PROF/VM/SwitchToHC", STAMUNIT_TICKS_PER_CALL, "Profiling switching to HC.");
890 STAM_REG(pVM, &pVM->StatSwitcherSaveRegs, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/SaveRegs", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
891 STAM_REG(pVM, &pVM->StatSwitcherSysEnter, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/SysEnter", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
892 STAM_REG(pVM, &pVM->StatSwitcherDebug, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Debug", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
893 STAM_REG(pVM, &pVM->StatSwitcherCR0, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/CR0", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
894 STAM_REG(pVM, &pVM->StatSwitcherCR4, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/CR4", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
895 STAM_REG(pVM, &pVM->StatSwitcherLgdt, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Lgdt", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
896 STAM_REG(pVM, &pVM->StatSwitcherLidt, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Lidt", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
897 STAM_REG(pVM, &pVM->StatSwitcherLldt, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/Lldt", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
898 STAM_REG(pVM, &pVM->StatSwitcherTSS, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/TSS", STAMUNIT_TICKS_PER_CALL, "Profiling switching to GC.");
899 STAM_REG(pVM, &pVM->StatSwitcherJmpCR3, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/JmpCR3", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
900 STAM_REG(pVM, &pVM->StatSwitcherRstrRegs, STAMTYPE_PROFILE_ADV, "/VM/Switcher/ToGC/RstrRegs", STAMUNIT_TICKS_PER_CALL,"Profiling switching to GC.");
901
902 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
903 {
904 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltYield, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state yielding.", "/PROF/CPU%d/VM/Halt/Yield", idCpu);
905 AssertRC(rc);
906 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlock, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state blocking.", "/PROF/CPU%d/VM/Halt/Block", idCpu);
907 AssertRC(rc);
908 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockOverslept, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time wasted by blocking too long.", "/PROF/CPU%d/VM/Halt/BlockOverslept", idCpu);
909 AssertRC(rc);
910 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockInsomnia, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time slept when returning to early.","/PROF/CPU%d/VM/Halt/BlockInsomnia", idCpu);
911 AssertRC(rc);
912 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltBlockOnTime, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Time slept on time.", "/PROF/CPU%d/VM/Halt/BlockOnTime", idCpu);
913 AssertRC(rc);
914 rc = STAMR3RegisterF(pVM, &pUVM->aCpus[idCpu].vm.s.StatHaltTimers, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_NS_PER_CALL, "Profiling halted state timer tasks.", "/PROF/CPU%d/VM/Halt/Timers", idCpu);
915 AssertRC(rc);
916 }
917
918 STAM_REG(pVM, &pUVM->vm.s.StatReqAllocNew, STAMTYPE_COUNTER, "/VM/Req/AllocNew", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc returning a new packet.");
919 STAM_REG(pVM, &pUVM->vm.s.StatReqAllocRaces, STAMTYPE_COUNTER, "/VM/Req/AllocRaces", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc causing races.");
920 STAM_REG(pVM, &pUVM->vm.s.StatReqAllocRecycled, STAMTYPE_COUNTER, "/VM/Req/AllocRecycled", STAMUNIT_OCCURENCES, "Number of VMR3ReqAlloc returning a recycled packet.");
921 STAM_REG(pVM, &pUVM->vm.s.StatReqFree, STAMTYPE_COUNTER, "/VM/Req/Free", STAMUNIT_OCCURENCES, "Number of VMR3ReqFree calls.");
922 STAM_REG(pVM, &pUVM->vm.s.StatReqFreeOverflow, STAMTYPE_COUNTER, "/VM/Req/FreeOverflow", STAMUNIT_OCCURENCES, "Number of times the request was actually freed.");
923 STAM_REG(pVM, &pUVM->vm.s.StatReqProcessed, STAMTYPE_COUNTER, "/VM/Req/Processed", STAMUNIT_OCCURENCES, "Number of processed requests (any queue).");
924 STAM_REG(pVM, &pUVM->vm.s.StatReqMoreThan1, STAMTYPE_COUNTER, "/VM/Req/MoreThan1", STAMUNIT_OCCURENCES, "Number of times there are more than one request on the queue when processing it.");
925 STAM_REG(pVM, &pUVM->vm.s.StatReqPushBackRaces, STAMTYPE_COUNTER, "/VM/Req/PushBackRaces", STAMUNIT_OCCURENCES, "Number of push back races.");
926
927 /*
928 * Init all R3 components, the order here might be important.
929 * HM shall be initialized first!
930 */
931 rc = HMR3Init(pVM);
932 if (RT_SUCCESS(rc))
933 {
934 rc = MMR3Init(pVM);
935 if (RT_SUCCESS(rc))
936 {
937 rc = CPUMR3Init(pVM);
938 if (RT_SUCCESS(rc))
939 {
940 rc = PGMR3Init(pVM);
941 if (RT_SUCCESS(rc))
942 {
943#ifdef VBOX_WITH_REM
944 rc = REMR3Init(pVM);
945#endif
946 if (RT_SUCCESS(rc))
947 {
948 rc = MMR3InitPaging(pVM);
949 if (RT_SUCCESS(rc))
950 rc = TMR3Init(pVM);
951 if (RT_SUCCESS(rc))
952 {
953 rc = FTMR3Init(pVM);
954 if (RT_SUCCESS(rc))
955 {
956 rc = VMMR3Init(pVM);
957 if (RT_SUCCESS(rc))
958 {
959 rc = SELMR3Init(pVM);
960 if (RT_SUCCESS(rc))
961 {
962 rc = TRPMR3Init(pVM);
963 if (RT_SUCCESS(rc))
964 {
965#ifdef VBOX_WITH_RAW_MODE
966 rc = CSAMR3Init(pVM);
967 if (RT_SUCCESS(rc))
968 {
969 rc = PATMR3Init(pVM);
970 if (RT_SUCCESS(rc))
971 {
972#endif
973 rc = IOMR3Init(pVM);
974 if (RT_SUCCESS(rc))
975 {
976 rc = EMR3Init(pVM);
977 if (RT_SUCCESS(rc))
978 {
979 rc = IEMR3Init(pVM);
980 if (RT_SUCCESS(rc))
981 {
982 rc = DBGFR3Init(pVM);
983 if (RT_SUCCESS(rc))
984 {
985 /* GIM must be init'd before PDM, gimdevR3Construct()
986 requires GIM provider to be setup. */
987 rc = GIMR3Init(pVM);
988 if (RT_SUCCESS(rc))
989 {
990 rc = PDMR3Init(pVM);
991 if (RT_SUCCESS(rc))
992 {
993 rc = PGMR3InitDynMap(pVM);
994 if (RT_SUCCESS(rc))
995 rc = MMR3HyperInitFinalize(pVM);
996#ifdef VBOX_WITH_RAW_MODE
997 if (RT_SUCCESS(rc))
998 rc = PATMR3InitFinalize(pVM);
999#endif
1000 if (RT_SUCCESS(rc))
1001 rc = PGMR3InitFinalize(pVM);
1002 if (RT_SUCCESS(rc))
1003 rc = SELMR3InitFinalize(pVM);
1004 if (RT_SUCCESS(rc))
1005 rc = TMR3InitFinalize(pVM);
1006#ifdef VBOX_WITH_REM
1007 if (RT_SUCCESS(rc))
1008 rc = REMR3InitFinalize(pVM);
1009#endif
1010 if (RT_SUCCESS(rc))
1011 {
1012 PGMR3MemSetup(pVM, false /*fAtReset*/);
1013 PDMR3MemSetup(pVM, false /*fAtReset*/);
1014 }
1015 if (RT_SUCCESS(rc))
1016 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_RING3);
1017 if (RT_SUCCESS(rc))
1018 {
1019 LogFlow(("vmR3InitRing3: returns %Rrc\n", VINF_SUCCESS));
1020 return VINF_SUCCESS;
1021 }
1022
1023 int rc2 = PDMR3Term(pVM);
1024 AssertRC(rc2);
1025 }
1026 int rc2 = GIMR3Term(pVM);
1027 AssertRC(rc2);
1028 }
1029 int rc2 = DBGFR3Term(pVM);
1030 AssertRC(rc2);
1031 }
1032 int rc2 = IEMR3Term(pVM);
1033 AssertRC(rc2);
1034 }
1035 int rc2 = EMR3Term(pVM);
1036 AssertRC(rc2);
1037 }
1038 int rc2 = IOMR3Term(pVM);
1039 AssertRC(rc2);
1040 }
1041#ifdef VBOX_WITH_RAW_MODE
1042 int rc2 = PATMR3Term(pVM);
1043 AssertRC(rc2);
1044 }
1045 int rc2 = CSAMR3Term(pVM);
1046 AssertRC(rc2);
1047 }
1048#endif
1049 int rc2 = TRPMR3Term(pVM);
1050 AssertRC(rc2);
1051 }
1052 int rc2 = SELMR3Term(pVM);
1053 AssertRC(rc2);
1054 }
1055 int rc2 = VMMR3Term(pVM);
1056 AssertRC(rc2);
1057 }
1058 int rc2 = FTMR3Term(pVM);
1059 AssertRC(rc2);
1060 }
1061 int rc2 = TMR3Term(pVM);
1062 AssertRC(rc2);
1063 }
1064#ifdef VBOX_WITH_REM
1065 int rc2 = REMR3Term(pVM);
1066 AssertRC(rc2);
1067#endif
1068 }
1069 int rc2 = PGMR3Term(pVM);
1070 AssertRC(rc2);
1071 }
1072 //int rc2 = CPUMR3Term(pVM);
1073 //AssertRC(rc2);
1074 }
1075 /* MMR3Term is not called here because it'll kill the heap. */
1076 }
1077 int rc2 = HMR3Term(pVM);
1078 AssertRC(rc2);
1079 }
1080
1081
1082 LogFlow(("vmR3InitRing3: returns %Rrc\n", rc));
1083 return rc;
1084}
1085
1086
1087/**
1088 * Initializes all R0 components of the VM
1089 */
1090static int vmR3InitRing0(PVM pVM)
1091{
1092 LogFlow(("vmR3InitRing0:\n"));
1093
1094 /*
1095 * Check for FAKE suplib mode.
1096 */
1097 int rc = VINF_SUCCESS;
1098 const char *psz = RTEnvGet("VBOX_SUPLIB_FAKE");
1099 if (!psz || strcmp(psz, "fake"))
1100 {
1101 /*
1102 * Call the VMMR0 component and let it do the init.
1103 */
1104 rc = VMMR3InitR0(pVM);
1105 }
1106 else
1107 Log(("vmR3InitRing0: skipping because of VBOX_SUPLIB_FAKE=fake\n"));
1108
1109 /*
1110 * Do notifications and return.
1111 */
1112 if (RT_SUCCESS(rc))
1113 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_RING0);
1114 if (RT_SUCCESS(rc))
1115 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_HM);
1116
1117 /** @todo Move this to the VMINITCOMPLETED_HM notification handler. */
1118 if (RT_SUCCESS(rc))
1119 CPUMR3SetHWVirtEx(pVM, HMIsEnabled(pVM));
1120
1121 LogFlow(("vmR3InitRing0: returns %Rrc\n", rc));
1122 return rc;
1123}
1124
1125
1126#ifdef VBOX_WITH_RAW_MODE
1127/**
1128 * Initializes all RC components of the VM
1129 */
1130static int vmR3InitRC(PVM pVM)
1131{
1132 LogFlow(("vmR3InitRC:\n"));
1133
1134 /*
1135 * Check for FAKE suplib mode.
1136 */
1137 int rc = VINF_SUCCESS;
1138 const char *psz = RTEnvGet("VBOX_SUPLIB_FAKE");
1139 if (!psz || strcmp(psz, "fake"))
1140 {
1141 /*
1142 * Call the VMMR0 component and let it do the init.
1143 */
1144 rc = VMMR3InitRC(pVM);
1145 }
1146 else
1147 Log(("vmR3InitRC: skipping because of VBOX_SUPLIB_FAKE=fake\n"));
1148
1149 /*
1150 * Do notifications and return.
1151 */
1152 if (RT_SUCCESS(rc))
1153 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_RC);
1154 LogFlow(("vmR3InitRC: returns %Rrc\n", rc));
1155 return rc;
1156}
1157#endif /* VBOX_WITH_RAW_MODE */
1158
1159
1160/**
1161 * Do init completed notifications.
1162 *
1163 * @returns VBox status code.
1164 * @param pVM The cross context VM structure.
1165 * @param enmWhat What's completed.
1166 */
1167static int vmR3InitDoCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
1168{
1169 int rc = VMMR3InitCompleted(pVM, enmWhat);
1170 if (RT_SUCCESS(rc))
1171 rc = HMR3InitCompleted(pVM, enmWhat);
1172 if (RT_SUCCESS(rc))
1173 rc = PGMR3InitCompleted(pVM, enmWhat);
1174 if (RT_SUCCESS(rc))
1175 rc = CPUMR3InitCompleted(pVM, enmWhat);
1176 if (enmWhat == VMINITCOMPLETED_RING3)
1177 {
1178#ifndef VBOX_WITH_RAW_MODE
1179 if (RT_SUCCESS(rc))
1180 rc = SSMR3RegisterStub(pVM, "CSAM", 0);
1181 if (RT_SUCCESS(rc))
1182 rc = SSMR3RegisterStub(pVM, "PATM", 0);
1183#endif
1184#ifndef VBOX_WITH_REM
1185 if (RT_SUCCESS(rc))
1186 rc = SSMR3RegisterStub(pVM, "rem", 1);
1187#endif
1188 }
1189 if (RT_SUCCESS(rc))
1190 rc = PDMR3InitCompleted(pVM, enmWhat);
1191 return rc;
1192}
1193
1194
1195#ifdef LOG_ENABLED
1196/**
1197 * Logger callback for inserting a custom prefix.
1198 *
1199 * @returns Number of chars written.
1200 * @param pLogger The logger.
1201 * @param pchBuf The output buffer.
1202 * @param cchBuf The output buffer size.
1203 * @param pvUser Pointer to the UVM structure.
1204 */
1205static DECLCALLBACK(size_t) vmR3LogPrefixCallback(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser)
1206{
1207 AssertReturn(cchBuf >= 2, 0);
1208 PUVM pUVM = (PUVM)pvUser;
1209 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
1210 if (pUVCpu)
1211 {
1212 static const char s_szHex[17] = "0123456789abcdef";
1213 VMCPUID const idCpu = pUVCpu->idCpu;
1214 pchBuf[1] = s_szHex[ idCpu & 15];
1215 pchBuf[0] = s_szHex[(idCpu >> 4) & 15];
1216 }
1217 else
1218 {
1219 pchBuf[0] = 'x';
1220 pchBuf[1] = 'y';
1221 }
1222
1223 NOREF(pLogger);
1224 return 2;
1225}
1226#endif /* LOG_ENABLED */
1227
1228
1229/**
1230 * Calls the relocation functions for all VMM components so they can update
1231 * any GC pointers. When this function is called all the basic VM members
1232 * have been updated and the actual memory relocation have been done
1233 * by the PGM/MM.
1234 *
1235 * This is used both on init and on runtime relocations.
1236 *
1237 * @param pVM The cross context VM structure.
1238 * @param offDelta Relocation delta relative to old location.
1239 */
1240VMMR3_INT_DECL(void) VMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
1241{
1242 LogFlow(("VMR3Relocate: offDelta=%RGv\n", offDelta));
1243
1244 /*
1245 * The order here is very important!
1246 */
1247 PGMR3Relocate(pVM, offDelta);
1248 PDMR3LdrRelocateU(pVM->pUVM, offDelta);
1249 PGMR3Relocate(pVM, 0); /* Repeat after PDM relocation. */
1250 CPUMR3Relocate(pVM);
1251 HMR3Relocate(pVM);
1252 SELMR3Relocate(pVM);
1253 VMMR3Relocate(pVM, offDelta);
1254 SELMR3Relocate(pVM); /* !hack! fix stack! */
1255 TRPMR3Relocate(pVM, offDelta);
1256#ifdef VBOX_WITH_RAW_MODE
1257 PATMR3Relocate(pVM, (RTRCINTPTR)offDelta);
1258 CSAMR3Relocate(pVM, offDelta);
1259#endif
1260 IOMR3Relocate(pVM, offDelta);
1261 EMR3Relocate(pVM);
1262 TMR3Relocate(pVM, offDelta);
1263 IEMR3Relocate(pVM);
1264 DBGFR3Relocate(pVM, offDelta);
1265 PDMR3Relocate(pVM, offDelta);
1266}
1267
1268
1269/**
1270 * EMT rendezvous worker for VMR3PowerOn.
1271 *
1272 * @returns VERR_VM_INVALID_VM_STATE or VINF_SUCCESS. (This is a strict return
1273 * code, see FNVMMEMTRENDEZVOUS.)
1274 *
1275 * @param pVM The cross context VM structure.
1276 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1277 * @param pvUser Ignored.
1278 */
1279static DECLCALLBACK(VBOXSTRICTRC) vmR3PowerOn(PVM pVM, PVMCPU pVCpu, void *pvUser)
1280{
1281 LogFlow(("vmR3PowerOn: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1282 Assert(!pvUser); NOREF(pvUser);
1283
1284 /*
1285 * The first thread thru here tries to change the state. We shouldn't be
1286 * called again if this fails.
1287 */
1288 if (pVCpu->idCpu == pVM->cCpus - 1)
1289 {
1290 int rc = vmR3TrySetState(pVM, "VMR3PowerOn", 1, VMSTATE_POWERING_ON, VMSTATE_CREATED);
1291 if (RT_FAILURE(rc))
1292 return rc;
1293 }
1294
1295 VMSTATE enmVMState = VMR3GetState(pVM);
1296 AssertMsgReturn(enmVMState == VMSTATE_POWERING_ON,
1297 ("%s\n", VMR3GetStateName(enmVMState)),
1298 VERR_VM_UNEXPECTED_UNSTABLE_STATE);
1299
1300 /*
1301 * All EMTs changes their state to started.
1302 */
1303 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1304
1305 /*
1306 * EMT(0) is last thru here and it will make the notification calls
1307 * and advance the state.
1308 */
1309 if (pVCpu->idCpu == 0)
1310 {
1311 PDMR3PowerOn(pVM);
1312 vmR3SetState(pVM, VMSTATE_RUNNING, VMSTATE_POWERING_ON);
1313 }
1314
1315 return VINF_SUCCESS;
1316}
1317
1318
1319/**
1320 * Powers on the virtual machine.
1321 *
1322 * @returns VBox status code.
1323 *
1324 * @param pUVM The VM to power on.
1325 *
1326 * @thread Any thread.
1327 * @vmstate Created
1328 * @vmstateto PoweringOn+Running
1329 */
1330VMMR3DECL(int) VMR3PowerOn(PUVM pUVM)
1331{
1332 LogFlow(("VMR3PowerOn: pUVM=%p\n", pUVM));
1333 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1334 PVM pVM = pUVM->pVM;
1335 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1336
1337 /*
1338 * Gather all the EMTs to reduce the init TSC drift and keep
1339 * the state changing APIs a bit uniform.
1340 */
1341 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1342 vmR3PowerOn, NULL);
1343 LogFlow(("VMR3PowerOn: returns %Rrc\n", rc));
1344 return rc;
1345}
1346
1347
1348/**
1349 * Does the suspend notifications.
1350 *
1351 * @param pVM The cross context VM structure.
1352 * @thread EMT(0)
1353 */
1354static void vmR3SuspendDoWork(PVM pVM)
1355{
1356 PDMR3Suspend(pVM);
1357}
1358
1359
1360/**
1361 * EMT rendezvous worker for VMR3Suspend.
1362 *
1363 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_SUSPEND. (This is a strict
1364 * return code, see FNVMMEMTRENDEZVOUS.)
1365 *
1366 * @param pVM The cross context VM structure.
1367 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1368 * @param pvUser Ignored.
1369 */
1370static DECLCALLBACK(VBOXSTRICTRC) vmR3Suspend(PVM pVM, PVMCPU pVCpu, void *pvUser)
1371{
1372 VMSUSPENDREASON enmReason = (VMSUSPENDREASON)(uintptr_t)pvUser;
1373 LogFlow(("vmR3Suspend: pVM=%p pVCpu=%p/#%u enmReason=%d\n", pVM, pVCpu, pVCpu->idCpu, enmReason));
1374
1375 /*
1376 * The first EMT switches the state to suspending. If this fails because
1377 * something was racing us in one way or the other, there will be no more
1378 * calls and thus the state assertion below is not going to annoy anyone.
1379 */
1380 if (pVCpu->idCpu == pVM->cCpus - 1)
1381 {
1382 int rc = vmR3TrySetState(pVM, "VMR3Suspend", 2,
1383 VMSTATE_SUSPENDING, VMSTATE_RUNNING,
1384 VMSTATE_SUSPENDING_EXT_LS, VMSTATE_RUNNING_LS);
1385 if (RT_FAILURE(rc))
1386 return rc;
1387 pVM->pUVM->vm.s.enmSuspendReason = enmReason;
1388 }
1389
1390 VMSTATE enmVMState = VMR3GetState(pVM);
1391 AssertMsgReturn( enmVMState == VMSTATE_SUSPENDING
1392 || enmVMState == VMSTATE_SUSPENDING_EXT_LS,
1393 ("%s\n", VMR3GetStateName(enmVMState)),
1394 VERR_VM_UNEXPECTED_UNSTABLE_STATE);
1395
1396 /*
1397 * EMT(0) does the actually suspending *after* all the other CPUs have
1398 * been thru here.
1399 */
1400 if (pVCpu->idCpu == 0)
1401 {
1402 vmR3SuspendDoWork(pVM);
1403
1404 int rc = vmR3TrySetState(pVM, "VMR3Suspend", 2,
1405 VMSTATE_SUSPENDED, VMSTATE_SUSPENDING,
1406 VMSTATE_SUSPENDED_EXT_LS, VMSTATE_SUSPENDING_EXT_LS);
1407 if (RT_FAILURE(rc))
1408 return VERR_VM_UNEXPECTED_UNSTABLE_STATE;
1409 }
1410
1411 return VINF_EM_SUSPEND;
1412}
1413
1414
1415/**
1416 * Suspends a running VM.
1417 *
1418 * @returns VBox status code. When called on EMT, this will be a strict status
1419 * code that has to be propagated up the call stack.
1420 *
1421 * @param pUVM The VM to suspend.
1422 * @param enmReason The reason for suspending.
1423 *
1424 * @thread Any thread.
1425 * @vmstate Running or RunningLS
1426 * @vmstateto Suspending + Suspended or SuspendingExtLS + SuspendedExtLS
1427 */
1428VMMR3DECL(int) VMR3Suspend(PUVM pUVM, VMSUSPENDREASON enmReason)
1429{
1430 LogFlow(("VMR3Suspend: pUVM=%p\n", pUVM));
1431 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1432 AssertReturn(enmReason > VMSUSPENDREASON_INVALID && enmReason < VMSUSPENDREASON_END, VERR_INVALID_PARAMETER);
1433
1434 /*
1435 * Gather all the EMTs to make sure there are no races before
1436 * changing the VM state.
1437 */
1438 int rc = VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1439 vmR3Suspend, (void *)(uintptr_t)enmReason);
1440 LogFlow(("VMR3Suspend: returns %Rrc\n", rc));
1441 return rc;
1442}
1443
1444
1445/**
1446 * Retrieves the reason for the most recent suspend.
1447 *
1448 * @returns Suspend reason. VMSUSPENDREASON_INVALID if no suspend has been done
1449 * or the handle is invalid.
1450 * @param pUVM The user mode VM handle.
1451 */
1452VMMR3DECL(VMSUSPENDREASON) VMR3GetSuspendReason(PUVM pUVM)
1453{
1454 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VMSUSPENDREASON_INVALID);
1455 return pUVM->vm.s.enmSuspendReason;
1456}
1457
1458
1459/**
1460 * EMT rendezvous worker for VMR3Resume.
1461 *
1462 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_RESUME. (This is a strict
1463 * return code, see FNVMMEMTRENDEZVOUS.)
1464 *
1465 * @param pVM The cross context VM structure.
1466 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1467 * @param pvUser Reason.
1468 */
1469static DECLCALLBACK(VBOXSTRICTRC) vmR3Resume(PVM pVM, PVMCPU pVCpu, void *pvUser)
1470{
1471 VMRESUMEREASON enmReason = (VMRESUMEREASON)(uintptr_t)pvUser;
1472 LogFlow(("vmR3Resume: pVM=%p pVCpu=%p/#%u enmReason=%d\n", pVM, pVCpu, pVCpu->idCpu, enmReason));
1473
1474 /*
1475 * The first thread thru here tries to change the state. We shouldn't be
1476 * called again if this fails.
1477 */
1478 if (pVCpu->idCpu == pVM->cCpus - 1)
1479 {
1480 int rc = vmR3TrySetState(pVM, "VMR3Resume", 1, VMSTATE_RESUMING, VMSTATE_SUSPENDED);
1481 if (RT_FAILURE(rc))
1482 return rc;
1483 pVM->pUVM->vm.s.enmResumeReason = enmReason;
1484 }
1485
1486 VMSTATE enmVMState = VMR3GetState(pVM);
1487 AssertMsgReturn(enmVMState == VMSTATE_RESUMING,
1488 ("%s\n", VMR3GetStateName(enmVMState)),
1489 VERR_VM_UNEXPECTED_UNSTABLE_STATE);
1490
1491#if 0
1492 /*
1493 * All EMTs changes their state to started.
1494 */
1495 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1496#endif
1497
1498 /*
1499 * EMT(0) is last thru here and it will make the notification calls
1500 * and advance the state.
1501 */
1502 if (pVCpu->idCpu == 0)
1503 {
1504 PDMR3Resume(pVM);
1505 vmR3SetState(pVM, VMSTATE_RUNNING, VMSTATE_RESUMING);
1506 pVM->vm.s.fTeleportedAndNotFullyResumedYet = false;
1507 }
1508
1509 return VINF_EM_RESUME;
1510}
1511
1512
1513/**
1514 * Resume VM execution.
1515 *
1516 * @returns VBox status code. When called on EMT, this will be a strict status
1517 * code that has to be propagated up the call stack.
1518 *
1519 * @param pUVM The user mode VM handle.
1520 * @param enmReason The reason we're resuming.
1521 *
1522 * @thread Any thread.
1523 * @vmstate Suspended
1524 * @vmstateto Running
1525 */
1526VMMR3DECL(int) VMR3Resume(PUVM pUVM, VMRESUMEREASON enmReason)
1527{
1528 LogFlow(("VMR3Resume: pUVM=%p\n", pUVM));
1529 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1530 PVM pVM = pUVM->pVM;
1531 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1532 AssertReturn(enmReason > VMRESUMEREASON_INVALID && enmReason < VMRESUMEREASON_END, VERR_INVALID_PARAMETER);
1533
1534 /*
1535 * Gather all the EMTs to make sure there are no races before
1536 * changing the VM state.
1537 */
1538 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1539 vmR3Resume, (void *)(uintptr_t)enmReason);
1540 LogFlow(("VMR3Resume: returns %Rrc\n", rc));
1541 return rc;
1542}
1543
1544
1545/**
1546 * Retrieves the reason for the most recent resume.
1547 *
1548 * @returns Resume reason. VMRESUMEREASON_INVALID if no suspend has been
1549 * done or the handle is invalid.
1550 * @param pUVM The user mode VM handle.
1551 */
1552VMMR3DECL(VMRESUMEREASON) VMR3GetResumeReason(PUVM pUVM)
1553{
1554 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VMRESUMEREASON_INVALID);
1555 return pUVM->vm.s.enmResumeReason;
1556}
1557
1558
1559/**
1560 * EMT rendezvous worker for VMR3Save and VMR3Teleport that suspends the VM
1561 * after the live step has been completed.
1562 *
1563 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_RESUME. (This is a strict
1564 * return code, see FNVMMEMTRENDEZVOUS.)
1565 *
1566 * @param pVM The cross context VM structure.
1567 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1568 * @param pvUser The pfSuspended argument of vmR3SaveTeleport.
1569 */
1570static DECLCALLBACK(VBOXSTRICTRC) vmR3LiveDoSuspend(PVM pVM, PVMCPU pVCpu, void *pvUser)
1571{
1572 LogFlow(("vmR3LiveDoSuspend: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1573 bool *pfSuspended = (bool *)pvUser;
1574
1575 /*
1576 * The first thread thru here tries to change the state. We shouldn't be
1577 * called again if this fails.
1578 */
1579 if (pVCpu->idCpu == pVM->cCpus - 1U)
1580 {
1581 PUVM pUVM = pVM->pUVM;
1582 int rc;
1583
1584 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
1585 VMSTATE enmVMState = pVM->enmVMState;
1586 switch (enmVMState)
1587 {
1588 case VMSTATE_RUNNING_LS:
1589 vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDING_LS, VMSTATE_RUNNING_LS, false /*fSetRatherThanClearFF*/);
1590 rc = VINF_SUCCESS;
1591 break;
1592
1593 case VMSTATE_SUSPENDED_EXT_LS:
1594 case VMSTATE_SUSPENDED_LS: /* (via reset) */
1595 rc = VINF_SUCCESS;
1596 break;
1597
1598 case VMSTATE_DEBUGGING_LS:
1599 rc = VERR_TRY_AGAIN;
1600 break;
1601
1602 case VMSTATE_OFF_LS:
1603 vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF, VMSTATE_OFF_LS, false /*fSetRatherThanClearFF*/);
1604 rc = VERR_SSM_LIVE_POWERED_OFF;
1605 break;
1606
1607 case VMSTATE_FATAL_ERROR_LS:
1608 vmR3SetStateLocked(pVM, pUVM, VMSTATE_FATAL_ERROR, VMSTATE_FATAL_ERROR_LS, false /*fSetRatherThanClearFF*/);
1609 rc = VERR_SSM_LIVE_FATAL_ERROR;
1610 break;
1611
1612 case VMSTATE_GURU_MEDITATION_LS:
1613 vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION, VMSTATE_GURU_MEDITATION_LS, false /*fSetRatherThanClearFF*/);
1614 rc = VERR_SSM_LIVE_GURU_MEDITATION;
1615 break;
1616
1617 case VMSTATE_POWERING_OFF_LS:
1618 case VMSTATE_SUSPENDING_EXT_LS:
1619 case VMSTATE_RESETTING_LS:
1620 default:
1621 AssertMsgFailed(("%s\n", VMR3GetStateName(enmVMState)));
1622 rc = VERR_VM_UNEXPECTED_VM_STATE;
1623 break;
1624 }
1625 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
1626 if (RT_FAILURE(rc))
1627 {
1628 LogFlow(("vmR3LiveDoSuspend: returns %Rrc (state was %s)\n", rc, VMR3GetStateName(enmVMState)));
1629 return rc;
1630 }
1631 }
1632
1633 VMSTATE enmVMState = VMR3GetState(pVM);
1634 AssertMsgReturn(enmVMState == VMSTATE_SUSPENDING_LS,
1635 ("%s\n", VMR3GetStateName(enmVMState)),
1636 VERR_VM_UNEXPECTED_UNSTABLE_STATE);
1637
1638 /*
1639 * Only EMT(0) have work to do since it's last thru here.
1640 */
1641 if (pVCpu->idCpu == 0)
1642 {
1643 vmR3SuspendDoWork(pVM);
1644 int rc = vmR3TrySetState(pVM, "VMR3Suspend", 1,
1645 VMSTATE_SUSPENDED_LS, VMSTATE_SUSPENDING_LS);
1646 if (RT_FAILURE(rc))
1647 return VERR_VM_UNEXPECTED_UNSTABLE_STATE;
1648
1649 *pfSuspended = true;
1650 }
1651
1652 return VINF_EM_SUSPEND;
1653}
1654
1655
1656/**
1657 * EMT rendezvous worker that VMR3Save and VMR3Teleport uses to clean up a
1658 * SSMR3LiveDoStep1 failure.
1659 *
1660 * Doing this as a rendezvous operation avoids all annoying transition
1661 * states.
1662 *
1663 * @returns VERR_VM_INVALID_VM_STATE, VINF_SUCCESS or some specific VERR_SSM_*
1664 * status code. (This is a strict return code, see FNVMMEMTRENDEZVOUS.)
1665 *
1666 * @param pVM The cross context VM structure.
1667 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1668 * @param pvUser The pfSuspended argument of vmR3SaveTeleport.
1669 */
1670static DECLCALLBACK(VBOXSTRICTRC) vmR3LiveDoStep1Cleanup(PVM pVM, PVMCPU pVCpu, void *pvUser)
1671{
1672 LogFlow(("vmR3LiveDoStep1Cleanup: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
1673 bool *pfSuspended = (bool *)pvUser;
1674 NOREF(pVCpu);
1675
1676 int rc = vmR3TrySetState(pVM, "vmR3LiveDoStep1Cleanup", 8,
1677 VMSTATE_OFF, VMSTATE_OFF_LS, /* 1 */
1678 VMSTATE_FATAL_ERROR, VMSTATE_FATAL_ERROR_LS, /* 2 */
1679 VMSTATE_GURU_MEDITATION, VMSTATE_GURU_MEDITATION_LS, /* 3 */
1680 VMSTATE_SUSPENDED, VMSTATE_SUSPENDED_LS, /* 4 */
1681 VMSTATE_SUSPENDED, VMSTATE_SAVING,
1682 VMSTATE_SUSPENDED, VMSTATE_SUSPENDED_EXT_LS,
1683 VMSTATE_RUNNING, VMSTATE_RUNNING_LS,
1684 VMSTATE_DEBUGGING, VMSTATE_DEBUGGING_LS);
1685 if (rc == 1)
1686 rc = VERR_SSM_LIVE_POWERED_OFF;
1687 else if (rc == 2)
1688 rc = VERR_SSM_LIVE_FATAL_ERROR;
1689 else if (rc == 3)
1690 rc = VERR_SSM_LIVE_GURU_MEDITATION;
1691 else if (rc == 4)
1692 {
1693 *pfSuspended = true;
1694 rc = VINF_SUCCESS;
1695 }
1696 else if (rc > 0)
1697 rc = VINF_SUCCESS;
1698 return rc;
1699}
1700
1701
1702/**
1703 * EMT(0) worker for VMR3Save and VMR3Teleport that completes the live save.
1704 *
1705 * @returns VBox status code.
1706 * @retval VINF_SSM_LIVE_SUSPENDED if VMR3Suspend was called.
1707 *
1708 * @param pVM The cross context VM structure.
1709 * @param pSSM The handle of saved state operation.
1710 *
1711 * @thread EMT(0)
1712 */
1713static DECLCALLBACK(int) vmR3LiveDoStep2(PVM pVM, PSSMHANDLE pSSM)
1714{
1715 LogFlow(("vmR3LiveDoStep2: pVM=%p pSSM=%p\n", pVM, pSSM));
1716 VM_ASSERT_EMT0(pVM);
1717
1718 /*
1719 * Advance the state and mark if VMR3Suspend was called.
1720 */
1721 int rc = VINF_SUCCESS;
1722 VMSTATE enmVMState = VMR3GetState(pVM);
1723 if (enmVMState == VMSTATE_SUSPENDED_LS)
1724 vmR3SetState(pVM, VMSTATE_SAVING, VMSTATE_SUSPENDED_LS);
1725 else
1726 {
1727 if (enmVMState != VMSTATE_SAVING)
1728 vmR3SetState(pVM, VMSTATE_SAVING, VMSTATE_SUSPENDED_EXT_LS);
1729 rc = VINF_SSM_LIVE_SUSPENDED;
1730 }
1731
1732 /*
1733 * Finish up and release the handle. Careful with the status codes.
1734 */
1735 int rc2 = SSMR3LiveDoStep2(pSSM);
1736 if (rc == VINF_SUCCESS || (RT_FAILURE(rc2) && RT_SUCCESS(rc)))
1737 rc = rc2;
1738
1739 rc2 = SSMR3LiveDone(pSSM);
1740 if (rc == VINF_SUCCESS || (RT_FAILURE(rc2) && RT_SUCCESS(rc)))
1741 rc = rc2;
1742
1743 /*
1744 * Advance to the final state and return.
1745 */
1746 vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_SAVING);
1747 Assert(rc > VINF_EM_LAST || rc < VINF_EM_FIRST);
1748 return rc;
1749}
1750
1751
1752/**
1753 * Worker for vmR3SaveTeleport that validates the state and calls SSMR3Save or
1754 * SSMR3LiveSave.
1755 *
1756 * @returns VBox status code.
1757 *
1758 * @param pVM The cross context VM structure.
1759 * @param cMsMaxDowntime The maximum downtime given as milliseconds.
1760 * @param pszFilename The name of the file. NULL if pStreamOps is used.
1761 * @param pStreamOps The stream methods. NULL if pszFilename is used.
1762 * @param pvStreamOpsUser The user argument to the stream methods.
1763 * @param enmAfter What to do afterwards.
1764 * @param pfnProgress Progress callback. Optional.
1765 * @param pvProgressUser User argument for the progress callback.
1766 * @param ppSSM Where to return the saved state handle in case of a
1767 * live snapshot scenario.
1768 * @param fSkipStateChanges Set if we're supposed to skip state changes (FTM delta case)
1769 *
1770 * @thread EMT
1771 */
1772static DECLCALLBACK(int) vmR3Save(PVM pVM, uint32_t cMsMaxDowntime, const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1773 SSMAFTER enmAfter, PFNVMPROGRESS pfnProgress, void *pvProgressUser, PSSMHANDLE *ppSSM,
1774 bool fSkipStateChanges)
1775{
1776 int rc = VINF_SUCCESS;
1777
1778 LogFlow(("vmR3Save: pVM=%p cMsMaxDowntime=%u pszFilename=%p:{%s} pStreamOps=%p pvStreamOpsUser=%p enmAfter=%d pfnProgress=%p pvProgressUser=%p ppSSM=%p\n",
1779 pVM, cMsMaxDowntime, pszFilename, pszFilename, pStreamOps, pvStreamOpsUser, enmAfter, pfnProgress, pvProgressUser, ppSSM));
1780
1781 /*
1782 * Validate input.
1783 */
1784 AssertPtrNull(pszFilename);
1785 AssertPtrNull(pStreamOps);
1786 AssertPtr(pVM);
1787 Assert( enmAfter == SSMAFTER_DESTROY
1788 || enmAfter == SSMAFTER_CONTINUE
1789 || enmAfter == SSMAFTER_TELEPORT);
1790 AssertPtr(ppSSM);
1791 *ppSSM = NULL;
1792
1793 /*
1794 * Change the state and perform/start the saving.
1795 */
1796 if (!fSkipStateChanges)
1797 {
1798 rc = vmR3TrySetState(pVM, "VMR3Save", 2,
1799 VMSTATE_SAVING, VMSTATE_SUSPENDED,
1800 VMSTATE_RUNNING_LS, VMSTATE_RUNNING);
1801 }
1802 else
1803 {
1804 Assert(enmAfter != SSMAFTER_TELEPORT);
1805 rc = 1;
1806 }
1807
1808 if (rc == 1 && enmAfter != SSMAFTER_TELEPORT)
1809 {
1810 rc = SSMR3Save(pVM, pszFilename, pStreamOps, pvStreamOpsUser, enmAfter, pfnProgress, pvProgressUser);
1811 if (!fSkipStateChanges)
1812 vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_SAVING);
1813 }
1814 else if (rc == 2 || enmAfter == SSMAFTER_TELEPORT)
1815 {
1816 Assert(!fSkipStateChanges);
1817 if (enmAfter == SSMAFTER_TELEPORT)
1818 pVM->vm.s.fTeleportedAndNotFullyResumedYet = true;
1819 rc = SSMR3LiveSave(pVM, cMsMaxDowntime, pszFilename, pStreamOps, pvStreamOpsUser,
1820 enmAfter, pfnProgress, pvProgressUser, ppSSM);
1821 /* (We're not subject to cancellation just yet.) */
1822 }
1823 else
1824 Assert(RT_FAILURE(rc));
1825 return rc;
1826}
1827
1828
1829/**
1830 * Common worker for VMR3Save and VMR3Teleport.
1831 *
1832 * @returns VBox status code.
1833 *
1834 * @param pVM The cross context VM structure.
1835 * @param cMsMaxDowntime The maximum downtime given as milliseconds.
1836 * @param pszFilename The name of the file. NULL if pStreamOps is used.
1837 * @param pStreamOps The stream methods. NULL if pszFilename is used.
1838 * @param pvStreamOpsUser The user argument to the stream methods.
1839 * @param enmAfter What to do afterwards.
1840 * @param pfnProgress Progress callback. Optional.
1841 * @param pvProgressUser User argument for the progress callback.
1842 * @param pfSuspended Set if we suspended the VM.
1843 * @param fSkipStateChanges Set if we're supposed to skip state changes (FTM delta case)
1844 *
1845 * @thread Non-EMT
1846 */
1847static int vmR3SaveTeleport(PVM pVM, uint32_t cMsMaxDowntime,
1848 const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
1849 SSMAFTER enmAfter, PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool *pfSuspended,
1850 bool fSkipStateChanges)
1851{
1852 /*
1853 * Request the operation in EMT(0).
1854 */
1855 PSSMHANDLE pSSM;
1856 int rc = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/,
1857 (PFNRT)vmR3Save, 10, pVM, cMsMaxDowntime, pszFilename, pStreamOps, pvStreamOpsUser,
1858 enmAfter, pfnProgress, pvProgressUser, &pSSM, fSkipStateChanges);
1859 if ( RT_SUCCESS(rc)
1860 && pSSM)
1861 {
1862 Assert(!fSkipStateChanges);
1863
1864 /*
1865 * Live snapshot.
1866 *
1867 * The state handling here is kind of tricky, doing it on EMT(0) helps
1868 * a bit. See the VMSTATE diagram for details.
1869 */
1870 rc = SSMR3LiveDoStep1(pSSM);
1871 if (RT_SUCCESS(rc))
1872 {
1873 if (VMR3GetState(pVM) != VMSTATE_SAVING)
1874 for (;;)
1875 {
1876 /* Try suspend the VM. */
1877 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
1878 vmR3LiveDoSuspend, pfSuspended);
1879 if (rc != VERR_TRY_AGAIN)
1880 break;
1881
1882 /* Wait for the state to change. */
1883 RTThreadSleep(250); /** @todo Live Migration: fix this polling wait by some smart use of multiple release event semaphores.. */
1884 }
1885 if (RT_SUCCESS(rc))
1886 rc = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/, (PFNRT)vmR3LiveDoStep2, 2, pVM, pSSM);
1887 else
1888 {
1889 int rc2 = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/, (PFNRT)SSMR3LiveDone, 1, pSSM);
1890 AssertMsg(rc2 == rc, ("%Rrc != %Rrc\n", rc2, rc)); NOREF(rc2);
1891 }
1892 }
1893 else
1894 {
1895 int rc2 = VMR3ReqCallWait(pVM, 0 /*idDstCpu*/, (PFNRT)SSMR3LiveDone, 1, pSSM);
1896 AssertMsg(rc2 == rc, ("%Rrc != %Rrc\n", rc2, rc));
1897
1898 rc2 = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONCE, vmR3LiveDoStep1Cleanup, pfSuspended);
1899 if (RT_FAILURE(rc2) && rc == VERR_SSM_CANCELLED)
1900 rc = rc2;
1901 }
1902 }
1903
1904 return rc;
1905}
1906
1907
1908/**
1909 * Save current VM state.
1910 *
1911 * Can be used for both saving the state and creating snapshots.
1912 *
 * When called for a VM in the Running state, the saved state is created live
 * and the VM is only suspended when the final part of the saving is performed.
 * The VM state will not be restored to Running in this case and it's up to the
 * caller to call VMR3Resume if this is desirable. (The rationale is that the
 * caller probably wishes to reconfigure the disks before resuming the VM.)
1918 *
1919 * @returns VBox status code.
1920 *
1921 * @param pUVM The VM which state should be saved.
1922 * @param pszFilename The name of the save state file.
1923 * @param fContinueAfterwards Whether continue execution afterwards or not.
1924 * When in doubt, set this to true.
1925 * @param pfnProgress Progress callback. Optional.
1926 * @param pvUser User argument for the progress callback.
1927 * @param pfSuspended Set if we suspended the VM.
1928 *
1929 * @thread Non-EMT.
1930 * @vmstate Suspended or Running
1931 * @vmstateto Saving+Suspended or
1932 * RunningLS+SuspendingLS+SuspendedLS+Saving+Suspended.
1933 */
1934VMMR3DECL(int) VMR3Save(PUVM pUVM, const char *pszFilename, bool fContinueAfterwards, PFNVMPROGRESS pfnProgress, void *pvUser,
1935 bool *pfSuspended)
1936{
1937 LogFlow(("VMR3Save: pUVM=%p pszFilename=%p:{%s} fContinueAfterwards=%RTbool pfnProgress=%p pvUser=%p pfSuspended=%p\n",
1938 pUVM, pszFilename, pszFilename, fContinueAfterwards, pfnProgress, pvUser, pfSuspended));
1939
1940 /*
1941 * Validate input.
1942 */
1943 AssertPtr(pfSuspended);
1944 *pfSuspended = false;
1945 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1946 PVM pVM = pUVM->pVM;
1947 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1948 VM_ASSERT_OTHER_THREAD(pVM);
1949 AssertReturn(VALID_PTR(pszFilename), VERR_INVALID_POINTER);
1950 AssertReturn(*pszFilename, VERR_INVALID_PARAMETER);
1951 AssertPtrNullReturn(pfnProgress, VERR_INVALID_POINTER);
1952
1953 /*
1954 * Join paths with VMR3Teleport.
1955 */
1956 SSMAFTER enmAfter = fContinueAfterwards ? SSMAFTER_CONTINUE : SSMAFTER_DESTROY;
1957 int rc = vmR3SaveTeleport(pVM, 250 /*cMsMaxDowntime*/,
1958 pszFilename, NULL /* pStreamOps */, NULL /* pvStreamOpsUser */,
1959 enmAfter, pfnProgress, pvUser, pfSuspended,
1960 false /* fSkipStateChanges */);
1961 LogFlow(("VMR3Save: returns %Rrc (*pfSuspended=%RTbool)\n", rc, *pfSuspended));
1962 return rc;
1963}
1964
1965/**
1966 * Save current VM state (used by FTM)
1967 *
1968 *
1969 * @returns VBox status code.
1970 *
1971 * @param pUVM The user mode VM handle.
1972 * @param pStreamOps The stream methods.
1973 * @param pvStreamOpsUser The user argument to the stream methods.
1974 * @param pfSuspended Set if we suspended the VM.
1975 * @param fSkipStateChanges Set if we're supposed to skip state changes (FTM delta case)
1976 *
1977 * @thread Any
1978 * @vmstate Suspended or Running
1979 * @vmstateto Saving+Suspended or
1980 * RunningLS+SuspendingLS+SuspendedLS+Saving+Suspended.
1981 */
1982VMMR3_INT_DECL(int) VMR3SaveFT(PUVM pUVM, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser, bool *pfSuspended, bool fSkipStateChanges)
1983{
1984 LogFlow(("VMR3SaveFT: pUVM=%p pStreamOps=%p pvSteamOpsUser=%p pfSuspended=%p\n",
1985 pUVM, pStreamOps, pvStreamOpsUser, pfSuspended));
1986
1987 /*
1988 * Validate input.
1989 */
1990 AssertPtr(pfSuspended);
1991 *pfSuspended = false;
1992 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
1993 PVM pVM = pUVM->pVM;
1994 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
1995 AssertReturn(pStreamOps, VERR_INVALID_PARAMETER);
1996
1997 /*
1998 * Join paths with VMR3Teleport.
1999 */
2000 int rc = vmR3SaveTeleport(pVM, 250 /*cMsMaxDowntime*/,
2001 NULL, pStreamOps, pvStreamOpsUser,
2002 SSMAFTER_CONTINUE, NULL, NULL, pfSuspended,
2003 fSkipStateChanges);
2004 LogFlow(("VMR3SaveFT: returns %Rrc (*pfSuspended=%RTbool)\n", rc, *pfSuspended));
2005 return rc;
2006}
2007
2008
2009/**
2010 * Teleport the VM (aka live migration).
2011 *
2012 * @returns VBox status code.
2013 *
2014 * @param pUVM The VM which state should be saved.
2015 * @param cMsMaxDowntime The maximum downtime given as milliseconds.
2016 * @param pStreamOps The stream methods.
2017 * @param pvStreamOpsUser The user argument to the stream methods.
2018 * @param pfnProgress Progress callback. Optional.
2019 * @param pvProgressUser User argument for the progress callback.
2020 * @param pfSuspended Set if we suspended the VM.
2021 *
2022 * @thread Non-EMT.
2023 * @vmstate Suspended or Running
2024 * @vmstateto Saving+Suspended or
2025 * RunningLS+SuspendingLS+SuspendedLS+Saving+Suspended.
2026 */
2027VMMR3DECL(int) VMR3Teleport(PUVM pUVM, uint32_t cMsMaxDowntime, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
2028 PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool *pfSuspended)
2029{
2030 LogFlow(("VMR3Teleport: pUVM=%p cMsMaxDowntime=%u pStreamOps=%p pvStreamOps=%p pfnProgress=%p pvProgressUser=%p\n",
2031 pUVM, cMsMaxDowntime, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser));
2032
2033 /*
2034 * Validate input.
2035 */
2036 AssertPtr(pfSuspended);
2037 *pfSuspended = false;
2038 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2039 PVM pVM = pUVM->pVM;
2040 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2041 VM_ASSERT_OTHER_THREAD(pVM);
2042 AssertPtrReturn(pStreamOps, VERR_INVALID_POINTER);
2043 AssertPtrNullReturn(pfnProgress, VERR_INVALID_POINTER);
2044
2045 /*
2046 * Join paths with VMR3Save.
2047 */
2048 int rc = vmR3SaveTeleport(pVM, cMsMaxDowntime,
2049 NULL /*pszFilename*/, pStreamOps, pvStreamOpsUser,
2050 SSMAFTER_TELEPORT, pfnProgress, pvProgressUser, pfSuspended,
2051 false /* fSkipStateChanges */);
2052 LogFlow(("VMR3Teleport: returns %Rrc (*pfSuspended=%RTbool)\n", rc, *pfSuspended));
2053 return rc;
2054}
2055
2056
2057
2058/**
2059 * EMT(0) worker for VMR3LoadFromFile and VMR3LoadFromStream.
2060 *
2061 * @returns VBox status code.
2062 *
2063 * @param pUVM Pointer to the VM.
2064 * @param pszFilename The name of the file. NULL if pStreamOps is used.
2065 * @param pStreamOps The stream methods. NULL if pszFilename is used.
2066 * @param pvStreamOpsUser The user argument to the stream methods.
2067 * @param pfnProgress Progress callback. Optional.
2068 * @param pvProgressUser User argument for the progress callback.
2069 * @param fTeleporting Indicates whether we're teleporting or not.
2070 * @param fSkipStateChanges Set if we're supposed to skip state changes (FTM delta case)
2071 *
2072 * @thread EMT.
2073 */
static DECLCALLBACK(int) vmR3Load(PUVM pUVM, const char *pszFilename, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
                                  PFNVMPROGRESS pfnProgress, void *pvProgressUser, bool fTeleporting,
                                  bool fSkipStateChanges)
{
    int rc = VINF_SUCCESS;

    LogFlow(("vmR3Load: pUVM=%p pszFilename=%p:{%s} pStreamOps=%p pvStreamOpsUser=%p pfnProgress=%p pvProgressUser=%p fTeleporting=%RTbool\n",
             pUVM, pszFilename, pszFilename, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser, fTeleporting));

    /*
     * Validate input (paranoia).
     */
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    AssertPtrNull(pszFilename);
    AssertPtrNull(pStreamOps);
    AssertPtrNull(pfnProgress);

    if (!fSkipStateChanges)
    {
        /*
         * Change the state and perform the load.
         *
         * Always perform a relocation round afterwards to make sure hypervisor
         * selectors and such are correct.
         */
        rc = vmR3TrySetState(pVM, "VMR3Load", 2,
                             VMSTATE_LOADING, VMSTATE_CREATED,
                             VMSTATE_LOADING, VMSTATE_SUSPENDED);
        if (RT_FAILURE(rc))
            return rc;
    }
    /* Flag teleportation before the load; cleared again on failure below. */
    pVM->vm.s.fTeleportedAndNotFullyResumedYet = fTeleporting;

    /* Snapshot the error count so we can tell whether SSMR3Load reported
       details itself or we must produce a generic message afterwards. */
    uint32_t cErrorsPriorToSave = VMR3GetErrorCount(pUVM);
    rc = SSMR3Load(pVM, pszFilename, pStreamOps, pvStreamOpsUser, SSMAFTER_RESUME, pfnProgress, pvProgressUser);
    if (RT_SUCCESS(rc))
    {
        VMR3Relocate(pVM, 0 /*offDelta*/);
        if (!fSkipStateChanges)
            vmR3SetState(pVM, VMSTATE_SUSPENDED, VMSTATE_LOADING);
    }
    else
    {
        pVM->vm.s.fTeleportedAndNotFullyResumedYet = false;
        if (!fSkipStateChanges)
            vmR3SetState(pVM, VMSTATE_LOAD_FAILURE, VMSTATE_LOADING);

        /* Only add the generic error if SSMR3Load didn't set one already. */
        if (cErrorsPriorToSave == VMR3GetErrorCount(pUVM))
            rc = VMSetError(pVM, rc, RT_SRC_POS,
                            N_("Unable to restore the virtual machine's saved state from '%s'. "
                               "It may be damaged or from an older version of VirtualBox. "
                               "Please discard the saved state before starting the virtual machine"),
                            pszFilename);
    }

    return rc;
}
2133
2134
2135/**
2136 * Loads a VM state into a newly created VM or a one that is suspended.
2137 *
2138 * To restore a saved state on VM startup, call this function and then resume
2139 * the VM instead of powering it on.
2140 *
2141 * @returns VBox status code.
2142 *
2143 * @param pUVM The user mode VM structure.
2144 * @param pszFilename The name of the save state file.
2145 * @param pfnProgress Progress callback. Optional.
2146 * @param pvUser User argument for the progress callback.
2147 *
2148 * @thread Any thread.
2149 * @vmstate Created, Suspended
2150 * @vmstateto Loading+Suspended
2151 */
2152VMMR3DECL(int) VMR3LoadFromFile(PUVM pUVM, const char *pszFilename, PFNVMPROGRESS pfnProgress, void *pvUser)
2153{
2154 LogFlow(("VMR3LoadFromFile: pUVM=%p pszFilename=%p:{%s} pfnProgress=%p pvUser=%p\n",
2155 pUVM, pszFilename, pszFilename, pfnProgress, pvUser));
2156
2157 /*
2158 * Validate input.
2159 */
2160 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2161 AssertPtrReturn(pszFilename, VERR_INVALID_POINTER);
2162
2163 /*
2164 * Forward the request to EMT(0). No need to setup a rendezvous here
2165 * since there is no execution taking place when this call is allowed.
2166 */
2167 int rc = VMR3ReqCallWaitU(pUVM, 0 /*idDstCpu*/, (PFNRT)vmR3Load, 8,
2168 pUVM, pszFilename, (uintptr_t)NULL /*pStreamOps*/, (uintptr_t)NULL /*pvStreamOpsUser*/, pfnProgress, pvUser,
2169 false /*fTeleporting*/, false /* fSkipStateChanges */);
2170 LogFlow(("VMR3LoadFromFile: returns %Rrc\n", rc));
2171 return rc;
2172}
2173
2174
2175/**
2176 * VMR3LoadFromFile for arbitrary file streams.
2177 *
2178 * @returns VBox status code.
2179 *
2180 * @param pUVM Pointer to the VM.
2181 * @param pStreamOps The stream methods.
2182 * @param pvStreamOpsUser The user argument to the stream methods.
2183 * @param pfnProgress Progress callback. Optional.
2184 * @param pvProgressUser User argument for the progress callback.
2185 *
2186 * @thread Any thread.
2187 * @vmstate Created, Suspended
2188 * @vmstateto Loading+Suspended
2189 */
2190VMMR3DECL(int) VMR3LoadFromStream(PUVM pUVM, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser,
2191 PFNVMPROGRESS pfnProgress, void *pvProgressUser)
2192{
2193 LogFlow(("VMR3LoadFromStream: pUVM=%p pStreamOps=%p pvStreamOpsUser=%p pfnProgress=%p pvProgressUser=%p\n",
2194 pUVM, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser));
2195
2196 /*
2197 * Validate input.
2198 */
2199 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2200 AssertPtrReturn(pStreamOps, VERR_INVALID_POINTER);
2201
2202 /*
2203 * Forward the request to EMT(0). No need to setup a rendezvous here
2204 * since there is no execution taking place when this call is allowed.
2205 */
2206 int rc = VMR3ReqCallWaitU(pUVM, 0 /*idDstCpu*/, (PFNRT)vmR3Load, 8,
2207 pUVM, (uintptr_t)NULL /*pszFilename*/, pStreamOps, pvStreamOpsUser, pfnProgress, pvProgressUser,
2208 true /*fTeleporting*/, false /* fSkipStateChanges */);
2209 LogFlow(("VMR3LoadFromStream: returns %Rrc\n", rc));
2210 return rc;
2211}
2212
2213
2214/**
2215 * Special version for the FT component, it skips state changes.
2216 *
2217 * @returns VBox status code.
2218 *
2219 * @param pUVM The VM handle.
2220 * @param pStreamOps The stream methods.
2221 * @param pvStreamOpsUser The user argument to the stream methods.
2222 *
2223 * @thread Any thread.
2224 * @vmstate Created, Suspended
2225 * @vmstateto Loading+Suspended
2226 */
2227VMMR3_INT_DECL(int) VMR3LoadFromStreamFT(PUVM pUVM, PCSSMSTRMOPS pStreamOps, void *pvStreamOpsUser)
2228{
2229 LogFlow(("VMR3LoadFromStreamFT: pUVM=%p pStreamOps=%p pvStreamOpsUser=%p\n", pUVM, pStreamOps, pvStreamOpsUser));
2230
2231 /*
2232 * Validate input.
2233 */
2234 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2235 AssertPtrReturn(pStreamOps, VERR_INVALID_POINTER);
2236
2237 /*
2238 * Forward the request to EMT(0). No need to setup a rendezvous here
2239 * since there is no execution taking place when this call is allowed.
2240 */
2241 int rc = VMR3ReqCallWaitU(pUVM, 0 /*idDstCpu*/, (PFNRT)vmR3Load, 8,
2242 pUVM, (uintptr_t)NULL /*pszFilename*/, pStreamOps, pvStreamOpsUser, NULL, NULL,
2243 true /*fTeleporting*/, true /* fSkipStateChanges */);
2244 LogFlow(("VMR3LoadFromStream: returns %Rrc\n", rc));
2245 return rc;
2246}
2247
2248/**
2249 * EMT rendezvous worker for VMR3PowerOff.
2250 *
2251 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_OFF. (This is a strict
2252 * return code, see FNVMMEMTRENDEZVOUS.)
2253 *
2254 * @param pVM The cross context VM structure.
2255 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2256 * @param pvUser Ignored.
2257 */
static DECLCALLBACK(VBOXSTRICTRC) vmR3PowerOff(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    LogFlow(("vmR3PowerOff: pVM=%p pVCpu=%p/#%u\n", pVM, pVCpu, pVCpu->idCpu));
    Assert(!pvUser); NOREF(pvUser);

    /*
     * The first EMT thru here will change the state to PoweringOff.
     * (Descending rendezvous, so the highest CPU id arrives first.)
     */
    if (pVCpu->idCpu == pVM->cCpus - 1)
    {
        int rc = vmR3TrySetState(pVM, "VMR3PowerOff", 11,
                                 VMSTATE_POWERING_OFF,    VMSTATE_RUNNING,           /* 1 */
                                 VMSTATE_POWERING_OFF,    VMSTATE_SUSPENDED,         /* 2 */
                                 VMSTATE_POWERING_OFF,    VMSTATE_DEBUGGING,         /* 3 */
                                 VMSTATE_POWERING_OFF,    VMSTATE_LOAD_FAILURE,      /* 4 */
                                 VMSTATE_POWERING_OFF,    VMSTATE_GURU_MEDITATION,   /* 5 */
                                 VMSTATE_POWERING_OFF,    VMSTATE_FATAL_ERROR,       /* 6 */
                                 VMSTATE_POWERING_OFF,    VMSTATE_CREATED,           /* 7 */   /** @todo update the diagram! */
                                 VMSTATE_POWERING_OFF_LS, VMSTATE_RUNNING_LS,        /* 8 */
                                 VMSTATE_POWERING_OFF_LS, VMSTATE_DEBUGGING_LS,      /* 9 */
                                 VMSTATE_POWERING_OFF_LS, VMSTATE_GURU_MEDITATION_LS,/* 10 */
                                 VMSTATE_POWERING_OFF_LS, VMSTATE_FATAL_ERROR_LS);   /* 11 */
        if (RT_FAILURE(rc))
            return rc;
        /* For pairs 7 thru 11 (Created and the live-save states), cancel any
           pending saved-state operation. */
        if (rc >= 7)
            SSMR3Cancel(pVM->pUVM);
    }

    /*
     * Check the state.
     */
    VMSTATE enmVMState = VMR3GetState(pVM);
    AssertMsgReturn(   enmVMState == VMSTATE_POWERING_OFF
                    || enmVMState == VMSTATE_POWERING_OFF_LS,
                    ("%s\n", VMR3GetStateName(enmVMState)),
                    VERR_VM_INVALID_VM_STATE);

    /*
     * EMT(0) does the actual power off work here *after* all the other EMTs
     * have been thru and entered the STOPPED state.
     */
    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STOPPED);
    if (pVCpu->idCpu == 0)
    {
        /*
         * For debugging purposes, we will log a summary of the guest state at this point.
         */
        if (enmVMState != VMSTATE_GURU_MEDITATION)
        {
            /** @todo make the state dumping at VMR3PowerOff optional. */
            bool fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/);
            RTLogRelPrintf("****************** Guest state at power off for VCpu %u ******************\n", pVCpu->idCpu);
            DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", DBGFR3InfoLogRelHlp());
            RTLogRelPrintf("***\n");
            DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "mode", NULL, DBGFR3InfoLogRelHlp());
            RTLogRelPrintf("***\n");
            DBGFR3Info(pVM->pUVM, "activetimers", NULL, DBGFR3InfoLogRelHlp());
            RTLogRelPrintf("***\n");
            DBGFR3Info(pVM->pUVM, "gdt", NULL, DBGFR3InfoLogRelHlp());
            /** @todo dump guest call stack. */
            RTLogRelSetBuffering(fOldBuffered);
            RTLogRelPrintf("************** End of Guest state at power off ***************\n");
        }

        /*
         * Perform the power off notifications and advance the state to
         * Off or OffLS.
         */
        PDMR3PowerOff(pVM);
        DBGFR3PowerOff(pVM);

        PUVM pUVM = pVM->pUVM;
        RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
        enmVMState = pVM->enmVMState;
        if (enmVMState == VMSTATE_POWERING_OFF_LS)
            vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF_LS, VMSTATE_POWERING_OFF_LS, false /*fSetRatherThanClearFF*/);
        else
            vmR3SetStateLocked(pVM, pUVM, VMSTATE_OFF, VMSTATE_POWERING_OFF, false /*fSetRatherThanClearFF*/);
        RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
    }
    else if (enmVMState != VMSTATE_GURU_MEDITATION)
    {
        /* Non-zero EMTs log a shorter per-CPU state summary. */
        /** @todo make the state dumping at VMR3PowerOff optional. */
        bool fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/);
        RTLogRelPrintf("****************** Guest state at power off for VCpu %u ******************\n", pVCpu->idCpu);
        DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", DBGFR3InfoLogRelHlp());
        RTLogRelPrintf("***\n");
        DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "mode", NULL, DBGFR3InfoLogRelHlp());
        RTLogRelPrintf("***\n");
        RTLogRelSetBuffering(fOldBuffered);
        RTLogRelPrintf("************** End of Guest state at power off for VCpu %u ***************\n", pVCpu->idCpu);
    }

    return VINF_EM_OFF;
}
2353
2354
2355/**
2356 * Power off the VM.
2357 *
2358 * @returns VBox status code. When called on EMT, this will be a strict status
2359 * code that has to be propagated up the call stack.
2360 *
2361 * @param pUVM The handle of the VM to be powered off.
2362 *
2363 * @thread Any thread.
2364 * @vmstate Suspended, Running, Guru Meditation, Load Failure
2365 * @vmstateto Off or OffLS
2366 */
2367VMMR3DECL(int) VMR3PowerOff(PUVM pUVM)
2368{
2369 LogFlow(("VMR3PowerOff: pUVM=%p\n", pUVM));
2370 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2371 PVM pVM = pUVM->pVM;
2372 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2373
2374 /*
2375 * Gather all the EMTs to make sure there are no races before
2376 * changing the VM state.
2377 */
2378 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
2379 vmR3PowerOff, NULL);
2380 LogFlow(("VMR3PowerOff: returns %Rrc\n", rc));
2381 return rc;
2382}
2383
2384
2385/**
2386 * Destroys the VM.
2387 *
 * The VM must be powered off (or never really powered on) to call this
 * function. The VM handle is destroyed and can no longer be used upon
 * successful return.
2391 *
2392 * @returns VBox status code.
2393 *
2394 * @param pUVM The user mode VM handle.
2395 *
2396 * @thread Any none emulation thread.
2397 * @vmstate Off, Created
2398 * @vmstateto N/A
2399 */
VMMR3DECL(int) VMR3Destroy(PUVM pUVM)
{
    LogFlow(("VMR3Destroy: pUVM=%p\n", pUVM));

    /*
     * Validate input.
     */
    if (!pUVM)
        return VERR_INVALID_VM_HANDLE;
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    AssertLogRelReturn(!VM_IS_EMT(pVM), VERR_VM_THREAD_IS_EMT);

    /*
     * Change the VM state to destroying and call vmR3Destroy on each of the
     * EMTs, ending with EMT(0) doing the bulk of the cleanup.
     */
    int rc = vmR3TrySetState(pVM, "VMR3Destroy", 1, VMSTATE_DESTROYING, VMSTATE_OFF);
    if (RT_FAILURE(rc))
        return rc;

    rc = VMR3ReqCallWait(pVM, VMCPUID_ALL_REVERSE, (PFNRT)vmR3Destroy, 1, pVM);
    AssertLogRelRC(rc);

    /*
     * Wait for EMTs to quit and destroy the UVM.
     */
    vmR3DestroyUVM(pUVM, 30000);

    LogFlow(("VMR3Destroy: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2433
2434
2435/**
2436 * Internal destruction worker.
2437 *
2438 * This is either called from VMR3Destroy via VMR3ReqCallU or from
2439 * vmR3EmulationThreadWithId when EMT(0) terminates after having called
2440 * VMR3Destroy().
2441 *
 * When called on EMT(0), it will perform the great bulk of the destruction.
2443 * When called on the other EMTs, they will do nothing and the whole purpose is
2444 * to return VINF_EM_TERMINATE so they break out of their run loops.
2445 *
2446 * @returns VINF_EM_TERMINATE.
2447 * @param pVM The cross context VM structure.
2448 */
2449DECLCALLBACK(int) vmR3Destroy(PVM pVM)
2450{
2451 PUVM pUVM = pVM->pUVM;
2452 PVMCPU pVCpu = VMMGetCpu(pVM);
2453 Assert(pVCpu);
2454 LogFlow(("vmR3Destroy: pVM=%p pUVM=%p pVCpu=%p idCpu=%u\n", pVM, pUVM, pVCpu, pVCpu->idCpu));
2455
2456 /*
2457 * Only VCPU 0 does the full cleanup (last).
2458 */
2459 if (pVCpu->idCpu == 0)
2460 {
2461 /*
2462 * Dump statistics to the log.
2463 */
2464#if defined(VBOX_WITH_STATISTICS) || defined(LOG_ENABLED)
2465 RTLogFlags(NULL, "nodisabled nobuffered");
2466#endif
2467//#ifdef VBOX_WITH_STATISTICS
2468// STAMR3Dump(pUVM, "*");
2469//#else
2470 LogRel(("************************* Statistics *************************\n"));
2471 STAMR3DumpToReleaseLog(pUVM, "*");
2472 LogRel(("********************* End of statistics **********************\n"));
2473//#endif
2474
2475 /*
2476 * Destroy the VM components.
2477 */
2478 int rc = TMR3Term(pVM);
2479 AssertRC(rc);
2480#ifdef VBOX_WITH_DEBUGGER
2481 rc = DBGCTcpTerminate(pUVM, pUVM->vm.s.pvDBGC);
2482 pUVM->vm.s.pvDBGC = NULL;
2483#endif
2484 AssertRC(rc);
2485 rc = FTMR3Term(pVM);
2486 AssertRC(rc);
2487 rc = PDMR3Term(pVM);
2488 AssertRC(rc);
2489 rc = GIMR3Term(pVM);
2490 AssertRC(rc);
2491 rc = DBGFR3Term(pVM);
2492 AssertRC(rc);
2493 rc = IEMR3Term(pVM);
2494 AssertRC(rc);
2495 rc = EMR3Term(pVM);
2496 AssertRC(rc);
2497 rc = IOMR3Term(pVM);
2498 AssertRC(rc);
2499#ifdef VBOX_WITH_RAW_MODE
2500 rc = CSAMR3Term(pVM);
2501 AssertRC(rc);
2502 rc = PATMR3Term(pVM);
2503 AssertRC(rc);
2504#endif
2505 rc = TRPMR3Term(pVM);
2506 AssertRC(rc);
2507 rc = SELMR3Term(pVM);
2508 AssertRC(rc);
2509#ifdef VBOX_WITH_REM
2510 rc = REMR3Term(pVM);
2511 AssertRC(rc);
2512#endif
2513 rc = HMR3Term(pVM);
2514 AssertRC(rc);
2515 rc = PGMR3Term(pVM);
2516 AssertRC(rc);
2517 rc = VMMR3Term(pVM); /* Terminates the ring-0 code! */
2518 AssertRC(rc);
2519 rc = CPUMR3Term(pVM);
2520 AssertRC(rc);
2521 SSMR3Term(pVM);
2522 rc = PDMR3CritSectBothTerm(pVM);
2523 AssertRC(rc);
2524 rc = MMR3Term(pVM);
2525 AssertRC(rc);
2526
2527 /*
2528 * We're done, tell the other EMTs to quit.
2529 */
2530 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
2531 ASMAtomicWriteU32(&pVM->fGlobalForcedActions, VM_FF_CHECK_VM_STATE); /* Can't hurt... */
2532 LogFlow(("vmR3Destroy: returning %Rrc\n", VINF_EM_TERMINATE));
2533 }
2534 return VINF_EM_TERMINATE;
2535}
2536
2537
2538/**
2539 * Destroys the UVM portion.
2540 *
2541 * This is called as the final step in the VM destruction or as the cleanup
2542 * in case of a creation failure.
2543 *
2544 * @param pUVM The user mode VM structure.
2545 * @param cMilliesEMTWait The number of milliseconds to wait for the emulation
2546 * threads.
2547 */
2548static void vmR3DestroyUVM(PUVM pUVM, uint32_t cMilliesEMTWait)
2549{
2550 /*
2551 * Signal termination of each the emulation threads and
2552 * wait for them to complete.
2553 */
2554 /* Signal them. */
2555 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
2556 if (pUVM->pVM)
2557 VM_FF_SET(pUVM->pVM, VM_FF_CHECK_VM_STATE); /* Can't hurt... */
2558 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
2559 {
2560 VMR3NotifyGlobalFFU(pUVM, VMNOTIFYFF_FLAGS_DONE_REM);
2561 RTSemEventSignal(pUVM->aCpus[i].vm.s.EventSemWait);
2562 }
2563
2564 /* Wait for them. */
2565 uint64_t NanoTS = RTTimeNanoTS();
2566 RTTHREAD hSelf = RTThreadSelf();
2567 ASMAtomicUoWriteBool(&pUVM->vm.s.fTerminateEMT, true);
2568 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
2569 {
2570 RTTHREAD hThread = pUVM->aCpus[i].vm.s.ThreadEMT;
2571 if ( hThread != NIL_RTTHREAD
2572 && hThread != hSelf)
2573 {
2574 uint64_t cMilliesElapsed = (RTTimeNanoTS() - NanoTS) / 1000000;
2575 int rc2 = RTThreadWait(hThread,
2576 cMilliesElapsed < cMilliesEMTWait
2577 ? RT_MAX(cMilliesEMTWait - cMilliesElapsed, 2000)
2578 : 2000,
2579 NULL);
2580 if (rc2 == VERR_TIMEOUT) /* avoid the assertion when debugging. */
2581 rc2 = RTThreadWait(hThread, 1000, NULL);
2582 AssertLogRelMsgRC(rc2, ("i=%u rc=%Rrc\n", i, rc2));
2583 if (RT_SUCCESS(rc2))
2584 pUVM->aCpus[0].vm.s.ThreadEMT = NIL_RTTHREAD;
2585 }
2586 }
2587
2588 /* Cleanup the semaphores. */
2589 for (VMCPUID i = 0; i < pUVM->cCpus; i++)
2590 {
2591 RTSemEventDestroy(pUVM->aCpus[i].vm.s.EventSemWait);
2592 pUVM->aCpus[i].vm.s.EventSemWait = NIL_RTSEMEVENT;
2593 }
2594
2595 /*
2596 * Free the event semaphores associated with the request packets.
2597 */
2598 unsigned cReqs = 0;
2599 for (unsigned i = 0; i < RT_ELEMENTS(pUVM->vm.s.apReqFree); i++)
2600 {
2601 PVMREQ pReq = pUVM->vm.s.apReqFree[i];
2602 pUVM->vm.s.apReqFree[i] = NULL;
2603 for (; pReq; pReq = pReq->pNext, cReqs++)
2604 {
2605 pReq->enmState = VMREQSTATE_INVALID;
2606 RTSemEventDestroy(pReq->EventSem);
2607 }
2608 }
2609 Assert(cReqs == pUVM->vm.s.cReqFree); NOREF(cReqs);
2610
2611 /*
2612 * Kill all queued requests. (There really shouldn't be any!)
2613 */
2614 for (unsigned i = 0; i < 10; i++)
2615 {
2616 PVMREQ pReqHead = ASMAtomicXchgPtrT(&pUVM->vm.s.pPriorityReqs, NULL, PVMREQ);
2617 if (!pReqHead)
2618 {
2619 pReqHead = ASMAtomicXchgPtrT(&pUVM->vm.s.pNormalReqs, NULL, PVMREQ);
2620 if (!pReqHead)
2621 break;
2622 }
2623 AssertLogRelMsgFailed(("Requests pending! VMR3Destroy caller has to serialize this.\n"));
2624
2625 for (PVMREQ pReq = pReqHead; pReq; pReq = pReq->pNext)
2626 {
2627 ASMAtomicUoWriteS32(&pReq->iStatus, VERR_VM_REQUEST_KILLED);
2628 ASMAtomicWriteSize(&pReq->enmState, VMREQSTATE_INVALID);
2629 RTSemEventSignal(pReq->EventSem);
2630 RTThreadSleep(2);
2631 RTSemEventDestroy(pReq->EventSem);
2632 }
2633 /* give them a chance to respond before we free the request memory. */
2634 RTThreadSleep(32);
2635 }
2636
2637 /*
2638 * Now all queued VCPU requests (again, there shouldn't be any).
2639 */
2640 for (VMCPUID idCpu = 0; idCpu < pUVM->cCpus; idCpu++)
2641 {
2642 PUVMCPU pUVCpu = &pUVM->aCpus[idCpu];
2643
2644 for (unsigned i = 0; i < 10; i++)
2645 {
2646 PVMREQ pReqHead = ASMAtomicXchgPtrT(&pUVCpu->vm.s.pPriorityReqs, NULL, PVMREQ);
2647 if (!pReqHead)
2648 {
2649 pReqHead = ASMAtomicXchgPtrT(&pUVCpu->vm.s.pNormalReqs, NULL, PVMREQ);
2650 if (!pReqHead)
2651 break;
2652 }
2653 AssertLogRelMsgFailed(("Requests pending! VMR3Destroy caller has to serialize this.\n"));
2654
2655 for (PVMREQ pReq = pReqHead; pReq; pReq = pReq->pNext)
2656 {
2657 ASMAtomicUoWriteS32(&pReq->iStatus, VERR_VM_REQUEST_KILLED);
2658 ASMAtomicWriteSize(&pReq->enmState, VMREQSTATE_INVALID);
2659 RTSemEventSignal(pReq->EventSem);
2660 RTThreadSleep(2);
2661 RTSemEventDestroy(pReq->EventSem);
2662 }
2663 /* give them a chance to respond before we free the request memory. */
2664 RTThreadSleep(32);
2665 }
2666 }
2667
2668 /*
2669 * Make sure the VMMR0.r0 module and whatever else is unloaded.
2670 */
2671 PDMR3TermUVM(pUVM);
2672
2673 /*
2674 * Terminate the support library if initialized.
2675 */
2676 if (pUVM->vm.s.pSession)
2677 {
2678 int rc = SUPR3Term(false /*fForced*/);
2679 AssertRC(rc);
2680 pUVM->vm.s.pSession = NIL_RTR0PTR;
2681 }
2682
2683 /*
2684 * Release the UVM structure reference.
2685 */
2686 VMR3ReleaseUVM(pUVM);
2687
2688 /*
2689 * Clean up and flush logs.
2690 */
2691#ifdef LOG_ENABLED
2692 RTLogSetCustomPrefixCallback(NULL, NULL, NULL);
2693#endif
2694 RTLogFlush(NULL);
2695}
2696
2697
2698/**
2699 * Worker which checks integrity of some internal structures.
2700 * This is yet another attempt to track down that AVL tree crash.
2701 */
2702static void vmR3CheckIntegrity(PVM pVM)
2703{
2704#ifdef VBOX_STRICT
2705 int rc = PGMR3CheckIntegrity(pVM);
2706 AssertReleaseRC(rc);
2707#else
2708 RT_NOREF_PV(pVM);
2709#endif
2710}
2711
2712
2713/**
2714 * EMT rendezvous worker for VMR3ResetFF for doing soft/warm reset.
2715 *
2716 * @returns VERR_VM_INVALID_VM_STATE, VINF_EM_RESCHEDULE.
2717 * (This is a strict return code, see FNVMMEMTRENDEZVOUS.)
2718 *
2719 * @param pVM The cross context VM structure.
2720 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2721 * @param pvUser The reset flags.
2722 */
static DECLCALLBACK(VBOXSTRICTRC) vmR3SoftReset(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    /* pvUser carries the PDMVMRESET_F_XXX style reset flags. */
    uint32_t fResetFlags = *(uint32_t *)pvUser;


    /*
     * The first EMT will try change the state to resetting.  If this fails,
     * we won't get called for the other EMTs.
     * (Descending rendezvous, so the highest CPU id arrives first.)
     */
    if (pVCpu->idCpu == pVM->cCpus - 1)
    {
        int rc = vmR3TrySetState(pVM, "vmR3ResetSoft", 3,
                                 VMSTATE_SOFT_RESETTING,    VMSTATE_RUNNING,
                                 VMSTATE_SOFT_RESETTING,    VMSTATE_SUSPENDED,
                                 VMSTATE_SOFT_RESETTING_LS, VMSTATE_RUNNING_LS);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Check the state.
     */
    VMSTATE enmVMState = VMR3GetState(pVM);
    AssertLogRelMsgReturn(   enmVMState == VMSTATE_SOFT_RESETTING
                          || enmVMState == VMSTATE_SOFT_RESETTING_LS,
                          ("%s\n", VMR3GetStateName(enmVMState)),
                          VERR_VM_UNEXPECTED_UNSTABLE_STATE);

    /*
     * EMT(0) does the full cleanup *after* all the other EMTs have been
     * thru here and been told to enter the EMSTATE_WAIT_SIPI state.
     *
     * Because there are per-cpu reset routines and order may/is important,
     * the following sequence looks a bit ugly...
     */

    /* Reset the VCpu state. */
    VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED);

    /*
     * Soft reset the VM components.
     */
    if (pVCpu->idCpu == 0)
    {
#ifdef VBOX_WITH_REM
        REMR3Reset(pVM);
#endif
        PDMR3SoftReset(pVM, fResetFlags);
        TRPMR3Reset(pVM);
        CPUMR3Reset(pVM);   /* This must come *after* PDM (due to APIC base MSR caching). */
        EMR3Reset(pVM);
        HMR3Reset(pVM);     /* This must come *after* PATM, CSAM, CPUM, SELM and TRPM. */

        /*
         * Since EMT(0) is the last to go thru here, it will advance the state.
         * (Unlike vmR3HardReset we won't be doing any suspending of live
         * migration VMs here since memory is unchanged.)
         */
        PUVM pUVM = pVM->pUVM;
        RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
        enmVMState = pVM->enmVMState;
        if (enmVMState == VMSTATE_SOFT_RESETTING)
        {
            /* Restore the pre-reset state: Suspended stays suspended,
               Running goes back to running. */
            if (pUVM->vm.s.enmPrevVMState == VMSTATE_SUSPENDED)
                vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDED, VMSTATE_SOFT_RESETTING, false /*fSetRatherThanClearFF*/);
            else
                vmR3SetStateLocked(pVM, pUVM, VMSTATE_RUNNING, VMSTATE_SOFT_RESETTING, false /*fSetRatherThanClearFF*/);
        }
        else
            vmR3SetStateLocked(pVM, pUVM, VMSTATE_RUNNING_LS, VMSTATE_SOFT_RESETTING_LS, false /*fSetRatherThanClearFF*/);
        RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
    }

    return VINF_EM_RESCHEDULE;
}
2798
2799
2800/**
2801 * EMT rendezvous worker for VMR3Reset and VMR3ResetFF.
2802 *
2803 * This is called by the emulation threads as a response to the reset request
2804 * issued by VMR3Reset().
2805 *
2806 * @returns VERR_VM_INVALID_VM_STATE, VINF_EM_RESET or VINF_EM_SUSPEND. (This
2807 * is a strict return code, see FNVMMEMTRENDEZVOUS.)
2808 *
2809 * @param pVM The cross context VM structure.
2810 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2811 * @param pvUser Ignored.
2812 */
static DECLCALLBACK(VBOXSTRICTRC) vmR3HardReset(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    Assert(!pvUser); NOREF(pvUser);

    /*
     * The first EMT will try change the state to resetting.  If this fails,
     * we won't get called for the other EMTs.
     *
     * Note! The rendezvous is of the DESCENDING type, so the highest CPU id
     *       executes first and EMT(0) executes last.
     */
    if (pVCpu->idCpu == pVM->cCpus - 1)
    {
        int rc = vmR3TrySetState(pVM, "vmR3HardReset", 3,
                                 VMSTATE_RESETTING, VMSTATE_RUNNING,
                                 VMSTATE_RESETTING, VMSTATE_SUSPENDED,
                                 VMSTATE_RESETTING_LS, VMSTATE_RUNNING_LS);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Check the state.  (All EMTs re-check since the transition above may
     * have been done by a different EMT.)
     */
    VMSTATE enmVMState = VMR3GetState(pVM);
    AssertLogRelMsgReturn(   enmVMState == VMSTATE_RESETTING
                          || enmVMState == VMSTATE_RESETTING_LS,
                          ("%s\n", VMR3GetStateName(enmVMState)),
                          VERR_VM_UNEXPECTED_UNSTABLE_STATE);

    /*
     * EMT(0) does the full cleanup *after* all the other EMTs have been
     * thru here and been told to enter the EMSTATE_WAIT_SIPI state.
     *
     * Because there are per-cpu reset routines and order may/is important,
     * the following sequence looks a bit ugly...
     */
    if (pVCpu->idCpu == 0)
        vmR3CheckIntegrity(pVM);

    /* Reset the VCpu state. */
    VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED);

    /* Clear all pending forced actions. */
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_ALL_MASK & ~VMCPU_FF_REQUEST);

    /*
     * Reset the VM components.  (EMT(0) only, it runs last in the descending
     * rendezvous so all other EMTs are already parked at this point.)
     */
    if (pVCpu->idCpu == 0)
    {
#ifdef VBOX_WITH_RAW_MODE
        PATMR3Reset(pVM);
        CSAMR3Reset(pVM);
#endif
        GIMR3Reset(pVM); /* This must come *before* PDM and TM. */
        PDMR3Reset(pVM);
        PGMR3Reset(pVM);
        SELMR3Reset(pVM);
        TRPMR3Reset(pVM);
#ifdef VBOX_WITH_REM
        REMR3Reset(pVM);
#endif
        IOMR3Reset(pVM);
        CPUMR3Reset(pVM); /* This must come *after* PDM (due to APIC base MSR caching). */
        TMR3Reset(pVM);
        EMR3Reset(pVM);
        HMR3Reset(pVM); /* This must come *after* PATM, CSAM, CPUM, SELM and TRPM. */

        /*
         * Do memory setup.
         */
        PGMR3MemSetup(pVM, true /*fAtReset*/);
        PDMR3MemSetup(pVM, true /*fAtReset*/);

        /*
         * Since EMT(0) is the last to go thru here, it will advance the state.
         * When a live save is active, we will move on to SuspendingLS but
         * leave it for VMR3Reset to do the actual suspending due to deadlock risks.
         *
         * Note! enmVMState is re-read under the lock here; the value is reused
         *       below both for the LS suspend step and the return code.
         */
        PUVM pUVM = pVM->pUVM;
        RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
        enmVMState = pVM->enmVMState;
        if (enmVMState == VMSTATE_RESETTING)
        {
            /* Resetting from Suspended goes back to Suspended, otherwise to Running. */
            if (pUVM->vm.s.enmPrevVMState == VMSTATE_SUSPENDED)
                vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDED, VMSTATE_RESETTING, false /*fSetRatherThanClearFF*/);
            else
                vmR3SetStateLocked(pVM, pUVM, VMSTATE_RUNNING, VMSTATE_RESETTING, false /*fSetRatherThanClearFF*/);
        }
        else
            vmR3SetStateLocked(pVM, pUVM, VMSTATE_SUSPENDING_LS, VMSTATE_RESETTING_LS, false /*fSetRatherThanClearFF*/);
        RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);

        vmR3CheckIntegrity(pVM);

        /*
         * Do the suspend bit as well.
         * It only requires some EMT(0) work at present.
         */
        if (enmVMState != VMSTATE_RESETTING)
        {
            vmR3SuspendDoWork(pVM);
            vmR3SetState(pVM, VMSTATE_SUSPENDED_LS, VMSTATE_SUSPENDING_LS);
        }
    }

    /* For EMTs other than EMT(0) enmVMState still holds the value sampled in
       the state check above; VMSTATE_RESETTING_LS yields the suspend code. */
    return enmVMState == VMSTATE_RESETTING
         ? VINF_EM_RESET
         : VINF_EM_SUSPEND; /** @todo VINF_EM_SUSPEND has lower priority than VINF_EM_RESET, so fix races. Perhaps add a new code for this combined case. */
}
2921
2922
2923/**
2924 * Internal worker for VMR3Reset, VMR3ResetFF, VMR3TripleFault.
2925 *
2926 * @returns VBox status code.
2927 * @param pVM The cross context VM structure.
2928 * @param fHardReset Whether it's a hard reset or not.
2929 * @param fResetFlags The reset flags (PDMVMRESET_F_XXX).
2930 */
2931static VBOXSTRICTRC vmR3ResetCommon(PVM pVM, bool fHardReset, uint32_t fResetFlags)
2932{
2933 LogFlow(("vmR3ResetCommon: fHardReset=%RTbool fResetFlags=%#x\n", fHardReset, fResetFlags));
2934 int rc;
2935 if (fHardReset)
2936 {
2937 /*
2938 * Hard reset.
2939 */
2940 /* Check whether we're supposed to power off instead of resetting. */
2941 if (pVM->vm.s.fPowerOffInsteadOfReset)
2942 {
2943 PUVM pUVM = pVM->pUVM;
2944 if ( pUVM->pVmm2UserMethods
2945 && pUVM->pVmm2UserMethods->pfnNotifyResetTurnedIntoPowerOff)
2946 pUVM->pVmm2UserMethods->pfnNotifyResetTurnedIntoPowerOff(pUVM->pVmm2UserMethods, pUVM);
2947 return VMR3PowerOff(pUVM);
2948 }
2949
2950 /* Gather all the EMTs to make sure there are no races before changing
2951 the VM state. */
2952 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
2953 vmR3HardReset, NULL);
2954 }
2955 else
2956 {
2957 /*
2958 * Soft reset. Since we only support this with a single CPU active,
2959 * we must be on EMT #0 here.
2960 */
2961 VM_ASSERT_EMT0(pVM);
2962 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
2963 vmR3SoftReset, &fResetFlags);
2964 }
2965
2966 LogFlow(("vmR3ResetCommon: returns %Rrc\n", rc));
2967 return rc;
2968}
2969
2970
2971
2972/**
2973 * Reset the current VM.
2974 *
2975 * @returns VBox status code.
2976 * @param pUVM The VM to reset.
2977 */
2978VMMR3DECL(int) VMR3Reset(PUVM pUVM)
2979{
2980 LogFlow(("VMR3Reset:\n"));
2981 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
2982 PVM pVM = pUVM->pVM;
2983 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
2984
2985 return VBOXSTRICTRC_VAL(vmR3ResetCommon(pVM, true, 0));
2986}
2987
2988
2989/**
2990 * Handle the reset force flag or triple fault.
2991 *
2992 * This handles both soft and hard resets (see PDMVMRESET_F_XXX).
2993 *
2994 * @returns VBox status code.
2995 * @param pVM The cross context VM structure.
2996 * @thread EMT
2997 *
2998 * @remarks Caller is expected to clear the VM_FF_RESET force flag.
2999 */
3000VMMR3_INT_DECL(VBOXSTRICTRC) VMR3ResetFF(PVM pVM)
3001{
3002 LogFlow(("VMR3ResetFF:\n"));
3003
3004 /*
3005 * First consult the firmware on whether this is a hard or soft reset.
3006 */
3007 uint32_t fResetFlags;
3008 bool fHardReset = PDMR3GetResetInfo(pVM, 0 /*fOverride*/, &fResetFlags);
3009 return vmR3ResetCommon(pVM, fHardReset, fResetFlags);
3010}
3011
3012
3013/**
3014 * For handling a CPU reset on triple fault.
3015 *
3016 * According to one mainboard manual, a CPU triple fault causes the 286 CPU to
3017 * send a SHUTDOWN signal to the chipset. The chipset responds by sending a
3018 * RESET signal to the CPU. So, it should be very similar to a soft/warm reset.
3019 *
3020 * @returns VBox status code.
3021 * @param pVM The cross context VM structure.
3022 * @thread EMT
3023 */
3024VMMR3_INT_DECL(VBOXSTRICTRC) VMR3ResetTripleFault(PVM pVM)
3025{
3026 LogFlow(("VMR3ResetTripleFault:\n"));
3027
3028 /*
3029 * First consult the firmware on whether this is a hard or soft reset.
3030 */
3031 uint32_t fResetFlags;
3032 bool fHardReset = PDMR3GetResetInfo(pVM, PDMVMRESET_F_TRIPLE_FAULT, &fResetFlags);
3033 return vmR3ResetCommon(pVM, fHardReset, fResetFlags);
3034}
3035
3036
3037/**
 * Gets the user mode VM structure pointer given a pointer to the VM.
3039 *
3040 * @returns Pointer to the user mode VM structure on success. NULL if @a pVM is
3041 * invalid (asserted).
3042 * @param pVM The cross context VM structure.
3043 * @sa VMR3GetVM, VMR3RetainUVM
3044 */
3045VMMR3DECL(PUVM) VMR3GetUVM(PVM pVM)
3046{
3047 VM_ASSERT_VALID_EXT_RETURN(pVM, NULL);
3048 return pVM->pUVM;
3049}
3050
3051
3052/**
3053 * Gets the shared VM structure pointer given the pointer to the user mode VM
3054 * structure.
3055 *
3056 * @returns Pointer to the VM.
3057 * NULL if @a pUVM is invalid (asserted) or if no shared VM structure
3058 * is currently associated with it.
3059 * @param pUVM The user mode VM handle.
3060 * @sa VMR3GetUVM
3061 */
3062VMMR3DECL(PVM) VMR3GetVM(PUVM pUVM)
3063{
3064 UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
3065 return pUVM->pVM;
3066}
3067
3068
3069/**
3070 * Retain the user mode VM handle.
3071 *
3072 * @returns Reference count.
3073 * UINT32_MAX if @a pUVM is invalid.
3074 *
3075 * @param pUVM The user mode VM handle.
3076 * @sa VMR3ReleaseUVM
3077 */
3078VMMR3DECL(uint32_t) VMR3RetainUVM(PUVM pUVM)
3079{
3080 UVM_ASSERT_VALID_EXT_RETURN(pUVM, UINT32_MAX);
3081 uint32_t cRefs = ASMAtomicIncU32(&pUVM->vm.s.cUvmRefs);
3082 AssertMsg(cRefs > 0 && cRefs < _64K, ("%u\n", cRefs));
3083 return cRefs;
3084}
3085
3086
3087/**
3088 * Does the final release of the UVM structure.
3089 *
3090 * @param pUVM The user mode VM handle.
3091 */
3092static void vmR3DoReleaseUVM(PUVM pUVM)
3093{
3094 /*
3095 * Free the UVM.
3096 */
3097 Assert(!pUVM->pVM);
3098
3099 MMR3TermUVM(pUVM);
3100 STAMR3TermUVM(pUVM);
3101
3102 ASMAtomicUoWriteU32(&pUVM->u32Magic, UINT32_MAX);
3103 RTTlsFree(pUVM->vm.s.idxTLS);
3104 RTMemPageFree(pUVM, RT_OFFSETOF(UVM, aCpus[pUVM->cCpus]));
3105}
3106
3107
3108/**
 * Releases a reference to the user mode VM handle.
3110 *
3111 * @returns The new reference count, 0 if destroyed.
3112 * UINT32_MAX if @a pUVM is invalid.
3113 *
3114 * @param pUVM The user mode VM handle.
3115 * @sa VMR3RetainUVM
3116 */
3117VMMR3DECL(uint32_t) VMR3ReleaseUVM(PUVM pUVM)
3118{
3119 if (!pUVM)
3120 return 0;
3121 UVM_ASSERT_VALID_EXT_RETURN(pUVM, UINT32_MAX);
3122 uint32_t cRefs = ASMAtomicDecU32(&pUVM->vm.s.cUvmRefs);
3123 if (!cRefs)
3124 vmR3DoReleaseUVM(pUVM);
3125 else
3126 AssertMsg(cRefs < _64K, ("%u\n", cRefs));
3127 return cRefs;
3128}
3129
3130
3131/**
3132 * Gets the VM name.
3133 *
3134 * @returns Pointer to a read-only string containing the name. NULL if called
3135 * too early.
3136 * @param pUVM The user mode VM handle.
3137 */
3138VMMR3DECL(const char *) VMR3GetName(PUVM pUVM)
3139{
3140 UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
3141 return pUVM->vm.s.pszName;
3142}
3143
3144
3145/**
3146 * Gets the VM UUID.
3147 *
3148 * @returns pUuid on success, NULL on failure.
3149 * @param pUVM The user mode VM handle.
3150 * @param pUuid Where to store the UUID.
3151 */
3152VMMR3DECL(PRTUUID) VMR3GetUuid(PUVM pUVM, PRTUUID pUuid)
3153{
3154 UVM_ASSERT_VALID_EXT_RETURN(pUVM, NULL);
3155 AssertPtrReturn(pUuid, NULL);
3156
3157 *pUuid = pUVM->vm.s.Uuid;
3158 return pUuid;
3159}
3160
3161
3162/**
3163 * Gets the current VM state.
3164 *
3165 * @returns The current VM state.
3166 * @param pVM The cross context VM structure.
3167 * @thread Any
3168 */
3169VMMR3DECL(VMSTATE) VMR3GetState(PVM pVM)
3170{
3171 AssertMsgReturn(RT_VALID_ALIGNED_PTR(pVM, PAGE_SIZE), ("%p\n", pVM), VMSTATE_TERMINATED);
3172 VMSTATE enmVMState = pVM->enmVMState;
3173 return enmVMState >= VMSTATE_CREATING && enmVMState <= VMSTATE_TERMINATED ? enmVMState : VMSTATE_TERMINATED;
3174}
3175
3176
3177/**
3178 * Gets the current VM state.
3179 *
3180 * @returns The current VM state.
3181 * @param pUVM The user-mode VM handle.
3182 * @thread Any
3183 */
3184VMMR3DECL(VMSTATE) VMR3GetStateU(PUVM pUVM)
3185{
3186 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VMSTATE_TERMINATED);
3187 if (RT_UNLIKELY(!pUVM->pVM))
3188 return VMSTATE_TERMINATED;
3189 return pUVM->pVM->enmVMState;
3190}
3191
3192
3193/**
3194 * Gets the state name string for a VM state.
3195 *
3196 * @returns Pointer to the state name. (readonly)
3197 * @param enmState The state.
3198 */
VMMR3DECL(const char *) VMR3GetStateName(VMSTATE enmState)
{
    /* Straight enum-to-string mapping; the returned strings are static and
       must never be freed by the caller. */
    switch (enmState)
    {
        case VMSTATE_CREATING: return "CREATING";
        case VMSTATE_CREATED: return "CREATED";
        case VMSTATE_LOADING: return "LOADING";
        case VMSTATE_POWERING_ON: return "POWERING_ON";
        case VMSTATE_RESUMING: return "RESUMING";
        case VMSTATE_RUNNING: return "RUNNING";
        case VMSTATE_RUNNING_LS: return "RUNNING_LS";
        case VMSTATE_RUNNING_FT: return "RUNNING_FT";
        case VMSTATE_RESETTING: return "RESETTING";
        case VMSTATE_RESETTING_LS: return "RESETTING_LS";
        case VMSTATE_SOFT_RESETTING: return "SOFT_RESETTING";
        case VMSTATE_SOFT_RESETTING_LS: return "SOFT_RESETTING_LS";
        case VMSTATE_SUSPENDED: return "SUSPENDED";
        case VMSTATE_SUSPENDED_LS: return "SUSPENDED_LS";
        case VMSTATE_SUSPENDED_EXT_LS: return "SUSPENDED_EXT_LS";
        case VMSTATE_SUSPENDING: return "SUSPENDING";
        case VMSTATE_SUSPENDING_LS: return "SUSPENDING_LS";
        case VMSTATE_SUSPENDING_EXT_LS: return "SUSPENDING_EXT_LS";
        case VMSTATE_SAVING: return "SAVING";
        case VMSTATE_DEBUGGING: return "DEBUGGING";
        case VMSTATE_DEBUGGING_LS: return "DEBUGGING_LS";
        case VMSTATE_POWERING_OFF: return "POWERING_OFF";
        case VMSTATE_POWERING_OFF_LS: return "POWERING_OFF_LS";
        case VMSTATE_FATAL_ERROR: return "FATAL_ERROR";
        case VMSTATE_FATAL_ERROR_LS: return "FATAL_ERROR_LS";
        case VMSTATE_GURU_MEDITATION: return "GURU_MEDITATION";
        case VMSTATE_GURU_MEDITATION_LS:return "GURU_MEDITATION_LS";
        case VMSTATE_LOAD_FAILURE: return "LOAD_FAILURE";
        case VMSTATE_OFF: return "OFF";
        case VMSTATE_OFF_LS: return "OFF_LS";
        case VMSTATE_DESTROYING: return "DESTROYING";
        case VMSTATE_TERMINATED: return "TERMINATED";

        default:
            /* Assert in strict builds but still return a printable string. */
            AssertMsgFailed(("Unknown state %d\n", enmState));
            return "Unknown!\n";
    }
}
3241
3242
3243/**
3244 * Validates the state transition in strict builds.
3245 *
3246 * @returns true if valid, false if not.
3247 *
3248 * @param enmStateOld The old (current) state.
3249 * @param enmStateNew The proposed new state.
3250 *
3251 * @remarks The reference for this is found in doc/vp/VMM.vpp, the VMSTATE
3252 * diagram (under State Machine Diagram).
3253 */
static bool vmR3ValidateStateTransition(VMSTATE enmStateOld, VMSTATE enmStateNew)
{
#ifndef VBOX_STRICT
    /* In non-strict builds every transition is accepted without checking. */
    RT_NOREF2(enmStateOld, enmStateNew);
#else
    /* One case per source state listing the permitted destination states.
       Each AssertMsgReturn asserts *and* makes the function return false on
       an illegal transition. */
    switch (enmStateOld)
    {
        case VMSTATE_CREATING:
            AssertMsgReturn(enmStateNew == VMSTATE_CREATED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_CREATED:
            AssertMsgReturn(   enmStateNew == VMSTATE_LOADING
                            || enmStateNew == VMSTATE_POWERING_ON
                            || enmStateNew == VMSTATE_POWERING_OFF
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_LOADING:
            AssertMsgReturn(   enmStateNew == VMSTATE_SUSPENDED
                            || enmStateNew == VMSTATE_LOAD_FAILURE
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_POWERING_ON:
            AssertMsgReturn(   enmStateNew == VMSTATE_RUNNING
                            /*|| enmStateNew == VMSTATE_FATAL_ERROR ?*/
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_RESUMING:
            AssertMsgReturn(   enmStateNew == VMSTATE_RUNNING
                            /*|| enmStateNew == VMSTATE_FATAL_ERROR ?*/
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_RUNNING:
            AssertMsgReturn(   enmStateNew == VMSTATE_POWERING_OFF
                            || enmStateNew == VMSTATE_SUSPENDING
                            || enmStateNew == VMSTATE_RESETTING
                            || enmStateNew == VMSTATE_SOFT_RESETTING
                            || enmStateNew == VMSTATE_RUNNING_LS
                            || enmStateNew == VMSTATE_RUNNING_FT
                            || enmStateNew == VMSTATE_DEBUGGING
                            || enmStateNew == VMSTATE_FATAL_ERROR
                            || enmStateNew == VMSTATE_GURU_MEDITATION
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_RUNNING_LS:
            AssertMsgReturn(   enmStateNew == VMSTATE_POWERING_OFF_LS
                            || enmStateNew == VMSTATE_SUSPENDING_LS
                            || enmStateNew == VMSTATE_SUSPENDING_EXT_LS
                            || enmStateNew == VMSTATE_RESETTING_LS
                            || enmStateNew == VMSTATE_SOFT_RESETTING_LS
                            || enmStateNew == VMSTATE_RUNNING
                            || enmStateNew == VMSTATE_DEBUGGING_LS
                            || enmStateNew == VMSTATE_FATAL_ERROR_LS
                            || enmStateNew == VMSTATE_GURU_MEDITATION_LS
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_RUNNING_FT:
            AssertMsgReturn(   enmStateNew == VMSTATE_POWERING_OFF
                            || enmStateNew == VMSTATE_FATAL_ERROR
                            || enmStateNew == VMSTATE_GURU_MEDITATION
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_RESETTING:
            AssertMsgReturn(enmStateNew == VMSTATE_RUNNING, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_SOFT_RESETTING:
            AssertMsgReturn(enmStateNew == VMSTATE_RUNNING, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_RESETTING_LS:
            AssertMsgReturn(   enmStateNew == VMSTATE_SUSPENDING_LS
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_SOFT_RESETTING_LS:
            AssertMsgReturn(   enmStateNew == VMSTATE_RUNNING_LS
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_SUSPENDING:
            AssertMsgReturn(enmStateNew == VMSTATE_SUSPENDED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_SUSPENDING_LS:
            AssertMsgReturn(   enmStateNew == VMSTATE_SUSPENDING
                            || enmStateNew == VMSTATE_SUSPENDED_LS
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_SUSPENDING_EXT_LS:
            AssertMsgReturn(   enmStateNew == VMSTATE_SUSPENDING
                            || enmStateNew == VMSTATE_SUSPENDED_EXT_LS
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_SUSPENDED:
            AssertMsgReturn(   enmStateNew == VMSTATE_POWERING_OFF
                            || enmStateNew == VMSTATE_SAVING
                            || enmStateNew == VMSTATE_RESETTING
                            || enmStateNew == VMSTATE_SOFT_RESETTING
                            || enmStateNew == VMSTATE_RESUMING
                            || enmStateNew == VMSTATE_LOADING
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_SUSPENDED_LS:
            AssertMsgReturn(   enmStateNew == VMSTATE_SUSPENDED
                            || enmStateNew == VMSTATE_SAVING
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_SUSPENDED_EXT_LS:
            AssertMsgReturn(   enmStateNew == VMSTATE_SUSPENDED
                            || enmStateNew == VMSTATE_SAVING
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_SAVING:
            AssertMsgReturn(enmStateNew == VMSTATE_SUSPENDED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_DEBUGGING:
            AssertMsgReturn(   enmStateNew == VMSTATE_RUNNING
                            || enmStateNew == VMSTATE_POWERING_OFF
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_DEBUGGING_LS:
            AssertMsgReturn(   enmStateNew == VMSTATE_DEBUGGING
                            || enmStateNew == VMSTATE_RUNNING_LS
                            || enmStateNew == VMSTATE_POWERING_OFF_LS
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_POWERING_OFF:
            AssertMsgReturn(enmStateNew == VMSTATE_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_POWERING_OFF_LS:
            AssertMsgReturn(   enmStateNew == VMSTATE_POWERING_OFF
                            || enmStateNew == VMSTATE_OFF_LS
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_OFF:
            AssertMsgReturn(enmStateNew == VMSTATE_DESTROYING, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_OFF_LS:
            AssertMsgReturn(enmStateNew == VMSTATE_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_FATAL_ERROR:
            AssertMsgReturn(enmStateNew == VMSTATE_POWERING_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_FATAL_ERROR_LS:
            AssertMsgReturn(   enmStateNew == VMSTATE_FATAL_ERROR
                            || enmStateNew == VMSTATE_POWERING_OFF_LS
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_GURU_MEDITATION:
            AssertMsgReturn(   enmStateNew == VMSTATE_DEBUGGING
                            || enmStateNew == VMSTATE_POWERING_OFF
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_GURU_MEDITATION_LS:
            AssertMsgReturn(   enmStateNew == VMSTATE_GURU_MEDITATION
                            || enmStateNew == VMSTATE_DEBUGGING_LS
                            || enmStateNew == VMSTATE_POWERING_OFF_LS
                            , ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_LOAD_FAILURE:
            AssertMsgReturn(enmStateNew == VMSTATE_POWERING_OFF, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        case VMSTATE_DESTROYING:
            AssertMsgReturn(enmStateNew == VMSTATE_TERMINATED, ("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;

        /* Terminated is final; unknown source states are always rejected. */
        case VMSTATE_TERMINATED:
        default:
            AssertMsgFailedReturn(("%s -> %s\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)), false);
            break;
    }
#endif /* VBOX_STRICT */
    return true;
}
3453
3454
3455/**
3456 * Does the state change callouts.
3457 *
3458 * The caller owns the AtStateCritSect.
3459 *
3460 * @param pVM The cross context VM structure.
3461 * @param pUVM The UVM handle.
3462 * @param enmStateNew The New state.
3463 * @param enmStateOld The old state.
3464 */
3465static void vmR3DoAtState(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld)
3466{
3467 LogRel(("Changing the VM state from '%s' to '%s'\n", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)));
3468
3469 for (PVMATSTATE pCur = pUVM->vm.s.pAtState; pCur; pCur = pCur->pNext)
3470 {
3471 pCur->pfnAtState(pUVM, enmStateNew, enmStateOld, pCur->pvUser);
3472 if ( enmStateNew != VMSTATE_DESTROYING
3473 && pVM->enmVMState == VMSTATE_DESTROYING)
3474 break;
3475 AssertMsg(pVM->enmVMState == enmStateNew,
3476 ("You are not allowed to change the state while in the change callback, except "
3477 "from destroying the VM. There are restrictions in the way the state changes "
3478 "are propagated up to the EM execution loop and it makes the program flow very "
3479 "difficult to follow. (%s, expected %s, old %s)\n",
3480 VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateNew),
3481 VMR3GetStateName(enmStateOld)));
3482 }
3483}
3484
3485
3486/**
3487 * Sets the current VM state, with the AtStatCritSect already entered.
3488 *
3489 * @param pVM The cross context VM structure.
3490 * @param pUVM The UVM handle.
3491 * @param enmStateNew The new state.
3492 * @param enmStateOld The old state.
3493 * @param fSetRatherThanClearFF The usual behavior is to clear the
3494 * VM_FF_CHECK_VM_STATE force flag, but for
3495 * some transitions (-> guru) we need to kick
3496 * the other EMTs to stop what they're doing.
3497 */
3498static void vmR3SetStateLocked(PVM pVM, PUVM pUVM, VMSTATE enmStateNew, VMSTATE enmStateOld, bool fSetRatherThanClearFF)
3499{
3500 vmR3ValidateStateTransition(enmStateOld, enmStateNew);
3501
3502 AssertMsg(pVM->enmVMState == enmStateOld,
3503 ("%s != %s\n", VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateOld)));
3504
3505 pUVM->vm.s.enmPrevVMState = enmStateOld;
3506 pVM->enmVMState = enmStateNew;
3507
3508 if (!fSetRatherThanClearFF)
3509 VM_FF_CLEAR(pVM, VM_FF_CHECK_VM_STATE);
3510 else if (pVM->cCpus > 0)
3511 VM_FF_SET(pVM, VM_FF_CHECK_VM_STATE);
3512
3513 vmR3DoAtState(pVM, pUVM, enmStateNew, enmStateOld);
3514}
3515
3516
3517/**
3518 * Sets the current VM state.
3519 *
3520 * @param pVM The cross context VM structure.
3521 * @param enmStateNew The new state.
3522 * @param enmStateOld The old state (for asserting only).
3523 */
3524static void vmR3SetState(PVM pVM, VMSTATE enmStateNew, VMSTATE enmStateOld)
3525{
3526 PUVM pUVM = pVM->pUVM;
3527 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3528
3529 RT_NOREF_PV(enmStateOld);
3530 AssertMsg(pVM->enmVMState == enmStateOld,
3531 ("%s != %s\n", VMR3GetStateName(pVM->enmVMState), VMR3GetStateName(enmStateOld)));
3532 vmR3SetStateLocked(pVM, pUVM, enmStateNew, pVM->enmVMState, false /*fSetRatherThanClearFF*/);
3533
3534 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3535}
3536
3537
3538/**
3539 * Tries to perform a state transition.
3540 *
3541 * @returns The 1-based ordinal of the succeeding transition.
3542 * VERR_VM_INVALID_VM_STATE and Assert+LogRel on failure.
3543 *
3544 * @param pVM The cross context VM structure.
3545 * @param pszWho Who is trying to change it.
3546 * @param cTransitions The number of transitions in the ellipsis.
3547 * @param ... Transition pairs; new, old.
3548 */
static int vmR3TrySetState(PVM pVM, const char *pszWho, unsigned cTransitions, ...)
{
    va_list va;
    VMSTATE enmStateNew = VMSTATE_CREATED;
    VMSTATE enmStateOld = VMSTATE_CREATED;

#ifdef VBOX_STRICT
    /*
     * Validate the input first.
     * (Each transition pair in the ellipsis is new-state, old-state.)
     */
    va_start(va, cTransitions);
    for (unsigned i = 0; i < cTransitions; i++)
    {
        enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
        enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
        vmR3ValidateStateTransition(enmStateOld, enmStateNew);
    }
    va_end(va);
#endif

    /*
     * Grab the lock and see if any of the proposed transitions works out.
     * The first pair whose old state matches the current state wins; its
     * 1-based index becomes the (positive) return value.
     */
    va_start(va, cTransitions);
    int rc = VERR_VM_INVALID_VM_STATE;
    PUVM pUVM = pVM->pUVM;
    RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);

    VMSTATE enmStateCur = pVM->enmVMState;

    for (unsigned i = 0; i < cTransitions; i++)
    {
        enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
        enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
        if (enmStateCur == enmStateOld)
        {
            vmR3SetStateLocked(pVM, pUVM, enmStateNew, enmStateOld, false /*fSetRatherThanClearFF*/);
            rc = i + 1;
            break;
        }
    }

    if (RT_FAILURE(rc))
    {
        /*
         * Complain about it.  Still holding the lock so enmStateCur stays
         * accurate in the messages.
         */
        if (cTransitions == 1)
        {
            LogRel(("%s: %s -> %s failed, because the VM state is actually %s\n",
                    pszWho, VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew), VMR3GetStateName(enmStateCur)));
            VMSetError(pVM, VERR_VM_INVALID_VM_STATE, RT_SRC_POS,
                       N_("%s failed because the VM state is %s instead of %s"),
                       pszWho, VMR3GetStateName(enmStateCur), VMR3GetStateName(enmStateOld));
            AssertMsgFailed(("%s: %s -> %s failed, because the VM state is actually %s\n",
                             pszWho, VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew), VMR3GetStateName(enmStateCur)));
        }
        else
        {
            /* Re-walk the pairs so the log lists every rejected transition. */
            va_end(va);
            va_start(va, cTransitions);
            LogRel(("%s:\n", pszWho));
            for (unsigned i = 0; i < cTransitions; i++)
            {
                enmStateNew = (VMSTATE)va_arg(va, /*VMSTATE*/int);
                enmStateOld = (VMSTATE)va_arg(va, /*VMSTATE*/int);
                LogRel(("%s%s -> %s",
                        i ? ", " : " ", VMR3GetStateName(enmStateOld), VMR3GetStateName(enmStateNew)));
            }
            LogRel((" failed, because the VM state is actually %s\n", VMR3GetStateName(enmStateCur)));
            VMSetError(pVM, VERR_VM_INVALID_VM_STATE, RT_SRC_POS,
                       N_("%s failed because the current VM state, %s, was not found in the state transition table (old state %s)"),
                       pszWho, VMR3GetStateName(enmStateCur), VMR3GetStateName(enmStateOld));
            AssertMsgFailed(("%s - state=%s, see release log for full details. Check the cTransitions passed us.\n",
                             pszWho, VMR3GetStateName(enmStateCur)));
        }
    }

    RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
    va_end(va);
    Assert(rc > 0 || rc < 0); /* never 0: either a 1-based index or an error */
    return rc;
}
3632
3633
3634/**
3635 * Interface used by EM to signal that it's entering the guru meditation state.
3636 *
 * This will notify other threads.
3638 *
3639 * @returns true if the state changed to Guru, false if no state change.
3640 * @param pVM The cross context VM structure.
3641 */
3642VMMR3_INT_DECL(bool) VMR3SetGuruMeditation(PVM pVM)
3643{
3644 PUVM pUVM = pVM->pUVM;
3645 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3646
3647 VMSTATE enmStateCur = pVM->enmVMState;
3648 bool fRc = true;
3649 if (enmStateCur == VMSTATE_RUNNING)
3650 vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION, VMSTATE_RUNNING, true /*fSetRatherThanClearFF*/);
3651 else if (enmStateCur == VMSTATE_RUNNING_LS)
3652 {
3653 vmR3SetStateLocked(pVM, pUVM, VMSTATE_GURU_MEDITATION_LS, VMSTATE_RUNNING_LS, true /*fSetRatherThanClearFF*/);
3654 SSMR3Cancel(pUVM);
3655 }
3656 else
3657 fRc = false;
3658
3659 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3660 return fRc;
3661}
3662
3663
3664/**
3665 * Called by vmR3EmulationThreadWithId just before the VM structure is freed.
3666 *
3667 * @param pVM The cross context VM structure.
3668 */
3669void vmR3SetTerminated(PVM pVM)
3670{
3671 vmR3SetState(pVM, VMSTATE_TERMINATED, VMSTATE_DESTROYING);
3672}
3673
3674
3675/**
3676 * Checks if the VM was teleported and hasn't been fully resumed yet.
3677 *
3678 * This applies to both sides of the teleportation since we may leave a working
3679 * clone behind and the user is allowed to resume this...
3680 *
3681 * @returns true / false.
3682 * @param pVM The cross context VM structure.
3683 * @thread Any thread.
3684 */
3685VMMR3_INT_DECL(bool) VMR3TeleportedAndNotFullyResumedYet(PVM pVM)
3686{
3687 VM_ASSERT_VALID_EXT_RETURN(pVM, false);
3688 return pVM->vm.s.fTeleportedAndNotFullyResumedYet;
3689}
3690
3691
3692/**
3693 * Registers a VM state change callback.
3694 *
3695 * You are not allowed to call any function which changes the VM state from a
3696 * state callback.
3697 *
3698 * @returns VBox status code.
3699 * @param pUVM The VM handle.
3700 * @param pfnAtState Pointer to callback.
3701 * @param pvUser User argument.
3702 * @thread Any.
3703 */
3704VMMR3DECL(int) VMR3AtStateRegister(PUVM pUVM, PFNVMATSTATE pfnAtState, void *pvUser)
3705{
3706 LogFlow(("VMR3AtStateRegister: pfnAtState=%p pvUser=%p\n", pfnAtState, pvUser));
3707
3708 /*
3709 * Validate input.
3710 */
3711 AssertPtrReturn(pfnAtState, VERR_INVALID_PARAMETER);
3712 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
3713
3714 /*
3715 * Allocate a new record.
3716 */
3717 PVMATSTATE pNew = (PVMATSTATE)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
3718 if (!pNew)
3719 return VERR_NO_MEMORY;
3720
3721 /* fill */
3722 pNew->pfnAtState = pfnAtState;
3723 pNew->pvUser = pvUser;
3724
3725 /* insert */
3726 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3727 pNew->pNext = *pUVM->vm.s.ppAtStateNext;
3728 *pUVM->vm.s.ppAtStateNext = pNew;
3729 pUVM->vm.s.ppAtStateNext = &pNew->pNext;
3730 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3731
3732 return VINF_SUCCESS;
3733}
3734
3735
3736/**
3737 * Deregisters a VM state change callback.
3738 *
3739 * @returns VBox status code.
3740 * @param pUVM The VM handle.
3741 * @param pfnAtState Pointer to callback.
3742 * @param pvUser User argument.
3743 * @thread Any.
3744 */
3745VMMR3DECL(int) VMR3AtStateDeregister(PUVM pUVM, PFNVMATSTATE pfnAtState, void *pvUser)
3746{
3747 LogFlow(("VMR3AtStateDeregister: pfnAtState=%p pvUser=%p\n", pfnAtState, pvUser));
3748
3749 /*
3750 * Validate input.
3751 */
3752 AssertPtrReturn(pfnAtState, VERR_INVALID_PARAMETER);
3753 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
3754
3755 RTCritSectEnter(&pUVM->vm.s.AtStateCritSect);
3756
3757 /*
3758 * Search the list for the entry.
3759 */
3760 PVMATSTATE pPrev = NULL;
3761 PVMATSTATE pCur = pUVM->vm.s.pAtState;
3762 while ( pCur
3763 && ( pCur->pfnAtState != pfnAtState
3764 || pCur->pvUser != pvUser))
3765 {
3766 pPrev = pCur;
3767 pCur = pCur->pNext;
3768 }
3769 if (!pCur)
3770 {
3771 AssertMsgFailed(("pfnAtState=%p was not found\n", pfnAtState));
3772 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3773 return VERR_FILE_NOT_FOUND;
3774 }
3775
3776 /*
3777 * Unlink it.
3778 */
3779 if (pPrev)
3780 {
3781 pPrev->pNext = pCur->pNext;
3782 if (!pCur->pNext)
3783 pUVM->vm.s.ppAtStateNext = &pPrev->pNext;
3784 }
3785 else
3786 {
3787 pUVM->vm.s.pAtState = pCur->pNext;
3788 if (!pCur->pNext)
3789 pUVM->vm.s.ppAtStateNext = &pUVM->vm.s.pAtState;
3790 }
3791
3792 RTCritSectLeave(&pUVM->vm.s.AtStateCritSect);
3793
3794 /*
3795 * Free it.
3796 */
3797 pCur->pfnAtState = NULL;
3798 pCur->pNext = NULL;
3799 MMR3HeapFree(pCur);
3800
3801 return VINF_SUCCESS;
3802}
3803
3804
3805/**
3806 * Registers a VM error callback.
3807 *
3808 * @returns VBox status code.
3809 * @param pUVM The VM handle.
3810 * @param pfnAtError Pointer to callback.
3811 * @param pvUser User argument.
3812 * @thread Any.
3813 */
3814VMMR3DECL(int) VMR3AtErrorRegister(PUVM pUVM, PFNVMATERROR pfnAtError, void *pvUser)
3815{
3816 LogFlow(("VMR3AtErrorRegister: pfnAtError=%p pvUser=%p\n", pfnAtError, pvUser));
3817
3818 /*
3819 * Validate input.
3820 */
3821 AssertPtrReturn(pfnAtError, VERR_INVALID_PARAMETER);
3822 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
3823
3824 /*
3825 * Allocate a new record.
3826 */
3827 PVMATERROR pNew = (PVMATERROR)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
3828 if (!pNew)
3829 return VERR_NO_MEMORY;
3830
3831 /* fill */
3832 pNew->pfnAtError = pfnAtError;
3833 pNew->pvUser = pvUser;
3834
3835 /* insert */
3836 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3837 pNew->pNext = *pUVM->vm.s.ppAtErrorNext;
3838 *pUVM->vm.s.ppAtErrorNext = pNew;
3839 pUVM->vm.s.ppAtErrorNext = &pNew->pNext;
3840 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3841
3842 return VINF_SUCCESS;
3843}
3844
3845
3846/**
3847 * Deregisters a VM error callback.
3848 *
3849 * @returns VBox status code.
3850 * @param pUVM The VM handle.
3851 * @param pfnAtError Pointer to callback.
3852 * @param pvUser User argument.
3853 * @thread Any.
3854 */
3855VMMR3DECL(int) VMR3AtErrorDeregister(PUVM pUVM, PFNVMATERROR pfnAtError, void *pvUser)
3856{
3857 LogFlow(("VMR3AtErrorDeregister: pfnAtError=%p pvUser=%p\n", pfnAtError, pvUser));
3858
3859 /*
3860 * Validate input.
3861 */
3862 AssertPtrReturn(pfnAtError, VERR_INVALID_PARAMETER);
3863 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
3864
3865 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3866
3867 /*
3868 * Search the list for the entry.
3869 */
3870 PVMATERROR pPrev = NULL;
3871 PVMATERROR pCur = pUVM->vm.s.pAtError;
3872 while ( pCur
3873 && ( pCur->pfnAtError != pfnAtError
3874 || pCur->pvUser != pvUser))
3875 {
3876 pPrev = pCur;
3877 pCur = pCur->pNext;
3878 }
3879 if (!pCur)
3880 {
3881 AssertMsgFailed(("pfnAtError=%p was not found\n", pfnAtError));
3882 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3883 return VERR_FILE_NOT_FOUND;
3884 }
3885
3886 /*
3887 * Unlink it.
3888 */
3889 if (pPrev)
3890 {
3891 pPrev->pNext = pCur->pNext;
3892 if (!pCur->pNext)
3893 pUVM->vm.s.ppAtErrorNext = &pPrev->pNext;
3894 }
3895 else
3896 {
3897 pUVM->vm.s.pAtError = pCur->pNext;
3898 if (!pCur->pNext)
3899 pUVM->vm.s.ppAtErrorNext = &pUVM->vm.s.pAtError;
3900 }
3901
3902 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3903
3904 /*
3905 * Free it.
3906 */
3907 pCur->pfnAtError = NULL;
3908 pCur->pNext = NULL;
3909 MMR3HeapFree(pCur);
3910
3911 return VINF_SUCCESS;
3912}
3913
3914
/**
 * Ellipsis to va_list wrapper for calling pfnAtError.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pCur        The at-error registration record whose callback to invoke.
 * @param   rc          The VBox status code describing the error.
 * @param   SRC_POS     The source position of the error.
 * @param   pszFormat   Format string.
 * @param   ...         Format arguments.
 */
static void vmR3SetErrorWorkerDoCall(PVM pVM, PVMATERROR pCur, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...)
{
    /* The callback takes a va_list, so materialize one from our ellipsis. */
    va_list va;
    va_start(va, pszFormat);
    pCur->pfnAtError(pVM->pUVM, pCur->pvUser, rc, RT_SRC_POS_ARGS, pszFormat, va);
    va_end(va);
}
3925
3926
3927/**
3928 * This is a worker function for GC and Ring-0 calls to VMSetError and VMSetErrorV.
3929 * The message is found in VMINT.
3930 *
3931 * @param pVM The cross context VM structure.
3932 * @thread EMT.
3933 */
3934VMMR3_INT_DECL(void) VMR3SetErrorWorker(PVM pVM)
3935{
3936 VM_ASSERT_EMT(pVM);
3937 AssertReleaseMsgFailed(("And we have a winner! You get to implement Ring-0 and GC VMSetErrorV! Congrats!\n"));
3938
3939 /*
3940 * Unpack the error (if we managed to format one).
3941 */
3942 PVMERROR pErr = pVM->vm.s.pErrorR3;
3943 const char *pszFile = NULL;
3944 const char *pszFunction = NULL;
3945 uint32_t iLine = 0;
3946 const char *pszMessage;
3947 int32_t rc = VERR_MM_HYPER_NO_MEMORY;
3948 if (pErr)
3949 {
3950 AssertCompile(sizeof(const char) == sizeof(uint8_t));
3951 if (pErr->offFile)
3952 pszFile = (const char *)pErr + pErr->offFile;
3953 iLine = pErr->iLine;
3954 if (pErr->offFunction)
3955 pszFunction = (const char *)pErr + pErr->offFunction;
3956 if (pErr->offMessage)
3957 pszMessage = (const char *)pErr + pErr->offMessage;
3958 else
3959 pszMessage = "No message!";
3960 }
3961 else
3962 pszMessage = "No message! (Failed to allocate memory to put the error message in!)";
3963
3964 /*
3965 * Call the at error callbacks.
3966 */
3967 PUVM pUVM = pVM->pUVM;
3968 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
3969 ASMAtomicIncU32(&pUVM->vm.s.cRuntimeErrors);
3970 for (PVMATERROR pCur = pUVM->vm.s.pAtError; pCur; pCur = pCur->pNext)
3971 vmR3SetErrorWorkerDoCall(pVM, pCur, rc, RT_SRC_POS_ARGS, "%s", pszMessage);
3972 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
3973}
3974
3975
3976/**
3977 * Gets the number of errors raised via VMSetError.
3978 *
3979 * This can be used avoid double error messages.
3980 *
3981 * @returns The error count.
3982 * @param pUVM The VM handle.
3983 */
3984VMMR3_INT_DECL(uint32_t) VMR3GetErrorCount(PUVM pUVM)
3985{
3986 AssertPtrReturn(pUVM, 0);
3987 AssertReturn(pUVM->u32Magic == UVM_MAGIC, 0);
3988 return pUVM->vm.s.cErrors;
3989}
3990
3991
/**
 * Creation time wrapper for vmR3SetErrorUV.
 *
 * @returns rc.
 * @param   pUVM            Pointer to the user mode VM structure.
 * @param   rc              The VBox status code.
 * @param   SRC_POS         The source position of this error; RT_SRC_POS_DECL
 *                          expands to the pszFile, iLine and pszFunction
 *                          parameters forwarded below.
 * @param   pszFormat       Format string.
 * @param   ...             The arguments.
 * @thread  Any thread.
 */
static int vmR3SetErrorU(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...)
{
    va_list va;
    va_start(va, pszFormat);
    vmR3SetErrorUV(pUVM, rc, pszFile, iLine, pszFunction, pszFormat, &va);
    va_end(va);
    return rc;
}
4011
4012
4013/**
4014 * Worker which calls everyone listening to the VM error messages.
4015 *
4016 * @param pUVM Pointer to the user mode VM structure.
4017 * @param rc The VBox status code.
4018 * @param SRC_POS The source position of this error.
4019 * @param pszFormat Format string.
4020 * @param pArgs Pointer to the format arguments.
4021 * @thread EMT
4022 */
4023DECLCALLBACK(void) vmR3SetErrorUV(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list *pArgs)
4024{
4025 /*
4026 * Log the error.
4027 */
4028 va_list va3;
4029 va_copy(va3, *pArgs);
4030 RTLogRelPrintf("VMSetError: %s(%d) %s; rc=%Rrc\n"
4031 "VMSetError: %N\n",
4032 pszFile, iLine, pszFunction, rc,
4033 pszFormat, &va3);
4034 va_end(va3);
4035
4036#ifdef LOG_ENABLED
4037 va_copy(va3, *pArgs);
4038 RTLogPrintf("VMSetError: %s(%d) %s; rc=%Rrc\n"
4039 "%N\n",
4040 pszFile, iLine, pszFunction, rc,
4041 pszFormat, &va3);
4042 va_end(va3);
4043#endif
4044
4045 /*
4046 * Make a copy of the message.
4047 */
4048 if (pUVM->pVM)
4049 vmSetErrorCopy(pUVM->pVM, rc, RT_SRC_POS_ARGS, pszFormat, *pArgs);
4050
4051 /*
4052 * Call the at error callbacks.
4053 */
4054 bool fCalledSomeone = false;
4055 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
4056 ASMAtomicIncU32(&pUVM->vm.s.cErrors);
4057 for (PVMATERROR pCur = pUVM->vm.s.pAtError; pCur; pCur = pCur->pNext)
4058 {
4059 va_list va2;
4060 va_copy(va2, *pArgs);
4061 pCur->pfnAtError(pUVM, pCur->pvUser, rc, RT_SRC_POS_ARGS, pszFormat, va2);
4062 va_end(va2);
4063 fCalledSomeone = true;
4064 }
4065 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
4066}
4067
4068
/**
 * Sets the error message.
 *
 * @returns rc. Meaning you can do:
 *    @code
 *    return VM_SET_ERROR_U(pUVM, VERR_OF_YOUR_CHOICE, "descriptive message");
 *    @endcode
 * @param   pUVM            The user mode VM handle.
 * @param   rc              VBox status code.
 * @param   SRC_POS         Use RT_SRC_POS.
 * @param   pszFormat       Error message format string.
 * @param   ...             Error message arguments.
 * @thread  Any
 */
VMMR3DECL(int) VMR3SetError(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, ...)
{
    /* Ellipsis-to-va_list wrapper around VMR3SetErrorV. */
    va_list va;
    va_start(va, pszFormat);
    int rcRet = VMR3SetErrorV(pUVM, rc, pszFile, iLine, pszFunction, pszFormat, va);
    va_end(va);
    return rcRet;
}
4091
4092
4093/**
4094 * Sets the error message.
4095 *
4096 * @returns rc. Meaning you can do:
4097 * @code
4098 * return VM_SET_ERROR_U(pUVM, VERR_OF_YOUR_CHOICE, "descriptive message");
4099 * @endcode
4100 * @param pUVM The user mode VM handle.
4101 * @param rc VBox status code.
4102 * @param SRC_POS Use RT_SRC_POS.
4103 * @param pszFormat Error message format string.
4104 * @param va Error message arguments.
4105 * @thread Any
4106 */
4107VMMR3DECL(int) VMR3SetErrorV(PUVM pUVM, int rc, RT_SRC_POS_DECL, const char *pszFormat, va_list va)
4108{
4109 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
4110
4111 /* Take shortcut when called on EMT, skipping VM handle requirement + validation. */
4112 if (VMR3GetVMCPUThread(pUVM) != NIL_RTTHREAD)
4113 {
4114 va_list vaCopy;
4115 va_copy(vaCopy, va);
4116 vmR3SetErrorUV(pUVM, rc, RT_SRC_POS_ARGS, pszFormat, &vaCopy);
4117 va_end(vaCopy);
4118 return rc;
4119 }
4120
4121 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
4122 return VMSetErrorV(pUVM->pVM, rc, pszFile, iLine, pszFunction, pszFormat, va);
4123}
4124
4125
4126
4127/**
4128 * Registers a VM runtime error callback.
4129 *
4130 * @returns VBox status code.
4131 * @param pUVM The user mode VM structure.
4132 * @param pfnAtRuntimeError Pointer to callback.
4133 * @param pvUser User argument.
4134 * @thread Any.
4135 */
4136VMMR3DECL(int) VMR3AtRuntimeErrorRegister(PUVM pUVM, PFNVMATRUNTIMEERROR pfnAtRuntimeError, void *pvUser)
4137{
4138 LogFlow(("VMR3AtRuntimeErrorRegister: pfnAtRuntimeError=%p pvUser=%p\n", pfnAtRuntimeError, pvUser));
4139
4140 /*
4141 * Validate input.
4142 */
4143 AssertPtrReturn(pfnAtRuntimeError, VERR_INVALID_PARAMETER);
4144 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
4145
4146 /*
4147 * Allocate a new record.
4148 */
4149 PVMATRUNTIMEERROR pNew = (PVMATRUNTIMEERROR)MMR3HeapAllocU(pUVM, MM_TAG_VM, sizeof(*pNew));
4150 if (!pNew)
4151 return VERR_NO_MEMORY;
4152
4153 /* fill */
4154 pNew->pfnAtRuntimeError = pfnAtRuntimeError;
4155 pNew->pvUser = pvUser;
4156
4157 /* insert */
4158 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
4159 pNew->pNext = *pUVM->vm.s.ppAtRuntimeErrorNext;
4160 *pUVM->vm.s.ppAtRuntimeErrorNext = pNew;
4161 pUVM->vm.s.ppAtRuntimeErrorNext = &pNew->pNext;
4162 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
4163
4164 return VINF_SUCCESS;
4165}
4166
4167
4168/**
4169 * Deregisters a VM runtime error callback.
4170 *
4171 * @returns VBox status code.
4172 * @param pUVM The user mode VM handle.
4173 * @param pfnAtRuntimeError Pointer to callback.
4174 * @param pvUser User argument.
4175 * @thread Any.
4176 */
4177VMMR3DECL(int) VMR3AtRuntimeErrorDeregister(PUVM pUVM, PFNVMATRUNTIMEERROR pfnAtRuntimeError, void *pvUser)
4178{
4179 LogFlow(("VMR3AtRuntimeErrorDeregister: pfnAtRuntimeError=%p pvUser=%p\n", pfnAtRuntimeError, pvUser));
4180
4181 /*
4182 * Validate input.
4183 */
4184 AssertPtrReturn(pfnAtRuntimeError, VERR_INVALID_PARAMETER);
4185 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
4186
4187 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
4188
4189 /*
4190 * Search the list for the entry.
4191 */
4192 PVMATRUNTIMEERROR pPrev = NULL;
4193 PVMATRUNTIMEERROR pCur = pUVM->vm.s.pAtRuntimeError;
4194 while ( pCur
4195 && ( pCur->pfnAtRuntimeError != pfnAtRuntimeError
4196 || pCur->pvUser != pvUser))
4197 {
4198 pPrev = pCur;
4199 pCur = pCur->pNext;
4200 }
4201 if (!pCur)
4202 {
4203 AssertMsgFailed(("pfnAtRuntimeError=%p was not found\n", pfnAtRuntimeError));
4204 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
4205 return VERR_FILE_NOT_FOUND;
4206 }
4207
4208 /*
4209 * Unlink it.
4210 */
4211 if (pPrev)
4212 {
4213 pPrev->pNext = pCur->pNext;
4214 if (!pCur->pNext)
4215 pUVM->vm.s.ppAtRuntimeErrorNext = &pPrev->pNext;
4216 }
4217 else
4218 {
4219 pUVM->vm.s.pAtRuntimeError = pCur->pNext;
4220 if (!pCur->pNext)
4221 pUVM->vm.s.ppAtRuntimeErrorNext = &pUVM->vm.s.pAtRuntimeError;
4222 }
4223
4224 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
4225
4226 /*
4227 * Free it.
4228 */
4229 pCur->pfnAtRuntimeError = NULL;
4230 pCur->pNext = NULL;
4231 MMR3HeapFree(pCur);
4232
4233 return VINF_SUCCESS;
4234}
4235
4236
4237/**
4238 * EMT rendezvous worker that vmR3SetRuntimeErrorCommon uses to safely change
4239 * the state to FatalError(LS).
4240 *
4241 * @returns VERR_VM_INVALID_VM_STATE or VINF_EM_SUSPEND. (This is a strict
4242 * return code, see FNVMMEMTRENDEZVOUS.)
4243 *
4244 * @param pVM The cross context VM structure.
4245 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
4246 * @param pvUser Ignored.
4247 */
4248static DECLCALLBACK(VBOXSTRICTRC) vmR3SetRuntimeErrorChangeState(PVM pVM, PVMCPU pVCpu, void *pvUser)
4249{
4250 NOREF(pVCpu);
4251 Assert(!pvUser); NOREF(pvUser);
4252
4253 /*
4254 * The first EMT thru here changes the state.
4255 */
4256 if (pVCpu->idCpu == pVM->cCpus - 1)
4257 {
4258 int rc = vmR3TrySetState(pVM, "VMSetRuntimeError", 2,
4259 VMSTATE_FATAL_ERROR, VMSTATE_RUNNING,
4260 VMSTATE_FATAL_ERROR_LS, VMSTATE_RUNNING_LS);
4261 if (RT_FAILURE(rc))
4262 return rc;
4263 if (rc == 2)
4264 SSMR3Cancel(pVM->pUVM);
4265
4266 VM_FF_SET(pVM, VM_FF_CHECK_VM_STATE);
4267 }
4268
4269 /* This'll make sure we get out of whereever we are (e.g. REM). */
4270 return VINF_EM_SUSPEND;
4271}
4272
4273
4274/**
4275 * Worker for VMR3SetRuntimeErrorWorker and vmR3SetRuntimeErrorV.
4276 *
4277 * This does the common parts after the error has been saved / retrieved.
4278 *
4279 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
4280 *
4281 * @param pVM The cross context VM structure.
4282 * @param fFlags The error flags.
4283 * @param pszErrorId Error ID string.
4284 * @param pszFormat Format string.
4285 * @param pVa Pointer to the format arguments.
4286 */
4287static int vmR3SetRuntimeErrorCommon(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list *pVa)
4288{
4289 LogRel(("VM: Raising runtime error '%s' (fFlags=%#x)\n", pszErrorId, fFlags));
4290 PUVM pUVM = pVM->pUVM;
4291
4292 /*
4293 * Take actions before the call.
4294 */
4295 int rc;
4296 if (fFlags & VMSETRTERR_FLAGS_FATAL)
4297 rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING | VMMEMTRENDEZVOUS_FLAGS_STOP_ON_ERROR,
4298 vmR3SetRuntimeErrorChangeState, NULL);
4299 else if (fFlags & VMSETRTERR_FLAGS_SUSPEND)
4300 rc = VMR3Suspend(pUVM, VMSUSPENDREASON_RUNTIME_ERROR);
4301 else
4302 rc = VINF_SUCCESS;
4303
4304 /*
4305 * Do the callback round.
4306 */
4307 RTCritSectEnter(&pUVM->vm.s.AtErrorCritSect);
4308 ASMAtomicIncU32(&pUVM->vm.s.cRuntimeErrors);
4309 for (PVMATRUNTIMEERROR pCur = pUVM->vm.s.pAtRuntimeError; pCur; pCur = pCur->pNext)
4310 {
4311 va_list va;
4312 va_copy(va, *pVa);
4313 pCur->pfnAtRuntimeError(pUVM, pCur->pvUser, fFlags, pszErrorId, pszFormat, va);
4314 va_end(va);
4315 }
4316 RTCritSectLeave(&pUVM->vm.s.AtErrorCritSect);
4317
4318 return rc;
4319}
4320
4321
/**
 * Ellipsis to va_list wrapper for calling vmR3SetRuntimeErrorCommon.
 *
 * @returns VBox status code from vmR3SetRuntimeErrorCommon.
 * @param   pVM             The cross context VM structure.
 * @param   fFlags          The error flags.
 * @param   pszErrorId      Error ID string.
 * @param   pszFormat       Format string.
 * @param   ...             Format arguments.
 */
static int vmR3SetRuntimeErrorCommonF(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, ...)
{
    va_list va;
    va_start(va, pszFormat);
    int rc = vmR3SetRuntimeErrorCommon(pVM, fFlags, pszErrorId, pszFormat, &va);
    va_end(va);
    return rc;
}
4333
4334
4335/**
4336 * This is a worker function for RC and Ring-0 calls to VMSetError and
4337 * VMSetErrorV.
4338 *
4339 * The message is found in VMINT.
4340 *
4341 * @returns VBox status code, see VMSetRuntimeError.
4342 * @param pVM The cross context VM structure.
4343 * @thread EMT.
4344 */
4345VMMR3_INT_DECL(int) VMR3SetRuntimeErrorWorker(PVM pVM)
4346{
4347 VM_ASSERT_EMT(pVM);
4348 AssertReleaseMsgFailed(("And we have a winner! You get to implement Ring-0 and GC VMSetRuntimeErrorV! Congrats!\n"));
4349
4350 /*
4351 * Unpack the error (if we managed to format one).
4352 */
4353 const char *pszErrorId = "SetRuntimeError";
4354 const char *pszMessage = "No message!";
4355 uint32_t fFlags = VMSETRTERR_FLAGS_FATAL;
4356 PVMRUNTIMEERROR pErr = pVM->vm.s.pRuntimeErrorR3;
4357 if (pErr)
4358 {
4359 AssertCompile(sizeof(const char) == sizeof(uint8_t));
4360 if (pErr->offErrorId)
4361 pszErrorId = (const char *)pErr + pErr->offErrorId;
4362 if (pErr->offMessage)
4363 pszMessage = (const char *)pErr + pErr->offMessage;
4364 fFlags = pErr->fFlags;
4365 }
4366
4367 /*
4368 * Join cause with vmR3SetRuntimeErrorV.
4369 */
4370 return vmR3SetRuntimeErrorCommonF(pVM, fFlags, pszErrorId, "%s", pszMessage);
4371}
4372
4373
/**
 * Worker for VMSetRuntimeErrorV for doing the job on EMT in ring-3.
 *
 * Takes ownership of @a pszMessage and frees it before returning.
 *
 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
 *
 * @param   pVM             The cross context VM structure.
 * @param   fFlags          The error flags.
 * @param   pszErrorId      Error ID string.
 * @param   pszMessage      The error message residing the MM heap; freed here.
 *
 * @thread  EMT
 */
DECLCALLBACK(int) vmR3SetRuntimeError(PVM pVM, uint32_t fFlags, const char *pszErrorId, char *pszMessage)
{
#if 0 /** @todo make copy of the error msg. */
    /*
     * Make a copy of the message.
     */
    va_list va2;
    va_copy(va2, *pVa);
    vmSetRuntimeErrorCopy(pVM, fFlags, pszErrorId, pszFormat, va2);
    va_end(va2);
#endif

    /*
     * Join paths with VMR3SetRuntimeErrorWorker.
     */
    int rc = vmR3SetRuntimeErrorCommonF(pVM, fFlags, pszErrorId, "%s", pszMessage);
    MMR3HeapFree(pszMessage);
    return rc;
}
4405
4406
/**
 * Worker for VMSetRuntimeErrorV for doing the job on EMT in ring-3.
 *
 * @returns VBox status code with modifications, see VMSetRuntimeErrorV.
 *
 * @param   pVM             The cross context VM structure.
 * @param   fFlags          The error flags.
 * @param   pszErrorId      Error ID string.
 * @param   pszFormat       Format string.
 * @param   pVa             Pointer to the format arguments.  Consumed twice:
 *                          once (via a copy) for the stored message and once
 *                          by the common worker for the callbacks.
 *
 * @thread  EMT
 */
DECLCALLBACK(int) vmR3SetRuntimeErrorV(PVM pVM, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list *pVa)
{
    /*
     * Make a copy of the message.
     */
    va_list va2;
    va_copy(va2, *pVa);
    vmSetRuntimeErrorCopy(pVM, fFlags, pszErrorId, pszFormat, va2);
    va_end(va2);

    /*
     * Join paths with VMR3SetRuntimeErrorWorker.
     */
    return vmR3SetRuntimeErrorCommon(pVM, fFlags, pszErrorId, pszFormat, pVa);
}
4435
4436
4437/**
4438 * Gets the number of runtime errors raised via VMR3SetRuntimeError.
4439 *
4440 * This can be used avoid double error messages.
4441 *
4442 * @returns The runtime error count.
4443 * @param pUVM The user mode VM handle.
4444 */
4445VMMR3_INT_DECL(uint32_t) VMR3GetRuntimeErrorCount(PUVM pUVM)
4446{
4447 return pUVM->vm.s.cRuntimeErrors;
4448}
4449
4450
/**
 * Gets the ID of the virtual CPU associated with the calling thread.
 *
 * @returns The CPU ID. NIL_VMCPUID if the thread isn't an EMT.
 *
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(RTCPUID) VMR3GetVMCPUId(PVM pVM)
{
    /* The per-thread UVMCPU pointer is stashed in TLS; non-EMT threads have none. */
    PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
    return pUVCpu
         ? pUVCpu->idCpu
         : NIL_VMCPUID;
}
4465
4466
4467/**
4468 * Checks if the VM is long-mode (64-bit) capable or not.
4469 * @returns true if VM can operate in long-mode, false
4470 * otherwise.
4471 *
4472 * @param pVM The cross context VM structure.
4473 */
4474VMMR3_INT_DECL(bool) VMR3IsLongModeAllowed(PVM pVM)
4475{
4476 if (HMIsEnabled(pVM))
4477 return HMIsLongModeAllowed(pVM);
4478 return false;
4479}
4480
4481
4482/**
4483 * Returns the native handle of the current EMT VMCPU thread.
4484 *
4485 * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
4486 * @param pVM The cross context VM structure.
4487 * @thread EMT
4488 */
4489VMMR3DECL(RTNATIVETHREAD) VMR3GetVMCPUNativeThread(PVM pVM)
4490{
4491 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pVM->pUVM->vm.s.idxTLS);
4492
4493 if (!pUVCpu)
4494 return NIL_RTNATIVETHREAD;
4495
4496 return pUVCpu->vm.s.NativeThreadEMT;
4497}
4498
4499
4500/**
4501 * Returns the native handle of the current EMT VMCPU thread.
4502 *
4503 * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
4504 * @param pUVM The user mode VM structure.
4505 * @thread EMT
4506 */
4507VMMR3DECL(RTNATIVETHREAD) VMR3GetVMCPUNativeThreadU(PUVM pUVM)
4508{
4509 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
4510
4511 if (!pUVCpu)
4512 return NIL_RTNATIVETHREAD;
4513
4514 return pUVCpu->vm.s.NativeThreadEMT;
4515}
4516
4517
4518/**
4519 * Returns the handle of the current EMT VMCPU thread.
4520 *
4521 * @returns Handle if this is an EMT thread; NIL_RTNATIVETHREAD otherwise
4522 * @param pUVM The user mode VM handle.
4523 * @thread EMT
4524 */
4525VMMR3DECL(RTTHREAD) VMR3GetVMCPUThread(PUVM pUVM)
4526{
4527 PUVMCPU pUVCpu = (PUVMCPU)RTTlsGet(pUVM->vm.s.idxTLS);
4528
4529 if (!pUVCpu)
4530 return NIL_RTTHREAD;
4531
4532 return pUVCpu->vm.s.ThreadEMT;
4533}
4534
4535
4536/**
4537 * Return the package and core ID of a CPU.
4538 *
4539 * @returns VBOX status code.
4540 * @param pUVM The user mode VM handle.
4541 * @param idCpu Virtual CPU to get the ID from.
4542 * @param pidCpuCore Where to store the core ID of the virtual CPU.
4543 * @param pidCpuPackage Where to store the package ID of the virtual CPU.
4544 *
4545 */
4546VMMR3DECL(int) VMR3GetCpuCoreAndPackageIdFromCpuId(PUVM pUVM, VMCPUID idCpu, uint32_t *pidCpuCore, uint32_t *pidCpuPackage)
4547{
4548 /*
4549 * Validate input.
4550 */
4551 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
4552 PVM pVM = pUVM->pVM;
4553 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
4554 AssertPtrReturn(pidCpuCore, VERR_INVALID_POINTER);
4555 AssertPtrReturn(pidCpuPackage, VERR_INVALID_POINTER);
4556 if (idCpu >= pVM->cCpus)
4557 return VERR_INVALID_CPU_ID;
4558
4559 /*
4560 * Set return values.
4561 */
4562#ifdef VBOX_WITH_MULTI_CORE
4563 *pidCpuCore = idCpu;
4564 *pidCpuPackage = 0;
4565#else
4566 *pidCpuCore = 0;
4567 *pidCpuPackage = idCpu;
4568#endif
4569
4570 return VINF_SUCCESS;
4571}
4572
4573
/**
 * Worker for VMR3HotUnplugCpu.
 *
 * @returns VINF_EM_WAIT_SIPI (strict status code).
 * @param   pVM     The cross context VM structure.
 * @param   idCpu   The current CPU.
 */
static DECLCALLBACK(int) vmR3HotUnplugCpu(PVM pVM, VMCPUID idCpu)
{
    PVMCPU pVCpu = VMMGetCpuById(pVM, idCpu);
    VMCPU_ASSERT_EMT(pVCpu);

    /*
     * Reset per CPU resources.
     *
     * Actually only needed for VT-x because the CPU seems to be still in some
     * paged mode and startup fails after a new hot plug event. SVM works fine
     * even without this.
     */
    Log(("vmR3HotUnplugCpu for VCPU %u\n", idCpu));
    PGMR3ResetCpu(pVM, pVCpu);
    PDMR3ResetCpu(pVCpu);
    TRPMR3ResetCpu(pVCpu);
    CPUMR3ResetCpu(pVM, pVCpu);
    EMR3ResetCpu(pVCpu);
    HMR3ResetCpu(pVCpu);
    /* Park the vCPU waiting for a startup IPI, like a freshly plugged AP. */
    return VINF_EM_WAIT_SIPI;
}
4602
4603
/**
 * Hot-unplugs a CPU from the guest.
 *
 * @returns VBox status code.
 * @param   pUVM    The user mode VM handle.
 * @param   idCpu   Virtual CPU to perform the hot unplugging operation on.
 */
VMMR3DECL(int) VMR3HotUnplugCpu(PUVM pUVM, VMCPUID idCpu)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);

    /** @todo r=bird: Don't destroy the EMT, it'll break VMMR3EmtRendezvous and
     *        broadcast requests.  Just note down somewhere that the CPU is
     *        offline and send it to SIPI wait.  Maybe modify VMCPUSTATE and push
     *        it out of the EM loops when offline. */
    /* The request is queued on the target EMT; we do not wait for completion. */
    return VMR3ReqCallNoWaitU(pUVM, idCpu, (PFNRT)vmR3HotUnplugCpu, 2, pVM, idCpu);
}
4624
4625
4626/**
4627 * Hot-plugs a CPU on the guest.
4628 *
4629 * @returns VBox status code.
4630 * @param pUVM The user mode VM handle.
4631 * @param idCpu Virtual CPU to perform the hot plugging operation on.
4632 */
4633VMMR3DECL(int) VMR3HotPlugCpu(PUVM pUVM, VMCPUID idCpu)
4634{
4635 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
4636 PVM pVM = pUVM->pVM;
4637 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
4638 AssertReturn(idCpu < pVM->cCpus, VERR_INVALID_CPU_ID);
4639
4640 /** @todo r-bird: Just mark it online and make sure it waits on SPIP. */
4641 return VINF_SUCCESS;
4642}
4643
4644
4645/**
4646 * Changes the VMM execution cap.
4647 *
4648 * @returns VBox status code.
4649 * @param pUVM The user mode VM structure.
4650 * @param uCpuExecutionCap New CPU execution cap in precent, 1-100. Where
4651 * 100 is max performance (default).
4652 */
4653VMMR3DECL(int) VMR3SetCpuExecutionCap(PUVM pUVM, uint32_t uCpuExecutionCap)
4654{
4655 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
4656 PVM pVM = pUVM->pVM;
4657 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
4658 AssertReturn(uCpuExecutionCap > 0 && uCpuExecutionCap <= 100, VERR_INVALID_PARAMETER);
4659
4660 Log(("VMR3SetCpuExecutionCap: new priority = %d\n", uCpuExecutionCap));
4661 /* Note: not called from EMT. */
4662 pVM->uCpuExecutionCap = uCpuExecutionCap;
4663 return VINF_SUCCESS;
4664}
4665
4666
4667/**
4668 * Control whether the VM should power off when resetting.
4669 *
4670 * @returns VBox status code.
4671 * @param pUVM The user mode VM handle.
4672 * @param fPowerOffInsteadOfReset Flag whether the VM should power off when
4673 * resetting.
4674 */
4675VMMR3DECL(int) VMR3SetPowerOffInsteadOfReset(PUVM pUVM, bool fPowerOffInsteadOfReset)
4676{
4677 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
4678 PVM pVM = pUVM->pVM;
4679 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
4680
4681 /* Note: not called from EMT. */
4682 pVM->vm.s.fPowerOffInsteadOfReset = fPowerOffInsteadOfReset;
4683 return VINF_SUCCESS;
4684}
4685
注意: 瀏覽 TracBrowser 來幫助您使用儲存庫瀏覽器

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette