VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@ 80346

Last change on this file since 80346 was 80346, checked in by vboxsync, 6 years ago

VMM,PciRaw: Eliminate duplicate PGVM/PVMCC and PGVMCPU/PVMCPUCC parameters in ring-0 code. bugref:9217

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 94.3 KB
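The commit message above refers to ring-0 entry points and helpers that used to be handed the same VM twice, once as PGVM/PGVMCPU and once as PVMCC/PVMCPUCC. As a rough, hypothetical sketch of what dropping the duplicates looks like (the helper name below is invented purely for illustration):

    /* Before: the same objects passed under two typedefs. */
    int someR0Helper(PGVM pGVM, PVMCC pVM, PGVMCPU pGVCpu, PVMCPUCC pVCpu);
    /* After: a single pointer per object is enough in ring-0. */
    int someR0Helper(PGVM pGVM, PGVMCPU pGVCpu);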
 
1/* $Id: VMMR0.cpp 80346 2019-08-19 19:36:29Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VMM
23#include <VBox/vmm/vmm.h>
24#include <VBox/sup.h>
25#include <VBox/vmm/trpm.h>
26#include <VBox/vmm/cpum.h>
27#include <VBox/vmm/pdmapi.h>
28#include <VBox/vmm/pgm.h>
29#ifdef VBOX_WITH_NEM_R0
30# include <VBox/vmm/nem.h>
31#endif
32#include <VBox/vmm/em.h>
33#include <VBox/vmm/stam.h>
34#include <VBox/vmm/tm.h>
35#include "VMMInternal.h"
36#include <VBox/vmm/vmcc.h>
37#include <VBox/vmm/gvm.h>
38#ifdef VBOX_WITH_PCI_PASSTHROUGH
39# include <VBox/vmm/pdmpci.h>
40#endif
41#include <VBox/vmm/apic.h>
42
43#include <VBox/vmm/gvmm.h>
44#include <VBox/vmm/gmm.h>
45#include <VBox/vmm/gim.h>
46#include <VBox/intnet.h>
47#include <VBox/vmm/hm.h>
48#include <VBox/param.h>
49#include <VBox/err.h>
50#include <VBox/version.h>
51#include <VBox/log.h>
52
53#include <iprt/asm-amd64-x86.h>
54#include <iprt/assert.h>
55#include <iprt/crc.h>
56#include <iprt/mp.h>
57#include <iprt/once.h>
58#include <iprt/stdarg.h>
59#include <iprt/string.h>
60#include <iprt/thread.h>
61#include <iprt/timer.h>
62#include <iprt/time.h>
63
64#include "dtrace/VBoxVMM.h"
65
66
67#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
68# pragma intrinsic(_AddressOfReturnAddress)
69#endif
70
71#if defined(RT_OS_DARWIN) && ARCH_BITS == 32
72# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
73#endif
74
75
76
77/*********************************************************************************************************************************
78* Defined Constants And Macros *
79*********************************************************************************************************************************/
80/** @def VMM_CHECK_SMAP_SETUP
81 * SMAP check setup. */
82/** @def VMM_CHECK_SMAP_CHECK
83 * Checks that the AC flag is set if SMAP is enabled. If AC is not set,
84 * it will be logged and @a a_BadExpr is executed. */
85/** @def VMM_CHECK_SMAP_CHECK2
86 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it will
87 * be logged, written to the VM's assertion text buffer, and @a a_BadExpr is
88 * executed. */
89#if defined(VBOX_STRICT) || 1
90# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
91# define VMM_CHECK_SMAP_CHECK(a_BadExpr) \
92 do { \
93 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
94 { \
95 RTCCUINTREG fEflCheck = ASMGetFlags(); \
96 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
97 { /* likely */ } \
98 else \
99 { \
100 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
101 a_BadExpr; \
102 } \
103 } \
104 } while (0)
105# define VMM_CHECK_SMAP_CHECK2(a_pGVM, a_BadExpr) \
106 do { \
107 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
108 { \
109 RTCCUINTREG fEflCheck = ASMGetFlags(); \
110 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
111 { /* likely */ } \
112 else if (a_pGVM) \
113 { \
114 SUPR0BadContext((a_pGVM)->pSession, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \
115 RTStrPrintf((a_pGVM)->vmm.s.szRing0AssertMsg1, sizeof((a_pGVM)->vmm.s.szRing0AssertMsg1), \
116 "%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
117 a_BadExpr; \
118 } \
119 else \
120 { \
121 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
122 a_BadExpr; \
123 } \
124 } \
125 } while (0)
126#else
127# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
128# define VMM_CHECK_SMAP_CHECK(a_BadExpr) NOREF(fKernelFeatures)
129# define VMM_CHECK_SMAP_CHECK2(a_pGVM, a_BadExpr) NOREF(fKernelFeatures)
130#endif
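/* Illustrative usage sketch of the macros above (this mirrors the pattern used
   by the entry points later in this file; it is not itself part of the build):
       VMM_CHECK_SMAP_SETUP();
       VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);  // bail out if AC is clear
       ...
       VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);                  // log/record only, keep going
 */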
131
132
133/*********************************************************************************************************************************
134* Internal Functions *
135*********************************************************************************************************************************/
136RT_C_DECLS_BEGIN
137#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
138extern uint64_t __udivdi3(uint64_t, uint64_t);
139extern uint64_t __umoddi3(uint64_t, uint64_t);
140#endif
141RT_C_DECLS_END
142
143
144/*********************************************************************************************************************************
145* Global Variables *
146*********************************************************************************************************************************/
147/** Drag in necessary library bits.
148 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
149PFNRT g_VMMR0Deps[] =
150{
151 (PFNRT)RTCrc32,
152 (PFNRT)RTOnce,
153#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
154 (PFNRT)__udivdi3,
155 (PFNRT)__umoddi3,
156#endif
157 NULL
158};
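/* Note: taking the address of each routine above is what forces the linker to
   keep the corresponding runtime code in VMMR0.r0, so the VBoxDD*R0.r0 modules
   that link against us can resolve those symbols at load time. */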
159
160#ifdef RT_OS_SOLARIS
161/* Dependency information for the native solaris loader. */
162extern "C" { char _depends_on[] = "vboxdrv"; }
163#endif
164
165/** The result of SUPR0GetRawModeUsability(), set by ModuleInit(). */
166int g_rcRawModeUsability = VINF_SUCCESS;
167
168
169/**
170 * Initialize the module.
171 * This is called when we're first loaded.
172 *
173 * @returns 0 on success.
174 * @returns VBox status on failure.
175 * @param hMod Image handle for use in APIs.
176 */
177DECLEXPORT(int) ModuleInit(void *hMod)
178{
179 VMM_CHECK_SMAP_SETUP();
180 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
181
182#ifdef VBOX_WITH_DTRACE_R0
183 /*
184 * The first thing to do is register the static tracepoints.
185 * (Deregistration is automatic.)
186 */
187 int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
188 if (RT_FAILURE(rc2))
189 return rc2;
190#endif
191 LogFlow(("ModuleInit:\n"));
192
193#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
194 /*
195 * Display the CMOS debug code.
196 */
197 ASMOutU8(0x72, 0x03);
198 uint8_t bDebugCode = ASMInU8(0x73);
199 LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
200 RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
201#endif
202
203 /*
204 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
205 */
206 int rc = vmmInitFormatTypes();
207 if (RT_SUCCESS(rc))
208 {
209 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
210 rc = GVMMR0Init();
211 if (RT_SUCCESS(rc))
212 {
213 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
214 rc = GMMR0Init();
215 if (RT_SUCCESS(rc))
216 {
217 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
218 rc = HMR0Init();
219 if (RT_SUCCESS(rc))
220 {
221 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
222 rc = PGMRegisterStringFormatTypes();
223 if (RT_SUCCESS(rc))
224 {
225 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
226#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
227 rc = PGMR0DynMapInit();
228#endif
229 if (RT_SUCCESS(rc))
230 {
231 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
232 rc = IntNetR0Init();
233 if (RT_SUCCESS(rc))
234 {
235#ifdef VBOX_WITH_PCI_PASSTHROUGH
236 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
237 rc = PciRawR0Init();
238#endif
239 if (RT_SUCCESS(rc))
240 {
241 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
242 rc = CPUMR0ModuleInit();
243 if (RT_SUCCESS(rc))
244 {
245#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
246 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
247 rc = vmmR0TripleFaultHackInit();
248 if (RT_SUCCESS(rc))
249#endif
250 {
251 VMM_CHECK_SMAP_CHECK(rc = VERR_VMM_SMAP_BUT_AC_CLEAR);
252 if (RT_SUCCESS(rc))
253 {
254 g_rcRawModeUsability = SUPR0GetRawModeUsability();
255 if (g_rcRawModeUsability != VINF_SUCCESS)
256 SUPR0Printf("VMMR0!ModuleInit: SUPR0GetRawModeUsability -> %Rrc\n",
257 g_rcRawModeUsability);
258 LogFlow(("ModuleInit: returns success\n"));
259 return VINF_SUCCESS;
260 }
261 }
262
263 /*
264 * Bail out.
265 */
266#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
267 vmmR0TripleFaultHackTerm();
268#endif
269 }
270 else
271 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
272#ifdef VBOX_WITH_PCI_PASSTHROUGH
273 PciRawR0Term();
274#endif
275 }
276 else
277 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
278 IntNetR0Term();
279 }
280 else
281 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
282#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
283 PGMR0DynMapTerm();
284#endif
285 }
286 else
287 LogRel(("ModuleInit: PGMR0DynMapInit -> %Rrc\n", rc));
288 PGMDeregisterStringFormatTypes();
289 }
290 else
291 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
292 HMR0Term();
293 }
294 else
295 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
296 GMMR0Term();
297 }
298 else
299 LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
300 GVMMR0Term();
301 }
302 else
303 LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
304 vmmTermFormatTypes();
305 }
306 else
307 LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));
308
309 LogFlow(("ModuleInit: failed %Rrc\n", rc));
310 return rc;
311}
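/* Note: the nested success checks above double as an unwind path: on failure,
   every subsystem that was already initialized (GVMM, GMM, HM, the PGM string
   format types, the dynamic mapping code, IntNet, PciRaw, CPUM) gets its
   matching *Term() call, in reverse order, before the error is returned. */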
312
313
314/**
315 * Terminate the module.
316 * This is called when we're finally unloaded.
317 *
318 * @param hMod Image handle for use in APIs.
319 */
320DECLEXPORT(void) ModuleTerm(void *hMod)
321{
322 NOREF(hMod);
323 LogFlow(("ModuleTerm:\n"));
324
325 /*
326 * Terminate the CPUM module (Local APIC cleanup).
327 */
328 CPUMR0ModuleTerm();
329
330 /*
331 * Terminate the internal network service.
332 */
333 IntNetR0Term();
334
335 /*
336 * PGM (Darwin), HM and PciRaw global cleanup.
337 */
338#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
339 PGMR0DynMapTerm();
340#endif
341#ifdef VBOX_WITH_PCI_PASSTHROUGH
342 PciRawR0Term();
343#endif
344 PGMDeregisterStringFormatTypes();
345 HMR0Term();
346#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
347 vmmR0TripleFaultHackTerm();
348#endif
349
350 /*
351 * Destroy the GMM and GVMM instances.
352 */
353 GMMR0Term();
354 GVMMR0Term();
355
356 vmmTermFormatTypes();
357
358 LogFlow(("ModuleTerm: returns\n"));
359}
360
361
362/**
363 * Initiates the R0 driver for a particular VM instance.
364 *
365 * @returns VBox status code.
366 *
367 * @param pGVM The global (ring-0) VM structure.
368 * @param uSvnRev The SVN revision of the ring-3 part.
369 * @param uBuildType Build type indicator.
370 * @thread EMT(0)
371 */
372static int vmmR0InitVM(PGVM pGVM, uint32_t uSvnRev, uint32_t uBuildType)
373{
374 VMM_CHECK_SMAP_SETUP();
375 VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
376
377 /*
378 * Match the SVN revisions and build type.
379 */
380 if (uSvnRev != VMMGetSvnRev())
381 {
382 LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
383 SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
384 return VERR_VMM_R0_VERSION_MISMATCH;
385 }
386 if (uBuildType != vmmGetBuildType())
387 {
388 LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
389 SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
390 return VERR_VMM_R0_VERSION_MISMATCH;
391 }
392
393 int rc = GVMMR0ValidateGVMandEMT(pGVM, 0 /*idCpu*/);
394 if (RT_FAILURE(rc))
395 return rc;
396
397#ifdef LOG_ENABLED
398 /*
399 * Register the EMT R0 logger instance for VCPU 0.
400 */
401 PVMCPUCC pVCpu = VMCC_GET_CPU_0(pGVM);
402
403 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
404 if (pR0Logger)
405 {
406# if 0 /* testing of the logger. */
407 LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
408 LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
409 LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
410 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
411
412 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
413 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
414 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
415 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
416
417 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
418 LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
419 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
420 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
421
422 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
423 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
424 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
425 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
426 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession);
427 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
428
429 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
430 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
431
432 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
433 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
434 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
435# endif
436 Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pGVM->pSession));
437 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
438 pR0Logger->fRegistered = true;
439 }
440#endif /* LOG_ENABLED */
441
442 /*
443 * Check if the host supports high resolution timers or not.
444 */
445 if ( pGVM->vmm.s.fUsePeriodicPreemptionTimers
446 && !RTTimerCanDoHighResolution())
447 pGVM->vmm.s.fUsePeriodicPreemptionTimers = false;
448
449 /*
450 * Initialize the per VM data for GVMM and GMM.
451 */
452 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
453 rc = GVMMR0InitVM(pGVM);
454 if (RT_SUCCESS(rc))
455 {
456 /*
457 * Init HM, CPUM and PGM (Darwin only).
458 */
459 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
460 rc = HMR0InitVM(pGVM);
461 if (RT_SUCCESS(rc))
462 VMM_CHECK_SMAP_CHECK2(pGVM, rc = VERR_VMM_RING0_ASSERTION); /* CPUR0InitVM will otherwise panic the host */
463 if (RT_SUCCESS(rc))
464 {
465 rc = CPUMR0InitVM(pGVM);
466 if (RT_SUCCESS(rc))
467 {
468 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
469#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
470 rc = PGMR0DynMapInitVM(pGVM);
471#endif
472 if (RT_SUCCESS(rc))
473 {
474 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
475 rc = EMR0InitVM(pGVM);
476 if (RT_SUCCESS(rc))
477 {
478 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
479#ifdef VBOX_WITH_PCI_PASSTHROUGH
480 rc = PciRawR0InitVM(pGVM);
481#endif
482 if (RT_SUCCESS(rc))
483 {
484 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
485 rc = GIMR0InitVM(pGVM);
486 if (RT_SUCCESS(rc))
487 {
488 VMM_CHECK_SMAP_CHECK2(pGVM, rc = VERR_VMM_RING0_ASSERTION);
489 if (RT_SUCCESS(rc))
490 {
491 GVMMR0DoneInitVM(pGVM);
492
493 /*
494 * Collect a bit of info for the VM release log.
495 */
496 pGVM->vmm.s.fIsPreemptPendingApiTrusty = RTThreadPreemptIsPendingTrusty();
497 pGVM->vmm.s.fIsPreemptPossible = RTThreadPreemptIsPossible();
498
499 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
500 return rc;
501 }
502
503 /* bail out */
504 GIMR0TermVM(pGVM);
505 }
506#ifdef VBOX_WITH_PCI_PASSTHROUGH
507 PciRawR0TermVM(pGVM);
508#endif
509 }
510 }
511 }
512 }
513 HMR0TermVM(pGVM);
514 }
515 }
516
517 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
518 return rc;
519}
520
521
522/**
523 * Does EMT specific VM initialization.
524 *
525 * @returns VBox status code.
526 * @param pGVM The ring-0 VM structure.
527 * @param idCpu The EMT that's calling.
528 */
529static int vmmR0InitVMEmt(PGVM pGVM, VMCPUID idCpu)
530{
531 /* Paranoia (caller checked these already). */
532 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
533 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
534
535#ifdef LOG_ENABLED
536 /*
537 * Registration of ring 0 loggers.
538 */
539 PVMCPUCC pVCpu = &pGVM->aCpus[idCpu];
540 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
541 if ( pR0Logger
542 && !pR0Logger->fRegistered)
543 {
544 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession);
545 pR0Logger->fRegistered = true;
546 }
547#endif
548
549 return VINF_SUCCESS;
550}
551
552
553
554/**
555 * Terminates the R0 bits for a particular VM instance.
556 *
557 * This is normally called by ring-3 as part of the VM termination process, but
558 * may alternatively be called during the support driver session cleanup when
559 * the VM object is destroyed (see GVMM).
560 *
561 * @returns VBox status code.
562 *
563 * @param pGVM The global (ring-0) VM structure.
564 * @param idCpu Set to 0 if EMT(0) or NIL_VMCPUID if session cleanup
565 * thread.
566 * @thread EMT(0) or session clean up thread.
567 */
568VMMR0_INT_DECL(int) VMMR0TermVM(PGVM pGVM, VMCPUID idCpu)
569{
570 /*
571 * Check EMT(0) claim if we're called from userland.
572 */
573 if (idCpu != NIL_VMCPUID)
574 {
575 AssertReturn(idCpu == 0, VERR_INVALID_CPU_ID);
576 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
577 if (RT_FAILURE(rc))
578 return rc;
579 }
580
581#ifdef VBOX_WITH_PCI_PASSTHROUGH
582 PciRawR0TermVM(pGVM);
583#endif
584
585 /*
586 * Tell GVMM what we're up to and check that we only do this once.
587 */
588 if (GVMMR0DoingTermVM(pGVM))
589 {
590 GIMR0TermVM(pGVM);
591
592 /** @todo I wish to call PGMR0PhysFlushHandyPages(pGVM, &pGVM->aCpus[idCpu])
593 * here to make sure we don't leak any shared pages if we crash... */
594#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
595 PGMR0DynMapTermVM(pGVM);
596#endif
597 HMR0TermVM(pGVM);
598 }
599
600 /*
601 * Deregister the logger.
602 */
603 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession);
604 return VINF_SUCCESS;
605}
606
607
608/**
609 * An interrupt or unhalt force flag is set, deal with it.
610 *
611 * @returns VINF_SUCCESS (or VINF_EM_HALT).
612 * @param pVCpu The cross context virtual CPU structure.
613 * @param uMWait Result from EMMonitorWaitIsActive().
614 * @param enmInterruptibility Guest CPU interruptibility level.
615 */
616static int vmmR0DoHaltInterrupt(PVMCPUCC pVCpu, unsigned uMWait, CPUMINTERRUPTIBILITY enmInterruptibility)
617{
618 Assert(!TRPMHasTrap(pVCpu));
619 Assert( enmInterruptibility > CPUMINTERRUPTIBILITY_INVALID
620 && enmInterruptibility < CPUMINTERRUPTIBILITY_END);
621
622 /*
623 * Pending interrupts w/o any SMIs or NMIs? That's the usual case.
624 */
625 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
626 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_INTERRUPT_NMI))
627 {
628 if (enmInterruptibility <= CPUMINTERRUPTIBILITY_UNRESTRAINED)
629 {
630 uint8_t u8Interrupt = 0;
631 int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
632 Log(("vmmR0DoHaltInterrupt: CPU%d u8Interrupt=%d (%#x) rc=%Rrc\n", pVCpu->idCpu, u8Interrupt, u8Interrupt, rc));
633 if (RT_SUCCESS(rc))
634 {
635 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
636
637 rc = TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT);
638 AssertRCSuccess(rc);
639 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
640 return rc;
641 }
642 }
643 }
644 /*
645 * SMI is not implemented yet, at least not here.
646 */
647 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI))
648 {
649 return VINF_EM_HALT;
650 }
651 /*
652 * NMI.
653 */
654 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
655 {
656 if (enmInterruptibility < CPUMINTERRUPTIBILITY_NMI_INHIBIT)
657 {
658 /** @todo later. */
659 return VINF_EM_HALT;
660 }
661 }
662 /*
663 * Nested-guest virtual interrupt.
664 */
665 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
666 {
667 if (enmInterruptibility < CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED)
668 {
669 /** @todo NSTVMX: NSTSVM: Remember, we might have to check and perform VM-exits
670 * here before injecting the virtual interrupt. See emR3ForcedActions
671 * for details. */
672 return VINF_EM_HALT;
673 }
674 }
675
676 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UNHALT))
677 {
678 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
679 return VINF_SUCCESS;
680 }
681 if (uMWait > 1)
682 {
683 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
684 return VINF_SUCCESS;
685 }
686
687 return VINF_EM_HALT;
688}
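/* Summary of the cases above: a pending PIC/APIC interrupt is injected via
   TRPM when the guest is interruptible; pending SMIs are not handled here and
   force VINF_EM_HALT; deliverable NMIs and nested-guest virtual interrupts are
   likewise left to ring-3 for now; otherwise a set VMCPU_FF_UNHALT flag or an
   armed monitored wait (uMWait > 1) resumes execution with VINF_SUCCESS. */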
689
690
691/**
692 * This does one round of vmR3HaltGlobal1Halt().
693 *
694 * The rationale here is that we'll reduce latency in interrupt situations if we
695 * don't go to ring-3 immediately on a VINF_EM_HALT (guest executed HLT or
696 * MWAIT), but do one round of blocking here instead and hope the interrupt is
697 * raised in the meanwhile.
698 *
699 * If we go to ring-3 we'll quit the inner HM/NEM loop in EM and end up in the
700 * outer loop, which will then call VMR3WaitHalted() and that in turn will do a
701 * ring-0 call (unless we're too close to a timer event). When the interrupt
702 * wakes us up, we'll return from ring-0 and EM will by instinct do a
703 * rescheduling (because of raw-mode) before it resumes the HM/NEM loop and gets
704 * back to VMMR0EntryFast().
705 *
706 * @returns VINF_SUCCESS or VINF_EM_HALT.
707 * @param pGVM The ring-0 VM structure.
708 * @param pGVCpu The ring-0 virtual CPU structure.
709 *
710 * @todo r=bird: All the blocking/waiting and EMT management should move out of
711 * the VM module, probably to VMM. Then this would be more weird wrt
712 * parameters and statistics.
713 */
714static int vmmR0DoHalt(PGVM pGVM, PGVMCPU pGVCpu)
715{
716 /*
717 * Do spin stat historization.
718 */
719 if (++pGVCpu->vmm.s.cR0Halts & 0xff)
720 { /* likely */ }
721 else if (pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3)
722 {
723 pGVCpu->vmm.s.cR0HaltsSucceeded = 2;
724 pGVCpu->vmm.s.cR0HaltsToRing3 = 0;
725 }
726 else
727 {
728 pGVCpu->vmm.s.cR0HaltsSucceeded = 0;
729 pGVCpu->vmm.s.cR0HaltsToRing3 = 2;
730 }
731
732 /*
733 * Flags that make us go to ring-3.
734 */
735 uint32_t const fVmFFs = VM_FF_TM_VIRTUAL_SYNC | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA
736 | VM_FF_DBGF | VM_FF_REQUEST | VM_FF_CHECK_VM_STATE
737 | VM_FF_RESET | VM_FF_EMT_RENDEZVOUS | VM_FF_PGM_NEED_HANDY_PAGES
738 | VM_FF_PGM_NO_MEMORY | VM_FF_REM_HANDLER_NOTIFY | VM_FF_DEBUG_SUSPEND;
739 uint64_t const fCpuFFs = VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_IEM
740 | VMCPU_FF_REQUEST | VMCPU_FF_DBGF | VMCPU_FF_HM_UPDATE_CR3
741 | VMCPU_FF_HM_UPDATE_PAE_PDPES | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
742 | VMCPU_FF_TO_R3 | VMCPU_FF_IOM;
743
744 /*
745 * Check preconditions.
746 */
747 unsigned const uMWait = EMMonitorWaitIsActive(pGVCpu);
748 CPUMINTERRUPTIBILITY const enmInterruptibility = CPUMGetGuestInterruptibility(pGVCpu);
749 if ( pGVCpu->vmm.s.fMayHaltInRing0
750 && !TRPMHasTrap(pGVCpu)
751 && ( enmInterruptibility == CPUMINTERRUPTIBILITY_UNRESTRAINED
752 || uMWait > 1))
753 {
754 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
755 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
756 {
757 /*
758 * Interrupts pending already?
759 */
760 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
761 APICUpdatePendingInterrupts(pGVCpu);
762
763 /*
764 * Flags that wake up from the halted state.
765 */
766 uint64_t const fIntMask = VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_NESTED_GUEST
767 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT;
768
769 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
770 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
771 ASMNopPause();
772
773 /*
774 * Check out how long till the next timer event.
775 */
776 uint64_t u64Delta;
777 uint64_t u64GipTime = TMTimerPollGIP(pGVM, pGVCpu, &u64Delta);
778
779 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
780 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
781 {
782 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
783 APICUpdatePendingInterrupts(pGVCpu);
784
785 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
786 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
787
788 /*
789 * Wait if there is enough time to the next timer event.
790 */
791 if (u64Delta >= pGVCpu->vmm.s.cNsSpinBlockThreshold)
792 {
793 /* If there are few other CPU cores around, we will procrastinate a
794 little before going to sleep, hoping for some device raising an
795 interrupt or similar. Though, the best thing here would be to
796 dynamically adjust the spin count according to its usefulness or
797 something... */
798 if ( pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3
799 && RTMpGetOnlineCount() >= 4)
800 {
801 /** @todo Figure out how we can skip this if it hasn't helped recently...
802 * @bugref{9172#c12} */
803 uint32_t cSpinLoops = 42;
804 while (cSpinLoops-- > 0)
805 {
806 ASMNopPause();
807 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
808 APICUpdatePendingInterrupts(pGVCpu);
809 ASMNopPause();
810 if (VM_FF_IS_ANY_SET(pGVM, fVmFFs))
811 {
812 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
813 return VINF_EM_HALT;
814 }
815 ASMNopPause();
816 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
817 {
818 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin);
819 return VINF_EM_HALT;
820 }
821 ASMNopPause();
822 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
823 {
824 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromSpin);
825 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
826 }
827 ASMNopPause();
828 }
829 }
830
831 /* Block. We have to set the state to VMCPUSTATE_STARTED_HALTED here so ring-3
832 knows when to notify us (cannot access VMINTUSERPERVMCPU::fWait from here). */
833 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED_HALTED, VMCPUSTATE_STARTED);
834 uint64_t const u64StartSchedHalt = RTTimeNanoTS();
835 int rc = GVMMR0SchedHalt(pGVM, pGVCpu, u64GipTime);
836 uint64_t const u64EndSchedHalt = RTTimeNanoTS();
837 uint64_t const cNsElapsedSchedHalt = u64EndSchedHalt - u64StartSchedHalt;
838 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
839 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlock, cNsElapsedSchedHalt);
840 if ( rc == VINF_SUCCESS
841 || rc == VERR_INTERRUPTED)
842
843 {
844 /* Keep some stats like ring-3 does. */
845 int64_t const cNsOverslept = u64EndSchedHalt - u64GipTime;
846 if (cNsOverslept > 50000)
847 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOverslept, cNsOverslept);
848 else if (cNsOverslept < -50000)
849 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockInsomnia, cNsElapsedSchedHalt);
850 else
851 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOnTime, cNsElapsedSchedHalt);
852
853 /*
854 * Recheck whether we can resume execution or have to go to ring-3.
855 */
856 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs)
857 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs))
858 {
859 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC))
860 APICUpdatePendingInterrupts(pGVCpu);
861 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask))
862 {
863 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromBlock);
864 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility);
865 }
866 }
867 }
868 }
869 }
870 }
871 }
872 return VINF_EM_HALT;
873}
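/* In short: vmmR0DoHalt() re-checks the ring-3 force flags and pending
   interrupts, optionally spins a handful of rounds when enough host CPUs are
   online, and only then blocks in GVMMR0SchedHalt() until the GIP deadline
   returned by TMTimerPollGIP(). Anything it cannot resolve in ring-0 ends in
   VINF_EM_HALT, i.e. the EMT takes the full halt path in ring-3. */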
874
875
876/**
877 * VMM ring-0 thread-context callback.
878 *
879 * This does common HM state updating and calls the HM-specific thread-context
880 * callback.
881 *
882 * @param enmEvent The thread-context event.
883 * @param pvUser Opaque pointer to the VMCPU.
884 *
885 * @thread EMT(pvUser)
886 */
887static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
888{
889 PVMCPUCC pVCpu = (PVMCPUCC)pvUser;
890
891 switch (enmEvent)
892 {
893 case RTTHREADCTXEVENT_IN:
894 {
895 /*
896 * Linux may call us with preemption enabled (really!) but technically we
897 * cannot get preempted here, otherwise we end up in an infinite recursion
898 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
899 * ad infinitum). Let's just disable preemption for now...
900 */
901 /** @todo r=bird: I don't believe the above. The linux code is clearly enabling
902 * preemption after doing the callout (one or two functions up the
903 * call chain). */
904 /** @todo r=ramshankar: See @bugref{5313#c30}. */
905 RTTHREADPREEMPTSTATE ParanoidPreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
906 RTThreadPreemptDisable(&ParanoidPreemptState);
907
908 /* We need to update the VCPU <-> host CPU mapping. */
909 RTCPUID idHostCpu;
910 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
911 pVCpu->iHostCpuSet = iHostCpuSet;
912 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
913
914 /* In the very unlikely event that the GIP delta for the CPU we're
915 rescheduled onto needs calculating, try to force a return to ring-3.
916 We unfortunately cannot do the measurements right here. */
917 if (RT_UNLIKELY(SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
918 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
919
920 /* Invoke the HM-specific thread-context callback. */
921 HMR0ThreadCtxCallback(enmEvent, pvUser);
922
923 /* Restore preemption. */
924 RTThreadPreemptRestore(&ParanoidPreemptState);
925 break;
926 }
927
928 case RTTHREADCTXEVENT_OUT:
929 {
930 /* Invoke the HM-specific thread-context callback. */
931 HMR0ThreadCtxCallback(enmEvent, pvUser);
932
933 /*
934 * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
935 * have the same host CPU associated with them.
936 */
937 pVCpu->iHostCpuSet = UINT32_MAX;
938 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
939 break;
940 }
941
942 default:
943 /* Invoke the HM-specific thread-context callback. */
944 HMR0ThreadCtxCallback(enmEvent, pvUser);
945 break;
946 }
947}
948
949
950/**
951 * Creates thread switching hook for the current EMT thread.
952 *
953 * This is called by GVMMR0CreateVM and GVMMR0RegisterVCpu. If the host
954 * platform does not implement switcher hooks, no hooks will be created and the
955 * member is set to NIL_RTTHREADCTXHOOK.
956 *
957 * @returns VBox status code.
958 * @param pVCpu The cross context virtual CPU structure.
959 * @thread EMT(pVCpu)
960 */
961VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPUCC pVCpu)
962{
963 VMCPU_ASSERT_EMT(pVCpu);
964 Assert(pVCpu->vmm.s.hCtxHook == NIL_RTTHREADCTXHOOK);
965
966#if 1 /* To disable this stuff change to zero. */
967 int rc = RTThreadCtxHookCreate(&pVCpu->vmm.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
968 if (RT_SUCCESS(rc))
969 return rc;
970#else
971 RT_NOREF(vmmR0ThreadCtxCallback);
972 int rc = VERR_NOT_SUPPORTED;
973#endif
974
975 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
976 if (rc == VERR_NOT_SUPPORTED)
977 return VINF_SUCCESS;
978
979 LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
980 return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
981}
982
983
984/**
985 * Destroys the thread switching hook for the specified VCPU.
986 *
987 * @param pVCpu The cross context virtual CPU structure.
988 * @remarks Can be called from any thread.
989 */
990VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPUCC pVCpu)
991{
992 int rc = RTThreadCtxHookDestroy(pVCpu->vmm.s.hCtxHook);
993 AssertRC(rc);
994 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
995}
996
997
998/**
999 * Disables the thread switching hook for this VCPU (if we got one).
1000 *
1001 * @param pVCpu The cross context virtual CPU structure.
1002 * @thread EMT(pVCpu)
1003 *
1004 * @remarks This also clears VMCPU::idHostCpu, so the mapping is invalid after
1005 * this call. This means you have to be careful with what you do!
1006 */
1007VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPUCC pVCpu)
1008{
1009 /*
1010 * Clear the VCPU <-> host CPU mapping as we've left HM context.
1011 * @bugref{7726#c19} explains the need for this trick:
1012 *
1013 * hmR0VmxCallRing3Callback/hmR0SvmCallRing3Callback &
1014 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disables context hooks during
1015 * longjmp & normal return to ring-3, which opens a window where we may be
1016 * rescheduled without changing VMCPUID::idHostCpu and cause confusion if
1017 * the CPU starts executing a different EMT. Both functions first disable
1018 * preemption and then call HMR0LeaveCpu, which invalidates idHostCpu, leaving
1019 * an opening for getting preempted.
1020 */
1021 /** @todo Make HM not need this API! Then we could leave the hooks enabled
1022 * all the time. */
1023 /** @todo move this into the context hook disabling if(). */
1024 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1025
1026 /*
1027 * Disable the context hook, if we got one.
1028 */
1029 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1030 {
1031 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1032 int rc = RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
1033 AssertRC(rc);
1034 }
1035}
1036
1037
1038/**
1039 * Internal version of VMMR0ThreadCtxHookIsEnabled.
1040 *
1041 * @returns true if registered, false otherwise.
1042 * @param pVCpu The cross context virtual CPU structure.
1043 */
1044DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1045{
1046 return RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook);
1047}
1048
1049
1050/**
1051 * Whether thread-context hooks are registered for this VCPU.
1052 *
1053 * @returns true if registered, false otherwise.
1054 * @param pVCpu The cross context virtual CPU structure.
1055 */
1056VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1057{
1058 return vmmR0ThreadCtxHookIsEnabled(pVCpu);
1059}
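/* Hook lifecycle as used in this file: VMMR0ThreadCtxHookCreateForEmt() creates
   the hook when an EMT is registered, VMMR0EntryFast() enables it just before
   entering HM context, VMMR0ThreadCtxHookDisable() (or HM itself) disables it
   again before going back to ring-3, and VMMR0ThreadCtxHookDestroyForEmt()
   finally frees it. */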
1060
1061
1062#ifdef VBOX_WITH_STATISTICS
1063/**
1064 * Record return code statistics.
1065 * @param pVM The cross context VM structure.
1066 * @param pVCpu The cross context virtual CPU structure.
1067 * @param rc The status code.
1068 */
1069static void vmmR0RecordRC(PVMCC pVM, PVMCPUCC pVCpu, int rc)
1070{
1071 /*
1072 * Collect statistics.
1073 */
1074 switch (rc)
1075 {
1076 case VINF_SUCCESS:
1077 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
1078 break;
1079 case VINF_EM_RAW_INTERRUPT:
1080 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
1081 break;
1082 case VINF_EM_RAW_INTERRUPT_HYPER:
1083 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
1084 break;
1085 case VINF_EM_RAW_GUEST_TRAP:
1086 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
1087 break;
1088 case VINF_EM_RAW_RING_SWITCH:
1089 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
1090 break;
1091 case VINF_EM_RAW_RING_SWITCH_INT:
1092 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
1093 break;
1094 case VINF_EM_RAW_STALE_SELECTOR:
1095 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
1096 break;
1097 case VINF_EM_RAW_IRET_TRAP:
1098 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
1099 break;
1100 case VINF_IOM_R3_IOPORT_READ:
1101 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
1102 break;
1103 case VINF_IOM_R3_IOPORT_WRITE:
1104 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
1105 break;
1106 case VINF_IOM_R3_IOPORT_COMMIT_WRITE:
1107 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOCommitWrite);
1108 break;
1109 case VINF_IOM_R3_MMIO_READ:
1110 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
1111 break;
1112 case VINF_IOM_R3_MMIO_WRITE:
1113 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
1114 break;
1115 case VINF_IOM_R3_MMIO_COMMIT_WRITE:
1116 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOCommitWrite);
1117 break;
1118 case VINF_IOM_R3_MMIO_READ_WRITE:
1119 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
1120 break;
1121 case VINF_PATM_HC_MMIO_PATCH_READ:
1122 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
1123 break;
1124 case VINF_PATM_HC_MMIO_PATCH_WRITE:
1125 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
1126 break;
1127 case VINF_CPUM_R3_MSR_READ:
1128 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
1129 break;
1130 case VINF_CPUM_R3_MSR_WRITE:
1131 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
1132 break;
1133 case VINF_EM_RAW_EMULATE_INSTR:
1134 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
1135 break;
1136 case VINF_PATCH_EMULATE_INSTR:
1137 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
1138 break;
1139 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
1140 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
1141 break;
1142 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
1143 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
1144 break;
1145 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
1146 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
1147 break;
1148 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
1149 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
1150 break;
1151 case VINF_CSAM_PENDING_ACTION:
1152 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
1153 break;
1154 case VINF_PGM_SYNC_CR3:
1155 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
1156 break;
1157 case VINF_PATM_PATCH_INT3:
1158 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
1159 break;
1160 case VINF_PATM_PATCH_TRAP_PF:
1161 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
1162 break;
1163 case VINF_PATM_PATCH_TRAP_GP:
1164 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
1165 break;
1166 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
1167 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
1168 break;
1169 case VINF_EM_RESCHEDULE_REM:
1170 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
1171 break;
1172 case VINF_EM_RAW_TO_R3:
1173 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Total);
1174 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
1175 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
1176 else if (VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
1177 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
1178 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_QUEUES))
1179 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
1180 else if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1181 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
1182 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
1183 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
1184 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER))
1185 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
1186 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1187 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
1188 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3))
1189 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3FF);
1190 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1191 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iem);
1192 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1193 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iom);
1194 else
1195 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
1196 break;
1197
1198 case VINF_EM_RAW_TIMER_PENDING:
1199 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
1200 break;
1201 case VINF_EM_RAW_INTERRUPT_PENDING:
1202 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
1203 break;
1204 case VINF_VMM_CALL_HOST:
1205 switch (pVCpu->vmm.s.enmCallRing3Operation)
1206 {
1207 case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
1208 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMCritSectEnter);
1209 break;
1210 case VMMCALLRING3_PDM_LOCK:
1211 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
1212 break;
1213 case VMMCALLRING3_PGM_POOL_GROW:
1214 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
1215 break;
1216 case VMMCALLRING3_PGM_LOCK:
1217 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
1218 break;
1219 case VMMCALLRING3_PGM_MAP_CHUNK:
1220 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
1221 break;
1222 case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
1223 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
1224 break;
1225 case VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS:
1226 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallRemReplay);
1227 break;
1228 case VMMCALLRING3_VMM_LOGGER_FLUSH:
1229 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
1230 break;
1231 case VMMCALLRING3_VM_SET_ERROR:
1232 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
1233 break;
1234 case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
1235 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
1236 break;
1237 case VMMCALLRING3_VM_R0_ASSERTION:
1238 default:
1239 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
1240 break;
1241 }
1242 break;
1243 case VINF_PATM_DUPLICATE_FUNCTION:
1244 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
1245 break;
1246 case VINF_PGM_CHANGE_MODE:
1247 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
1248 break;
1249 case VINF_PGM_POOL_FLUSH_PENDING:
1250 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
1251 break;
1252 case VINF_EM_PENDING_REQUEST:
1253 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
1254 break;
1255 case VINF_EM_HM_PATCH_TPR_INSTR:
1256 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
1257 break;
1258 default:
1259 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
1260 break;
1261 }
1262}
1263#endif /* VBOX_WITH_STATISTICS */
1264
1265
1266/**
1267 * The Ring 0 entry point, called by the fast-ioctl path.
1268 *
1269 * @param pGVM The global (ring-0) VM structure.
1270 * @param pVMIgnored The cross context VM structure. The return code is
1271 * stored in pVM->vmm.s.iLastGZRc.
1272 * @param idCpu The Virtual CPU ID of the calling EMT.
1273 * @param enmOperation Which operation to execute.
1274 * @remarks Assume called with interrupts _enabled_.
1275 */
1276VMMR0DECL(void) VMMR0EntryFast(PGVM pGVM, PVMCC pVMIgnored, VMCPUID idCpu, VMMR0OPERATION enmOperation)
1277{
1278 RT_NOREF(pVMIgnored);
1279
1280 /*
1281 * Validation.
1282 */
1283 if ( idCpu < pGVM->cCpus
1284 && pGVM->cCpus == pGVM->cCpusUnsafe)
1285 { /*likely*/ }
1286 else
1287 {
1288 SUPR0Printf("VMMR0EntryFast: Bad idCpu=%#x cCpus=%#x cCpusUnsafe=%#x\n", idCpu, pGVM->cCpus, pGVM->cCpusUnsafe);
1289 return;
1290 }
1291
1292 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1293 RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf();
1294 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
1295 && pGVCpu->hNativeThreadR0 == hNativeThread))
1296 { /* likely */ }
1297 else
1298 {
1299 SUPR0Printf("VMMR0EntryFast: Bad thread idCpu=%#x hNativeSelf=%p pGVCpu->hEmt=%p pGVCpu->hNativeThreadR0=%p\n",
1300 idCpu, hNativeThread, pGVCpu->hEMT, pGVCpu->hNativeThreadR0);
1301 return;
1302 }
1303
1304 /*
1305 * SMAP fun.
1306 */
1307 VMM_CHECK_SMAP_SETUP();
1308 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1309
1310 /*
1311 * Perform requested operation.
1312 */
1313 switch (enmOperation)
1314 {
1315 /*
1316 * Run guest code using the available hardware acceleration technology.
1317 */
1318 case VMMR0_DO_HM_RUN:
1319 {
1320 for (;;) /* hlt loop */
1321 {
1322 /*
1323 * Disable preemption.
1324 */
1325 Assert(!vmmR0ThreadCtxHookIsEnabled(pGVCpu));
1326 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1327 RTThreadPreemptDisable(&PreemptState);
1328
1329 /*
1330 * Get the host CPU identifiers, make sure they are valid and that
1331 * we've got a TSC delta for the CPU.
1332 */
1333 RTCPUID idHostCpu;
1334 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1335 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1336 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1337 {
1338 pGVCpu->iHostCpuSet = iHostCpuSet;
1339 ASMAtomicWriteU32(&pGVCpu->idHostCpu, idHostCpu);
1340
1341 /*
1342 * Update the periodic preemption timer if it's active.
1343 */
1344 if (pGVM->vmm.s.fUsePeriodicPreemptionTimers)
1345 GVMMR0SchedUpdatePeriodicPreemptionTimer(pGVM, pGVCpu->idHostCpu, TMCalcHostTimerFrequency(pGVM, pGVCpu));
1346 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1347
1348#ifdef VMM_R0_TOUCH_FPU
1349 /*
1350 * Make sure we've got the FPU state loaded so we don't need to clear
1351 * CR0.TS and get out of sync with the host kernel when loading the guest
1352 * FPU state. @ref sec_cpum_fpu (CPUM.cpp) and @bugref{4053}.
1353 */
1354 CPUMR0TouchHostFpu();
1355#endif
1356 int rc;
1357 bool fPreemptRestored = false;
1358 if (!HMR0SuspendPending())
1359 {
1360 /*
1361 * Enable the context switching hook.
1362 */
1363 if (pGVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1364 {
1365 Assert(!RTThreadCtxHookIsEnabled(pGVCpu->vmm.s.hCtxHook));
1366 int rc2 = RTThreadCtxHookEnable(pGVCpu->vmm.s.hCtxHook); AssertRC(rc2);
1367 }
1368
1369 /*
1370 * Enter HM context.
1371 */
1372 rc = HMR0Enter(pGVCpu);
1373 if (RT_SUCCESS(rc))
1374 {
1375 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED_HM);
1376
1377 /*
1378 * When preemption hooks are in place, enable preemption now that
1379 * we're in HM context.
1380 */
1381 if (vmmR0ThreadCtxHookIsEnabled(pGVCpu))
1382 {
1383 fPreemptRestored = true;
1384 RTThreadPreemptRestore(&PreemptState);
1385 }
1386
1387 /*
1388 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
1389 */
1390 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1391 rc = vmmR0CallRing3SetJmp(&pGVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pGVM, pGVCpu);
1392 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1393
1394 /*
1395 * Assert sanity on the way out. Using manual assertion code here as normal
1396 * assertions are going to panic the host since we're outside the setjmp/longjmp zone.
1397 */
1398 if (RT_UNLIKELY( VMCPU_GET_STATE(pGVCpu) != VMCPUSTATE_STARTED_HM
1399 && RT_SUCCESS_NP(rc) && rc != VINF_VMM_CALL_HOST ))
1400 {
1401 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1402 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
1403 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pGVCpu), VMCPUSTATE_STARTED_HM);
1404 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
1405 }
1406 /** @todo Get rid of this. HM shouldn't disable the context hook. */
1407 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pGVCpu)))
1408 {
1409 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1410 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2),
1411 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pGVCpu, pGVCpu->idCpu, rc);
1412 rc = VERR_INVALID_STATE;
1413 }
1414
1415 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED);
1416 }
1417 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
1418
1419 /*
1420 * Invalidate the host CPU identifiers before we disable the context
1421 * hook / restore preemption.
1422 */
1423 pGVCpu->iHostCpuSet = UINT32_MAX;
1424 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1425
1426 /*
1427 * Disable context hooks. Due to unresolved cleanup issues, we
1428 * cannot leave the hooks enabled when we return to ring-3.
1429 *
1430 * Note! At the moment HM may also have disabled the hook
1431 * when we get here, but the IPRT API handles that.
1432 */
1433 if (pGVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1434 {
1435 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1436 RTThreadCtxHookDisable(pGVCpu->vmm.s.hCtxHook);
1437 }
1438 }
1439 /*
1440 * The system is about to go into suspend mode; go back to ring 3.
1441 */
1442 else
1443 {
1444 rc = VINF_EM_RAW_INTERRUPT;
1445 pGVCpu->iHostCpuSet = UINT32_MAX;
1446 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1447 }
1448
1449 /** @todo When HM stops messing with the context hook state, we'll disable
1450 * preemption again before the RTThreadCtxHookDisable call. */
1451 if (!fPreemptRestored)
1452 RTThreadPreemptRestore(&PreemptState);
1453
1454 pGVCpu->vmm.s.iLastGZRc = rc;
1455
1456 /* Fire dtrace probe and collect statistics. */
1457 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
1458#ifdef VBOX_WITH_STATISTICS
1459 vmmR0RecordRC(pGVM, pGVCpu, rc);
1460#endif
1461#if 1
1462 /*
1463 * If this is a halt.
1464 */
1465 if (rc != VINF_EM_HALT)
1466 { /* we're not in a hurry for a HLT, so prefer this path */ }
1467 else
1468 {
1469 pGVCpu->vmm.s.iLastGZRc = rc = vmmR0DoHalt(pGVM, pGVCpu);
1470 if (rc == VINF_SUCCESS)
1471 {
1472 pGVCpu->vmm.s.cR0HaltsSucceeded++;
1473 continue;
1474 }
1475 pGVCpu->vmm.s.cR0HaltsToRing3++;
1476 }
1477#endif
1478 }
1479 /*
1480 * Invalid CPU set index or TSC delta in need of measuring.
1481 */
1482 else
1483 {
1484 pGVCpu->iHostCpuSet = UINT32_MAX;
1485 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID);
1486 RTThreadPreemptRestore(&PreemptState);
1487 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1488 {
1489 int rc = SUPR0TscDeltaMeasureBySetIndex(pGVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1490 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1491 0 /*default cTries*/);
1492 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1493 pGVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1494 else
1495 pGVCpu->vmm.s.iLastGZRc = rc;
1496 }
1497 else
1498 pGVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1499 }
1500 break;
1501
1502 } /* halt loop. */
1503 break;
1504 }
1505
1506#ifdef VBOX_WITH_NEM_R0
1507# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
1508 case VMMR0_DO_NEM_RUN:
1509 {
1510 /*
1511 * Setup the longjmp machinery and execute guest code (calls NEMR0RunGuestCode).
1512 */
1513 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1514# ifdef VBOXSTRICTRC_STRICT_ENABLED
1515 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm.s.CallRing3JmpBufR0, (PFNVMMR0SETJMP2)NEMR0RunGuestCode, pGVM, idCpu);
1516# else
1517 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm.s.CallRing3JmpBufR0, NEMR0RunGuestCode, pGVM, idCpu);
1518# endif
1519 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1520 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC);
1521
1522 pGVCpu->vmm.s.iLastGZRc = rc;
1523
1524 /*
1525 * Fire dtrace probe and collect statistics.
1526 */
1527 VBOXVMM_R0_VMM_RETURN_TO_RING3_NEM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc);
1528# ifdef VBOX_WITH_STATISTICS
1529 vmmR0RecordRC(pGVM, pGVCpu, rc);
1530# endif
1531 break;
1532 }
1533# endif
1534#endif
1535
1536 /*
1537 * For profiling.
1538 */
1539 case VMMR0_DO_NOP:
1540 pGVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
1541 break;
1542
1543 /*
1544 * Shouldn't happen.
1545 */
1546 default:
1547 AssertMsgFailed(("%#x\n", enmOperation));
1548 pGVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
1549 break;
1550 }
1551 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1552}
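/* The VMMR0_DO_HM_RUN case above is the hot path: with preemption disabled it
   resolves the current host CPU, enables the thread-context hook, enters HM and
   runs guest code inside the ring-0 setjmp frame; on VINF_EM_HALT it may loop
   via vmmR0DoHalt() instead of taking the comparatively expensive trip back to
   ring-3. */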
1553
1554
1555/**
1556 * Validates a session or VM session argument.
1557 *
1558 * @returns true / false accordingly.
1559 * @param pGVM The global (ring-0) VM structure.
1560 * @param pClaimedSession The session claim to validate.
1561 * @param pSession The session argument.
1562 */
1563DECLINLINE(bool) vmmR0IsValidSession(PGVM pGVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
1564{
1565 /* This must be set! */
1566 if (!pSession)
1567 return false;
1568
1569 /* Only one out of the two. */
1570 if (pGVM && pClaimedSession)
1571 return false;
1572 if (pGVM)
1573 pClaimedSession = pGVM->pSession;
1574 return pClaimedSession == pSession;
1575}
1576
1577
1578/**
1579 * VMMR0EntryEx worker function, either called directly or, whenever possible,
1580 * called through a longjmp so we can exit safely on failure.
1581 *
1582 * @returns VBox status code.
1583 * @param pGVM The global (ring-0) VM structure.
1584 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1585 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
1586 * @param enmOperation Which operation to execute.
1587 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
1588 * The support driver validates this if it's present.
1589 * @param u64Arg Some simple constant argument.
1590 * @param pSession The session of the caller.
1591 *
1592 * @remarks Assume called with interrupts _enabled_.
1593 */
1594static int vmmR0EntryExWorker(PGVM pGVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
1595 PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
1596{
1597 /*
1598 * Validate pGVM and idCpu for consistency and validity.
1599 */
1600 if (pGVM != NULL)
1601 {
1602 if (RT_LIKELY(((uintptr_t)pGVM & PAGE_OFFSET_MASK) == 0))
1603 { /* likely */ }
1604 else
1605 {
1606 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p! (op=%d)\n", pGVM, enmOperation);
1607 return VERR_INVALID_POINTER;
1608 }
1609
1610 if (RT_LIKELY(idCpu == NIL_VMCPUID || idCpu < pGVM->cCpus))
1611 { /* likely */ }
1612 else
1613 {
1614 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu %#x (cCpus=%#x)\n", idCpu, pGVM->cCpus);
1615 return VERR_INVALID_PARAMETER;
1616 }
1617
1618 if (RT_LIKELY( pGVM->enmVMState >= VMSTATE_CREATING
1619 && pGVM->enmVMState <= VMSTATE_TERMINATED
1620 && pGVM->pSession == pSession
1621 && pGVM->pSelf == pGVM))
1622 { /* likely */ }
1623 else
1624 {
1625 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p:{.enmVMState=%d, .cCpus=%#x, .pSession=%p(==%p), .pSelf=%p(==%p)}! (op=%d)\n",
1626 pGVM, pGVM->enmVMState, pGVM->cCpus, pGVM->pSession, pSession, pGVM->pSelf, pGVM, enmOperation);
1627 return VERR_INVALID_POINTER;
1628 }
1629 }
1630 else if (RT_LIKELY(idCpu == NIL_VMCPUID))
1631 { /* likely */ }
1632 else
1633 {
1634 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
1635 return VERR_INVALID_PARAMETER;
1636 }
1637
1638 /*
1639 * SMAP fun.
1640 */
1641 VMM_CHECK_SMAP_SETUP();
1642 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1643
1644 /*
1645 * Process the request.
1646 */
1647 int rc;
1648 switch (enmOperation)
1649 {
1650 /*
1651 * GVM requests
1652 */
1653 case VMMR0_DO_GVMM_CREATE_VM:
1654 if (pGVM == NULL && u64Arg == 0 && idCpu == NIL_VMCPUID)
1655 rc = GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr, pSession);
1656 else
1657 rc = VERR_INVALID_PARAMETER;
1658 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1659 break;
1660
1661 case VMMR0_DO_GVMM_DESTROY_VM:
1662 if (pReqHdr == NULL && u64Arg == 0)
1663 rc = GVMMR0DestroyVM(pGVM);
1664 else
1665 rc = VERR_INVALID_PARAMETER;
1666 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1667 break;
1668
1669 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1670 if (pGVM != NULL)
1671 rc = GVMMR0RegisterVCpu(pGVM, idCpu);
1672 else
1673 rc = VERR_INVALID_PARAMETER;
1674 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1675 break;
1676
1677 case VMMR0_DO_GVMM_DEREGISTER_VMCPU:
1678 if (pGVM != NULL)
1679 rc = GVMMR0DeregisterVCpu(pGVM, idCpu);
1680 else
1681 rc = VERR_INVALID_PARAMETER;
1682 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1683 break;
1684
1685 case VMMR0_DO_GVMM_SCHED_HALT:
1686 if (pReqHdr)
1687 return VERR_INVALID_PARAMETER;
1688 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1689 rc = GVMMR0SchedHaltReq(pGVM, idCpu, u64Arg);
1690 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1691 break;
1692
1693 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
1694 if (pReqHdr || u64Arg)
1695 return VERR_INVALID_PARAMETER;
1696 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1697 rc = GVMMR0SchedWakeUp(pGVM, idCpu);
1698 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1699 break;
1700
1701 case VMMR0_DO_GVMM_SCHED_POKE:
1702 if (pReqHdr || u64Arg)
1703 return VERR_INVALID_PARAMETER;
1704 rc = GVMMR0SchedPoke(pGVM, idCpu);
1705 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1706 break;
1707
1708 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
1709 if (u64Arg)
1710 return VERR_INVALID_PARAMETER;
1711 rc = GVMMR0SchedWakeUpAndPokeCpusReq(pGVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
1712 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1713 break;
1714
1715 case VMMR0_DO_GVMM_SCHED_POLL:
1716 if (pReqHdr || u64Arg > 1)
1717 return VERR_INVALID_PARAMETER;
1718 rc = GVMMR0SchedPoll(pGVM, idCpu, !!u64Arg);
1719 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1720 break;
1721
1722 case VMMR0_DO_GVMM_QUERY_STATISTICS:
1723 if (u64Arg)
1724 return VERR_INVALID_PARAMETER;
1725 rc = GVMMR0QueryStatisticsReq(pGVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr, pSession);
1726 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1727 break;
1728
1729 case VMMR0_DO_GVMM_RESET_STATISTICS:
1730 if (u64Arg)
1731 return VERR_INVALID_PARAMETER;
1732 rc = GVMMR0ResetStatisticsReq(pGVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr, pSession);
1733 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1734 break;
1735
1736 /*
1737 * Initialize the R0 part of a VM instance.
1738 */
1739 case VMMR0_DO_VMMR0_INIT:
1740 rc = vmmR0InitVM(pGVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
1741 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1742 break;
1743
1744 /*
1745 * Does EMT specific ring-0 init.
1746 */
1747 case VMMR0_DO_VMMR0_INIT_EMT:
1748 rc = vmmR0InitVMEmt(pGVM, idCpu);
1749 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1750 break;
1751
1752 /*
1753 * Terminate the R0 part of a VM instance.
1754 */
1755 case VMMR0_DO_VMMR0_TERM:
1756 rc = VMMR0TermVM(pGVM, 0 /*idCpu*/);
1757 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1758 break;
1759
1760 /*
1761 * Attempt to enable hm mode and check the current setting.
1762 */
1763 case VMMR0_DO_HM_ENABLE:
1764 rc = HMR0EnableAllCpus(pGVM);
1765 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1766 break;
1767
1768 /*
1769 * Set up the hardware-accelerated session.
1770 */
1771 case VMMR0_DO_HM_SETUP_VM:
1772 rc = HMR0SetupVM(pGVM);
1773 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1774 break;
1775
1776 /*
1777 * PGM wrappers.
1778 */
1779 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
1780 if (idCpu == NIL_VMCPUID)
1781 return VERR_INVALID_CPU_ID;
1782 rc = PGMR0PhysAllocateHandyPages(pGVM, idCpu);
1783 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1784 break;
1785
1786 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
1787 if (idCpu == NIL_VMCPUID)
1788 return VERR_INVALID_CPU_ID;
1789 rc = PGMR0PhysFlushHandyPages(pGVM, idCpu);
1790 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1791 break;
1792
1793 case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
1794 if (idCpu == NIL_VMCPUID)
1795 return VERR_INVALID_CPU_ID;
1796 rc = PGMR0PhysAllocateLargeHandyPage(pGVM, idCpu);
1797 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1798 break;
1799
1800 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
1801 if (idCpu != 0)
1802 return VERR_INVALID_CPU_ID;
1803 rc = PGMR0PhysSetupIoMmu(pGVM);
1804 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1805 break;
1806
1807 /*
1808 * GMM wrappers.
1809 */
1810 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1811 if (u64Arg)
1812 return VERR_INVALID_PARAMETER;
1813 rc = GMMR0InitialReservationReq(pGVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
1814 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1815 break;
1816
1817 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1818 if (u64Arg)
1819 return VERR_INVALID_PARAMETER;
1820 rc = GMMR0UpdateReservationReq(pGVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
1821 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1822 break;
1823
1824 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1825 if (u64Arg)
1826 return VERR_INVALID_PARAMETER;
1827 rc = GMMR0AllocatePagesReq(pGVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
1828 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1829 break;
1830
1831 case VMMR0_DO_GMM_FREE_PAGES:
1832 if (u64Arg)
1833 return VERR_INVALID_PARAMETER;
1834 rc = GMMR0FreePagesReq(pGVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
1835 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1836 break;
1837
1838 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
1839 if (u64Arg)
1840 return VERR_INVALID_PARAMETER;
1841 rc = GMMR0FreeLargePageReq(pGVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
1842 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1843 break;
1844
1845 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
1846 if (u64Arg)
1847 return VERR_INVALID_PARAMETER;
1848 rc = GMMR0QueryHypervisorMemoryStatsReq((PGMMMEMSTATSREQ)pReqHdr);
1849 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1850 break;
1851
1852 case VMMR0_DO_GMM_QUERY_MEM_STATS:
1853 if (idCpu == NIL_VMCPUID)
1854 return VERR_INVALID_CPU_ID;
1855 if (u64Arg)
1856 return VERR_INVALID_PARAMETER;
1857 rc = GMMR0QueryMemoryStatsReq(pGVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
1858 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1859 break;
1860
1861 case VMMR0_DO_GMM_BALLOONED_PAGES:
1862 if (u64Arg)
1863 return VERR_INVALID_PARAMETER;
1864 rc = GMMR0BalloonedPagesReq(pGVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
1865 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1866 break;
1867
1868 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
1869 if (u64Arg)
1870 return VERR_INVALID_PARAMETER;
1871 rc = GMMR0MapUnmapChunkReq(pGVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
1872 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1873 break;
1874
1875 case VMMR0_DO_GMM_SEED_CHUNK:
1876 if (pReqHdr)
1877 return VERR_INVALID_PARAMETER;
1878 rc = GMMR0SeedChunk(pGVM, idCpu, (RTR3PTR)u64Arg);
1879 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1880 break;
1881
1882 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
1883 if (idCpu == NIL_VMCPUID)
1884 return VERR_INVALID_CPU_ID;
1885 if (u64Arg)
1886 return VERR_INVALID_PARAMETER;
1887 rc = GMMR0RegisterSharedModuleReq(pGVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
1888 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1889 break;
1890
1891 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
1892 if (idCpu == NIL_VMCPUID)
1893 return VERR_INVALID_CPU_ID;
1894 if (u64Arg)
1895 return VERR_INVALID_PARAMETER;
1896 rc = GMMR0UnregisterSharedModuleReq(pGVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
1897 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1898 break;
1899
1900 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
1901 if (idCpu == NIL_VMCPUID)
1902 return VERR_INVALID_CPU_ID;
1903 if ( u64Arg
1904 || pReqHdr)
1905 return VERR_INVALID_PARAMETER;
1906 rc = GMMR0ResetSharedModules(pGVM, idCpu);
1907 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1908 break;
1909
1910#ifdef VBOX_WITH_PAGE_SHARING
1911 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
1912 {
1913 if (idCpu == NIL_VMCPUID)
1914 return VERR_INVALID_CPU_ID;
1915 if ( u64Arg
1916 || pReqHdr)
1917 return VERR_INVALID_PARAMETER;
1918 rc = GMMR0CheckSharedModules(pGVM, idCpu);
1919 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1920 break;
1921 }
1922#endif
1923
1924#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
1925 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
1926 if (u64Arg)
1927 return VERR_INVALID_PARAMETER;
1928 rc = GMMR0FindDuplicatePageReq(pGVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
1929 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1930 break;
1931#endif
1932
1933 case VMMR0_DO_GMM_QUERY_STATISTICS:
1934 if (u64Arg)
1935 return VERR_INVALID_PARAMETER;
1936 rc = GMMR0QueryStatisticsReq(pGVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
1937 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1938 break;
1939
1940 case VMMR0_DO_GMM_RESET_STATISTICS:
1941 if (u64Arg)
1942 return VERR_INVALID_PARAMETER;
1943 rc = GMMR0ResetStatisticsReq(pGVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
1944 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1945 break;
1946
1947 /*
1948 * A quick GCFGM mock-up.
1949 */
1950 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
1951 case VMMR0_DO_GCFGM_SET_VALUE:
1952 case VMMR0_DO_GCFGM_QUERY_VALUE:
1953 {
1954 if (pGVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1955 return VERR_INVALID_PARAMETER;
1956 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
1957 if (pReq->Hdr.cbReq != sizeof(*pReq))
1958 return VERR_INVALID_PARAMETER;
1959 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
1960 {
1961 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1962 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1963 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1964 }
1965 else
1966 {
1967 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1968 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1969 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1970 }
1971 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1972 break;
1973 }
1974
1975 /*
1976 * PDM Wrappers.
1977 */
1978 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
1979 {
1980 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1981 return VERR_INVALID_PARAMETER;
1982 rc = PDMR0DriverCallReqHandler(pGVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
1983 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1984 break;
1985 }
1986
1987 case VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER:
1988 {
1989 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1990 return VERR_INVALID_PARAMETER;
1991 rc = PDMR0DeviceCallReqHandler(pGVM, (PPDMDEVICECALLREQHANDLERREQ)pReqHdr);
1992 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
1993 break;
1994 }
1995
1996 /*
1997 * Requests to the internal networking service.
1998 */
1999 case VMMR0_DO_INTNET_OPEN:
2000 {
2001 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
2002 if (u64Arg || !pReq || !vmmR0IsValidSession(pGVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
2003 return VERR_INVALID_PARAMETER;
2004 rc = IntNetR0OpenReq(pSession, pReq);
2005 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2006 break;
2007 }
2008
2009 case VMMR0_DO_INTNET_IF_CLOSE:
2010 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2011 return VERR_INVALID_PARAMETER;
2012 rc = IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
2013 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2014 break;
2015
2016
2017 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
2018 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2019 return VERR_INVALID_PARAMETER;
2020 rc = IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
2021 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2022 break;
2023
2024 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
2025 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2026 return VERR_INVALID_PARAMETER;
2027 rc = IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
2028 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2029 break;
2030
2031 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
2032 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2033 return VERR_INVALID_PARAMETER;
2034 rc = IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
2035 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2036 break;
2037
2038 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
2039 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2040 return VERR_INVALID_PARAMETER;
2041 rc = IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
2042 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2043 break;
2044
2045 case VMMR0_DO_INTNET_IF_SEND:
2046 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2047 return VERR_INVALID_PARAMETER;
2048 rc = IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
2049 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2050 break;
2051
2052 case VMMR0_DO_INTNET_IF_WAIT:
2053 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2054 return VERR_INVALID_PARAMETER;
2055 rc = IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
2056 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2057 break;
2058
2059 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
2060 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2061 return VERR_INVALID_PARAMETER;
2062 rc = IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
2063 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2064 break;
2065
2066#ifdef VBOX_WITH_PCI_PASSTHROUGH
2067 /*
2068 * Requests to the host PCI driver service.
2069 */
2070 case VMMR0_DO_PCIRAW_REQ:
2071 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2072 return VERR_INVALID_PARAMETER;
2073 rc = PciRawR0ProcessReq(pGVM, pSession, (PPCIRAWSENDREQ)pReqHdr);
2074 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2075 break;
2076#endif
2077
2078 /*
2079 * NEM requests.
2080 */
2081#ifdef VBOX_WITH_NEM_R0
2082# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
2083 case VMMR0_DO_NEM_INIT_VM:
2084 if (u64Arg || pReqHdr || idCpu != 0)
2085 return VERR_INVALID_PARAMETER;
2086 rc = NEMR0InitVM(pGVM);
2087 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2088 break;
2089
2090 case VMMR0_DO_NEM_INIT_VM_PART_2:
2091 if (u64Arg || pReqHdr || idCpu != 0)
2092 return VERR_INVALID_PARAMETER;
2093 rc = NEMR0InitVMPart2(pGVM);
2094 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2095 break;
2096
2097 case VMMR0_DO_NEM_MAP_PAGES:
2098 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2099 return VERR_INVALID_PARAMETER;
2100 rc = NEMR0MapPages(pGVM, idCpu);
2101 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2102 break;
2103
2104 case VMMR0_DO_NEM_UNMAP_PAGES:
2105 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2106 return VERR_INVALID_PARAMETER;
2107 rc = NEMR0UnmapPages(pGVM, idCpu);
2108 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2109 break;
2110
2111 case VMMR0_DO_NEM_EXPORT_STATE:
2112 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2113 return VERR_INVALID_PARAMETER;
2114 rc = NEMR0ExportState(pGVM, idCpu);
2115 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2116 break;
2117
2118 case VMMR0_DO_NEM_IMPORT_STATE:
2119 if (pReqHdr || idCpu == NIL_VMCPUID)
2120 return VERR_INVALID_PARAMETER;
2121 rc = NEMR0ImportState(pGVM, idCpu, u64Arg);
2122 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2123 break;
2124
2125 case VMMR0_DO_NEM_QUERY_CPU_TICK:
2126 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2127 return VERR_INVALID_PARAMETER;
2128 rc = NEMR0QueryCpuTick(pGVM, idCpu);
2129 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2130 break;
2131
2132 case VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL:
2133 if (pReqHdr || idCpu == NIL_VMCPUID)
2134 return VERR_INVALID_PARAMETER;
2135 rc = NEMR0ResumeCpuTickOnAll(pGVM, idCpu, u64Arg);
2136 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2137 break;
2138
2139 case VMMR0_DO_NEM_UPDATE_STATISTICS:
2140 if (u64Arg || pReqHdr)
2141 return VERR_INVALID_PARAMETER;
2142 rc = NEMR0UpdateStatistics(pGVM, idCpu);
2143 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2144 break;
2145
2146# if 1 && defined(DEBUG_bird)
2147 case VMMR0_DO_NEM_EXPERIMENT:
2148 if (pReqHdr)
2149 return VERR_INVALID_PARAMETER;
2150 rc = NEMR0DoExperiment(pGVM, idCpu, u64Arg);
2151 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING);
2152 break;
2153# endif
2154# endif
2155#endif
2156
2157 /*
2158 * For profiling.
2159 */
2160 case VMMR0_DO_NOP:
2161 case VMMR0_DO_SLOW_NOP:
2162 return VINF_SUCCESS;
2163
2164 /*
2165 * For testing Ring-0 APIs invoked in this environment.
2166 */
2167 case VMMR0_DO_TESTS:
2168 /** @todo make new test */
2169 return VINF_SUCCESS;
2170
2171 default:
2172 /*
2173 * We're returning VERR_NOT_SUPPORTED here so we've got something other
2174 * than -1, which the interrupt gate glue code might return.
2175 */
2176 Log(("operation %#x is not supported\n", enmOperation));
2177 return VERR_NOT_SUPPORTED;
2178 }
2179 return rc;
2180}
2181
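/*
 * Ring-3 side sketch (illustrative only, not taken from the VMM sources): how a
 * GCFGM query could be packaged for the VMMR0_DO_GCFGM_QUERY_VALUE case handled
 * by vmmR0EntryExWorker above.  It assumes the SUPR3CallVMMR0Ex support-library
 * helper and the SUPVMMR0REQHDR_MAGIC constant from sup.h; the GCFGMVALUEREQ
 * field usage mirrors what the ring-0 handler checks, and the function name
 * below is made up for the example.
 */
#if 0
static int vmmR3ExampleQueryGCfgmValue(PSUPDRVSESSION pSession, const char *pszName, uint64_t *pu64Value)
{
    GCFGMVALUEREQ Req;
    RT_ZERO(Req);
    Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
    Req.Hdr.cbReq    = sizeof(Req);             /* Ring-0 insists on the exact request size. */
    Req.pSession     = pSession;                /* Used by GVMMR0QueryConfig. */
    int rc = RTStrCopy(Req.szName, sizeof(Req.szName), pszName);
    if (RT_SUCCESS(rc))
    {
        /* GCFGM requests carry no VM handle and no per-VCPU context. */
        rc = SUPR3CallVMMR0Ex(NIL_RTR0PTR, NIL_VMCPUID, VMMR0_DO_GCFGM_QUERY_VALUE, 0 /*u64Arg*/, &Req.Hdr);
        if (RT_SUCCESS(rc))
            *pu64Value = Req.u64Value;
    }
    return rc;
}
#endif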
2182
2183/**
2184 * Argument for vmmR0EntryExWrapper containing the arguments for VMMR0EntryEx.
2185 */
2186typedef struct VMMR0ENTRYEXARGS
2187{
2188 PGVM pGVM;
2189 VMCPUID idCpu;
2190 VMMR0OPERATION enmOperation;
2191 PSUPVMMR0REQHDR pReq;
2192 uint64_t u64Arg;
2193 PSUPDRVSESSION pSession;
2194} VMMR0ENTRYEXARGS;
2195/** Pointer to a vmmR0EntryExWrapper argument package. */
2196typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;
2197
2198/**
2199 * This is just a longjmp wrapper for the vmmR0EntryExWorker calls made from VMMR0EntryEx.
2200 *
2201 * @returns VBox status code.
2202 * @param pvArgs The argument package.
2203 */
2204static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
2205{
2206 return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pGVM,
2207 ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
2208 ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
2209 ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
2210 ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
2211 ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
2212}
2213
2214
2215/**
2216 * The Ring 0 entry point, called by the support library (SUP).
2217 *
2218 * @returns VBox status code.
2219 * @param pGVM The global (ring-0) VM structure.
2220 * @param pVM The cross context VM structure.
2221 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
2222 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
2223 * @param enmOperation Which operation to execute.
2224 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
2225 * @param u64Arg Some simple constant argument.
2226 * @param pSession The session of the caller.
2227 * @remarks Assumed to be called with interrupts _enabled_.
2228 */
2229VMMR0DECL(int) VMMR0EntryEx(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
2230 PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
2231{
2232 /*
2233 * Requests that should only happen on the EMT thread will be
2234 * wrapped in a setjmp so we can assert without causing trouble.
2235 */
2236 if ( pVM != NULL
2237 && pGVM != NULL
2238 && pVM == pGVM /** @todo drop pGVM */
2239 && idCpu < pGVM->cCpus
2240 && pGVM->pSession == pSession
2241 && pGVM->pSelf == pVM)
2242 {
2243 switch (enmOperation)
2244 {
2245 /* These might/will be called before VMMR3Init. */
2246 case VMMR0_DO_GMM_INITIAL_RESERVATION:
2247 case VMMR0_DO_GMM_UPDATE_RESERVATION:
2248 case VMMR0_DO_GMM_ALLOCATE_PAGES:
2249 case VMMR0_DO_GMM_FREE_PAGES:
2250 case VMMR0_DO_GMM_BALLOONED_PAGES:
2251 /* On the mac we might not have a valid jmp buf, so check these as well. */
2252 case VMMR0_DO_VMMR0_INIT:
2253 case VMMR0_DO_VMMR0_TERM:
2254 {
2255 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2256 PVMCPUCC pVCpu = pGVCpu;
2257 RTNATIVETHREAD hNativeThread = RTThreadNativeSelf();
2258 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
2259 && pVCpu->hNativeThreadR0 == hNativeThread))
2260 {
2261 if (!pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
2262 break;
2263
2264 /** @todo validate this EMT claim... GVM knows. */
2265 VMMR0ENTRYEXARGS Args;
2266 Args.pGVM = pGVM;
2267 Args.idCpu = idCpu;
2268 Args.enmOperation = enmOperation;
2269 Args.pReq = pReq;
2270 Args.u64Arg = u64Arg;
2271 Args.pSession = pSession;
2272 return vmmR0CallRing3SetJmpEx(&pVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, &Args);
2273 }
2274 return VERR_VM_THREAD_NOT_EMT;
2275 }
2276
2277 default:
2278 break;
2279 }
2280 }
2281 return vmmR0EntryExWorker(pGVM, idCpu, enmOperation, pReq, u64Arg, pSession);
2282}
2283
2284
2285/**
2286 * Checks whether we've armed the ring-0 long jump machinery.
2287 *
2288 * @returns @c true / @c false
2289 * @param pVCpu The cross context virtual CPU structure.
2290 * @thread EMT
2291 * @sa VMMIsLongJumpArmed
2292 */
2293VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPUCC pVCpu)
2294{
2295#ifdef RT_ARCH_X86
2296 return pVCpu->vmm.s.CallRing3JmpBufR0.eip
2297 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2298#else
2299 return pVCpu->vmm.s.CallRing3JmpBufR0.rip
2300 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2301#endif
2302}
2303
2304
2305/**
2306 * Checks whether we've done a ring-3 long jump.
2307 *
2308 * @returns @c true / @c false
2309 * @param pVCpu The cross context virtual CPU structure.
2310 * @thread EMT
2311 */
2312VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPUCC pVCpu)
2313{
2314 return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2315}
2316
2317
2318/**
2319 * Internal R0 logger worker: Flush logger.
2320 *
2321 * @param pLogger The logger instance to flush.
2322 * @remark This function must be exported!
2323 */
2324VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
2325{
2326#ifdef LOG_ENABLED
2327 /*
2328 * Convert the pLogger into a VM handle and 'call' back to Ring-3.
2329 * (This code is a bit paranoid.)
2330 */
2331 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_UOFFSETOF(VMMR0LOGGER, Logger));
2332 if ( !VALID_PTR(pR0Logger)
2333 || !VALID_PTR(pR0Logger + 1)
2334 || pLogger->u32Magic != RTLOGGER_MAGIC)
2335 {
2336# ifdef DEBUG
2337 SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
2338# endif
2339 return;
2340 }
2341 if (pR0Logger->fFlushingDisabled)
2342 return; /* quietly */
2343
2344 PVMCC pVM = pR0Logger->pVM;
2345 if ( !VALID_PTR(pVM)
2346 || pVM->pSelf != pVM)
2347 {
2348# ifdef DEBUG
2349 SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pSelf=%p! pLogger=%p\n", pVM, pVM->pSelf, pLogger);
2350# endif
2351 return;
2352 }
2353
2354 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2355 if (pVCpu)
2356 {
2357 /*
2358 * Check that the jump buffer is armed.
2359 */
2360# ifdef RT_ARCH_X86
2361 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.eip
2362 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2363# else
2364 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.rip
2365 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2366# endif
2367 {
2368# ifdef DEBUG
2369 SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
2370# endif
2371 return;
2372 }
2373 VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
2374 }
2375# ifdef DEBUG
2376 else
2377 SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
2378# endif
2379#else
2380 NOREF(pLogger);
2381#endif /* LOG_ENABLED */
2382}
2383
2384#ifdef LOG_ENABLED
2385
2386/**
2387 * Disables flushing of the ring-0 debug log.
2388 *
2389 * @param pVCpu The cross context virtual CPU structure.
2390 */
2391VMMR0_INT_DECL(void) VMMR0LogFlushDisable(PVMCPUCC pVCpu)
2392{
2393 if (pVCpu->vmm.s.pR0LoggerR0)
2394 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
2395 if (pVCpu->vmm.s.pR0RelLoggerR0)
2396 pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled = true;
2397}
2398
2399
2400/**
2401 * Enables flushing of the ring-0 debug log.
2402 *
2403 * @param pVCpu The cross context virtual CPU structure.
2404 */
2405VMMR0_INT_DECL(void) VMMR0LogFlushEnable(PVMCPUCC pVCpu)
2406{
2407 if (pVCpu->vmm.s.pR0LoggerR0)
2408 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
2409 if (pVCpu->vmm.s.pR0RelLoggerR0)
2410 pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled = false;
2411}
2412
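/*
 * Usage sketch (illustrative, not taken from the VMM sources): the pair above
 * is meant to bracket stretches of ring-0 code during which a logger flush,
 * and thus a VMMRZCallRing3 call-back to ring-3, would be unsafe.
 */
#if 0
    VMMR0LogFlushDisable(pVCpu);
    /* ... code that must not trigger a flush to ring-3 ... */
    VMMR0LogFlushEnable(pVCpu);
#endif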
2413
2414/**
2415 * Checks whether log flushing is disabled.
2416 *
2417 * @param pVCpu The cross context virtual CPU structure.
2418 */
2419VMMR0_INT_DECL(bool) VMMR0IsLogFlushDisabled(PVMCPUCC pVCpu)
2420{
2421 if (pVCpu->vmm.s.pR0LoggerR0)
2422 return pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled;
2423 if (pVCpu->vmm.s.pR0RelLoggerR0)
2424 return pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled;
2425 return true;
2426}
2427
2428#endif /* LOG_ENABLED */
2429
2430/**
2431 * Override RTLogRelGetDefaultInstanceEx so we can do LogRel to VBox.log from EMTs in ring-0.
2432 */
2433DECLEXPORT(PRTLOGGER) RTLogRelGetDefaultInstanceEx(uint32_t fFlagsAndGroup)
2434{
2435 PGVMCPU pGVCpu = GVMMR0GetGVCpuByEMT(NIL_RTNATIVETHREAD);
2436 if (pGVCpu)
2437 {
2438 PVMCPUCC pVCpu = pGVCpu;
2439 if (RT_VALID_PTR(pVCpu))
2440 {
2441 PVMMR0LOGGER pVmmLogger = pVCpu->vmm.s.pR0RelLoggerR0;
2442 if (RT_VALID_PTR(pVmmLogger))
2443 {
2444 if ( pVmmLogger->fCreated
2445 && pVmmLogger->pVM == pGVCpu->pGVM)
2446 {
2447 if (pVmmLogger->Logger.fFlags & RTLOGFLAGS_DISABLED)
2448 return NULL;
2449 uint16_t const fFlags = RT_LO_U16(fFlagsAndGroup);
2450 uint16_t const iGroup = RT_HI_U16(fFlagsAndGroup);
2451 if ( iGroup != UINT16_MAX
2452 && ( ( pVmmLogger->Logger.afGroups[iGroup < pVmmLogger->Logger.cGroups ? iGroup : 0]
2453 & (fFlags | (uint32_t)RTLOGGRPFLAGS_ENABLED))
2454 != (fFlags | (uint32_t)RTLOGGRPFLAGS_ENABLED)))
2455 return NULL;
2456 return &pVmmLogger->Logger;
2457 }
2458 }
2459 }
2460 }
2461 return SUPR0GetDefaultLogRelInstanceEx(fFlagsAndGroup);
2462}
2463
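/*
 * Illustrative note on the fFlagsAndGroup packing handled above: the low 16
 * bits carry the group flags and the high 16 bits the group index, matching
 * the RT_LO_U16 / RT_HI_U16 unpacking in the override.  A caller could compose
 * the value roughly like this (a sketch; it assumes the standard RT_MAKE_U32
 * macro and a LOG_GROUP definition in the translation unit):
 */
#if 0
    PRTLOGGER pLogger = RTLogRelGetDefaultInstanceEx(RT_MAKE_U32(RTLOGGRPFLAGS_LEVEL_1, LOG_GROUP));
    if (pLogger)
        RTLogLoggerEx(pLogger, RTLOGGRPFLAGS_LEVEL_1, LOG_GROUP, "example release-log line\n");
#endif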
2464
2465/**
2466 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
2467 *
2468 * @returns true if the breakpoint should be hit, false if it should be ignored.
2469 */
2470DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
2471{
2472#if 0
2473 return true;
2474#else
2475 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2476 if (pVM)
2477 {
2478 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2479
2480 if (pVCpu)
2481 {
2482#ifdef RT_ARCH_X86
2483 if ( pVCpu->vmm.s.CallRing3JmpBufR0.eip
2484 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2485#else
2486 if ( pVCpu->vmm.s.CallRing3JmpBufR0.rip
2487 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2488#endif
2489 {
2490 int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
2491 return RT_FAILURE_NP(rc);
2492 }
2493 }
2494 }
2495#ifdef RT_OS_LINUX
2496 return true;
2497#else
2498 return false;
2499#endif
2500#endif
2501}
2502
2503
2504/**
2505 * Override this so we can push it up to ring-3.
2506 *
2507 * @param pszExpr Expression. Can be NULL.
2508 * @param uLine Location line number.
2509 * @param pszFile Location file name.
2510 * @param pszFunction Location function name.
2511 */
2512DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
2513{
2514 /*
2515 * To the log.
2516 */
2517 LogAlways(("\n!!R0-Assertion Failed!!\n"
2518 "Expression: %s\n"
2519 "Location : %s(%d) %s\n",
2520 pszExpr, pszFile, uLine, pszFunction));
2521
2522 /*
2523 * To the global VMM buffer.
2524 */
2525 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2526 if (pVM)
2527 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
2528 "\n!!R0-Assertion Failed!!\n"
2529 "Expression: %.*s\n"
2530 "Location : %s(%d) %s\n",
2531 sizeof(pVM->vmm.s.szRing0AssertMsg1) / 4 * 3, pszExpr,
2532 pszFile, uLine, pszFunction);
2533
2534 /*
2535 * Continue the normal way.
2536 */
2537 RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
2538}
2539
2540
2541/**
2542 * Callback for RTLogFormatV which writes to the ring-3 log port.
2543 * See PFNLOGOUTPUT() for details.
2544 */
2545static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
2546{
2547 for (size_t i = 0; i < cbChars; i++)
2548 {
2549 LogAlways(("%c", pachChars[i])); NOREF(pachChars);
2550 }
2551
2552 NOREF(pv);
2553 return cbChars;
2554}
2555
2556
2557/**
2558 * Override this so we can push it up to ring-3.
2559 *
2560 * @param pszFormat The format string.
2561 * @param va Arguments.
2562 */
2563DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
2564{
2565 va_list vaCopy;
2566
2567 /*
2568 * Push the message to the loggers.
2569 */
2570 PRTLOGGER pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
2571 if (pLog)
2572 {
2573 va_copy(vaCopy, va);
2574 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2575 va_end(vaCopy);
2576 }
2577 pLog = RTLogRelGetDefaultInstance();
2578 if (pLog)
2579 {
2580 va_copy(vaCopy, va);
2581 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2582 va_end(vaCopy);
2583 }
2584
2585 /*
2586 * Push it to the global VMM buffer.
2587 */
2588 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2589 if (pVM)
2590 {
2591 va_copy(vaCopy, va);
2592 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
2593 va_end(vaCopy);
2594 }
2595
2596 /*
2597 * Continue the normal way.
2598 */
2599 RTAssertMsg2V(pszFormat, va);
2600}
2601