VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@80334

Last change on this file since 80334 was 80334, checked in by vboxsync, 6 years ago

VMM: Eliminating the VBOX_BUGREF_9217 preprocessor macro. bugref:9217

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 95.0 KB
 
1/* $Id: VMMR0.cpp 80334 2019-08-17 00:43:24Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VMM
23#include <VBox/vmm/vmm.h>
24#include <VBox/sup.h>
25#include <VBox/vmm/trpm.h>
26#include <VBox/vmm/cpum.h>
27#include <VBox/vmm/pdmapi.h>
28#include <VBox/vmm/pgm.h>
29#ifdef VBOX_WITH_NEM_R0
30# include <VBox/vmm/nem.h>
31#endif
32#include <VBox/vmm/em.h>
33#include <VBox/vmm/stam.h>
34#include <VBox/vmm/tm.h>
35#include "VMMInternal.h"
36#include <VBox/vmm/vmcc.h>
37#include <VBox/vmm/gvm.h>
38#ifdef VBOX_WITH_PCI_PASSTHROUGH
39# include <VBox/vmm/pdmpci.h>
40#endif
41#include <VBox/vmm/apic.h>
42
43#include <VBox/vmm/gvmm.h>
44#include <VBox/vmm/gmm.h>
45#include <VBox/vmm/gim.h>
46#include <VBox/intnet.h>
47#include <VBox/vmm/hm.h>
48#include <VBox/param.h>
49#include <VBox/err.h>
50#include <VBox/version.h>
51#include <VBox/log.h>
52
53#include <iprt/asm-amd64-x86.h>
54#include <iprt/assert.h>
55#include <iprt/crc.h>
56#include <iprt/mp.h>
57#include <iprt/once.h>
58#include <iprt/stdarg.h>
59#include <iprt/string.h>
60#include <iprt/thread.h>
61#include <iprt/timer.h>
62#include <iprt/time.h>
63
64#include "dtrace/VBoxVMM.h"
65
66
67#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
68# pragma intrinsic(_AddressOfReturnAddress)
69#endif
70
71#if defined(RT_OS_DARWIN) && ARCH_BITS == 32
72# error "32-bit darwin is no longer supported. Go back to 4.3 or earlier!"
73#endif
74
75
76
77/*********************************************************************************************************************************
78* Defined Constants And Macros *
79*********************************************************************************************************************************/
80/** @def VMM_CHECK_SMAP_SETUP
81 * SMAP check setup. */
82/** @def VMM_CHECK_SMAP_CHECK
83 * Checks that the AC flag is set if SMAP is enabled. If AC is not set,
84 * it will be logged and @a a_BadExpr is executed. */
85/** @def VMM_CHECK_SMAP_CHECK2
86 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it will
87 * be logged, written to the VMs assertion text buffer, and @a a_BadExpr is
88 * executed. */
89#if defined(VBOX_STRICT) || 1
90# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
91# define VMM_CHECK_SMAP_CHECK(a_BadExpr) \
92 do { \
93 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
94 { \
95 RTCCUINTREG fEflCheck = ASMGetFlags(); \
96 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
97 { /* likely */ } \
98 else \
99 { \
100 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
101 a_BadExpr; \
102 } \
103 } \
104 } while (0)
105# define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) \
106 do { \
107 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
108 { \
109 RTCCUINTREG fEflCheck = ASMGetFlags(); \
110 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
111 { /* likely */ } \
112 else \
113 { \
114 SUPR0BadContext((a_pVM) ? (a_pVM)->pSession : NULL, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \
115 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1), \
116 "%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
117 a_BadExpr; \
118 } \
119 } \
120 } while (0)
121#else
122# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
123# define VMM_CHECK_SMAP_CHECK(a_BadExpr) NOREF(fKernelFeatures)
124# define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) NOREF(fKernelFeatures)
125#endif
126
127
128/*********************************************************************************************************************************
129* Internal Functions *
130*********************************************************************************************************************************/
131RT_C_DECLS_BEGIN
132#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
133extern uint64_t __udivdi3(uint64_t, uint64_t);
134extern uint64_t __umoddi3(uint64_t, uint64_t);
135#endif
136RT_C_DECLS_END
137
138
139/*********************************************************************************************************************************
140* Global Variables *
141*********************************************************************************************************************************/
142/** Drag in necessary library bits.
143 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
144PFNRT g_VMMR0Deps[] =
145{
146 (PFNRT)RTCrc32,
147 (PFNRT)RTOnce,
148#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
149 (PFNRT)__udivdi3,
150 (PFNRT)__umoddi3,
151#endif
152 NULL
153};
154
155#ifdef RT_OS_SOLARIS
156/* Dependency information for the native solaris loader. */
157extern "C" { char _depends_on[] = "vboxdrv"; }
158#endif
159
160/** The result of SUPR0GetRawModeUsability(), set by ModuleInit(). */
161int g_rcRawModeUsability = VINF_SUCCESS;
162
163
164/**
165 * Initialize the module.
166 * This is called when we're first loaded.
167 *
168 * @returns 0 on success.
169 * @returns VBox status on failure.
170 * @param hMod Image handle for use in APIs.
171 */
172DECLEXPORT(int) ModuleInit(void *hMod)
173{
174 VMM_CHECK_SMAP_SETUP();
175 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
176
177#ifdef VBOX_WITH_DTRACE_R0
178 /*
179 * The first thing to do is register the static tracepoints.
180 * (Deregistration is automatic.)
181 */
182 int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
183 if (RT_FAILURE(rc2))
184 return rc2;
185#endif
186 LogFlow(("ModuleInit:\n"));
187
188#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
189 /*
190 * Display the CMOS debug code.
191 */
192 ASMOutU8(0x72, 0x03);
193 uint8_t bDebugCode = ASMInU8(0x73);
194 LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
195 RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
196#endif
197
198 /*
199 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
200 */
201 int rc = vmmInitFormatTypes();
202 if (RT_SUCCESS(rc))
203 {
204 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
205 rc = GVMMR0Init();
206 if (RT_SUCCESS(rc))
207 {
208 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
209 rc = GMMR0Init();
210 if (RT_SUCCESS(rc))
211 {
212 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
213 rc = HMR0Init();
214 if (RT_SUCCESS(rc))
215 {
216 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
217 rc = PGMRegisterStringFormatTypes();
218 if (RT_SUCCESS(rc))
219 {
220 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
221#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
222 rc = PGMR0DynMapInit();
223#endif
224 if (RT_SUCCESS(rc))
225 {
226 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
227 rc = IntNetR0Init();
228 if (RT_SUCCESS(rc))
229 {
230#ifdef VBOX_WITH_PCI_PASSTHROUGH
231 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
232 rc = PciRawR0Init();
233#endif
234 if (RT_SUCCESS(rc))
235 {
236 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
237 rc = CPUMR0ModuleInit();
238 if (RT_SUCCESS(rc))
239 {
240#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
241 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
242 rc = vmmR0TripleFaultHackInit();
243 if (RT_SUCCESS(rc))
244#endif
245 {
246 VMM_CHECK_SMAP_CHECK(rc = VERR_VMM_SMAP_BUT_AC_CLEAR);
247 if (RT_SUCCESS(rc))
248 {
249 g_rcRawModeUsability = SUPR0GetRawModeUsability();
250 if (g_rcRawModeUsability != VINF_SUCCESS)
251 SUPR0Printf("VMMR0!ModuleInit: SUPR0GetRawModeUsability -> %Rrc\n",
252 g_rcRawModeUsability);
253 LogFlow(("ModuleInit: returns success\n"));
254 return VINF_SUCCESS;
255 }
256 }
257
258 /*
259 * Bail out.
260 */
261#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
262 vmmR0TripleFaultHackTerm();
263#endif
264 }
265 else
266 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
267#ifdef VBOX_WITH_PCI_PASSTHROUGH
268 PciRawR0Term();
269#endif
270 }
271 else
272 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
273 IntNetR0Term();
274 }
275 else
276 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
277#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
278 PGMR0DynMapTerm();
279#endif
280 }
281 else
282 LogRel(("ModuleInit: PGMR0DynMapInit -> %Rrc\n", rc));
283 PGMDeregisterStringFormatTypes();
284 }
285 else
286 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
287 HMR0Term();
288 }
289 else
290 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
291 GMMR0Term();
292 }
293 else
294 LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
295 GVMMR0Term();
296 }
297 else
298 LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
299 vmmTermFormatTypes();
300 }
301 else
302 LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));
303
304 LogFlow(("ModuleInit: failed %Rrc\n", rc));
305 return rc;
306}
307
308
309/**
310 * Terminate the module.
311 * This is called when we're finally unloaded.
312 *
313 * @param hMod Image handle for use in APIs.
314 */
315DECLEXPORT(void) ModuleTerm(void *hMod)
316{
317 NOREF(hMod);
318 LogFlow(("ModuleTerm:\n"));
319
320 /*
321 * Terminate the CPUM module (Local APIC cleanup).
322 */
323 CPUMR0ModuleTerm();
324
325 /*
326 * Terminate the internal network service.
327 */
328 IntNetR0Term();
329
330 /*
331 * PGM (Darwin), HM and PciRaw global cleanup.
332 */
333#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
334 PGMR0DynMapTerm();
335#endif
336#ifdef VBOX_WITH_PCI_PASSTHROUGH
337 PciRawR0Term();
338#endif
339 PGMDeregisterStringFormatTypes();
340 HMR0Term();
341#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
342 vmmR0TripleFaultHackTerm();
343#endif
344
345 /*
346 * Destroy the GMM and GVMM instances.
347 */
348 GMMR0Term();
349 GVMMR0Term();
350
351 vmmTermFormatTypes();
352
353 LogFlow(("ModuleTerm: returns\n"));
354}
355
356
357/**
358 * Initiates the R0 driver for a particular VM instance.
359 *
360 * @returns VBox status code.
361 *
362 * @param pGVM The global (ring-0) VM structure.
363 * @param pVM The cross context VM structure.
364 * @param uSvnRev The SVN revision of the ring-3 part.
365 * @param uBuildType Build type indicator.
366 * @thread EMT(0)
367 */
368static int vmmR0InitVM(PGVM pGVM, PVMCC pVM, uint32_t uSvnRev, uint32_t uBuildType)
369{
370 VMM_CHECK_SMAP_SETUP();
371 VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
372
373 /*
374 * Match the SVN revisions and build type.
375 */
376 if (uSvnRev != VMMGetSvnRev())
377 {
378 LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
379 SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
380 return VERR_VMM_R0_VERSION_MISMATCH;
381 }
382 if (uBuildType != vmmGetBuildType())
383 {
384 LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
385 SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
386 return VERR_VMM_R0_VERSION_MISMATCH;
387 }
388
389 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, 0 /*idCpu*/);
390 if (RT_FAILURE(rc))
391 return rc;
392
393#ifdef LOG_ENABLED
394 /*
395 * Register the EMT R0 logger instance for VCPU 0.
396 */
397 PVMCPUCC pVCpu = VMCC_GET_CPU_0(pVM);
398
399 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
400 if (pR0Logger)
401 {
402# if 0 /* testing of the logger. */
403 LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
404 LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
405 LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
406 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
407
408 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
409 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
410 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
411 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
412
413 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
414 LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
415 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
416 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
417
418 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
419 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
420 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
421 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
422 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
423 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
424
425 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
426 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
427
428 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
429 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
430 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
431# endif
432 Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pVM->pSession));
433 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
434 pR0Logger->fRegistered = true;
435 }
436#endif /* LOG_ENABLED */
437
438 /*
439 * Check if the host supports high resolution timers or not.
440 */
441 if ( pVM->vmm.s.fUsePeriodicPreemptionTimers
442 && !RTTimerCanDoHighResolution())
443 pVM->vmm.s.fUsePeriodicPreemptionTimers = false;
444
445 /*
446 * Initialize the per VM data for GVMM and GMM.
447 */
448 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
449 rc = GVMMR0InitVM(pGVM);
450// if (RT_SUCCESS(rc))
451// rc = GMMR0InitPerVMData(pVM);
452 if (RT_SUCCESS(rc))
453 {
454 /*
455 * Init HM, CPUM and PGM (Darwin only).
456 */
457 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
458 rc = HMR0InitVM(pVM);
459 if (RT_SUCCESS(rc))
460 VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION); /* CPUMR0InitVM will otherwise panic the host */
461 if (RT_SUCCESS(rc))
462 {
463 rc = CPUMR0InitVM(pVM);
464 if (RT_SUCCESS(rc))
465 {
466 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
467#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
468 rc = PGMR0DynMapInitVM(pVM);
469#endif
470 if (RT_SUCCESS(rc))
471 {
472 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
473 rc = EMR0InitVM(pGVM);
474 if (RT_SUCCESS(rc))
475 {
476 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
477#ifdef VBOX_WITH_PCI_PASSTHROUGH
478 rc = PciRawR0InitVM(pGVM, pVM);
479#endif
480 if (RT_SUCCESS(rc))
481 {
482 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
483 rc = GIMR0InitVM(pVM);
484 if (RT_SUCCESS(rc))
485 {
486 VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION);
487 if (RT_SUCCESS(rc))
488 {
489 GVMMR0DoneInitVM(pGVM);
490
491 /*
492 * Collect a bit of info for the VM release log.
493 */
494 pVM->vmm.s.fIsPreemptPendingApiTrusty = RTThreadPreemptIsPendingTrusty();
495 pVM->vmm.s.fIsPreemptPossible = RTThreadPreemptIsPossible();
496
497 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
498 return rc;
499 }
500
501 /* bail out*/
502 GIMR0TermVM(pVM);
503 }
504#ifdef VBOX_WITH_PCI_PASSTHROUGH
505 PciRawR0TermVM(pGVM, pVM);
506#endif
507 }
508 }
509 }
510 }
511 HMR0TermVM(pVM);
512 }
513 }
514
515 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
516 return rc;
517}
518
519
520/**
521 * Does EMT specific VM initialization.
522 *
523 * @returns VBox status code.
524 * @param pGVM The ring-0 VM structure.
525 * @param pVM The cross context VM structure.
526 * @param idCpu The EMT that's calling.
527 */
528static int vmmR0InitVMEmt(PGVM pGVM, PVMCC pVM, VMCPUID idCpu)
529{
530 /* Paranoia (caller checked these already). */
531 AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_CPU_ID);
532 AssertReturn(pGVM->aCpus[idCpu].hEMT == RTThreadNativeSelf(), VERR_INVALID_CPU_ID);
533
534#ifdef LOG_ENABLED
535 /*
536 * Registration of ring 0 loggers.
537 */
538 PVMCPUCC pVCpu = &pGVM->aCpus[idCpu];
539 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
540 if ( pR0Logger
541 && !pR0Logger->fRegistered)
542 {
543 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
544 pR0Logger->fRegistered = true;
545 }
546#endif
547 RT_NOREF(pVM);
548
549 return VINF_SUCCESS;
550}
551
552
553
554/**
555 * Terminates the R0 bits for a particular VM instance.
556 *
557 * This is normally called by ring-3 as part of the VM termination process, but
558 * may alternatively be called during the support driver session cleanup when
559 * the VM object is destroyed (see GVMM).
560 *
561 * @returns VBox status code.
562 *
563 * @param pGVM The global (ring-0) VM structure.
564 * @param pVM The cross context VM structure.
565 * @param idCpu Set to 0 if EMT(0) or NIL_VMCPUID if session cleanup
566 * thread.
567 * @thread EMT(0) or session clean up thread.
568 */
569VMMR0_INT_DECL(int) VMMR0TermVM(PGVM pGVM, PVMCC pVM, VMCPUID idCpu)
570{
571 /*
572 * Check EMT(0) claim if we're called from userland.
573 */
574 if (idCpu != NIL_VMCPUID)
575 {
576 AssertReturn(idCpu == 0, VERR_INVALID_CPU_ID);
577 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
578 if (RT_FAILURE(rc))
579 return rc;
580 }
581
582#ifdef VBOX_WITH_PCI_PASSTHROUGH
583 PciRawR0TermVM(pGVM, pVM);
584#endif
585
586 /*
587 * Tell GVMM what we're up to and check that we only do this once.
588 */
589 if (GVMMR0DoingTermVM(pGVM))
590 {
591 GIMR0TermVM(pVM);
592
593 /** @todo I wish to call PGMR0PhysFlushHandyPages(pVM, &pVM->aCpus[idCpu])
594 * here to make sure we don't leak any shared pages if we crash... */
595#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
596 PGMR0DynMapTermVM(pVM);
597#endif
598 HMR0TermVM(pVM);
599 }
600
601 /*
602 * Deregister the logger.
603 */
604 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
605 return VINF_SUCCESS;
606}
607
608
609/**
610 * An interrupt or unhalt force flag is set, deal with it.
611 *
612 * @returns VINF_SUCCESS (or VINF_EM_HALT).
613 * @param pVCpu The cross context virtual CPU structure.
614 * @param uMWait Result from EMMonitorWaitIsActive().
615 * @param enmInterruptibility Guest CPU interruptibility level.
616 */
617static int vmmR0DoHaltInterrupt(PVMCPUCC pVCpu, unsigned uMWait, CPUMINTERRUPTIBILITY enmInterruptibility)
618{
619 Assert(!TRPMHasTrap(pVCpu));
620 Assert( enmInterruptibility > CPUMINTERRUPTIBILITY_INVALID
621 && enmInterruptibility < CPUMINTERRUPTIBILITY_END);
622
623 /*
624 * Pending interrupts w/o any SMIs or NMIs? That's the usual case.
625 */
626 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
627 && !VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_INTERRUPT_NMI))
628 {
629 if (enmInterruptibility <= CPUMINTERRUPTIBILITY_UNRESTRAINED)
630 {
631 uint8_t u8Interrupt = 0;
632 int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
633 Log(("vmmR0DoHaltInterrupt: CPU%d u8Interrupt=%d (%#x) rc=%Rrc\n", pVCpu->idCpu, u8Interrupt, u8Interrupt, rc));
634 if (RT_SUCCESS(rc))
635 {
636 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
637
638 rc = TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT);
639 AssertRCSuccess(rc);
640 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
641 return rc;
642 }
643 }
644 }
645 /*
646 * SMI is not implemented yet, at least not here.
647 */
648 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_SMI))
649 {
650 return VINF_EM_HALT;
651 }
652 /*
653 * NMI.
654 */
655 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))
656 {
657 if (enmInterruptibility < CPUMINTERRUPTIBILITY_NMI_INHIBIT)
658 {
659 /** @todo later. */
660 return VINF_EM_HALT;
661 }
662 }
663 /*
664 * Nested-guest virtual interrupt.
665 */
666 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
667 {
668 if (enmInterruptibility < CPUMINTERRUPTIBILITY_VIRT_INT_DISABLED)
669 {
670 /** @todo NSTVMX: NSTSVM: Remember, we might have to check and perform VM-exits
671 * here before injecting the virtual interrupt. See emR3ForcedActions
672 * for details. */
673 return VINF_EM_HALT;
674 }
675 }
676
677 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UNHALT))
678 {
679 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
680 return VINF_SUCCESS;
681 }
682 if (uMWait > 1)
683 {
684 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExec);
685 return VINF_SUCCESS;
686 }
687
688 return VINF_EM_HALT;
689}
690
691
692/**
693 * This does one round of vmR3HaltGlobal1Halt().
694 *
695 * The rationale here is that we'll reduce latency in interrupt situations if we
696 * don't go to ring-3 immediately on a VINF_EM_HALT (guest executed HLT or
697 * MWAIT), but do one round of blocking here instead and hope the interrupt is
698 * raised in the meanwhile.
699 *
700 * If we go to ring-3 we'll quit the inner HM/NEM loop in EM and end up in the
701 * outer loop, which will then call VMR3WaitHalted() and that in turn will do a
702 * ring-0 call (unless we're too close to a timer event). When the interrupt
703 * wakes us up, we'll return from ring-0 and EM will by instinct do a
704 * rescheduling (because of raw-mode) before it resumes the HM/NEM loop and gets
705 * back to VMMR0EntryFast().
706 *
707 * @returns VINF_SUCCESS or VINF_EM_HALT.
708 * @param pGVM The ring-0 VM structure.
709 * @param pVM The cross context VM structure.
710 * @param pGVCpu The ring-0 virtual CPU structure.
711 * @param pVCpu The cross context virtual CPU structure.
712 *
713 * @todo r=bird: All the blocking/waiting and EMT management should move out of
714 * the VM module, probably to VMM. Then this would be more weird wrt
715 * parameters and statistics.
716 */
717static int vmmR0DoHalt(PGVM pGVM, PVMCC pVM, PGVMCPU pGVCpu, PVMCPUCC pVCpu)
718{
719 Assert(pVCpu == pGVCpu);
720
721 /*
722 * Do spin stat historization.
723 */
724 if (++pVCpu->vmm.s.cR0Halts & 0xff)
725 { /* likely */ }
726 else if (pVCpu->vmm.s.cR0HaltsSucceeded > pVCpu->vmm.s.cR0HaltsToRing3)
727 {
728 pVCpu->vmm.s.cR0HaltsSucceeded = 2;
729 pVCpu->vmm.s.cR0HaltsToRing3 = 0;
730 }
731 else
732 {
733 pVCpu->vmm.s.cR0HaltsSucceeded = 0;
734 pVCpu->vmm.s.cR0HaltsToRing3 = 2;
735 }
736
737 /*
738 * Flags that make us go to ring-3.
739 */
740 uint32_t const fVmFFs = VM_FF_TM_VIRTUAL_SYNC | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA
741 | VM_FF_DBGF | VM_FF_REQUEST | VM_FF_CHECK_VM_STATE
742 | VM_FF_RESET | VM_FF_EMT_RENDEZVOUS | VM_FF_PGM_NEED_HANDY_PAGES
743 | VM_FF_PGM_NO_MEMORY | VM_FF_REM_HANDLER_NOTIFY | VM_FF_DEBUG_SUSPEND;
744 uint64_t const fCpuFFs = VMCPU_FF_TIMER | VMCPU_FF_PDM_CRITSECT | VMCPU_FF_IEM
745 | VMCPU_FF_REQUEST | VMCPU_FF_DBGF | VMCPU_FF_HM_UPDATE_CR3
746 | VMCPU_FF_HM_UPDATE_PAE_PDPES | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
747 | VMCPU_FF_TO_R3 | VMCPU_FF_IOM;
748
749 /*
750 * Check preconditions.
751 */
752 unsigned const uMWait = EMMonitorWaitIsActive(pVCpu);
753 CPUMINTERRUPTIBILITY const enmInterruptibility = CPUMGetGuestInterruptibility(pVCpu);
754 if ( pVCpu->vmm.s.fMayHaltInRing0
755 && !TRPMHasTrap(pVCpu)
756 && ( enmInterruptibility == CPUMINTERRUPTIBILITY_UNRESTRAINED
757 || uMWait > 1))
758 {
759 if ( !VM_FF_IS_ANY_SET(pVM, fVmFFs)
760 && !VMCPU_FF_IS_ANY_SET(pVCpu, fCpuFFs))
761 {
762 /*
763 * Interrupts pending already?
764 */
765 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
766 APICUpdatePendingInterrupts(pVCpu);
767
768 /*
769 * Flags that wake up from the halted state.
770 */
771 uint64_t const fIntMask = VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_NESTED_GUEST
772 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT;
773
774 if (VMCPU_FF_IS_ANY_SET(pVCpu, fIntMask))
775 return vmmR0DoHaltInterrupt(pVCpu, uMWait, enmInterruptibility);
776 ASMNopPause();
777
778 /*
779 * Check out how long till the next timer event.
780 */
781 uint64_t u64Delta;
782 uint64_t u64GipTime = TMTimerPollGIP(pVM, pVCpu, &u64Delta);
783
784 if ( !VM_FF_IS_ANY_SET(pVM, fVmFFs)
785 && !VMCPU_FF_IS_ANY_SET(pVCpu, fCpuFFs))
786 {
787 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
788 APICUpdatePendingInterrupts(pVCpu);
789
790 if (VMCPU_FF_IS_ANY_SET(pVCpu, fIntMask))
791 return vmmR0DoHaltInterrupt(pVCpu, uMWait, enmInterruptibility);
792
793 /*
794 * Wait if there is enough time to the next timer event.
795 */
796 if (u64Delta >= pVCpu->vmm.s.cNsSpinBlockThreshold)
797 {
798 /* If there are few other CPU cores around, we will procrastinate a
799 little before going to sleep, hoping for some device raising an
800 interrupt or similar. Though, the best thing here would be to
801 dynamically adjust the spin count according to its usefulness or
802 something... */
803 if ( pVCpu->vmm.s.cR0HaltsSucceeded > pVCpu->vmm.s.cR0HaltsToRing3
804 && RTMpGetOnlineCount() >= 4)
805 {
806 /** @todo Figure out how we can skip this if it hasn't helped recently...
807 * @bugref{9172#c12} */
808 uint32_t cSpinLoops = 42;
809 while (cSpinLoops-- > 0)
810 {
811 ASMNopPause();
812 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
813 APICUpdatePendingInterrupts(pVCpu);
814 ASMNopPause();
815 if (VM_FF_IS_ANY_SET(pVM, fVmFFs))
816 {
817 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3FromSpin);
818 return VINF_EM_HALT;
819 }
820 ASMNopPause();
821 if (VMCPU_FF_IS_ANY_SET(pVCpu, fCpuFFs))
822 {
823 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltToR3FromSpin);
824 return VINF_EM_HALT;
825 }
826 ASMNopPause();
827 if (VMCPU_FF_IS_ANY_SET(pVCpu, fIntMask))
828 {
829 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExecFromSpin);
830 return vmmR0DoHaltInterrupt(pVCpu, uMWait, enmInterruptibility);
831 }
832 ASMNopPause();
833 }
834 }
835
836 /* Block. We have to set the state to VMCPUSTATE_STARTED_HALTED here so ring-3
837 knows when to notify us (cannot access VMINTUSERPERVMCPU::fWait from here). */
838 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HALTED, VMCPUSTATE_STARTED);
839 uint64_t const u64StartSchedHalt = RTTimeNanoTS();
840 int rc = GVMMR0SchedHalt(pGVM, pVM, pGVCpu, u64GipTime);
841 uint64_t const u64EndSchedHalt = RTTimeNanoTS();
842 uint64_t const cNsElapsedSchedHalt = u64EndSchedHalt - u64StartSchedHalt;
843 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);
844 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->vmm.s.StatR0HaltBlock, cNsElapsedSchedHalt);
845 if ( rc == VINF_SUCCESS
846 || rc == VERR_INTERRUPTED)
847
848 {
849 /* Keep some stats like ring-3 does. */
850 int64_t const cNsOverslept = u64EndSchedHalt - u64GipTime;
851 if (cNsOverslept > 50000)
852 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->vmm.s.StatR0HaltBlockOverslept, cNsOverslept);
853 else if (cNsOverslept < -50000)
854 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->vmm.s.StatR0HaltBlockInsomnia, cNsElapsedSchedHalt);
855 else
856 STAM_REL_PROFILE_ADD_PERIOD(&pVCpu->vmm.s.StatR0HaltBlockOnTime, cNsElapsedSchedHalt);
857
858 /*
859 * Recheck whether we can resume execution or have to go to ring-3.
860 */
861 if ( !VM_FF_IS_ANY_SET(pVM, fVmFFs)
862 && !VMCPU_FF_IS_ANY_SET(pVCpu, fCpuFFs))
863 {
864 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
865 APICUpdatePendingInterrupts(pVCpu);
866 if (VMCPU_FF_IS_ANY_SET(pVCpu, fIntMask))
867 {
868 STAM_REL_COUNTER_INC(&pVCpu->vmm.s.StatR0HaltExecFromBlock);
869 return vmmR0DoHaltInterrupt(pVCpu, uMWait, enmInterruptibility);
870 }
871 }
872 }
873 }
874 }
875 }
876 }
877 return VINF_EM_HALT;
878}
879
880
881/**
882 * VMM ring-0 thread-context callback.
883 *
884 * This does common HM state updating and calls the HM-specific thread-context
885 * callback.
886 *
887 * @param enmEvent The thread-context event.
888 * @param pvUser Opaque pointer to the VMCPU.
889 *
890 * @thread EMT(pvUser)
891 */
892static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
893{
894 PVMCPUCC pVCpu = (PVMCPUCC)pvUser;
895
896 switch (enmEvent)
897 {
898 case RTTHREADCTXEVENT_IN:
899 {
900 /*
901 * Linux may call us with preemption enabled (really!) but technically we
902 * cannot get preempted here, otherwise we end up in an infinite recursion
903 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
904 * ad infinitum). Let's just disable preemption for now...
905 */
906 /** @todo r=bird: I don't believe the above. The linux code is clearly enabling
907 * preemption after doing the callout (one or two functions up the
908 * call chain). */
909 /** @todo r=ramshankar: See @bugref{5313#c30}. */
910 RTTHREADPREEMPTSTATE ParanoidPreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
911 RTThreadPreemptDisable(&ParanoidPreemptState);
912
913 /* We need to update the VCPU <-> host CPU mapping. */
914 RTCPUID idHostCpu;
915 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
916 pVCpu->iHostCpuSet = iHostCpuSet;
917 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
918
919 /* In the very unlikely event that the GIP delta for the CPU we're
920 rescheduled to needs calculating, try to force a return to ring-3.
921 We unfortunately cannot do the measurements right here. */
922 if (RT_UNLIKELY(SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
923 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
924
925 /* Invoke the HM-specific thread-context callback. */
926 HMR0ThreadCtxCallback(enmEvent, pvUser);
927
928 /* Restore preemption. */
929 RTThreadPreemptRestore(&ParanoidPreemptState);
930 break;
931 }
932
933 case RTTHREADCTXEVENT_OUT:
934 {
935 /* Invoke the HM-specific thread-context callback. */
936 HMR0ThreadCtxCallback(enmEvent, pvUser);
937
938 /*
939 * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
940 * have the same host CPU associated with it.
941 */
942 pVCpu->iHostCpuSet = UINT32_MAX;
943 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
944 break;
945 }
946
947 default:
948 /* Invoke the HM-specific thread-context callback. */
949 HMR0ThreadCtxCallback(enmEvent, pvUser);
950 break;
951 }
952}
953
954
955/**
956 * Creates thread switching hook for the current EMT thread.
957 *
958 * This is called by GVMMR0CreateVM and GVMMR0RegisterVCpu. If the host
959 * platform does not implement switcher hooks, no hooks will be created and the
960 * member set to NIL_RTTHREADCTXHOOK.
961 *
962 * @returns VBox status code.
963 * @param pVCpu The cross context virtual CPU structure.
964 * @thread EMT(pVCpu)
965 */
966VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPUCC pVCpu)
967{
968 VMCPU_ASSERT_EMT(pVCpu);
969 Assert(pVCpu->vmm.s.hCtxHook == NIL_RTTHREADCTXHOOK);
970
971#if 1 /* To disable this stuff change to zero. */
972 int rc = RTThreadCtxHookCreate(&pVCpu->vmm.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
973 if (RT_SUCCESS(rc))
974 return rc;
975#else
976 RT_NOREF(vmmR0ThreadCtxCallback);
977 int rc = VERR_NOT_SUPPORTED;
978#endif
979
980 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
981 if (rc == VERR_NOT_SUPPORTED)
982 return VINF_SUCCESS;
983
984 LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
985 return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
986}
987
988
989/**
990 * Destroys the thread switching hook for the specified VCPU.
991 *
992 * @param pVCpu The cross context virtual CPU structure.
993 * @remarks Can be called from any thread.
994 */
995VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPUCC pVCpu)
996{
997 int rc = RTThreadCtxHookDestroy(pVCpu->vmm.s.hCtxHook);
998 AssertRC(rc);
999 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
1000}
1001
1002
1003/**
1004 * Disables the thread switching hook for this VCPU (if we got one).
1005 *
1006 * @param pVCpu The cross context virtual CPU structure.
1007 * @thread EMT(pVCpu)
1008 *
1009 * @remarks This also clears VMCPU::idHostCpu, so the mapping is invalid after
1010 * this call. This means you have to be careful with what you do!
1011 */
1012VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPUCC pVCpu)
1013{
1014 /*
1015 * Clear the VCPU <-> host CPU mapping as we've left HM context.
1016 * @bugref{7726#c19} explains the need for this trick:
1017 *
1018 * hmR0VmxCallRing3Callback/hmR0SvmCallRing3Callback &
1019 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disables context hooks during
1020 * longjmp & normal return to ring-3, which opens a window where we may be
1021 * rescheduled without changing VMCPUID::idHostCpu and cause confusion if
1022 * the CPU starts executing a different EMT. Both functions first disable
1023 * preemption and then call HMR0LeaveCpu, which invalidates idHostCpu, leaving
1024 * an opening for getting preempted.
1025 */
1026 /** @todo Make HM not need this API! Then we could leave the hooks enabled
1027 * all the time. */
1028 /** @todo move this into the context hook disabling if(). */
1029 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1030
1031 /*
1032 * Disable the context hook, if we got one.
1033 */
1034 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1035 {
1036 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1037 int rc = RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
1038 AssertRC(rc);
1039 }
1040}
1041
1042
1043/**
1044 * Internal version of VMMR0ThreadCtxHooksAreRegistered.
1045 *
1046 * @returns true if registered, false otherwise.
1047 * @param pVCpu The cross context virtual CPU structure.
1048 */
1049DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1050{
1051 return RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook);
1052}
1053
1054
1055/**
1056 * Whether thread-context hooks are registered for this VCPU.
1057 *
1058 * @returns true if registered, false otherwise.
1059 * @param pVCpu The cross context virtual CPU structure.
1060 */
1061VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPUCC pVCpu)
1062{
1063 return vmmR0ThreadCtxHookIsEnabled(pVCpu);
1064}
1065
1066
1067#ifdef VBOX_WITH_STATISTICS
1068/**
1069 * Record return code statistics
1070 * @param pVM The cross context VM structure.
1071 * @param pVCpu The cross context virtual CPU structure.
1072 * @param rc The status code.
1073 */
1074static void vmmR0RecordRC(PVMCC pVM, PVMCPUCC pVCpu, int rc)
1075{
1076 /*
1077 * Collect statistics.
1078 */
1079 switch (rc)
1080 {
1081 case VINF_SUCCESS:
1082 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
1083 break;
1084 case VINF_EM_RAW_INTERRUPT:
1085 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
1086 break;
1087 case VINF_EM_RAW_INTERRUPT_HYPER:
1088 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
1089 break;
1090 case VINF_EM_RAW_GUEST_TRAP:
1091 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
1092 break;
1093 case VINF_EM_RAW_RING_SWITCH:
1094 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
1095 break;
1096 case VINF_EM_RAW_RING_SWITCH_INT:
1097 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
1098 break;
1099 case VINF_EM_RAW_STALE_SELECTOR:
1100 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
1101 break;
1102 case VINF_EM_RAW_IRET_TRAP:
1103 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
1104 break;
1105 case VINF_IOM_R3_IOPORT_READ:
1106 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
1107 break;
1108 case VINF_IOM_R3_IOPORT_WRITE:
1109 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
1110 break;
1111 case VINF_IOM_R3_IOPORT_COMMIT_WRITE:
1112 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOCommitWrite);
1113 break;
1114 case VINF_IOM_R3_MMIO_READ:
1115 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
1116 break;
1117 case VINF_IOM_R3_MMIO_WRITE:
1118 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
1119 break;
1120 case VINF_IOM_R3_MMIO_COMMIT_WRITE:
1121 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOCommitWrite);
1122 break;
1123 case VINF_IOM_R3_MMIO_READ_WRITE:
1124 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
1125 break;
1126 case VINF_PATM_HC_MMIO_PATCH_READ:
1127 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
1128 break;
1129 case VINF_PATM_HC_MMIO_PATCH_WRITE:
1130 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
1131 break;
1132 case VINF_CPUM_R3_MSR_READ:
1133 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
1134 break;
1135 case VINF_CPUM_R3_MSR_WRITE:
1136 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
1137 break;
1138 case VINF_EM_RAW_EMULATE_INSTR:
1139 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
1140 break;
1141 case VINF_PATCH_EMULATE_INSTR:
1142 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
1143 break;
1144 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
1145 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
1146 break;
1147 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
1148 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
1149 break;
1150 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
1151 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
1152 break;
1153 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
1154 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
1155 break;
1156 case VINF_CSAM_PENDING_ACTION:
1157 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
1158 break;
1159 case VINF_PGM_SYNC_CR3:
1160 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
1161 break;
1162 case VINF_PATM_PATCH_INT3:
1163 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
1164 break;
1165 case VINF_PATM_PATCH_TRAP_PF:
1166 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
1167 break;
1168 case VINF_PATM_PATCH_TRAP_GP:
1169 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
1170 break;
1171 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
1172 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
1173 break;
1174 case VINF_EM_RESCHEDULE_REM:
1175 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
1176 break;
1177 case VINF_EM_RAW_TO_R3:
1178 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Total);
1179 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
1180 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
1181 else if (VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
1182 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
1183 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_QUEUES))
1184 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
1185 else if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1186 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
1187 else if (VM_FF_IS_SET(pVM, VM_FF_PDM_DMA))
1188 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
1189 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER))
1190 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
1191 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1192 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
1193 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TO_R3))
1194 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3FF);
1195 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1196 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iem);
1197 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1198 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Iom);
1199 else
1200 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
1201 break;
1202
1203 case VINF_EM_RAW_TIMER_PENDING:
1204 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
1205 break;
1206 case VINF_EM_RAW_INTERRUPT_PENDING:
1207 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
1208 break;
1209 case VINF_VMM_CALL_HOST:
1210 switch (pVCpu->vmm.s.enmCallRing3Operation)
1211 {
1212 case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
1213 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMCritSectEnter);
1214 break;
1215 case VMMCALLRING3_PDM_LOCK:
1216 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
1217 break;
1218 case VMMCALLRING3_PGM_POOL_GROW:
1219 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
1220 break;
1221 case VMMCALLRING3_PGM_LOCK:
1222 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
1223 break;
1224 case VMMCALLRING3_PGM_MAP_CHUNK:
1225 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
1226 break;
1227 case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
1228 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
1229 break;
1230 case VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS:
1231 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallRemReplay);
1232 break;
1233 case VMMCALLRING3_VMM_LOGGER_FLUSH:
1234 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
1235 break;
1236 case VMMCALLRING3_VM_SET_ERROR:
1237 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
1238 break;
1239 case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
1240 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
1241 break;
1242 case VMMCALLRING3_VM_R0_ASSERTION:
1243 default:
1244 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
1245 break;
1246 }
1247 break;
1248 case VINF_PATM_DUPLICATE_FUNCTION:
1249 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
1250 break;
1251 case VINF_PGM_CHANGE_MODE:
1252 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
1253 break;
1254 case VINF_PGM_POOL_FLUSH_PENDING:
1255 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
1256 break;
1257 case VINF_EM_PENDING_REQUEST:
1258 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
1259 break;
1260 case VINF_EM_HM_PATCH_TPR_INSTR:
1261 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
1262 break;
1263 default:
1264 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
1265 break;
1266 }
1267}
1268#endif /* VBOX_WITH_STATISTICS */
1269
1270
1271/**
1272 * The Ring 0 entry point, called by the fast-ioctl path.
1273 *
1274 * @param pGVM The global (ring-0) VM structure.
1275 * @param pVM The cross context VM structure.
1276 * The return code is stored in pVM->vmm.s.iLastGZRc.
1277 * @param idCpu The Virtual CPU ID of the calling EMT.
1278 * @param enmOperation Which operation to execute.
1279 * @remarks Assume called with interrupts _enabled_.
1280 */
1281VMMR0DECL(void) VMMR0EntryFast(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation)
1282{
1283 /*
1284 * Validation.
1285 */
1286 if ( idCpu < pGVM->cCpus
1287 && pGVM->cCpus == pVM->cCpus)
1288 { /*likely*/ }
1289 else
1290 {
1291 SUPR0Printf("VMMR0EntryFast: Bad idCpu=%#x cCpus=%#x/%#x\n", idCpu, pGVM->cCpus, pVM->cCpus);
1292 return;
1293 }
1294
1295 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1296 PVMCPUCC pVCpu = pGVCpu;
1297 RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf();
1298 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
1299 && pVCpu->hNativeThreadR0 == hNativeThread))
1300 { /* likely */ }
1301 else
1302 {
1303 SUPR0Printf("VMMR0EntryFast: Bad thread idCpu=%#x hNativeSelf=%p pGVCpu->hEmt=%p pVCpu->hNativeThreadR0=%p\n",
1304 idCpu, hNativeThread, pGVCpu->hEMT, pVCpu->hNativeThreadR0);
1305 return;
1306 }
1307
1308 /*
1309 * SMAP fun.
1310 */
1311 VMM_CHECK_SMAP_SETUP();
1312 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1313
1314 /*
1315 * Perform requested operation.
1316 */
1317 switch (enmOperation)
1318 {
1319 /*
1320 * Run guest code using the available hardware acceleration technology.
1321 */
1322 case VMMR0_DO_HM_RUN:
1323 {
1324 for (;;) /* hlt loop */
1325 {
1326 /*
1327 * Disable preemption.
1328 */
1329 Assert(!vmmR0ThreadCtxHookIsEnabled(pVCpu));
1330 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1331 RTThreadPreemptDisable(&PreemptState);
1332
1333 /*
1334 * Get the host CPU identifiers, make sure they are valid and that
1335 * we've got a TSC delta for the CPU.
1336 */
1337 RTCPUID idHostCpu;
1338 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1339 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1340 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1341 {
1342 pVCpu->iHostCpuSet = iHostCpuSet;
1343 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1344
1345 /*
1346 * Update the periodic preemption timer if it's active.
1347 */
1348 if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
1349 GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
1350 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1351
1352#ifdef VMM_R0_TOUCH_FPU
1353 /*
1354 * Make sure we've got the FPU state loaded so that we don't need to clear
1355 * CR0.TS and get out of sync with the host kernel when loading the guest
1356 * FPU state. @ref sec_cpum_fpu (CPUM.cpp) and @bugref{4053}.
1357 */
1358 CPUMR0TouchHostFpu();
1359#endif
1360 int rc;
1361 bool fPreemptRestored = false;
1362 if (!HMR0SuspendPending())
1363 {
1364 /*
1365 * Enable the context switching hook.
1366 */
1367 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1368 {
1369 Assert(!RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook));
1370 int rc2 = RTThreadCtxHookEnable(pVCpu->vmm.s.hCtxHook); AssertRC(rc2);
1371 }
1372
1373 /*
1374 * Enter HM context.
1375 */
1376 rc = HMR0Enter(pVCpu);
1377 if (RT_SUCCESS(rc))
1378 {
1379 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
1380
1381 /*
1382 * When preemption hooks are in place, enable preemption now that
1383 * we're in HM context.
1384 */
1385 if (vmmR0ThreadCtxHookIsEnabled(pVCpu))
1386 {
1387 fPreemptRestored = true;
1388 RTThreadPreemptRestore(&PreemptState);
1389 }
1390
1391 /*
1392 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
1393 */
1394 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1395 rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pVM, pVCpu);
1396 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1397
1398 /*
1399 * Assert sanity on the way out. Using manual assertion code here as normal
1400 * assertions are going to panic the host since we're outside the setjmp/longjmp zone.
1401 */
1402 if (RT_UNLIKELY( VMCPU_GET_STATE(pVCpu) != VMCPUSTATE_STARTED_HM
1403 && RT_SUCCESS_NP(rc) && rc != VINF_VMM_CALL_HOST ))
1404 {
1405 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1406 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1407 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pVCpu), VMCPUSTATE_STARTED_HM);
1408 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
1409 }
1410 /** @todo Get rid of this. HM shouldn't disable the context hook. */
1411 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pVCpu)))
1412 {
1413 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1414 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1415 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pVCpu, pVCpu->idCpu, rc);
1416 rc = VERR_INVALID_STATE;
1417 }
1418
1419 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1420 }
1421 STAM_COUNTER_INC(&pVM->vmm.s.StatRunGC);
1422
1423 /*
1424 * Invalidate the host CPU identifiers before we disable the context
1425 * hook / restore preemption.
1426 */
1427 pVCpu->iHostCpuSet = UINT32_MAX;
1428 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1429
1430 /*
1431 * Disable context hooks. Due to unresolved cleanup issues, we
1432 * cannot leave the hooks enabled when we return to ring-3.
1433 *
1434 * Note! At the moment HM may also have disabled the hook
1435 * when we get here, but the IPRT API handles that.
1436 */
1437 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1438 {
1439 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1440 RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
1441 }
1442 }
1443 /*
1444 * The system is about to go into suspend mode; go back to ring 3.
1445 */
1446 else
1447 {
1448 rc = VINF_EM_RAW_INTERRUPT;
1449 pVCpu->iHostCpuSet = UINT32_MAX;
1450 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1451 }
1452
1453 /** @todo When HM stops messing with the context hook state, we'll disable
1454 * preemption again before the RTThreadCtxHookDisable call. */
1455 if (!fPreemptRestored)
1456 RTThreadPreemptRestore(&PreemptState);
1457
1458 pVCpu->vmm.s.iLastGZRc = rc;
1459
1460 /* Fire dtrace probe and collect statistics. */
1461 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1462#ifdef VBOX_WITH_STATISTICS
1463 vmmR0RecordRC(pVM, pVCpu, rc);
1464#endif
1465#if 1
1466 /*
1467 * If this is a halt.
1468 */
1469 if (rc != VINF_EM_HALT)
1470 { /* we're not in a hurry for a HLT, so prefer this path */ }
1471 else
1472 {
1473 pVCpu->vmm.s.iLastGZRc = rc = vmmR0DoHalt(pGVM, pVM, pGVCpu, pVCpu);
1474 if (rc == VINF_SUCCESS)
1475 {
1476 pVCpu->vmm.s.cR0HaltsSucceeded++;
1477 continue;
1478 }
1479 pVCpu->vmm.s.cR0HaltsToRing3++;
1480 }
1481#endif
1482 }
1483 /*
1484 * Invalid CPU set index or TSC delta in need of measuring.
1485 */
1486 else
1487 {
1488 pVCpu->iHostCpuSet = UINT32_MAX;
1489 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1490 RTThreadPreemptRestore(&PreemptState);
1491 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1492 {
1493 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1494 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1495 0 /*default cTries*/);
1496 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1497 pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1498 else
1499 pVCpu->vmm.s.iLastGZRc = rc;
1500 }
1501 else
1502 pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1503 }
1504 break;
1505
1506 } /* halt loop. */
1507 break;
1508 }
1509
1510#ifdef VBOX_WITH_NEM_R0
1511# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
1512 case VMMR0_DO_NEM_RUN:
1513 {
1514 /*
1515 * Setup the longjmp machinery and execute guest code (calls NEMR0RunGuestCode).
1516 */
1517 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1518 int rc = vmmR0CallRing3SetJmp2(&pVCpu->vmm.s.CallRing3JmpBufR0, NEMR0RunGuestCode, pGVM, idCpu);
1519 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1520 STAM_COUNTER_INC(&pVM->vmm.s.StatRunGC);
1521
1522 pVCpu->vmm.s.iLastGZRc = rc;
1523
1524 /*
1525 * Fire dtrace probe and collect statistics.
1526 */
1527 VBOXVMM_R0_VMM_RETURN_TO_RING3_NEM(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1528# ifdef VBOX_WITH_STATISTICS
1529 vmmR0RecordRC(pVM, pVCpu, rc);
1530# endif
1531 break;
1532 }
1533# endif
1534#endif
1535
1536 /*
1537 * For profiling.
1538 */
1539 case VMMR0_DO_NOP:
1540 pVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
1541 break;
1542
1543 /*
1544 * Shouldn't happen.
1545 */
1546 default:
1547 AssertMsgFailed(("%#x\n", enmOperation));
1548 pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
1549 break;
1550 }
1551 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1552}
1553
1554
1555/**
1556 * Validates a session or VM session argument.
1557 *
1558 * @returns true / false accordingly.
1559 * @param pVM The cross context VM structure.
1560 * @param pClaimedSession The session claim to validate.
1561 * @param pSession The session argument.
1562 */
1563DECLINLINE(bool) vmmR0IsValidSession(PVMCC pVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
1564{
1565 /* This must be set! */
1566 if (!pSession)
1567 return false;
1568
1569 /* Only one out of the two. */
1570 if (pVM && pClaimedSession)
1571 return false;
1572 if (pVM)
1573 pClaimedSession = pVM->pSession;
1574 return pClaimedSession == pSession;
1575}
1576
1577
1578/**
1579 * VMMR0EntryEx worker function, either called directly or whenever possible
1580 * called thru a longjmp so we can exit safely on failure.
1581 *
1582 * @returns VBox status code.
1583 * @param pGVM The global (ring-0) VM structure.
1584 * @param pVM The cross context VM structure.
1585 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1586 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't
1587 * @param enmOperation Which operation to execute.
1588 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
1589 * The support driver validates this if it's present.
1590 * @param u64Arg Some simple constant argument.
1591 * @param pSession The session of the caller.
1592 *
1593 * @remarks Assume called with interrupts _enabled_.
1594 */
1595static int vmmR0EntryExWorker(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
1596 PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
1597{
1598 /*
1599 * Validate pGVM, pVM and idCpu for consistency and validity.
1600 */
1601 if ( pGVM != NULL
1602 || pVM != NULL)
1603 {
1604 if (RT_LIKELY( RT_VALID_PTR(pGVM)
1605 && RT_VALID_PTR(pVM)
1606 && ((uintptr_t)pVM & PAGE_OFFSET_MASK) == 0))
1607 { /* likely */ }
1608 else
1609 {
1610 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p and/or pVM=%p! (op=%d)\n", pGVM, pVM, enmOperation);
1611 return VERR_INVALID_POINTER;
1612 }
1613
1614 if (RT_LIKELY(pGVM == pVM))
1615 { /* likely */ }
1616 else
1617 {
1618 SUPR0Printf("vmmR0EntryExWorker: pVM mismatch: got %p, pGVM/pVM=%p\n", pVM, pGVM);
1619 return VERR_INVALID_PARAMETER;
1620 }
1621
1622 if (RT_LIKELY(idCpu == NIL_VMCPUID || idCpu < pGVM->cCpus))
1623 { /* likely */ }
1624 else
1625 {
1626 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu %#x (cCpus=%#x)\n", idCpu, pGVM->cCpus);
1627 return VERR_INVALID_PARAMETER;
1628 }
1629
1630 if (RT_LIKELY( pVM->enmVMState >= VMSTATE_CREATING
1631 && pVM->enmVMState <= VMSTATE_TERMINATED
1632 && pVM->cCpus == pGVM->cCpus
1633 && pVM->pSession == pSession
1634 && pVM->pSelf == pVM))
1635 { /* likely */ }
1636 else
1637 {
1638 SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p:{.enmVMState=%d, .cCpus=%#x(==%#x), .pSession=%p(==%p), .pSelf=%p(==%p)}! (op=%d)\n",
1639 pVM, pVM->enmVMState, pVM->cCpus, pGVM->cCpus, pVM->pSession, pSession, pVM->pSelf, pVM, enmOperation);
1640 return VERR_INVALID_POINTER;
1641 }
1642 }
1643 else if (RT_LIKELY(idCpu == NIL_VMCPUID))
1644 { /* likely */ }
1645 else
1646 {
1647 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
1648 return VERR_INVALID_PARAMETER;
1649 }
1650
1651 /*
1652 * SMAP fun.
1653 */
1654 VMM_CHECK_SMAP_SETUP();
1655 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1656
1657 /*
1658 * Process the request.
1659 */
1660 int rc;
1661 switch (enmOperation)
1662 {
1663 /*
1664 * GVM requests
1665 */
1666 case VMMR0_DO_GVMM_CREATE_VM:
1667 if (pGVM == NULL && pVM == NULL && u64Arg == 0 && idCpu == NIL_VMCPUID)
1668 rc = GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr, pSession);
1669 else
1670 rc = VERR_INVALID_PARAMETER;
1671 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1672 break;
1673
1674 case VMMR0_DO_GVMM_DESTROY_VM:
1675 if (pReqHdr == NULL && u64Arg == 0)
1676 rc = GVMMR0DestroyVM(pGVM, pVM);
1677 else
1678 rc = VERR_INVALID_PARAMETER;
1679 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1680 break;
1681
1682 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1683 if (pGVM != NULL && pVM != NULL)
1684 rc = GVMMR0RegisterVCpu(pGVM, pVM, idCpu);
1685 else
1686 rc = VERR_INVALID_PARAMETER;
1687 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1688 break;
1689
1690 case VMMR0_DO_GVMM_DEREGISTER_VMCPU:
1691 if (pGVM != NULL && pVM != NULL)
1692 rc = GVMMR0DeregisterVCpu(pGVM, pVM, idCpu);
1693 else
1694 rc = VERR_INVALID_PARAMETER;
1695 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1696 break;
1697
1698 case VMMR0_DO_GVMM_SCHED_HALT:
1699 if (pReqHdr)
1700 return VERR_INVALID_PARAMETER;
1701 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1702 rc = GVMMR0SchedHaltReq(pGVM, pVM, idCpu, u64Arg);
1703 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1704 break;
1705
1706 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
1707 if (pReqHdr || u64Arg)
1708 return VERR_INVALID_PARAMETER;
1709 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1710 rc = GVMMR0SchedWakeUp(pGVM, pVM, idCpu);
1711 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1712 break;
1713
1714 case VMMR0_DO_GVMM_SCHED_POKE:
1715 if (pReqHdr || u64Arg)
1716 return VERR_INVALID_PARAMETER;
1717 rc = GVMMR0SchedPoke(pGVM, pVM, idCpu);
1718 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1719 break;
1720
1721 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
1722 if (u64Arg)
1723 return VERR_INVALID_PARAMETER;
1724 rc = GVMMR0SchedWakeUpAndPokeCpusReq(pGVM, pVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
1725 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1726 break;
1727
1728 case VMMR0_DO_GVMM_SCHED_POLL:
1729 if (pReqHdr || u64Arg > 1)
1730 return VERR_INVALID_PARAMETER;
1731 rc = GVMMR0SchedPoll(pGVM, pVM, idCpu, !!u64Arg);
1732 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1733 break;
1734
1735 case VMMR0_DO_GVMM_QUERY_STATISTICS:
1736 if (u64Arg)
1737 return VERR_INVALID_PARAMETER;
1738 rc = GVMMR0QueryStatisticsReq(pGVM, pVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr, pSession);
1739 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1740 break;
1741
1742 case VMMR0_DO_GVMM_RESET_STATISTICS:
1743 if (u64Arg)
1744 return VERR_INVALID_PARAMETER;
1745 rc = GVMMR0ResetStatisticsReq(pGVM, pVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr, pSession);
1746 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1747 break;
1748
1749 /*
1750 * Initialize the R0 part of a VM instance.
1751 */
1752 case VMMR0_DO_VMMR0_INIT:
1753 rc = vmmR0InitVM(pGVM, pVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
1754 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1755 break;
1756
1757 /*
1758 * Does EMT specific ring-0 init.
1759 */
1760 case VMMR0_DO_VMMR0_INIT_EMT:
1761 rc = vmmR0InitVMEmt(pGVM, pVM, idCpu);
1762 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1763 break;
1764
1765 /*
1766 * Terminate the R0 part of a VM instance.
1767 */
1768 case VMMR0_DO_VMMR0_TERM:
1769 rc = VMMR0TermVM(pGVM, pVM, 0 /*idCpu*/);
1770 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1771 break;
1772
1773 /*
1774 * Attempt to enable hm mode and check the current setting.
1775 */
1776 case VMMR0_DO_HM_ENABLE:
1777 rc = HMR0EnableAllCpus(pVM);
1778 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1779 break;
1780
1781 /*
1782         * Set up the hardware-accelerated session.
1783 */
1784 case VMMR0_DO_HM_SETUP_VM:
1785 rc = HMR0SetupVM(pVM);
1786 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1787 break;
1788
1789 /*
1790 * PGM wrappers.
1791 */
1792 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
1793 if (idCpu == NIL_VMCPUID)
1794 return VERR_INVALID_CPU_ID;
1795 rc = PGMR0PhysAllocateHandyPages(pGVM, pVM, idCpu);
1796 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1797 break;
1798
1799 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
1800 if (idCpu == NIL_VMCPUID)
1801 return VERR_INVALID_CPU_ID;
1802 rc = PGMR0PhysFlushHandyPages(pGVM, pVM, idCpu);
1803 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1804 break;
1805
1806 case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
1807 if (idCpu == NIL_VMCPUID)
1808 return VERR_INVALID_CPU_ID;
1809 rc = PGMR0PhysAllocateLargeHandyPage(pGVM, pVM, idCpu);
1810 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1811 break;
1812
1813 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
1814 if (idCpu != 0)
1815 return VERR_INVALID_CPU_ID;
1816 rc = PGMR0PhysSetupIoMmu(pGVM, pVM);
1817 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1818 break;
1819
1820 /*
1821 * GMM wrappers.
1822 */
1823 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1824 if (u64Arg)
1825 return VERR_INVALID_PARAMETER;
1826 rc = GMMR0InitialReservationReq(pGVM, pVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
1827 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1828 break;
1829
1830 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1831 if (u64Arg)
1832 return VERR_INVALID_PARAMETER;
1833 rc = GMMR0UpdateReservationReq(pGVM, pVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
1834 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1835 break;
1836
1837 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1838 if (u64Arg)
1839 return VERR_INVALID_PARAMETER;
1840 rc = GMMR0AllocatePagesReq(pGVM, pVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
1841 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1842 break;
1843
1844 case VMMR0_DO_GMM_FREE_PAGES:
1845 if (u64Arg)
1846 return VERR_INVALID_PARAMETER;
1847 rc = GMMR0FreePagesReq(pGVM, pVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
1848 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1849 break;
1850
1851 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
1852 if (u64Arg)
1853 return VERR_INVALID_PARAMETER;
1854 rc = GMMR0FreeLargePageReq(pGVM, pVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
1855 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1856 break;
1857
1858 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
1859 if (u64Arg)
1860 return VERR_INVALID_PARAMETER;
1861 rc = GMMR0QueryHypervisorMemoryStatsReq((PGMMMEMSTATSREQ)pReqHdr);
1862 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1863 break;
1864
1865 case VMMR0_DO_GMM_QUERY_MEM_STATS:
1866 if (idCpu == NIL_VMCPUID)
1867 return VERR_INVALID_CPU_ID;
1868 if (u64Arg)
1869 return VERR_INVALID_PARAMETER;
1870 rc = GMMR0QueryMemoryStatsReq(pGVM, pVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
1871 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1872 break;
1873
1874 case VMMR0_DO_GMM_BALLOONED_PAGES:
1875 if (u64Arg)
1876 return VERR_INVALID_PARAMETER;
1877 rc = GMMR0BalloonedPagesReq(pGVM, pVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
1878 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1879 break;
1880
1881 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
1882 if (u64Arg)
1883 return VERR_INVALID_PARAMETER;
1884 rc = GMMR0MapUnmapChunkReq(pGVM, pVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
1885 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1886 break;
1887
1888 case VMMR0_DO_GMM_SEED_CHUNK:
1889 if (pReqHdr)
1890 return VERR_INVALID_PARAMETER;
1891 rc = GMMR0SeedChunk(pGVM, pVM, idCpu, (RTR3PTR)u64Arg);
1892 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1893 break;
1894
1895 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
1896 if (idCpu == NIL_VMCPUID)
1897 return VERR_INVALID_CPU_ID;
1898 if (u64Arg)
1899 return VERR_INVALID_PARAMETER;
1900 rc = GMMR0RegisterSharedModuleReq(pGVM, pVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
1901 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1902 break;
1903
1904 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
1905 if (idCpu == NIL_VMCPUID)
1906 return VERR_INVALID_CPU_ID;
1907 if (u64Arg)
1908 return VERR_INVALID_PARAMETER;
1909 rc = GMMR0UnregisterSharedModuleReq(pGVM, pVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
1910 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1911 break;
1912
1913 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
1914 if (idCpu == NIL_VMCPUID)
1915 return VERR_INVALID_CPU_ID;
1916 if ( u64Arg
1917 || pReqHdr)
1918 return VERR_INVALID_PARAMETER;
1919 rc = GMMR0ResetSharedModules(pGVM, pVM, idCpu);
1920 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1921 break;
1922
1923#ifdef VBOX_WITH_PAGE_SHARING
1924 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
1925 {
1926 if (idCpu == NIL_VMCPUID)
1927 return VERR_INVALID_CPU_ID;
1928 if ( u64Arg
1929 || pReqHdr)
1930 return VERR_INVALID_PARAMETER;
1931 rc = GMMR0CheckSharedModules(pGVM, pVM, idCpu);
1932 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1933 break;
1934 }
1935#endif
1936
1937#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
1938 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
1939 if (u64Arg)
1940 return VERR_INVALID_PARAMETER;
1941 rc = GMMR0FindDuplicatePageReq(pGVM, pVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
1942 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1943 break;
1944#endif
1945
1946 case VMMR0_DO_GMM_QUERY_STATISTICS:
1947 if (u64Arg)
1948 return VERR_INVALID_PARAMETER;
1949 rc = GMMR0QueryStatisticsReq(pGVM, pVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
1950 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1951 break;
1952
1953 case VMMR0_DO_GMM_RESET_STATISTICS:
1954 if (u64Arg)
1955 return VERR_INVALID_PARAMETER;
1956 rc = GMMR0ResetStatisticsReq(pGVM, pVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
1957 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1958 break;
1959
1960 /*
1961 * A quick GCFGM mock-up.
1962 */
1963 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
1964 case VMMR0_DO_GCFGM_SET_VALUE:
1965 case VMMR0_DO_GCFGM_QUERY_VALUE:
1966 {
1967 if (pGVM || pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1968 return VERR_INVALID_PARAMETER;
1969 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
1970 if (pReq->Hdr.cbReq != sizeof(*pReq))
1971 return VERR_INVALID_PARAMETER;
1972 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
1973 {
1974 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1975 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1976 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1977 }
1978 else
1979 {
1980 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1981 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1982 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1983 }
1984 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1985 break;
1986 }
1987
1988 /*
1989 * PDM Wrappers.
1990 */
1991 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
1992 {
1993 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1994 return VERR_INVALID_PARAMETER;
1995 rc = PDMR0DriverCallReqHandler(pGVM, pVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
1996 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1997 break;
1998 }
1999
2000 case VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER:
2001 {
2002 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
2003 return VERR_INVALID_PARAMETER;
2004 rc = PDMR0DeviceCallReqHandler(pGVM, pVM, (PPDMDEVICECALLREQHANDLERREQ)pReqHdr);
2005 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2006 break;
2007 }
2008
2009 /*
2010 * Requests to the internal networking service.
2011 */
2012 case VMMR0_DO_INTNET_OPEN:
2013 {
2014 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
2015 if (u64Arg || !pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
2016 return VERR_INVALID_PARAMETER;
2017 rc = IntNetR0OpenReq(pSession, pReq);
2018 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2019 break;
2020 }
2021
2022 case VMMR0_DO_INTNET_IF_CLOSE:
2023 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2024 return VERR_INVALID_PARAMETER;
2025 rc = IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
2026 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2027 break;
2028
2029
2030 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
2031 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2032 return VERR_INVALID_PARAMETER;
2033 rc = IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
2034 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2035 break;
2036
2037 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
2038 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2039 return VERR_INVALID_PARAMETER;
2040 rc = IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
2041 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2042 break;
2043
2044 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
2045 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2046 return VERR_INVALID_PARAMETER;
2047 rc = IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
2048 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2049 break;
2050
2051 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
2052 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2053 return VERR_INVALID_PARAMETER;
2054 rc = IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
2055 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2056 break;
2057
2058 case VMMR0_DO_INTNET_IF_SEND:
2059 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2060 return VERR_INVALID_PARAMETER;
2061 rc = IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
2062 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2063 break;
2064
2065 case VMMR0_DO_INTNET_IF_WAIT:
2066 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2067 return VERR_INVALID_PARAMETER;
2068 rc = IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
2069 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2070 break;
2071
2072 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
2073 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2074 return VERR_INVALID_PARAMETER;
2075 rc = IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
2076 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2077 break;
2078
2079#ifdef VBOX_WITH_PCI_PASSTHROUGH
2080 /*
2081 * Requests to host PCI driver service.
2082 */
2083 case VMMR0_DO_PCIRAW_REQ:
2084 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
2085 return VERR_INVALID_PARAMETER;
2086 rc = PciRawR0ProcessReq(pGVM, pVM, pSession, (PPCIRAWSENDREQ)pReqHdr);
2087 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2088 break;
2089#endif
2090
2091 /*
2092 * NEM requests.
2093 */
2094#ifdef VBOX_WITH_NEM_R0
2095# if defined(RT_ARCH_AMD64) && defined(RT_OS_WINDOWS)
2096 case VMMR0_DO_NEM_INIT_VM:
2097 if (u64Arg || pReqHdr || idCpu != 0)
2098 return VERR_INVALID_PARAMETER;
2099 rc = NEMR0InitVM(pGVM, pVM);
2100 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2101 break;
2102
2103 case VMMR0_DO_NEM_INIT_VM_PART_2:
2104 if (u64Arg || pReqHdr || idCpu != 0)
2105 return VERR_INVALID_PARAMETER;
2106 rc = NEMR0InitVMPart2(pGVM, pVM);
2107 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2108 break;
2109
2110 case VMMR0_DO_NEM_MAP_PAGES:
2111 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2112 return VERR_INVALID_PARAMETER;
2113 rc = NEMR0MapPages(pGVM, pVM, idCpu);
2114 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2115 break;
2116
2117 case VMMR0_DO_NEM_UNMAP_PAGES:
2118 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2119 return VERR_INVALID_PARAMETER;
2120 rc = NEMR0UnmapPages(pGVM, pVM, idCpu);
2121 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2122 break;
2123
2124 case VMMR0_DO_NEM_EXPORT_STATE:
2125 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2126 return VERR_INVALID_PARAMETER;
2127 rc = NEMR0ExportState(pGVM, pVM, idCpu);
2128 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2129 break;
2130
2131 case VMMR0_DO_NEM_IMPORT_STATE:
2132 if (pReqHdr || idCpu == NIL_VMCPUID)
2133 return VERR_INVALID_PARAMETER;
2134 rc = NEMR0ImportState(pGVM, pVM, idCpu, u64Arg);
2135 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2136 break;
2137
2138 case VMMR0_DO_NEM_QUERY_CPU_TICK:
2139 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID)
2140 return VERR_INVALID_PARAMETER;
2141 rc = NEMR0QueryCpuTick(pGVM, pVM, idCpu);
2142 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2143 break;
2144
2145 case VMMR0_DO_NEM_RESUME_CPU_TICK_ON_ALL:
2146 if (pReqHdr || idCpu == NIL_VMCPUID)
2147 return VERR_INVALID_PARAMETER;
2148 rc = NEMR0ResumeCpuTickOnAll(pGVM, pVM, idCpu, u64Arg);
2149 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2150 break;
2151
2152 case VMMR0_DO_NEM_UPDATE_STATISTICS:
2153 if (u64Arg || pReqHdr)
2154 return VERR_INVALID_PARAMETER;
2155 rc = NEMR0UpdateStatistics(pGVM, pVM, idCpu);
2156 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2157 break;
2158
2159# if 1 && defined(DEBUG_bird)
2160 case VMMR0_DO_NEM_EXPERIMENT:
2161 if (pReqHdr)
2162 return VERR_INVALID_PARAMETER;
2163 rc = NEMR0DoExperiment(pGVM, pVM, idCpu, u64Arg);
2164 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
2165 break;
2166# endif
2167# endif
2168#endif
2169
2170 /*
2171 * For profiling.
2172 */
2173 case VMMR0_DO_NOP:
2174 case VMMR0_DO_SLOW_NOP:
2175 return VINF_SUCCESS;
2176
2177 /*
2178 * For testing Ring-0 APIs invoked in this environment.
2179 */
2180 case VMMR0_DO_TESTS:
2181 /** @todo make new test */
2182 return VINF_SUCCESS;
2183
2184 default:
2185 /*
2186             * We're returning VERR_NOT_SUPPORTED here so that we've got something other
2187             * than -1, which the interrupt gate glue code might return.
2188 */
2189 Log(("operation %#x is not supported\n", enmOperation));
2190 return VERR_NOT_SUPPORTED;
2191 }
2192 return rc;
2193}
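/*
 * The request cases above all follow the same shape: reject the simple
 * arguments the operation does not use, hand the request over to the owning
 * component, and re-check the AC flag afterwards.  A minimal sketch of that
 * shape (VMMR0_DO_EXAMPLE, ExampleR0DoReq and PEXAMPLEREQ are hypothetical
 * names used for illustration only, not part of the real API):
 *
 *     case VMMR0_DO_EXAMPLE:
 *         if (u64Arg || idCpu != NIL_VMCPUID)
 *             return VERR_INVALID_PARAMETER;
 *         rc = ExampleR0DoReq(pGVM, pVM, (PEXAMPLEREQ)pReqHdr);
 *         VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
 *         break;
 */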
2194
2195
2196/**
2197 * Argument for vmmR0EntryExWrapper containing the arguments for VMMR0EntryEx.
2198 */
2199typedef struct VMMR0ENTRYEXARGS
2200{
2201 PGVM pGVM;
2202 PVMCC pVM;
2203 VMCPUID idCpu;
2204 VMMR0OPERATION enmOperation;
2205 PSUPVMMR0REQHDR pReq;
2206 uint64_t u64Arg;
2207 PSUPDRVSESSION pSession;
2208} VMMR0ENTRYEXARGS;
2209/** Pointer to a vmmR0EntryExWrapper argument package. */
2210typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;
2211
2212/**
2213 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
2214 *
2215 * @returns VBox status code.
2216 * @param pvArgs The argument package
2217 */
2218static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
2219{
2220 return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pGVM,
2221 ((PVMMR0ENTRYEXARGS)pvArgs)->pVM,
2222 ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
2223 ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
2224 ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
2225 ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
2226 ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
2227}
2228
2229
2230/**
2231 * The Ring 0 entry point, called by the support library (SUP).
2232 *
2233 * @returns VBox status code.
2234 * @param pGVM The global (ring-0) VM structure.
2235 * @param pVM The cross context VM structure.
2236 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
2237 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
2238 * @param enmOperation Which operation to execute.
2239 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
2240 * @param u64Arg Some simple constant argument.
2241 * @param pSession The session of the caller.
2242 * @remarks Assume called with interrupts _enabled_.
2243 */
2244VMMR0DECL(int) VMMR0EntryEx(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation,
2245 PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
2246{
2247 /*
2248 * Requests that should only happen on the EMT thread will be
2249 * wrapped in a setjmp so we can assert without causing trouble.
2250 */
2251 if ( pVM != NULL
2252 && pGVM != NULL
2253 && idCpu < pGVM->cCpus
2254 && pVM->pSession == pSession
2255 && pVM->pSelf != NULL)
2256 {
2257 switch (enmOperation)
2258 {
2259 /* These might/will be called before VMMR3Init. */
2260 case VMMR0_DO_GMM_INITIAL_RESERVATION:
2261 case VMMR0_DO_GMM_UPDATE_RESERVATION:
2262 case VMMR0_DO_GMM_ALLOCATE_PAGES:
2263 case VMMR0_DO_GMM_FREE_PAGES:
2264 case VMMR0_DO_GMM_BALLOONED_PAGES:
2265 /* On the mac we might not have a valid jmp buf, so check these as well. */
2266 case VMMR0_DO_VMMR0_INIT:
2267 case VMMR0_DO_VMMR0_TERM:
2268 {
2269 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2270 PVMCPUCC pVCpu = pGVCpu;
2271 RTNATIVETHREAD hNativeThread = RTThreadNativeSelf();
2272 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread
2273 && pVCpu->hNativeThreadR0 == hNativeThread))
2274 {
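                    /* No saved stack means the ring-3 long jump machinery is not set
                       up yet (early init/term, see the mac note above); fall out of
                       the switch and take the direct vmmR0EntryExWorker call below. */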
2275 if (!pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
2276 break;
2277
2278 /** @todo validate this EMT claim... GVM knows. */
2279 VMMR0ENTRYEXARGS Args;
2280 Args.pGVM = pGVM;
2281 Args.pVM = pVM;
2282 Args.idCpu = idCpu;
2283 Args.enmOperation = enmOperation;
2284 Args.pReq = pReq;
2285 Args.u64Arg = u64Arg;
2286 Args.pSession = pSession;
2287 return vmmR0CallRing3SetJmpEx(&pVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, &Args);
2288 }
2289 return VERR_VM_THREAD_NOT_EMT;
2290 }
2291
2292 default:
2293 break;
2294 }
2295 }
2296 return vmmR0EntryExWorker(pGVM, pVM, idCpu, enmOperation, pReq, u64Arg, pSession);
2297}
2298
2299
2300/**
2301 * Checks whether we've armed the ring-0 long jump machinery.
2302 *
2303 * @returns @c true / @c false
2304 * @param pVCpu The cross context virtual CPU structure.
2305 * @thread EMT
2306 * @sa VMMIsLongJumpArmed
2307 */
2308VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPUCC pVCpu)
2309{
2310#ifdef RT_ARCH_X86
2311 return pVCpu->vmm.s.CallRing3JmpBufR0.eip
2312 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2313#else
2314 return pVCpu->vmm.s.CallRing3JmpBufR0.rip
2315 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2316#endif
2317}
2318
2319
2320/**
2321 * Checks whether we've done a ring-3 long jump.
2322 *
2323 * @returns @c true / @c false
2324 * @param pVCpu The cross context virtual CPU structure.
2325 * @thread EMT
2326 */
2327VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPUCC pVCpu)
2328{
2329 return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2330}
2331
2332
2333/**
2334 * Internal R0 logger worker: Flush logger.
2335 *
2336 * @param pLogger The logger instance to flush.
2337 * @remark This function must be exported!
2338 */
2339VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
2340{
2341#ifdef LOG_ENABLED
2342 /*
2343 * Convert the pLogger into a VM handle and 'call' back to Ring-3.
2344     * (This code is a bit paranoid.)
2345 */
2346 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_UOFFSETOF(VMMR0LOGGER, Logger));
2347 if ( !VALID_PTR(pR0Logger)
2348 || !VALID_PTR(pR0Logger + 1)
2349 || pLogger->u32Magic != RTLOGGER_MAGIC)
2350 {
2351# ifdef DEBUG
2352 SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
2353# endif
2354 return;
2355 }
2356 if (pR0Logger->fFlushingDisabled)
2357 return; /* quietly */
2358
2359 PVMCC pVM = pR0Logger->pVM;
2360 if ( !VALID_PTR(pVM)
2361 || pVM->pSelf != pVM)
2362 {
2363# ifdef DEBUG
2364 SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pSelf=%p! pLogger=%p\n", pVM, pVM->pSelf, pLogger);
2365# endif
2366 return;
2367 }
2368
2369 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2370 if (pVCpu)
2371 {
2372 /*
2373 * Check that the jump buffer is armed.
2374 */
2375# ifdef RT_ARCH_X86
2376 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.eip
2377 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2378# else
2379 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.rip
2380 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2381# endif
2382 {
2383# ifdef DEBUG
2384 SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
2385# endif
2386 return;
2387 }
2388 VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
2389 }
2390# ifdef DEBUG
2391 else
2392 SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
2393# endif
2394#else
2395 NOREF(pLogger);
2396#endif /* LOG_ENABLED */
2397}
2398
2399#ifdef LOG_ENABLED
2400
2401/**
2402 * Disables flushing of the ring-0 debug log.
2403 *
2404 * @param pVCpu The cross context virtual CPU structure.
2405 */
2406VMMR0_INT_DECL(void) VMMR0LogFlushDisable(PVMCPUCC pVCpu)
2407{
2408 if (pVCpu->vmm.s.pR0LoggerR0)
2409 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
2410 if (pVCpu->vmm.s.pR0RelLoggerR0)
2411 pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled = true;
2412}
2413
2414
2415/**
2416 * Enables flushing of the ring-0 debug log.
2417 *
2418 * @param pVCpu The cross context virtual CPU structure.
2419 */
2420VMMR0_INT_DECL(void) VMMR0LogFlushEnable(PVMCPUCC pVCpu)
2421{
2422 if (pVCpu->vmm.s.pR0LoggerR0)
2423 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
2424 if (pVCpu->vmm.s.pR0RelLoggerR0)
2425 pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled = false;
2426}
2427
2428
2429/**
2430 * Checks if log flushing is disabled or not.
2431 *
2432 * @param pVCpu The cross context virtual CPU structure.
2433 */
2434VMMR0_INT_DECL(bool) VMMR0IsLogFlushDisabled(PVMCPUCC pVCpu)
2435{
2436 if (pVCpu->vmm.s.pR0LoggerR0)
2437 return pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled;
2438 if (pVCpu->vmm.s.pR0RelLoggerR0)
2439 return pVCpu->vmm.s.pR0RelLoggerR0->fFlushingDisabled;
2440 return true;
2441}
2442
2443#endif /* LOG_ENABLED */
2444
2445/**
2446 * Override RTLogRelGetDefaultInstanceEx so we can do LogRel to VBox.log from EMTs in ring-0.
2447 */
2448DECLEXPORT(PRTLOGGER) RTLogRelGetDefaultInstanceEx(uint32_t fFlagsAndGroup)
2449{
2450 PGVMCPU pGVCpu = GVMMR0GetGVCpuByEMT(NIL_RTNATIVETHREAD);
2451 if (pGVCpu)
2452 {
2453 PVMCPUCC pVCpu = pGVCpu;
2454 if (RT_VALID_PTR(pVCpu))
2455 {
2456 PVMMR0LOGGER pVmmLogger = pVCpu->vmm.s.pR0RelLoggerR0;
2457 if (RT_VALID_PTR(pVmmLogger))
2458 {
2459 if ( pVmmLogger->fCreated
2460 && pVmmLogger->pVM == pGVCpu->pGVM)
2461 {
2462 if (pVmmLogger->Logger.fFlags & RTLOGFLAGS_DISABLED)
2463 return NULL;
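                    /* Unless the caller passed UINT16_MAX as the group, only hand out
                       the logger when that group has RTLOGGRPFLAGS_ENABLED and all of
                       the requested flags set. */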
2464 uint16_t const fFlags = RT_LO_U16(fFlagsAndGroup);
2465 uint16_t const iGroup = RT_HI_U16(fFlagsAndGroup);
2466 if ( iGroup != UINT16_MAX
2467 && ( ( pVmmLogger->Logger.afGroups[iGroup < pVmmLogger->Logger.cGroups ? iGroup : 0]
2468 & (fFlags | (uint32_t)RTLOGGRPFLAGS_ENABLED))
2469 != (fFlags | (uint32_t)RTLOGGRPFLAGS_ENABLED)))
2470 return NULL;
2471 return &pVmmLogger->Logger;
2472 }
2473 }
2474 }
2475 }
2476 return SUPR0GetDefaultLogRelInstanceEx(fFlagsAndGroup);
2477}
2478
2479
2480/**
2481 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
2482 *
2483 * @returns true if the breakpoint should be hit, false if it should be ignored.
2484 */
2485DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
2486{
2487#if 0
2488 return true;
2489#else
2490 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2491 if (pVM)
2492 {
2493 PVMCPUCC pVCpu = VMMGetCpu(pVM);
2494
2495 if (pVCpu)
2496 {
2497#ifdef RT_ARCH_X86
2498 if ( pVCpu->vmm.s.CallRing3JmpBufR0.eip
2499 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2500#else
2501 if ( pVCpu->vmm.s.CallRing3JmpBufR0.rip
2502 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2503#endif
2504 {
2505 int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
2506 return RT_FAILURE_NP(rc);
2507 }
2508 }
2509 }
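    /* No EMT with an armed jump buffer, so this cannot be pushed to ring-3:
       hit the breakpoint on Linux hosts, ignore the assertion elsewhere. */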
2510#ifdef RT_OS_LINUX
2511 return true;
2512#else
2513 return false;
2514#endif
2515#endif
2516}
2517
2518
2519/**
2520 * Override this so we can push it up to ring-3.
2521 *
2522 * @param pszExpr Expression. Can be NULL.
2523 * @param uLine Location line number.
2524 * @param pszFile Location file name.
2525 * @param pszFunction Location function name.
2526 */
2527DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
2528{
2529 /*
2530 * To the log.
2531 */
2532 LogAlways(("\n!!R0-Assertion Failed!!\n"
2533 "Expression: %s\n"
2534 "Location : %s(%d) %s\n",
2535 pszExpr, pszFile, uLine, pszFunction));
2536
2537 /*
2538 * To the global VMM buffer.
2539 */
2540 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2541 if (pVM)
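        /* The "%.*s" precision limits the expression to roughly three quarters of
           the buffer so the location line below always fits. */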
2542 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
2543 "\n!!R0-Assertion Failed!!\n"
2544 "Expression: %.*s\n"
2545 "Location : %s(%d) %s\n",
2546 sizeof(pVM->vmm.s.szRing0AssertMsg1) / 4 * 3, pszExpr,
2547 pszFile, uLine, pszFunction);
2548
2549 /*
2550 * Continue the normal way.
2551 */
2552 RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
2553}
2554
2555
2556/**
2557 * Callback for RTLogFormatV which writes to the ring-3 log port.
2558 * See PFNLOGOUTPUT() for details.
2559 */
2560static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
2561{
2562 for (size_t i = 0; i < cbChars; i++)
2563 {
2564 LogAlways(("%c", pachChars[i])); NOREF(pachChars);
2565 }
2566
2567 NOREF(pv);
2568 return cbChars;
2569}
2570
2571
2572/**
2573 * Override this so we can push it up to ring-3.
2574 *
2575 * @param pszFormat The format string.
2576 * @param va Arguments.
2577 */
2578DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
2579{
2580 va_list vaCopy;
2581
2582 /*
2583 * Push the message to the loggers.
2584 */
2585 PRTLOGGER pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
2586 if (pLog)
2587 {
2588 va_copy(vaCopy, va);
2589 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2590 va_end(vaCopy);
2591 }
2592 pLog = RTLogRelGetDefaultInstance();
2593 if (pLog)
2594 {
2595 va_copy(vaCopy, va);
2596 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2597 va_end(vaCopy);
2598 }
2599
2600 /*
2601 * Push it to the global VMM buffer.
2602 */
2603 PVMCC pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2604 if (pVM)
2605 {
2606 va_copy(vaCopy, va);
2607 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
2608 va_end(vaCopy);
2609 }
2610
2611 /*
2612 * Continue the normal way.
2613 */
2614 RTAssertMsg2V(pszFormat, va);
2615}
2616