vbox的更動 80346 路徑 trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
- 時間戳記:
- 2019-8-19 下午07:36:29 (6 年 以前)
- svn:sync-xref-src-repo-rev:
- 132811
- 檔案:
-
- 修改 1 筆資料
圖例:
- 未更動
- 新增
- 刪除
-
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
r80334 r80346 103 103 } \ 104 104 } while (0) 105 # define VMM_CHECK_SMAP_CHECK2(a_p VM, a_BadExpr) \105 # define VMM_CHECK_SMAP_CHECK2(a_pGVM, a_BadExpr) \ 106 106 do { \ 107 107 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \ … … 110 110 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \ 111 111 { /* likely */ } \ 112 else if (a_pGVM) \ 113 { \ 114 SUPR0BadContext((a_pGVM)->pSession, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \ 115 RTStrPrintf((a_pGVM)->vmm.s.szRing0AssertMsg1, sizeof((a_pGVM)->vmm.s.szRing0AssertMsg1), \ 116 "%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \ 117 a_BadExpr; \ 118 } \ 112 119 else \ 113 120 { \ 114 SUPR0BadContext((a_pVM) ? (a_pVM)->pSession : NULL, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \ 115 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1), \ 116 "%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \ 121 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \ 117 122 a_BadExpr; \ 118 123 } \ … … 120 125 } while (0) 121 126 #else 122 # define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0123 # define VMM_CHECK_SMAP_CHECK(a_BadExpr) NOREF(fKernelFeatures)124 # define VMM_CHECK_SMAP_CHECK2(a_p VM, a_BadExpr)NOREF(fKernelFeatures)127 # define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0 128 # define VMM_CHECK_SMAP_CHECK(a_BadExpr) NOREF(fKernelFeatures) 129 # define VMM_CHECK_SMAP_CHECK2(a_pGVM, a_BadExpr) NOREF(fKernelFeatures) 125 130 #endif 126 131 … … 361 366 * 362 367 * @param pGVM The global (ring-0) VM structure. 363 * @param pVM The cross context VM structure.364 368 * @param uSvnRev The SVN revision of the ring-3 part. 365 369 * @param uBuildType Build type indicator. 
366 370 * @thread EMT(0) 367 371 */ 368 static int vmmR0InitVM(PGVM pGVM, PVMCC pVM,uint32_t uSvnRev, uint32_t uBuildType)372 static int vmmR0InitVM(PGVM pGVM, uint32_t uSvnRev, uint32_t uBuildType) 369 373 { 370 374 VMM_CHECK_SMAP_SETUP(); … … 387 391 } 388 392 389 int rc = GVMMR0ValidateGVMand VMandEMT(pGVM, pVM, 0 /*idCpu*/);393 int rc = GVMMR0ValidateGVMandEMT(pGVM, 0 /*idCpu*/); 390 394 if (RT_FAILURE(rc)) 391 395 return rc; … … 395 399 * Register the EMT R0 logger instance for VCPU 0. 396 400 */ 397 PVMCPUCC pVCpu = VMCC_GET_CPU_0(p VM);401 PVMCPUCC pVCpu = VMCC_GET_CPU_0(pGVM); 398 402 399 403 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0; … … 406 410 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags)); 407 411 408 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)p VM->pSession);412 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession); 409 413 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance())); 410 RTLogSetDefaultInstanceThread(NULL, p VM->pSession);414 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession); 411 415 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance())); 412 416 … … 416 420 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n")); 417 421 418 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)p VM->pSession);422 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession); 419 423 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance())); 420 424 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n"); 421 425 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). 
offScratch=%d\n", pR0Logger->Logger.offScratch)); 422 RTLogSetDefaultInstanceThread(NULL, p VM->pSession);426 RTLogSetDefaultInstanceThread(NULL, pGVM->pSession); 423 427 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance())); 424 428 … … 426 430 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch)); 427 431 428 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)p VM->pSession);432 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession); 429 433 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n"); 430 434 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch)); 431 435 # endif 432 Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, p VM->pSession));433 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)p VM->pSession);436 Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pGVM->pSession)); 437 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession); 434 438 pR0Logger->fRegistered = true; 435 439 } … … 439 443 * Check if the host supports high resolution timers or not. 440 444 */ 441 if ( p VM->vmm.s.fUsePeriodicPreemptionTimers445 if ( pGVM->vmm.s.fUsePeriodicPreemptionTimers 442 446 && !RTTimerCanDoHighResolution()) 443 p VM->vmm.s.fUsePeriodicPreemptionTimers = false;447 pGVM->vmm.s.fUsePeriodicPreemptionTimers = false; 444 448 445 449 /* 446 450 * Initialize the per VM data for GVMM and GMM. 447 451 */ 448 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);452 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 449 453 rc = GVMMR0InitVM(pGVM); 450 // if (RT_SUCCESS(rc))451 // rc = GMMR0InitPerVMData(pVM);452 454 if (RT_SUCCESS(rc)) 453 455 { … … 455 457 * Init HM, CPUM and PGM (Darwin only). 
456 458 */ 457 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);458 rc = HMR0InitVM(p VM);459 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 460 rc = HMR0InitVM(pGVM); 459 461 if (RT_SUCCESS(rc)) 460 VMM_CHECK_SMAP_CHECK2(p VM, rc = VERR_VMM_RING0_ASSERTION); /* CPUR0InitVM will otherwise panic the host */462 VMM_CHECK_SMAP_CHECK2(pGVM, rc = VERR_VMM_RING0_ASSERTION); /* CPUR0InitVM will otherwise panic the host */ 461 463 if (RT_SUCCESS(rc)) 462 464 { 463 rc = CPUMR0InitVM(p VM);465 rc = CPUMR0InitVM(pGVM); 464 466 if (RT_SUCCESS(rc)) 465 467 { 466 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);468 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 467 469 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE 468 rc = PGMR0DynMapInitVM(p VM);470 rc = PGMR0DynMapInitVM(pGVM); 469 471 #endif 470 472 if (RT_SUCCESS(rc)) 471 473 { 472 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);474 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 473 475 rc = EMR0InitVM(pGVM); 474 476 if (RT_SUCCESS(rc)) 475 477 { 476 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);478 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 477 479 #ifdef VBOX_WITH_PCI_PASSTHROUGH 478 rc = PciRawR0InitVM(pGVM , pVM);480 rc = PciRawR0InitVM(pGVM); 479 481 #endif 480 482 if (RT_SUCCESS(rc)) 481 483 { 482 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);483 rc = GIMR0InitVM(p VM);484 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 485 rc = GIMR0InitVM(pGVM); 484 486 if (RT_SUCCESS(rc)) 485 487 { 486 VMM_CHECK_SMAP_CHECK2(p VM, rc = VERR_VMM_RING0_ASSERTION);488 VMM_CHECK_SMAP_CHECK2(pGVM, rc = VERR_VMM_RING0_ASSERTION); 487 489 if (RT_SUCCESS(rc)) 488 490 { … … 492 494 * Collect a bit of info for the VM release log. 
493 495 */ 494 p VM->vmm.s.fIsPreemptPendingApiTrusty = RTThreadPreemptIsPendingTrusty();495 p VM->vmm.s.fIsPreemptPossible = RTThreadPreemptIsPossible();;496 497 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);496 pGVM->vmm.s.fIsPreemptPendingApiTrusty = RTThreadPreemptIsPendingTrusty(); 497 pGVM->vmm.s.fIsPreemptPossible = RTThreadPreemptIsPossible();; 498 499 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 498 500 return rc; 499 501 } 500 502 501 503 /* bail out*/ 502 GIMR0TermVM(p VM);504 GIMR0TermVM(pGVM); 503 505 } 504 506 #ifdef VBOX_WITH_PCI_PASSTHROUGH 505 PciRawR0TermVM(pGVM , pVM);507 PciRawR0TermVM(pGVM); 506 508 #endif 507 509 } … … 509 511 } 510 512 } 511 HMR0TermVM(p VM);513 HMR0TermVM(pGVM); 512 514 } 513 515 } 514 516 515 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)p VM->pSession);517 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession); 516 518 return rc; 517 519 } … … 523 525 * @returns VBox status code. 524 526 * @param pGVM The ring-0 VM structure. 525 * @param pVM The cross context VM structure.526 527 * @param idCpu The EMT that's calling. 527 528 */ 528 static int vmmR0InitVMEmt(PGVM pGVM, PVMCC pVM,VMCPUID idCpu)529 static int vmmR0InitVMEmt(PGVM pGVM, VMCPUID idCpu) 529 530 { 530 531 /* Paranoia (caller checked these already). */ … … 541 542 && !pR0Logger->fRegistered) 542 543 { 543 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)p VM->pSession);544 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pGVM->pSession); 544 545 pR0Logger->fRegistered = true; 545 546 } 546 547 #endif 547 RT_NOREF(pVM);548 548 549 549 return VINF_SUCCESS; … … 562 562 * 563 563 * @param pGVM The global (ring-0) VM structure. 564 * @param pVM The cross context VM structure.565 564 * @param idCpu Set to 0 if EMT(0) or NIL_VMCPUID if session cleanup 566 565 * thread. 567 566 * @thread EMT(0) or session clean up thread. 
568 567 */ 569 VMMR0_INT_DECL(int) VMMR0TermVM(PGVM pGVM, PVMCC pVM,VMCPUID idCpu)568 VMMR0_INT_DECL(int) VMMR0TermVM(PGVM pGVM, VMCPUID idCpu) 570 569 { 571 570 /* … … 575 574 { 576 575 AssertReturn(idCpu == 0, VERR_INVALID_CPU_ID); 577 int rc = GVMMR0ValidateGVMand VMandEMT(pGVM, pVM, idCpu);576 int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu); 578 577 if (RT_FAILURE(rc)) 579 578 return rc; … … 581 580 582 581 #ifdef VBOX_WITH_PCI_PASSTHROUGH 583 PciRawR0TermVM(pGVM , pVM);582 PciRawR0TermVM(pGVM); 584 583 #endif 585 584 … … 589 588 if (GVMMR0DoingTermVM(pGVM)) 590 589 { 591 GIMR0TermVM(p VM);592 593 /** @todo I wish to call PGMR0PhysFlushHandyPages(p VM, &pVM->aCpus[idCpu])590 GIMR0TermVM(pGVM); 591 592 /** @todo I wish to call PGMR0PhysFlushHandyPages(pGVM, &pGVM->aCpus[idCpu]) 594 593 * here to make sure we don't leak any shared pages if we crash... */ 595 594 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE 596 PGMR0DynMapTermVM(p VM);597 #endif 598 HMR0TermVM(p VM);595 PGMR0DynMapTermVM(pGVM); 596 #endif 597 HMR0TermVM(pGVM); 599 598 } 600 599 … … 602 601 * Deregister the logger. 603 602 */ 604 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)p VM->pSession);603 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pGVM->pSession); 605 604 return VINF_SUCCESS; 606 605 } … … 707 706 * @returns VINF_SUCCESS or VINF_EM_HALT. 708 707 * @param pGVM The ring-0 VM structure. 709 * @param pVM The cross context VM structure.710 708 * @param pGVCpu The ring-0 virtual CPU structure. 711 * @param pVCpu The cross context virtual CPU structure.712 709 * 713 710 * @todo r=bird: All the blocking/waiting and EMT managment should move out of … … 715 712 * parameters and statistics. 716 713 */ 717 static int vmmR0DoHalt(PGVM pGVM, PVMCC pVM, PGVMCPU pGVCpu, PVMCPUCC pVCpu) 718 { 719 Assert(pVCpu == pGVCpu); 720 714 static int vmmR0DoHalt(PGVM pGVM, PGVMCPU pGVCpu) 715 { 721 716 /* 722 717 * Do spin stat historization. 
723 718 */ 724 if (++p VCpu->vmm.s.cR0Halts & 0xff)719 if (++pGVCpu->vmm.s.cR0Halts & 0xff) 725 720 { /* likely */ } 726 else if (p VCpu->vmm.s.cR0HaltsSucceeded > pVCpu->vmm.s.cR0HaltsToRing3)727 { 728 p VCpu->vmm.s.cR0HaltsSucceeded = 2;729 p VCpu->vmm.s.cR0HaltsToRing3 = 0;721 else if (pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3) 722 { 723 pGVCpu->vmm.s.cR0HaltsSucceeded = 2; 724 pGVCpu->vmm.s.cR0HaltsToRing3 = 0; 730 725 } 731 726 else 732 727 { 733 p VCpu->vmm.s.cR0HaltsSucceeded = 0;734 p VCpu->vmm.s.cR0HaltsToRing3 = 2;728 pGVCpu->vmm.s.cR0HaltsSucceeded = 0; 729 pGVCpu->vmm.s.cR0HaltsToRing3 = 2; 735 730 } 736 731 … … 750 745 * Check preconditions. 751 746 */ 752 unsigned const uMWait = EMMonitorWaitIsActive(p VCpu);753 CPUMINTERRUPTIBILITY const enmInterruptibility = CPUMGetGuestInterruptibility(p VCpu);754 if ( p VCpu->vmm.s.fMayHaltInRing0755 && !TRPMHasTrap(p VCpu)747 unsigned const uMWait = EMMonitorWaitIsActive(pGVCpu); 748 CPUMINTERRUPTIBILITY const enmInterruptibility = CPUMGetGuestInterruptibility(pGVCpu); 749 if ( pGVCpu->vmm.s.fMayHaltInRing0 750 && !TRPMHasTrap(pGVCpu) 756 751 && ( enmInterruptibility == CPUMINTERRUPTIBILITY_UNRESTRAINED 757 752 || uMWait > 1)) 758 753 { 759 if ( !VM_FF_IS_ANY_SET(p VM, fVmFFs)760 && !VMCPU_FF_IS_ANY_SET(p VCpu, fCpuFFs))754 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs) 755 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs)) 761 756 { 762 757 /* 763 758 * Interrupts pending already? 
764 759 */ 765 if (VMCPU_FF_TEST_AND_CLEAR(p VCpu, VMCPU_FF_UPDATE_APIC))766 APICUpdatePendingInterrupts(p VCpu);760 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC)) 761 APICUpdatePendingInterrupts(pGVCpu); 767 762 768 763 /* … … 772 767 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT; 773 768 774 if (VMCPU_FF_IS_ANY_SET(p VCpu, fIntMask))775 return vmmR0DoHaltInterrupt(p VCpu, uMWait, enmInterruptibility);769 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask)) 770 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility); 776 771 ASMNopPause(); 777 772 … … 780 775 */ 781 776 uint64_t u64Delta; 782 uint64_t u64GipTime = TMTimerPollGIP(p VM, pVCpu, &u64Delta);783 784 if ( !VM_FF_IS_ANY_SET(p VM, fVmFFs)785 && !VMCPU_FF_IS_ANY_SET(p VCpu, fCpuFFs))777 uint64_t u64GipTime = TMTimerPollGIP(pGVM, pGVCpu, &u64Delta); 778 779 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs) 780 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs)) 786 781 { 787 if (VMCPU_FF_TEST_AND_CLEAR(p VCpu, VMCPU_FF_UPDATE_APIC))788 APICUpdatePendingInterrupts(p VCpu);789 790 if (VMCPU_FF_IS_ANY_SET(p VCpu, fIntMask))791 return vmmR0DoHaltInterrupt(p VCpu, uMWait, enmInterruptibility);782 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC)) 783 APICUpdatePendingInterrupts(pGVCpu); 784 785 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask)) 786 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility); 792 787 793 788 /* 794 789 * Wait if there is enough time to the next timer event. 795 790 */ 796 if (u64Delta >= p VCpu->vmm.s.cNsSpinBlockThreshold)791 if (u64Delta >= pGVCpu->vmm.s.cNsSpinBlockThreshold) 797 792 { 798 793 /* If there are few other CPU cores around, we will procrastinate a … … 801 796 dynamically adjust the spin count according to its usfulness or 802 797 something... 
*/ 803 if ( p VCpu->vmm.s.cR0HaltsSucceeded > pVCpu->vmm.s.cR0HaltsToRing3798 if ( pGVCpu->vmm.s.cR0HaltsSucceeded > pGVCpu->vmm.s.cR0HaltsToRing3 804 799 && RTMpGetOnlineCount() >= 4) 805 800 { … … 810 805 { 811 806 ASMNopPause(); 812 if (VMCPU_FF_TEST_AND_CLEAR(p VCpu, VMCPU_FF_UPDATE_APIC))813 APICUpdatePendingInterrupts(p VCpu);807 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC)) 808 APICUpdatePendingInterrupts(pGVCpu); 814 809 ASMNopPause(); 815 if (VM_FF_IS_ANY_SET(p VM, fVmFFs))810 if (VM_FF_IS_ANY_SET(pGVM, fVmFFs)) 816 811 { 817 STAM_REL_COUNTER_INC(&p VCpu->vmm.s.StatR0HaltToR3FromSpin);812 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin); 818 813 return VINF_EM_HALT; 819 814 } 820 815 ASMNopPause(); 821 if (VMCPU_FF_IS_ANY_SET(p VCpu, fCpuFFs))816 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs)) 822 817 { 823 STAM_REL_COUNTER_INC(&p VCpu->vmm.s.StatR0HaltToR3FromSpin);818 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltToR3FromSpin); 824 819 return VINF_EM_HALT; 825 820 } 826 821 ASMNopPause(); 827 if (VMCPU_FF_IS_ANY_SET(p VCpu, fIntMask))822 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask)) 828 823 { 829 STAM_REL_COUNTER_INC(&p VCpu->vmm.s.StatR0HaltExecFromSpin);830 return vmmR0DoHaltInterrupt(p VCpu, uMWait, enmInterruptibility);824 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromSpin); 825 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility); 831 826 } 832 827 ASMNopPause(); … … 836 831 /* Block. We have to set the state to VMCPUSTATE_STARTED_HALTED here so ring-3 837 832 knows when to notify us (cannot access VMINTUSERPERVMCPU::fWait from here). 
*/ 838 VMCPU_CMPXCHG_STATE(p VCpu, VMCPUSTATE_STARTED_HALTED, VMCPUSTATE_STARTED);833 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED_HALTED, VMCPUSTATE_STARTED); 839 834 uint64_t const u64StartSchedHalt = RTTimeNanoTS(); 840 int rc = GVMMR0SchedHalt(pGVM, p VM, pGVCpu, u64GipTime);835 int rc = GVMMR0SchedHalt(pGVM, pGVCpu, u64GipTime); 841 836 uint64_t const u64EndSchedHalt = RTTimeNanoTS(); 842 837 uint64_t const cNsElapsedSchedHalt = u64EndSchedHalt - u64StartSchedHalt; 843 VMCPU_CMPXCHG_STATE(p VCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED);844 STAM_REL_PROFILE_ADD_PERIOD(&p VCpu->vmm.s.StatR0HaltBlock, cNsElapsedSchedHalt);838 VMCPU_CMPXCHG_STATE(pGVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_HALTED); 839 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlock, cNsElapsedSchedHalt); 845 840 if ( rc == VINF_SUCCESS 846 841 || rc == VERR_INTERRUPTED) … … 850 845 int64_t const cNsOverslept = u64EndSchedHalt - u64GipTime; 851 846 if (cNsOverslept > 50000) 852 STAM_REL_PROFILE_ADD_PERIOD(&p VCpu->vmm.s.StatR0HaltBlockOverslept, cNsOverslept);847 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOverslept, cNsOverslept); 853 848 else if (cNsOverslept < -50000) 854 STAM_REL_PROFILE_ADD_PERIOD(&p VCpu->vmm.s.StatR0HaltBlockInsomnia, cNsElapsedSchedHalt);849 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockInsomnia, cNsElapsedSchedHalt); 855 850 else 856 STAM_REL_PROFILE_ADD_PERIOD(&p VCpu->vmm.s.StatR0HaltBlockOnTime, cNsElapsedSchedHalt);851 STAM_REL_PROFILE_ADD_PERIOD(&pGVCpu->vmm.s.StatR0HaltBlockOnTime, cNsElapsedSchedHalt); 857 852 858 853 /* 859 854 * Recheck whether we can resume execution or have to go to ring-3. 
860 855 */ 861 if ( !VM_FF_IS_ANY_SET(p VM, fVmFFs)862 && !VMCPU_FF_IS_ANY_SET(p VCpu, fCpuFFs))856 if ( !VM_FF_IS_ANY_SET(pGVM, fVmFFs) 857 && !VMCPU_FF_IS_ANY_SET(pGVCpu, fCpuFFs)) 863 858 { 864 if (VMCPU_FF_TEST_AND_CLEAR(p VCpu, VMCPU_FF_UPDATE_APIC))865 APICUpdatePendingInterrupts(p VCpu);866 if (VMCPU_FF_IS_ANY_SET(p VCpu, fIntMask))859 if (VMCPU_FF_TEST_AND_CLEAR(pGVCpu, VMCPU_FF_UPDATE_APIC)) 860 APICUpdatePendingInterrupts(pGVCpu); 861 if (VMCPU_FF_IS_ANY_SET(pGVCpu, fIntMask)) 867 862 { 868 STAM_REL_COUNTER_INC(&p VCpu->vmm.s.StatR0HaltExecFromBlock);869 return vmmR0DoHaltInterrupt(p VCpu, uMWait, enmInterruptibility);863 STAM_REL_COUNTER_INC(&pGVCpu->vmm.s.StatR0HaltExecFromBlock); 864 return vmmR0DoHaltInterrupt(pGVCpu, uMWait, enmInterruptibility); 870 865 } 871 866 } … … 1068 1063 /** 1069 1064 * Record return code statistics 1070 * @param p VMThe cross context VM structure.1065 * @param pGVM The cross context VM structure. 1071 1066 * @param pVCpu The cross context virtual CPU structure. 1072 1067 * @param rc The status code. … … 1273 1268 * 1274 1269 * @param pGVM The global (ring-0) VM structure. 1275 * @param pVM The cross context VM structure.1276 * The return code isstored in pVM->vmm.s.iLastGZRc.1270 * @param pVMIgnored The cross context VM structure. The return code is 1271 * stored in pVM->vmm.s.iLastGZRc. 1277 1272 * @param idCpu The Virtual CPU ID of the calling EMT. 1278 1273 * @param enmOperation Which operation to execute. 1279 1274 * @remarks Assume called with interrupts _enabled_. 1280 1275 */ 1281 VMMR0DECL(void) VMMR0EntryFast(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation) 1282 { 1276 VMMR0DECL(void) VMMR0EntryFast(PGVM pGVM, PVMCC pVMIgnored, VMCPUID idCpu, VMMR0OPERATION enmOperation) 1277 { 1278 RT_NOREF(pVMIgnored); 1279 1283 1280 /* 1284 1281 * Validation. 
1285 1282 */ 1286 1283 if ( idCpu < pGVM->cCpus 1287 && pGVM->cCpus == p VM->cCpus)1284 && pGVM->cCpus == pGVM->cCpusUnsafe) 1288 1285 { /*likely*/ } 1289 1286 else 1290 1287 { 1291 SUPR0Printf("VMMR0EntryFast: Bad idCpu=%#x cCpus=%#x /%#x\n", idCpu, pGVM->cCpus, pVM->cCpus);1288 SUPR0Printf("VMMR0EntryFast: Bad idCpu=%#x cCpus=%#x cCpusUnsafe=%#x\n", idCpu, pGVM->cCpus, pGVM->cCpusUnsafe); 1292 1289 return; 1293 1290 } 1294 1291 1295 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu]; 1296 PVMCPUCC pVCpu = pGVCpu; 1292 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu]; 1297 1293 RTNATIVETHREAD const hNativeThread = RTThreadNativeSelf(); 1298 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread1299 && p VCpu->hNativeThreadR0 == hNativeThread))1294 if (RT_LIKELY( pGVCpu->hEMT == hNativeThread 1295 && pGVCpu->hNativeThreadR0 == hNativeThread)) 1300 1296 { /* likely */ } 1301 1297 else 1302 1298 { 1303 SUPR0Printf("VMMR0EntryFast: Bad thread idCpu=%#x hNativeSelf=%p pGVCpu->hEmt=%p p VCpu->hNativeThreadR0=%p\n",1304 idCpu, hNativeThread, pGVCpu->hEMT, p VCpu->hNativeThreadR0);1299 SUPR0Printf("VMMR0EntryFast: Bad thread idCpu=%#x hNativeSelf=%p pGVCpu->hEmt=%p pGVCpu->hNativeThreadR0=%p\n", 1300 idCpu, hNativeThread, pGVCpu->hEMT, pGVCpu->hNativeThreadR0); 1305 1301 return; 1306 1302 } … … 1310 1306 */ 1311 1307 VMM_CHECK_SMAP_SETUP(); 1312 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1308 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1313 1309 1314 1310 /* … … 1327 1323 * Disable preemption. 
1328 1324 */ 1329 Assert(!vmmR0ThreadCtxHookIsEnabled(p VCpu));1325 Assert(!vmmR0ThreadCtxHookIsEnabled(pGVCpu)); 1330 1326 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER; 1331 1327 RTThreadPreemptDisable(&PreemptState); … … 1340 1336 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet))) 1341 1337 { 1342 p VCpu->iHostCpuSet = iHostCpuSet;1343 ASMAtomicWriteU32(&p VCpu->idHostCpu, idHostCpu);1338 pGVCpu->iHostCpuSet = iHostCpuSet; 1339 ASMAtomicWriteU32(&pGVCpu->idHostCpu, idHostCpu); 1344 1340 1345 1341 /* 1346 1342 * Update the periodic preemption timer if it's active. 1347 1343 */ 1348 if (p VM->vmm.s.fUsePeriodicPreemptionTimers)1349 GVMMR0SchedUpdatePeriodicPreemptionTimer(p VM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));1350 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1344 if (pGVM->vmm.s.fUsePeriodicPreemptionTimers) 1345 GVMMR0SchedUpdatePeriodicPreemptionTimer(pGVM, pGVCpu->idHostCpu, TMCalcHostTimerFrequency(pGVM, pGVCpu)); 1346 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1351 1347 1352 1348 #ifdef VMM_R0_TOUCH_FPU … … 1365 1361 * Enable the context switching hook. 1366 1362 */ 1367 if (p VCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)1363 if (pGVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK) 1368 1364 { 1369 Assert(!RTThreadCtxHookIsEnabled(p VCpu->vmm.s.hCtxHook));1370 int rc2 = RTThreadCtxHookEnable(p VCpu->vmm.s.hCtxHook); AssertRC(rc2);1365 Assert(!RTThreadCtxHookIsEnabled(pGVCpu->vmm.s.hCtxHook)); 1366 int rc2 = RTThreadCtxHookEnable(pGVCpu->vmm.s.hCtxHook); AssertRC(rc2); 1371 1367 } 1372 1368 … … 1374 1370 * Enter HM context. 1375 1371 */ 1376 rc = HMR0Enter(p VCpu);1372 rc = HMR0Enter(pGVCpu); 1377 1373 if (RT_SUCCESS(rc)) 1378 1374 { 1379 VMCPU_SET_STATE(p VCpu, VMCPUSTATE_STARTED_HM);1375 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED_HM); 1380 1376 1381 1377 /* … … 1383 1379 * we're in HM context. 
1384 1380 */ 1385 if (vmmR0ThreadCtxHookIsEnabled(p VCpu))1381 if (vmmR0ThreadCtxHookIsEnabled(pGVCpu)) 1386 1382 { 1387 1383 fPreemptRestored = true; … … 1392 1388 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode). 1393 1389 */ 1394 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1395 rc = vmmR0CallRing3SetJmp(&p VCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pVM, pVCpu);1396 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1390 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1391 rc = vmmR0CallRing3SetJmp(&pGVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pGVM, pGVCpu); 1392 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1397 1393 1398 1394 /* … … 1400 1396 * assertions are going to panic the host since we're outside the setjmp/longjmp zone. 1401 1397 */ 1402 if (RT_UNLIKELY( VMCPU_GET_STATE(p VCpu) != VMCPUSTATE_STARTED_HM1398 if (RT_UNLIKELY( VMCPU_GET_STATE(pGVCpu) != VMCPUSTATE_STARTED_HM 1403 1399 && RT_SUCCESS_NP(rc) && rc != VINF_VMM_CALL_HOST )) 1404 1400 { 1405 p VM->vmm.s.szRing0AssertMsg1[0] = '\0';1406 RTStrPrintf(p VM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),1407 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(p VCpu), VMCPUSTATE_STARTED_HM);1401 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0'; 1402 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2), 1403 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pGVCpu), VMCPUSTATE_STARTED_HM); 1408 1404 rc = VERR_VMM_WRONG_HM_VMCPU_STATE; 1409 1405 } 1410 1406 /** @todo Get rid of this. HM shouldn't disable the context hook. */ 1411 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(p VCpu)))1407 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pGVCpu))) 1412 1408 { 1413 p VM->vmm.s.szRing0AssertMsg1[0] = '\0';1414 RTStrPrintf(p VM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),1415 "Thread-context hooks still enabled! 
VCPU=%p Id=%u rc=%d.\n", p VCpu, pVCpu->idCpu, rc);1409 pGVM->vmm.s.szRing0AssertMsg1[0] = '\0'; 1410 RTStrPrintf(pGVM->vmm.s.szRing0AssertMsg2, sizeof(pGVM->vmm.s.szRing0AssertMsg2), 1411 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pGVCpu, pGVCpu->idCpu, rc); 1416 1412 rc = VERR_INVALID_STATE; 1417 1413 } 1418 1414 1419 VMCPU_SET_STATE(p VCpu, VMCPUSTATE_STARTED);1415 VMCPU_SET_STATE(pGVCpu, VMCPUSTATE_STARTED); 1420 1416 } 1421 STAM_COUNTER_INC(&p VM->vmm.s.StatRunGC);1417 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC); 1422 1418 1423 1419 /* … … 1425 1421 * hook / restore preemption. 1426 1422 */ 1427 p VCpu->iHostCpuSet = UINT32_MAX;1428 ASMAtomicWriteU32(&p VCpu->idHostCpu, NIL_RTCPUID);1423 pGVCpu->iHostCpuSet = UINT32_MAX; 1424 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID); 1429 1425 1430 1426 /* … … 1435 1431 * when we get here, but the IPRT API handles that. 1436 1432 */ 1437 if (p VCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)1433 if (pGVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK) 1438 1434 { 1439 ASMAtomicWriteU32(&p VCpu->idHostCpu, NIL_RTCPUID);1440 RTThreadCtxHookDisable(p VCpu->vmm.s.hCtxHook);1435 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID); 1436 RTThreadCtxHookDisable(pGVCpu->vmm.s.hCtxHook); 1441 1437 } 1442 1438 } … … 1447 1443 { 1448 1444 rc = VINF_EM_RAW_INTERRUPT; 1449 p VCpu->iHostCpuSet = UINT32_MAX;1450 ASMAtomicWriteU32(&p VCpu->idHostCpu, NIL_RTCPUID);1445 pGVCpu->iHostCpuSet = UINT32_MAX; 1446 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID); 1451 1447 } 1452 1448 … … 1456 1452 RTThreadPreemptRestore(&PreemptState); 1457 1453 1458 p VCpu->vmm.s.iLastGZRc = rc;1454 pGVCpu->vmm.s.iLastGZRc = rc; 1459 1455 1460 1456 /* Fire dtrace probe and collect statistics. 
*/ 1461 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(p VCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);1457 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc); 1462 1458 #ifdef VBOX_WITH_STATISTICS 1463 vmmR0RecordRC(p VM, pVCpu, rc);1459 vmmR0RecordRC(pGVM, pGVCpu, rc); 1464 1460 #endif 1465 1461 #if 1 … … 1471 1467 else 1472 1468 { 1473 p VCpu->vmm.s.iLastGZRc = rc = vmmR0DoHalt(pGVM, pVM, pGVCpu, pVCpu);1469 pGVCpu->vmm.s.iLastGZRc = rc = vmmR0DoHalt(pGVM, pGVCpu); 1474 1470 if (rc == VINF_SUCCESS) 1475 1471 { 1476 p VCpu->vmm.s.cR0HaltsSucceeded++;1472 pGVCpu->vmm.s.cR0HaltsSucceeded++; 1477 1473 continue; 1478 1474 } 1479 p VCpu->vmm.s.cR0HaltsToRing3++;1475 pGVCpu->vmm.s.cR0HaltsToRing3++; 1480 1476 } 1481 1477 #endif … … 1486 1482 else 1487 1483 { 1488 p VCpu->iHostCpuSet = UINT32_MAX;1489 ASMAtomicWriteU32(&p VCpu->idHostCpu, NIL_RTCPUID);1484 pGVCpu->iHostCpuSet = UINT32_MAX; 1485 ASMAtomicWriteU32(&pGVCpu->idHostCpu, NIL_RTCPUID); 1490 1486 RTThreadPreemptRestore(&PreemptState); 1491 1487 if (iHostCpuSet < RTCPUSET_MAX_CPUS) 1492 1488 { 1493 int rc = SUPR0TscDeltaMeasureBySetIndex(p VM->pSession, iHostCpuSet, 0 /*fFlags*/,1489 int rc = SUPR0TscDeltaMeasureBySetIndex(pGVM->pSession, iHostCpuSet, 0 /*fFlags*/, 1494 1490 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/, 1495 1491 0 /*default cTries*/); 1496 1492 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE) 1497 p VCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;1493 pGVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3; 1498 1494 else 1499 p VCpu->vmm.s.iLastGZRc = rc;1495 pGVCpu->vmm.s.iLastGZRc = rc; 1500 1496 } 1501 1497 else 1502 p VCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;1498 pGVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX; 1503 1499 } 1504 1500 break; … … 1515 1511 * Setup the longjmp machinery and execute guest code (calls NEMR0RunGuestCode). 
1516 1512 */ 1517 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING); 1518 int rc = vmmR0CallRing3SetJmp2(&pVCpu->vmm.s.CallRing3JmpBufR0, NEMR0RunGuestCode, pGVM, idCpu); 1519 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING); 1520 STAM_COUNTER_INC(&pVM->vmm.s.StatRunGC); 1521 1522 pVCpu->vmm.s.iLastGZRc = rc; 1513 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1514 # ifdef VBOXSTRICTRC_STRICT_ENABLED 1515 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm.s.CallRing3JmpBufR0, (PFNVMMR0SETJMP2)NEMR0RunGuestCode, pGVM, idCpu); 1516 # else 1517 int rc = vmmR0CallRing3SetJmp2(&pGVCpu->vmm.s.CallRing3JmpBufR0, NEMR0RunGuestCode, pGVM, idCpu); 1518 # endif 1519 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1520 STAM_COUNTER_INC(&pGVM->vmm.s.StatRunGC); 1521 1522 pGVCpu->vmm.s.iLastGZRc = rc; 1523 1523 1524 1524 /* 1525 1525 * Fire dtrace probe and collect statistics. 1526 1526 */ 1527 VBOXVMM_R0_VMM_RETURN_TO_RING3_NEM(p VCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);1527 VBOXVMM_R0_VMM_RETURN_TO_RING3_NEM(pGVCpu, CPUMQueryGuestCtxPtr(pGVCpu), rc); 1528 1528 # ifdef VBOX_WITH_STATISTICS 1529 vmmR0RecordRC(p VM, pVCpu, rc);1529 vmmR0RecordRC(pGVM, pGVCpu, rc); 1530 1530 # endif 1531 1531 break; … … 1538 1538 */ 1539 1539 case VMMR0_DO_NOP: 1540 p VCpu->vmm.s.iLastGZRc = VINF_SUCCESS;1540 pGVCpu->vmm.s.iLastGZRc = VINF_SUCCESS; 1541 1541 break; 1542 1542 … … 1546 1546 default: 1547 1547 AssertMsgFailed(("%#x\n", enmOperation)); 1548 p VCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;1549 break; 1550 } 1551 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1548 pGVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED; 1549 break; 1550 } 1551 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1552 1552 } 1553 1553 … … 1557 1557 * 1558 1558 * @returns true / false accordingly. 1559 * @param p VM The cross contextVM structure.1559 * @param pGVM The global (ring-0) VM structure. 1560 1560 * @param pClaimedSession The session claim to validate. 1561 1561 * @param pSession The session argument. 
1562 1562 */ 1563 DECLINLINE(bool) vmmR0IsValidSession(P VMCC pVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)1563 DECLINLINE(bool) vmmR0IsValidSession(PGVM pGVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession) 1564 1564 { 1565 1565 /* This must be set! */ … … 1568 1568 1569 1569 /* Only one out of the two. */ 1570 if (p VM && pClaimedSession)1570 if (pGVM && pClaimedSession) 1571 1571 return false; 1572 if (p VM)1573 pClaimedSession = p VM->pSession;1572 if (pGVM) 1573 pClaimedSession = pGVM->pSession; 1574 1574 return pClaimedSession == pSession; 1575 1575 } … … 1582 1582 * @returns VBox status code. 1583 1583 * @param pGVM The global (ring-0) VM structure. 1584 * @param pVM The cross context VM structure.1585 1584 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM 1586 1585 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't … … 1593 1592 * @remarks Assume called with interrupts _enabled_. 1594 1593 */ 1595 static int vmmR0EntryExWorker(PGVM pGVM, PVMCC pVM,VMCPUID idCpu, VMMR0OPERATION enmOperation,1594 static int vmmR0EntryExWorker(PGVM pGVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, 1596 1595 PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession) 1597 1596 { 1598 1597 /* 1599 * Validate pGVM, pVM and idCpu for consistency and validity. 1600 */ 1601 if ( pGVM != NULL 1602 || pVM != NULL) 1603 { 1604 if (RT_LIKELY( RT_VALID_PTR(pGVM) 1605 && RT_VALID_PTR(pVM) 1606 && ((uintptr_t)pVM & PAGE_OFFSET_MASK) == 0)) 1598 * Validate pGVM and idCpu for consistency and validity. 1599 */ 1600 if (pGVM != NULL) 1601 { 1602 if (RT_LIKELY(((uintptr_t)pGVM & PAGE_OFFSET_MASK) == 0)) 1607 1603 { /* likely */ } 1608 1604 else 1609 1605 { 1610 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p and/or pVM=%p! (op=%d)\n", pGVM, pVM, enmOperation);1606 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p! 
(op=%d)\n", pGVM, enmOperation); 1611 1607 return VERR_INVALID_POINTER; 1612 }1613 1614 if (RT_LIKELY(pGVM == pVM))1615 { /* likely */ }1616 else1617 {1618 SUPR0Printf("vmmR0EntryExWorker: pVM mismatch: got %p, pGVM/pVM=%p\n", pVM, pGVM);1619 return VERR_INVALID_PARAMETER;1620 1608 } 1621 1609 … … 1628 1616 } 1629 1617 1630 if (RT_LIKELY( pVM->enmVMState >= VMSTATE_CREATING 1631 && pVM->enmVMState <= VMSTATE_TERMINATED 1632 && pVM->cCpus == pGVM->cCpus 1633 && pVM->pSession == pSession 1634 && pVM->pSelf == pVM)) 1618 if (RT_LIKELY( pGVM->enmVMState >= VMSTATE_CREATING 1619 && pGVM->enmVMState <= VMSTATE_TERMINATED 1620 && pGVM->pSession == pSession 1621 && pGVM->pSelf == pGVM)) 1635 1622 { /* likely */ } 1636 1623 else 1637 1624 { 1638 SUPR0Printf("vmmR0EntryExWorker: Invalid p VM=%p:{.enmVMState=%d, .cCpus=%#x(==%#x), .pSession=%p(==%p), .pSelf=%p(==%p)}! (op=%d)\n",1639 p VM, pVM->enmVMState, pVM->cCpus, pGVM->cCpus, pVM->pSession, pSession, pVM->pSelf, pVM, enmOperation);1625 SUPR0Printf("vmmR0EntryExWorker: Invalid pGVM=%p:{.enmVMState=%d, .cCpus=%#x, .pSession=%p(==%p), .pSelf=%p(==%p)}! 
(op=%d)\n", 1626 pGVM, pGVM->enmVMState, pGVM->cCpus, pGVM->pSession, pSession, pGVM->pSelf, pGVM, enmOperation); 1640 1627 return VERR_INVALID_POINTER; 1641 1628 } … … 1665 1652 */ 1666 1653 case VMMR0_DO_GVMM_CREATE_VM: 1667 if (pGVM == NULL && pVM == NULL &&u64Arg == 0 && idCpu == NIL_VMCPUID)1654 if (pGVM == NULL && u64Arg == 0 && idCpu == NIL_VMCPUID) 1668 1655 rc = GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr, pSession); 1669 1656 else … … 1674 1661 case VMMR0_DO_GVMM_DESTROY_VM: 1675 1662 if (pReqHdr == NULL && u64Arg == 0) 1676 rc = GVMMR0DestroyVM(pGVM , pVM);1663 rc = GVMMR0DestroyVM(pGVM); 1677 1664 else 1678 1665 rc = VERR_INVALID_PARAMETER; … … 1681 1668 1682 1669 case VMMR0_DO_GVMM_REGISTER_VMCPU: 1683 if (pGVM != NULL && pVM != NULL)1684 rc = GVMMR0RegisterVCpu(pGVM, pVM,idCpu);1670 if (pGVM != NULL) 1671 rc = GVMMR0RegisterVCpu(pGVM, idCpu); 1685 1672 else 1686 1673 rc = VERR_INVALID_PARAMETER; 1687 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1674 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1688 1675 break; 1689 1676 1690 1677 case VMMR0_DO_GVMM_DEREGISTER_VMCPU: 1691 if (pGVM != NULL && pVM != NULL)1692 rc = GVMMR0DeregisterVCpu(pGVM, pVM,idCpu);1678 if (pGVM != NULL) 1679 rc = GVMMR0DeregisterVCpu(pGVM, idCpu); 1693 1680 else 1694 1681 rc = VERR_INVALID_PARAMETER; 1695 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1682 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1696 1683 break; 1697 1684 … … 1699 1686 if (pReqHdr) 1700 1687 return VERR_INVALID_PARAMETER; 1701 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1702 rc = GVMMR0SchedHaltReq(pGVM, pVM,idCpu, u64Arg);1703 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1688 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1689 rc = GVMMR0SchedHaltReq(pGVM, idCpu, u64Arg); 1690 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1704 1691 break; 1705 1692 … … 1707 1694 if (pReqHdr || u64Arg) 1708 1695 return VERR_INVALID_PARAMETER; 1709 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1710 rc = GVMMR0SchedWakeUp(pGVM, pVM,idCpu);1711 VMM_CHECK_SMAP_CHECK2(p VM, 
RT_NOTHING);1696 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1697 rc = GVMMR0SchedWakeUp(pGVM, idCpu); 1698 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1712 1699 break; 1713 1700 … … 1715 1702 if (pReqHdr || u64Arg) 1716 1703 return VERR_INVALID_PARAMETER; 1717 rc = GVMMR0SchedPoke(pGVM, pVM,idCpu);1718 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1704 rc = GVMMR0SchedPoke(pGVM, idCpu); 1705 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1719 1706 break; 1720 1707 … … 1722 1709 if (u64Arg) 1723 1710 return VERR_INVALID_PARAMETER; 1724 rc = GVMMR0SchedWakeUpAndPokeCpusReq(pGVM, pVM,(PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);1725 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1711 rc = GVMMR0SchedWakeUpAndPokeCpusReq(pGVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr); 1712 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1726 1713 break; 1727 1714 … … 1729 1716 if (pReqHdr || u64Arg > 1) 1730 1717 return VERR_INVALID_PARAMETER; 1731 rc = GVMMR0SchedPoll(pGVM, pVM,idCpu, !!u64Arg);1732 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1718 rc = GVMMR0SchedPoll(pGVM, idCpu, !!u64Arg); 1719 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1733 1720 break; 1734 1721 … … 1736 1723 if (u64Arg) 1737 1724 return VERR_INVALID_PARAMETER; 1738 rc = GVMMR0QueryStatisticsReq(pGVM, pVM,(PGVMMQUERYSTATISTICSSREQ)pReqHdr, pSession);1739 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1725 rc = GVMMR0QueryStatisticsReq(pGVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr, pSession); 1726 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1740 1727 break; 1741 1728 … … 1743 1730 if (u64Arg) 1744 1731 return VERR_INVALID_PARAMETER; 1745 rc = GVMMR0ResetStatisticsReq(pGVM, pVM,(PGVMMRESETSTATISTICSSREQ)pReqHdr, pSession);1746 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1732 rc = GVMMR0ResetStatisticsReq(pGVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr, pSession); 1733 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1747 1734 break; 1748 1735 … … 1751 1738 */ 1752 1739 case VMMR0_DO_VMMR0_INIT: 1753 rc = vmmR0InitVM(pGVM, pVM,RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));1754 
VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1740 rc = vmmR0InitVM(pGVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg)); 1741 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1755 1742 break; 1756 1743 … … 1759 1746 */ 1760 1747 case VMMR0_DO_VMMR0_INIT_EMT: 1761 rc = vmmR0InitVMEmt(pGVM, pVM,idCpu);1762 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1748 rc = vmmR0InitVMEmt(pGVM, idCpu); 1749 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1763 1750 break; 1764 1751 … … 1767 1754 */ 1768 1755 case VMMR0_DO_VMMR0_TERM: 1769 rc = VMMR0TermVM(pGVM, pVM,0 /*idCpu*/);1770 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1756 rc = VMMR0TermVM(pGVM, 0 /*idCpu*/); 1757 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1771 1758 break; 1772 1759 … … 1775 1762 */ 1776 1763 case VMMR0_DO_HM_ENABLE: 1777 rc = HMR0EnableAllCpus(p VM);1778 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1764 rc = HMR0EnableAllCpus(pGVM); 1765 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1779 1766 break; 1780 1767 … … 1783 1770 */ 1784 1771 case VMMR0_DO_HM_SETUP_VM: 1785 rc = HMR0SetupVM(p VM);1786 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1772 rc = HMR0SetupVM(pGVM); 1773 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1787 1774 break; 1788 1775 … … 1793 1780 if (idCpu == NIL_VMCPUID) 1794 1781 return VERR_INVALID_CPU_ID; 1795 rc = PGMR0PhysAllocateHandyPages(pGVM, pVM,idCpu);1796 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1782 rc = PGMR0PhysAllocateHandyPages(pGVM, idCpu); 1783 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1797 1784 break; 1798 1785 … … 1800 1787 if (idCpu == NIL_VMCPUID) 1801 1788 return VERR_INVALID_CPU_ID; 1802 rc = PGMR0PhysFlushHandyPages(pGVM, pVM,idCpu);1803 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1789 rc = PGMR0PhysFlushHandyPages(pGVM, idCpu); 1790 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1804 1791 break; 1805 1792 … … 1807 1794 if (idCpu == NIL_VMCPUID) 1808 1795 return VERR_INVALID_CPU_ID; 1809 rc = PGMR0PhysAllocateLargeHandyPage(pGVM, pVM,idCpu);1810 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1796 rc = PGMR0PhysAllocateLargeHandyPage(pGVM, 
idCpu); 1797 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1811 1798 break; 1812 1799 … … 1814 1801 if (idCpu != 0) 1815 1802 return VERR_INVALID_CPU_ID; 1816 rc = PGMR0PhysSetupIoMmu(pGVM , pVM);1817 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1803 rc = PGMR0PhysSetupIoMmu(pGVM); 1804 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1818 1805 break; 1819 1806 … … 1824 1811 if (u64Arg) 1825 1812 return VERR_INVALID_PARAMETER; 1826 rc = GMMR0InitialReservationReq(pGVM, pVM,idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);1827 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1813 rc = GMMR0InitialReservationReq(pGVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr); 1814 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1828 1815 break; 1829 1816 … … 1831 1818 if (u64Arg) 1832 1819 return VERR_INVALID_PARAMETER; 1833 rc = GMMR0UpdateReservationReq(pGVM, pVM,idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);1834 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1820 rc = GMMR0UpdateReservationReq(pGVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr); 1821 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1835 1822 break; 1836 1823 … … 1838 1825 if (u64Arg) 1839 1826 return VERR_INVALID_PARAMETER; 1840 rc = GMMR0AllocatePagesReq(pGVM, pVM,idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);1841 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1827 rc = GMMR0AllocatePagesReq(pGVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr); 1828 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1842 1829 break; 1843 1830 … … 1845 1832 if (u64Arg) 1846 1833 return VERR_INVALID_PARAMETER; 1847 rc = GMMR0FreePagesReq(pGVM, pVM,idCpu, (PGMMFREEPAGESREQ)pReqHdr);1848 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1834 rc = GMMR0FreePagesReq(pGVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr); 1835 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1849 1836 break; 1850 1837 … … 1852 1839 if (u64Arg) 1853 1840 return VERR_INVALID_PARAMETER; 1854 rc = GMMR0FreeLargePageReq(pGVM, pVM,idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);1855 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1841 rc = GMMR0FreeLargePageReq(pGVM, idCpu, 
(PGMMFREELARGEPAGEREQ)pReqHdr); 1842 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1856 1843 break; 1857 1844 … … 1860 1847 return VERR_INVALID_PARAMETER; 1861 1848 rc = GMMR0QueryHypervisorMemoryStatsReq((PGMMMEMSTATSREQ)pReqHdr); 1862 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1849 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1863 1850 break; 1864 1851 … … 1868 1855 if (u64Arg) 1869 1856 return VERR_INVALID_PARAMETER; 1870 rc = GMMR0QueryMemoryStatsReq(pGVM, pVM,idCpu, (PGMMMEMSTATSREQ)pReqHdr);1871 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1857 rc = GMMR0QueryMemoryStatsReq(pGVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr); 1858 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1872 1859 break; 1873 1860 … … 1875 1862 if (u64Arg) 1876 1863 return VERR_INVALID_PARAMETER; 1877 rc = GMMR0BalloonedPagesReq(pGVM, pVM,idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);1878 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1864 rc = GMMR0BalloonedPagesReq(pGVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr); 1865 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1879 1866 break; 1880 1867 … … 1882 1869 if (u64Arg) 1883 1870 return VERR_INVALID_PARAMETER; 1884 rc = GMMR0MapUnmapChunkReq(pGVM, pVM,(PGMMMAPUNMAPCHUNKREQ)pReqHdr);1885 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1871 rc = GMMR0MapUnmapChunkReq(pGVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr); 1872 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1886 1873 break; 1887 1874 … … 1889 1876 if (pReqHdr) 1890 1877 return VERR_INVALID_PARAMETER; 1891 rc = GMMR0SeedChunk(pGVM, pVM,idCpu, (RTR3PTR)u64Arg);1892 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1878 rc = GMMR0SeedChunk(pGVM, idCpu, (RTR3PTR)u64Arg); 1879 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1893 1880 break; 1894 1881 … … 1898 1885 if (u64Arg) 1899 1886 return VERR_INVALID_PARAMETER; 1900 rc = GMMR0RegisterSharedModuleReq(pGVM, pVM,idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);1901 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1887 rc = GMMR0RegisterSharedModuleReq(pGVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr); 1888 VMM_CHECK_SMAP_CHECK2(pGVM, 
RT_NOTHING); 1902 1889 break; 1903 1890 … … 1907 1894 if (u64Arg) 1908 1895 return VERR_INVALID_PARAMETER; 1909 rc = GMMR0UnregisterSharedModuleReq(pGVM, pVM,idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);1910 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1896 rc = GMMR0UnregisterSharedModuleReq(pGVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr); 1897 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1911 1898 break; 1912 1899 … … 1917 1904 || pReqHdr) 1918 1905 return VERR_INVALID_PARAMETER; 1919 rc = GMMR0ResetSharedModules(pGVM, pVM,idCpu);1920 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1906 rc = GMMR0ResetSharedModules(pGVM, idCpu); 1907 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1921 1908 break; 1922 1909 … … 1929 1916 || pReqHdr) 1930 1917 return VERR_INVALID_PARAMETER; 1931 rc = GMMR0CheckSharedModules(pGVM, pVM,idCpu);1932 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1918 rc = GMMR0CheckSharedModules(pGVM, idCpu); 1919 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1933 1920 break; 1934 1921 } … … 1939 1926 if (u64Arg) 1940 1927 return VERR_INVALID_PARAMETER; 1941 rc = GMMR0FindDuplicatePageReq(pGVM, pVM,(PGMMFINDDUPLICATEPAGEREQ)pReqHdr);1942 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1928 rc = GMMR0FindDuplicatePageReq(pGVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr); 1929 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1943 1930 break; 1944 1931 #endif … … 1947 1934 if (u64Arg) 1948 1935 return VERR_INVALID_PARAMETER; 1949 rc = GMMR0QueryStatisticsReq(pGVM, pVM,(PGMMQUERYSTATISTICSSREQ)pReqHdr);1950 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1936 rc = GMMR0QueryStatisticsReq(pGVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr); 1937 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1951 1938 break; 1952 1939 … … 1954 1941 if (u64Arg) 1955 1942 return VERR_INVALID_PARAMETER; 1956 rc = GMMR0ResetStatisticsReq(pGVM, pVM,(PGMMRESETSTATISTICSSREQ)pReqHdr);1957 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1943 rc = GMMR0ResetStatisticsReq(pGVM, (PGMMRESETSTATISTICSSREQ)pReqHdr); 1944 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1958 1945 
break; 1959 1946 … … 1965 1952 case VMMR0_DO_GCFGM_QUERY_VALUE: 1966 1953 { 1967 if (pGVM || pVM ||!pReqHdr || u64Arg || idCpu != NIL_VMCPUID)1954 if (pGVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID) 1968 1955 return VERR_INVALID_PARAMETER; 1969 1956 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr; … … 1982 1969 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value); 1983 1970 } 1984 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1971 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1985 1972 break; 1986 1973 } … … 1993 1980 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID) 1994 1981 return VERR_INVALID_PARAMETER; 1995 rc = PDMR0DriverCallReqHandler(pGVM, pVM,(PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);1996 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1982 rc = PDMR0DriverCallReqHandler(pGVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr); 1983 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 1997 1984 break; 1998 1985 } … … 2002 1989 if (!pReqHdr || u64Arg || idCpu != NIL_VMCPUID) 2003 1990 return VERR_INVALID_PARAMETER; 2004 rc = PDMR0DeviceCallReqHandler(pGVM, pVM,(PPDMDEVICECALLREQHANDLERREQ)pReqHdr);2005 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);1991 rc = PDMR0DeviceCallReqHandler(pGVM, (PPDMDEVICECALLREQHANDLERREQ)pReqHdr); 1992 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2006 1993 break; 2007 1994 } … … 2013 2000 { 2014 2001 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr; 2015 if (u64Arg || !pReq || !vmmR0IsValidSession(p VM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)2002 if (u64Arg || !pReq || !vmmR0IsValidSession(pGVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID) 2016 2003 return VERR_INVALID_PARAMETER; 2017 2004 rc = IntNetR0OpenReq(pSession, pReq); 2018 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2005 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2019 2006 break; 2020 2007 } 2021 2008 2022 2009 case VMMR0_DO_INTNET_IF_CLOSE: 2023 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(p VM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)2010 if (u64Arg || 
!pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID) 2024 2011 return VERR_INVALID_PARAMETER; 2025 2012 rc = IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr); 2026 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2013 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2027 2014 break; 2028 2015 2029 2016 2030 2017 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS: 2031 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(p VM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)2018 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID) 2032 2019 return VERR_INVALID_PARAMETER; 2033 2020 rc = IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr); 2034 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2021 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2035 2022 break; 2036 2023 2037 2024 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE: 2038 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(p VM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)2025 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID) 2039 2026 return VERR_INVALID_PARAMETER; 2040 2027 rc = IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr); 2041 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2028 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2042 2029 break; 2043 2030 2044 2031 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS: 2045 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(p VM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)2032 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID) 2046 2033 return VERR_INVALID_PARAMETER; 2047 2034 rc = IntNetR0IfSetMacAddressReq(pSession, 
(PINTNETIFSETMACADDRESSREQ)pReqHdr); 2048 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2035 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2049 2036 break; 2050 2037 2051 2038 case VMMR0_DO_INTNET_IF_SET_ACTIVE: 2052 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(p VM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)2039 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID) 2053 2040 return VERR_INVALID_PARAMETER; 2054 2041 rc = IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr); 2055 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2042 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2056 2043 break; 2057 2044 2058 2045 case VMMR0_DO_INTNET_IF_SEND: 2059 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(p VM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)2046 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID) 2060 2047 return VERR_INVALID_PARAMETER; 2061 2048 rc = IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr); 2062 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2049 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2063 2050 break; 2064 2051 2065 2052 case VMMR0_DO_INTNET_IF_WAIT: 2066 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(p VM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)2053 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID) 2067 2054 return VERR_INVALID_PARAMETER; 2068 2055 rc = IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr); 2069 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2056 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2070 2057 break; 2071 2058 2072 2059 case VMMR0_DO_INTNET_IF_ABORT_WAIT: 2073 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(p VM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)2060 if (u64Arg || !pReqHdr || 
!vmmR0IsValidSession(pGVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID) 2074 2061 return VERR_INVALID_PARAMETER; 2075 2062 rc = IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr); 2076 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2063 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2077 2064 break; 2078 2065 … … 2082 2069 */ 2083 2070 case VMMR0_DO_PCIRAW_REQ: 2084 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(p VM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)2085 return VERR_INVALID_PARAMETER; 2086 rc = PciRawR0ProcessReq(pGVM, p VM, pSession, (PPCIRAWSENDREQ)pReqHdr);2087 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2071 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pGVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID) 2072 return VERR_INVALID_PARAMETER; 2073 rc = PciRawR0ProcessReq(pGVM, pSession, (PPCIRAWSENDREQ)pReqHdr); 2074 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2088 2075 break; 2089 2076 #endif … … 2097 2084 if (u64Arg || pReqHdr || idCpu != 0) 2098 2085 return VERR_INVALID_PARAMETER; 2099 rc = NEMR0InitVM(pGVM , pVM);2100 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2086 rc = NEMR0InitVM(pGVM); 2087 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2101 2088 break; 2102 2089 … … 2104 2091 if (u64Arg || pReqHdr || idCpu != 0) 2105 2092 return VERR_INVALID_PARAMETER; 2106 rc = NEMR0InitVMPart2(pGVM , pVM);2107 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2093 rc = NEMR0InitVMPart2(pGVM); 2094 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2108 2095 break; 2109 2096 … … 2111 2098 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID) 2112 2099 return VERR_INVALID_PARAMETER; 2113 rc = NEMR0MapPages(pGVM, pVM,idCpu);2114 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2100 rc = NEMR0MapPages(pGVM, idCpu); 2101 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2115 2102 break; 2116 2103 … … 2118 2105 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID) 2119 2106 return VERR_INVALID_PARAMETER; 2120 rc = NEMR0UnmapPages(pGVM, 
pVM,idCpu);2121 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2107 rc = NEMR0UnmapPages(pGVM, idCpu); 2108 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2122 2109 break; 2123 2110 … … 2125 2112 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID) 2126 2113 return VERR_INVALID_PARAMETER; 2127 rc = NEMR0ExportState(pGVM, pVM,idCpu);2128 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2114 rc = NEMR0ExportState(pGVM, idCpu); 2115 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2129 2116 break; 2130 2117 … … 2132 2119 if (pReqHdr || idCpu == NIL_VMCPUID) 2133 2120 return VERR_INVALID_PARAMETER; 2134 rc = NEMR0ImportState(pGVM, pVM,idCpu, u64Arg);2135 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2121 rc = NEMR0ImportState(pGVM, idCpu, u64Arg); 2122 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2136 2123 break; 2137 2124 … … 2139 2126 if (u64Arg || pReqHdr || idCpu == NIL_VMCPUID) 2140 2127 return VERR_INVALID_PARAMETER; 2141 rc = NEMR0QueryCpuTick(pGVM, pVM,idCpu);2142 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2128 rc = NEMR0QueryCpuTick(pGVM, idCpu); 2129 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2143 2130 break; 2144 2131 … … 2146 2133 if (pReqHdr || idCpu == NIL_VMCPUID) 2147 2134 return VERR_INVALID_PARAMETER; 2148 rc = NEMR0ResumeCpuTickOnAll(pGVM, pVM,idCpu, u64Arg);2149 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2135 rc = NEMR0ResumeCpuTickOnAll(pGVM, idCpu, u64Arg); 2136 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2150 2137 break; 2151 2138 … … 2153 2140 if (u64Arg || pReqHdr) 2154 2141 return VERR_INVALID_PARAMETER; 2155 rc = NEMR0UpdateStatistics(pGVM, pVM,idCpu);2156 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2142 rc = NEMR0UpdateStatistics(pGVM, idCpu); 2143 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2157 2144 break; 2158 2145 … … 2161 2148 if (pReqHdr) 2162 2149 return VERR_INVALID_PARAMETER; 2163 rc = NEMR0DoExperiment(pGVM, pVM,idCpu, u64Arg);2164 VMM_CHECK_SMAP_CHECK2(p VM, RT_NOTHING);2150 rc = NEMR0DoExperiment(pGVM, idCpu, u64Arg); 2151 VMM_CHECK_SMAP_CHECK2(pGVM, RT_NOTHING); 2165 2152 break; 2166 2153 # 
endif … … 2200 2187 { 2201 2188 PGVM pGVM; 2202 PVMCC pVM;2203 2189 VMCPUID idCpu; 2204 2190 VMMR0OPERATION enmOperation; … … 2219 2205 { 2220 2206 return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pGVM, 2221 ((PVMMR0ENTRYEXARGS)pvArgs)->pVM,2222 2207 ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu, 2223 2208 ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation, … … 2251 2236 if ( pVM != NULL 2252 2237 && pGVM != NULL 2238 && pVM == pGVM /** @todo drop pGVM */ 2253 2239 && idCpu < pGVM->cCpus 2254 && p VM->pSession == pSession2255 && p VM->pSelf != NULL)2240 && pGVM->pSession == pSession 2241 && pGVM->pSelf == pVM) 2256 2242 { 2257 2243 switch (enmOperation) … … 2279 2265 VMMR0ENTRYEXARGS Args; 2280 2266 Args.pGVM = pGVM; 2281 Args.pVM = pVM;2282 2267 Args.idCpu = idCpu; 2283 2268 Args.enmOperation = enmOperation; … … 2294 2279 } 2295 2280 } 2296 return vmmR0EntryExWorker(pGVM, pVM,idCpu, enmOperation, pReq, u64Arg, pSession);2281 return vmmR0EntryExWorker(pGVM, idCpu, enmOperation, pReq, u64Arg, pSession); 2297 2282 } 2298 2283
注意:
瀏覽 TracChangeset
來幫助您使用更動檢視器