儲存庫 vbox 的更動 90420
圖例:
- 未更動
- 新增
- 刪除
-
trunk/include/VBox/err.h
r90418 r90420 1603 1603 /** Returned by PCI config space callbacks to indicate taking default action. */ 1604 1604 #define VINF_PDM_PCI_DO_DEFAULT (7200) 1605 /** Failed to abort entering a critical section in ring-0. */ 1606 #define VERR_PDM_CRITSECT_ABORT_FAILED (-7201) 1605 1607 /** @} */ 1606 1608 -
trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp
r90390 r90420 37 37 # include <iprt/semaphore.h> 38 38 #endif 39 #ifdef IN_RING0 40 # include <iprt/time.h> 41 #endif 39 42 #if defined(IN_RING3) || defined(IN_RING0) 40 43 # include <iprt/thread.h> … … 125 128 * @retval VERR_SEM_DESTROYED if destroyed. 126 129 * 127 * @param pVM The cross context VM structure. 128 * @param pCritSect The critsect. 129 * @param hNativeSelf The native thread handle. 130 * @param pSrcPos The source position of the lock operation. 131 * @param rcBusy The status code to return when we're in RC or R0 132 */ 133 static int pdmR3R0CritSectEnterContended(PVMCC pVM, PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, 130 * @param pVM The cross context VM structure. 131 * @param pVCpu The cross context virtual CPU structure if ring-0 and on 132 * an EMT, otherwise NULL. 133 * @param pCritSect The critsect. 134 * @param hNativeSelf The native thread handle. 135 * @param pSrcPos The source position of the lock operation. 136 * @param rcBusy The status code to return when we're in RC or R0 137 */ 138 static int pdmR3R0CritSectEnterContended(PVMCC pVM, PVMCPU pVCpu, PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, 134 139 PCRTLOCKVALSRCPOS pSrcPos, int rcBusy) 135 140 { … … 147 152 /* 148 153 * The wait loop. 149 */ 150 PSUPDRVSESSION pSession = pVM->pSession; 151 SUPSEMEVENT hEvent = (SUPSEMEVENT)pCritSect->s.Core.EventSem; 154 * 155 * This handles VERR_TIMEOUT and VERR_INTERRUPTED. 
156 */ 157 STAM_REL_PROFILE_START(&pCritSect->s.StatWait, a); 158 PSUPDRVSESSION const pSession = pVM->pSession; 159 SUPSEMEVENT const hEvent = (SUPSEMEVENT)pCritSect->s.Core.EventSem; 152 160 # ifdef IN_RING3 153 161 # ifdef PDMCRITSECT_STRICT 154 RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();162 RTTHREAD const hThreadSelf = RTThreadSelfAutoAdopt(); 155 163 int rc2 = RTLockValidatorRecExclCheckOrder(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT); 156 164 if (RT_FAILURE(rc2)) 157 165 return rc2; 158 166 # else 159 RTTHREAD hThreadSelf = RTThreadSelf();167 RTTHREAD const hThreadSelf = RTThreadSelf(); 160 168 # endif 169 # else /* IN_RING0 */ 170 uint64_t const tsStart = RTTimeNanoTS(); 171 uint64_t cNsMaxTotal = RT_NS_5MIN; 172 uint64_t const cNsMaxRetry = RT_NS_15SEC; 173 uint32_t cMsMaxOne = RT_MS_5SEC; 161 174 # endif 162 175 for (;;) … … 186 199 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, true); 187 200 # endif 188 int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_MS_5SEC);201 int const rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_MS_5SEC); 189 202 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT); 190 203 # else /* IN_RING0 */ 191 int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_MS_5SEC);204 int const rc = SUPSemEventWaitNoResume(pSession, hEvent, cMsMaxOne); 192 205 # endif /* IN_RING0 */ 193 206 194 207 /* 195 * Deal with the return code and critsect destruction.208 * Make sure the critical section hasn't been delete before continuing. 196 209 */ 197 210 if (RT_LIKELY(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC)) 198 211 { /* likely */ } 199 212 else 213 { 214 LogRel(("PDMCritSectEnter: Destroyed while waiting; pCritSect=%p rc=%Rrc\n", pCritSect, rc)); 200 215 return VERR_SEM_DESTROYED; 216 } 217 218 /* 219 * Most likely we're here because we got signalled. 
220 */ 201 221 if (rc == VINF_SUCCESS) 222 { 223 STAM_REL_PROFILE_STOP(&pCritSect->s.StatContentionWait, a); 202 224 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos); 203 if (RT_LIKELY(rc == VERR_TIMEOUT || rc == VERR_INTERRUPTED)) 204 { /* likely */ } 225 } 226 227 /* 228 * Timeout and interrupted waits needs careful handling in ring-0 229 * because we're cooperating with ring-3 on this critical section 230 * and thus need to make absolutely sure we won't get stuck here. 231 * 232 * The r0 interrupted case means something is pending (termination, 233 * signal, APC, debugger, whatever), so we must try our best to 234 * return to the caller and to ring-3 so it can be dealt with. 235 */ 236 if (RT_LIKELY(rc == VINF_TIMEOUT || rc == VERR_INTERRUPTED)) 237 { 238 # ifdef IN_RING0 239 uint64_t const cNsElapsed = RTTimeNanoTS() - tsStart; 240 int const rcTerm = RTThreadQueryTerminationStatus(NIL_RTTHREAD); 241 AssertMsg(rcTerm == VINF_SUCCESS || rcTerm == VERR_NOT_SUPPORTED || rcTerm == VINF_THREAD_IS_TERMINATING, 242 ("rcTerm=%Rrc\n", rcTerm)); 243 if (rcTerm == VERR_NOT_SUPPORTED) 244 cNsMaxTotal = RT_NS_1MIN; 245 246 if (rc == VERR_TIMEOUT) 247 { 248 /* Try return get out of here with a non-VINF_SUCCESS status if 249 the thread is terminating or if the timeout has been exceeded. */ 250 if ( rcTerm != VINF_THREAD_IS_TERMINATING 251 && cNsElapsed <= cNsMaxTotal) 252 continue; 253 } 254 else 255 { 256 /* For interrupt cases, we must return if we can. Only if we */ 257 if ( rcTerm != VINF_THREAD_IS_TERMINATING 258 && rcBusy == VINF_SUCCESS 259 && pVCpu != NULL 260 && cNsElapsed <= cNsMaxTotal) 261 continue; 262 } 263 264 /* 265 * Let try get out of here. We must very carefully undo the 266 * cLockers increment we did using compare-and-exchange so that 267 * we don't race the semaphore signalling in PDMCritSectLeave 268 * and end up with spurious wakeups and two owners at once. 
269 */ 270 uint32_t cNoIntWaits = 0; 271 uint32_t cCmpXchgs = 0; 272 int32_t cLockers = ASMAtomicReadS32(&pCritSect->s.Core.cLockers); 273 for (;;) 274 { 275 if (pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC) 276 { 277 if (cLockers > 0 && cCmpXchgs < _64M) 278 { 279 bool fRc = ASMAtomicCmpXchgExS32(&pCritSect->s.Core.cLockers, cLockers - 1, cLockers, &cLockers); 280 if (fRc) 281 { 282 LogFunc(("Aborting wait on %p (rc=%Rrc rcTerm=%Rrc cNsElapsed=%'RU64) -> %Rrc\n", pCritSect, 283 rc, rcTerm, cNsElapsed, rcBusy != VINF_SUCCESS ? rcBusy : rc)); 284 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatAbortedCritSectEnters); 285 return rcBusy != VINF_SUCCESS ? rcBusy : rc; 286 } 287 cCmpXchgs++; 288 ASMNopPause(); 289 continue; 290 } 291 292 if (cLockers == 0) 293 { 294 /* 295 * We are racing someone in PDMCritSectLeave. 296 * 297 * For the VERR_TIMEOUT case we'll just retry taking it the normal 298 * way for a while. For VERR_INTERRUPTED we're in for more fun as 299 * the previous owner might not have signalled the semaphore yet, 300 * so we'll do a short non-interruptible wait instead and then guru. 301 */ 302 if ( rc == VERR_TIMEOUT 303 && RTTimeNanoTS() - tsStart <= cNsMaxTotal + cNsMaxRetry) 304 break; 305 306 if ( rc == VERR_INTERRUPTED 307 && ( cNoIntWaits == 0 308 || RTTimeNanoTS() - (tsStart + cNsElapsed) < RT_NS_100MS)) 309 { 310 int const rc2 = SUPSemEventWait(pSession, hEvent, 1 /*ms*/); 311 if (rc2 == VINF_SUCCESS) 312 { 313 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectEntersWhileAborting); 314 STAM_REL_PROFILE_STOP(&pCritSect->s.StatContentionWait, a); 315 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos); 316 } 317 cNoIntWaits++; 318 cLockers = ASMAtomicReadS32(&pCritSect->s.Core.cLockers); 319 continue; 320 } 321 } 322 else 323 LogFunc(("Critical section %p has a broken cLockers count. Aborting.\n", pCritSect)); 324 325 /* Sabotage the critical section and return error to caller. 
*/ 326 ASMAtomicWriteU32(&pCritSect->s.Core.u32Magic, PDMCRITSECT_MAGIC_FAILED_ABORT); 327 LogRel(("PDMCritSectEnter: Failed to abort wait on pCritSect=%p (rc=%Rrc rcTerm=%Rrc)\n", 328 pCritSect, rc, rcTerm)); 329 return VERR_PDM_CRITSECT_ABORT_FAILED; 330 } 331 LogRel(("PDMCritSectEnter: Destroyed while aborting wait; pCritSect=%p/%#x rc=%Rrc rcTerm=%Rrc\n", 332 pCritSect, pCritSect->s.Core.u32Magic, rc, rcTerm)); 333 return VERR_SEM_DESTROYED; 334 } 335 336 /* We get here if we timed out. Just retry now that it 337 appears someone left already. */ 338 Assert(rc == VINF_TIMEOUT); 339 cMsMaxOne = 10 /*ms*/; 340 341 # else /* IN_RING3 */ 342 RT_NOREF(pVM, pVCpu, rcBusy); 343 # endif /* IN_RING3 */ 344 } 345 /* 346 * Any other return code is fatal. 347 */ 205 348 else 206 349 { 207 350 AssertMsgFailed(("rc=%Rrc\n", rc)); 208 return rc;351 return RT_FAILURE_NP(rc) ? rc : -rc; 209 352 } 210 211 # ifdef IN_RING0212 /* Something is pending (signal, APC, debugger, whatever), just go back213 to ring-3 so the kernel can deal with it when leaving kernel context.214 215 Note! We've incremented cLockers already and cannot safely decrement216 it without creating a race with PDMCritSectLeave, resulting in217 spurious wakeups. */218 RT_NOREF(rcBusy);219 /** @todo eliminate this and return rcBusy instead. Guru if rcBusy is220 * VINF_SUCCESS.221 *222 * Update: If we use cmpxchg to carefully decrement cLockers, we can avoid the223 * race and spurious wakeup. The race in question are the two decrement224 * operations, if we lose out to the PDMCritSectLeave CPU, it will signal the225 * semaphore and leave it signalled while cLockers is zero. If we use cmpxchg226 * to make sure this won't happen and repeate the loop should cLockers reach227 * zero (i.e. 
we're the only one around and the semaphore is or will soon be228 * signalled), we can make this work.229 *230 * The ring-0 RTSemEventWaitEx code never return VERR_INTERRUPTED for an already231 * signalled event, however we're racing the signal call here so it may not yet232 * be sinalled when we call RTSemEventWaitEx again... Maybe do a233 * non-interruptible wait for a short while? Or put a max loop count on this?234 * There is always the possiblity that the thread is in user mode and will be235 * killed before it gets to waking up the next waiting thread... We probably236 * need a general timeout here for ring-0 waits and retun rcBusy/guru if it237 * we get stuck here for too long...238 */239 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);240 rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_PREEMPT, NULL);241 AssertRC(rc);242 # else243 RT_NOREF(pVM, rcBusy);244 # endif245 353 } 246 354 /* won't get here */ … … 325 433 */ 326 434 NOREF(rcBusy); 327 return pdmR3R0CritSectEnterContended(pVM, pCritSect, hNativeSelf, pSrcPos, rcBusy);435 return pdmR3R0CritSectEnterContended(pVM, NULL, pCritSect, hNativeSelf, pSrcPos, rcBusy); 328 436 329 437 #elif defined(IN_RING0) … … 349 457 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 350 458 351 rc = pdmR3R0CritSectEnterContended(pVM, p CritSect, hNativeSelf, pSrcPos, rcBusy);459 rc = pdmR3R0CritSectEnterContended(pVM, pVCpu, pCritSect, hNativeSelf, pSrcPos, rcBusy); 352 460 353 461 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx); … … 360 468 /* Non-EMT. 
*/ 361 469 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 362 return pdmR3R0CritSectEnterContended(pVM, pCritSect, hNativeSelf, pSrcPos, rcBusy);470 return pdmR3R0CritSectEnterContended(pVM, NULL, pCritSect, hNativeSelf, pSrcPos, rcBusy); 363 471 364 472 # else /* old code: */ … … 368 476 if ( RTThreadPreemptIsEnabled(NIL_RTTHREAD) 369 477 && ASMIntAreEnabled()) 370 return pdmR3R0CritSectEnterContended(pVM, pCritSect, hNativeSelf, pSrcPos, rcBusy);478 return pdmR3R0CritSectEnterContended(pVM, VMMGetCpu(pVM), pCritSect, hNativeSelf, pSrcPos, rcBusy); 371 479 372 480 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock); -
trunk/src/VBox/VMM/VMMR3/PDMCritSect.cpp
r90379 r90420 55 55 { 56 56 RT_NOREF_PV(pVM); 57 STAM_REG(pVM, &pVM->pdm.s.StatQueuedCritSectLeaves, STAMTYPE_COUNTER, "/PDM/QueuedCritSectLeaves", STAMUNIT_OCCURENCES, 58 "Number of times a critical section leave request needed to be queued for ring-3 execution."); 57 STAM_REL_REG(pVM, &pVM->pdm.s.StatQueuedCritSectLeaves, STAMTYPE_COUNTER, "/PDM/QueuedCritSectLeaves", STAMUNIT_OCCURENCES, 58 "Number of times a critical section leave request needed to be queued for ring-3 execution."); 59 STAM_REL_REG(pVM, &pVM->pdm.s.StatAbortedCritSectEnters, STAMTYPE_COUNTER, "/PDM/AbortedCritSectEnters", STAMUNIT_OCCURENCES, 60 "Number of times we've successfully aborted a wait in ring-0."); 61 STAM_REL_REG(pVM, &pVM->pdm.s.StatCritSectEntersWhileAborting, STAMTYPE_COUNTER, "/PDM/CritSectEntersWhileAborting", STAMUNIT_OCCURENCES, 62 "Number of times we've got the critical section ownership while trying to abort a wait due to VERR_INTERRUPTED."); 59 63 return VINF_SUCCESS; 60 64 } … … 164 168 pCritSect->pszName = pszName; 165 169 166 STAMR3RegisterF(pVM, &pCritSect->StatContentionRZLock, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PDM/CritSects/%s/ContentionRZLock", pCritSect->pszName); 167 STAMR3RegisterF(pVM, &pCritSect->StatContentionRZLockBusy, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PDM/CritSects/%s/ContentionRZLockBusy", pCritSect->pszName); 168 STAMR3RegisterF(pVM, &pCritSect->StatContentionRZUnlock, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PDM/CritSects/%s/ContentionRZUnlock", pCritSect->pszName); 169 STAMR3RegisterF(pVM, &pCritSect->StatContentionR3, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PDM/CritSects/%s/ContentionR3", pCritSect->pszName); 170 STAMR3RegisterF(pVM, &pCritSect->StatContentionRZLock, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, 171 STAMUNIT_OCCURENCES, NULL, "/PDM/CritSects/%s/ContentionRZLock", pCritSect->pszName); 172 STAMR3RegisterF(pVM, 
&pCritSect->StatContentionRZLockBusy, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, 173 STAMUNIT_OCCURENCES, NULL, "/PDM/CritSects/%s/ContentionRZLockBusy", pCritSect->pszName); 174 STAMR3RegisterF(pVM, &pCritSect->StatContentionRZUnlock, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, 175 STAMUNIT_OCCURENCES, NULL, "/PDM/CritSects/%s/ContentionRZUnlock", pCritSect->pszName); 176 STAMR3RegisterF(pVM, &pCritSect->StatContentionR3, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, 177 STAMUNIT_OCCURENCES, NULL, "/PDM/CritSects/%s/ContentionR3", pCritSect->pszName); 178 STAMR3RegisterF(pVM, &pCritSect->StatContentionWait, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, 179 STAMUNIT_TICKS_PER_OCCURENCE, NULL, "/PDM/CritSects/%s/ContentionWait", pCritSect->pszName); 170 180 #ifdef VBOX_WITH_STATISTICS 171 STAMR3RegisterF(pVM, &pCritSect->StatLocked, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_OCCURENCE, NULL, "/PDM/CritSects/%s/Locked", pCritSect->pszName); 181 STAMR3RegisterF(pVM, &pCritSect->StatLocked, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, 182 STAMUNIT_TICKS_PER_OCCURENCE, NULL, "/PDM/CritSects/%s/Locked", pCritSect->pszName); 172 183 #endif 173 184 -
trunk/src/VBox/VMM/include/PDMInternal.h
r90379 r90420 444 444 /** R3 lock contention. */ 445 445 STAMCOUNTER StatContentionR3; 446 /** Profiling waiting on the lock (all rings). */ 447 STAMPROFILE StatContentionWait; 446 448 /** Profiling the time the section is locked. */ 447 449 STAMPROFILEADV StatLocked; … … 450 452 /** Pointer to private critical section data. */ 451 453 typedef PDMCRITSECTINT *PPDMCRITSECTINT; 454 455 /** Special magic value set when we failed to abort entering in ring-0 due to a 456 * timeout, interruption or pending thread termination. */ 457 #define PDMCRITSECT_MAGIC_FAILED_ABORT UINT32_C(0x0bad0326) 452 458 453 459 /** Indicates that the critical section is queued for unlock. … … 1466 1472 /** Number of times a critical section leave request needed to be queued for ring-3 execution. */ 1467 1473 STAMCOUNTER StatQueuedCritSectLeaves; 1474 /** Number of times we've successfully aborted a wait in ring-0. */ 1475 STAMCOUNTER StatAbortedCritSectEnters; 1476 /** Number of times we've got the critical section ownership while trying to 1477 * abort a wait due to VERR_INTERRUPTED. */ 1478 STAMCOUNTER StatCritSectEntersWhileAborting; 1468 1479 } PDM; 1469 1480 AssertCompileMemberAlignment(PDM, CritSect, 8);
注意: 請參閱 TracChangeset 以瞭解如何使用更動檢視器。