VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp@20702

Last change on this file since 20702 was 20702, checked in by vboxsync, 16 years ago

update.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 16.5 KB

/* $Id: PDMAllCritSect.cpp 20702 2009-06-19 08:57:31Z vboxsync $ */
/** @file
 * PDM - Critical Sections, All Contexts.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PDM//_CRITSECT
#include "PDMInternal.h"
#include <VBox/pdm.h>
#include <VBox/mm.h>
#include <VBox/vm.h>
#include <VBox/err.h>
#include <VBox/hwaccm.h>

#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/assert.h>
#ifdef IN_RING3
# include <iprt/semaphore.h>
#endif


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** The number of loops to spin for in ring-3. */
#define PDMCRITSECT_SPIN_COUNT_R3   20
/** The number of loops to spin for in ring-0. */
#define PDMCRITSECT_SPIN_COUNT_R0   256
/** The number of loops to spin for in the raw-mode context. */
#define PDMCRITSECT_SPIN_COUNT_RC   256

/** @def PDMCRITSECT_STRICT
 * Enables/disables PDM critsect strictness like deadlock detection. */
#if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING)
# define PDMCRITSECT_STRICT
#endif


/**
 * Gets the ring-3 native thread handle of the calling thread.
 *
 * @returns native thread handle (ring-3).
 * @param   pCritSect   The critical section. This is used in R0 and RC.
 */
DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectGetNativeSelf(PPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    NOREF(pCritSect);
    RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
#else
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);
    PVM            pVM         = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
    PVMCPU         pVCpu       = VMMGetCpu(pVM);             AssertPtr(pVCpu);
    RTNATIVETHREAD hNativeSelf = pVCpu->hNativeThread;       Assert(hNativeSelf != NIL_RTNATIVETHREAD);
#endif
    return hNativeSelf;
}
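
/* Illustrative note, not part of the original source: CTX_SUFF() is the VBox
 * context-suffix macro, which pastes R3, R0 or RC onto a member or macro name
 * depending on which context the file is compiled for. A rough sketch of the
 * idea (the real definition lives in VBox/cdefs.h):
 *
 *     #if defined(IN_RC)
 *     # define CTX_SUFF(name)  name##RC
 *     #elif defined(IN_RING0)
 *     # define CTX_SUFF(name)  name##R0
 *     #else
 *     # define CTX_SUFF(name)  name##R3
 *     #endif
 *
 * So pCritSect->s.CTX_SUFF(pVM) resolves to s.pVMR3, s.pVMR0 or s.pVMRC, and
 * CTX_SUFF(PDMCRITSECT_SPIN_COUNT_) in PDMCritSectEnter() below picks the
 * matching spin count for the current context.
 */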


/**
 * Tail code called when we've won the battle for the lock.
 *
 * @returns VINF_SUCCESS.
 *
 * @param   pCritSect   The critical section.
 * @param   hNativeSelf The native handle of this thread.
 */
DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf)
{
    AssertMsg(pCritSect->s.Core.NativeThreadOwner == NIL_RTNATIVETHREAD, ("NativeThreadOwner=%p\n", pCritSect->s.Core.NativeThreadOwner));
    Assert(!(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK));

    pCritSect->s.Core.cNestings = 1;
    ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf);

# if defined(PDMCRITSECT_STRICT) && defined(IN_RING3)
    pCritSect->s.Core.Strict.pszEnterFile = NULL;
    pCritSect->s.Core.Strict.u32EnterLine = 0;
    pCritSect->s.Core.Strict.uEnterId     = 0;
    RTTHREAD hSelf = RTThreadSelf();
    ASMAtomicWriteHandle(&pCritSect->s.Core.Strict.ThreadOwner, hSelf);
    RTThreadWriteLockInc(hSelf);
# endif

    STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
    return VINF_SUCCESS;
}
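
/* Illustrative note, not part of the original source: cLockers is the heart of
 * the scheme.  It is -1 while the section is free; the first enter does an atomic
 * compare-exchange from -1 to 0, nested enters and contended waiters increment it,
 * and every leave decrements it.  Worked example: free (-1) -> owner enters (0) ->
 * a second thread queues up (1) -> owner leaves (0, which is >= 0, so one waiter
 * is signalled) -> the waiter becomes owner (still 0) -> it leaves (-1, free again).
 */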


#ifdef IN_RING3
/**
 * Deals with the contended case in ring-3.
 *
 * @returns VINF_SUCCESS or VERR_SEM_DESTROYED.
 * @param   pCritSect   The critsect.
 * @param   hNativeSelf The native thread handle.
 */
static int pdmR3CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf)
{
    /*
     * Start waiting.
     */
    if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf);
    STAM_COUNTER_INC(&pCritSect->s.StatContentionR3);

    /*
     * The wait loop.
     */
    PSUPDRVSESSION pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
    SUPSEMEVENT    hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
# ifdef PDMCRITSECT_STRICT
    RTTHREAD hSelf = RTThreadSelf();
    if (hSelf == NIL_RTTHREAD)
        RTThreadAdopt(RTTHREADTYPE_DEFAULT, 0, NULL, &hSelf);
# endif
    for (;;)
    {
# ifdef PDMCRITSECT_STRICT
        RTThreadBlocking(hSelf, RTTHREADSTATE_CRITSECT, (uintptr_t)pCritSect, NULL, 0, 0);
# endif
        int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
# ifdef PDMCRITSECT_STRICT
        RTThreadUnblocked(hSelf, RTTHREADSTATE_CRITSECT);
# endif
        if (RT_UNLIKELY(pCritSect->s.Core.u32Magic != RTCRITSECT_MAGIC))
            return VERR_SEM_DESTROYED;
        if (rc == VINF_SUCCESS)
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf);
        AssertMsg(rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
    }
    /* won't get here */
}
#endif /* IN_RING3 */


/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in GC or R0
 *                      and the section is busy.
 */
VMMDECL(int) PDMCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy)
{
    Assert(pCritSect->s.Core.cNestings < 8);  /* useful to catch incorrect locking */

    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* Not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        pCritSect->s.Core.cNestings++;
        pCritSect->s.Core.fFlags &= ~PDMCRITSECT_FLAGS_PENDING_UNLOCK;
        return VINF_SUCCESS;
    }

    /*
     * Spin for a bit without incrementing the counter.
     */
    /** @todo Move this to cfgm variables since it doesn't make sense to spin on UNI
     *        cpu systems. */
    int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
    while (cSpinsLeft-- > 0)
    {
        if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf);
        /** @todo need pause/nop instruction here! */
        /** @todo Should use monitor/mwait on e.g. &cLockers here, possibly with a
           cli'ed pendingpreemption check up front using sti w/ instruction fusing
           for avoiding races. Hmm ... This is assuming the other party is actually
           executing code on another CPU... */
    }

#ifdef IN_RING3
    /*
     * Take the slow path.
     */
    return pdmR3CritSectEnterContended(pCritSect, hNativeSelf);
#else
    /*
     * Return busy.
     */
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
    LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
    return rcBusy;
#endif
}
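
/* Illustrative usage, not part of the original source: a device or driver
 * typically brackets access to shared state like this (pThis and its CritSect
 * and cAccesses members are hypothetical):
 *
 *     int rc = PDMCritSectEnter(&pThis->CritSect, VERR_SEM_BUSY);
 *     if (rc == VINF_SUCCESS)
 *     {
 *         pThis->cAccesses++;                    // hypothetical shared state
 *         PDMCritSectLeave(&pThis->CritSect);
 *     }
 *     // else: in R0/RC this is the rcBusy value, and the caller usually
 *     //       defers the work to ring-3.
 */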


/**
 * Try enter a critical section.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect   The critical section.
 */
VMMDECL(int) PDMCritSectTryEnter(PPDMCRITSECT pCritSect)
{
    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* Not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        pCritSect->s.Core.cNestings++;
        pCritSect->s.Core.fFlags &= ~PDMCRITSECT_FLAGS_PENDING_UNLOCK;
        return VINF_SUCCESS;
    }

    /* no spinning */

    /*
     * Return busy.
     */
#ifdef IN_RING3
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
#else
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
#endif
    LogFlow(("PDMCritSectTryEnter: locked\n"));
    return VERR_SEM_BUSY;
}
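
/* Illustrative usage, not part of the original source: try-enter suits paths
 * that must not block (pThis and fDirty are hypothetical):
 *
 *     if (PDMCritSectTryEnter(&pThis->CritSect) == VINF_SUCCESS)
 *     {
 *         pThis->fDirty = true;                  // hypothetical shared state
 *         PDMCritSectLeave(&pThis->CritSect);
 *     }
 *     // else: the section was busy; skip the optional work or retry later.
 */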


#ifdef IN_RING3
/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   fCallHost   Whether this is a VMMGCCallHost() or VMMR0CallHost() request.
 */
VMMR3DECL(int) PDMR3CritSectEnterEx(PPDMCRITSECT pCritSect, bool fCallHost)
{
    int rc = PDMCritSectEnter(pCritSect, VERR_INTERNAL_ERROR);
    if (    rc == VINF_SUCCESS
        &&  fCallHost
        &&  pCritSect->s.Core.Strict.ThreadOwner != NIL_RTTHREAD)
    {
        RTThreadWriteLockDec(pCritSect->s.Core.Strict.ThreadOwner);
        ASMAtomicWriteHandle(&pCritSect->s.Core.Strict.ThreadOwner, NIL_RTTHREAD);
    }
    return rc;
}
#endif /* IN_RING3 */


/**
 * Leaves a critical section entered with PDMCritSectEnter().
 *
 * @param   pCritSect   The PDM critical section to leave.
 */
VMMDECL(void) PDMCritSectLeave(PPDMCRITSECT pCritSect)
{
    Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);
    Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pCritSect));
    Assert(pCritSect->s.Core.cNestings >= 1);

    /*
     * Nested leave.
     */
    if (pCritSect->s.Core.cNestings > 1)
    {
        pCritSect->s.Core.cNestings--;
        ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
        return;
    }

#if defined(IN_RING3) || defined(IN_RING0)
    /*
     * Leave for real.
     */
    /* update members. */
# ifdef IN_RING3
    RTSEMEVENT hEventToSignal = pCritSect->s.EventToSignal;
    pCritSect->s.EventToSignal = NIL_RTSEMEVENT;
#  if defined(PDMCRITSECT_STRICT)
    if (pCritSect->s.Core.Strict.ThreadOwner != NIL_RTTHREAD)
        RTThreadWriteLockDec(pCritSect->s.Core.Strict.ThreadOwner);
    ASMAtomicWriteHandle(&pCritSect->s.Core.Strict.ThreadOwner, NIL_RTTHREAD);
#  endif
# endif
    pCritSect->s.Core.fFlags &= ~PDMCRITSECT_FLAGS_PENDING_UNLOCK;
    Assert(pCritSect->s.Core.Strict.ThreadOwner == NIL_RTTHREAD);
    ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
    pCritSect->s.Core.cNestings--;

    /* stop and decrement lockers. */
    STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
    if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) >= 0)
    {
        /* Someone is waiting, wake up one of them. */
        SUPSEMEVENT    hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
        PSUPDRVSESSION pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
        int rc = SUPSemEventSignal(pSession, hEvent);
        AssertRC(rc);
    }

# ifdef IN_RING3
    /* Signal exit event. */
    if (hEventToSignal != NIL_RTSEMEVENT)
    {
        LogBird(("Signalling %#x\n", hEventToSignal));
        int rc = RTSemEventSignal(hEventToSignal);
        AssertRC(rc);
    }
# endif

#else  /* IN_RC */
    /*
     * Try leave it.
     */
    if (pCritSect->s.Core.cLockers == 0)
    {
        pCritSect->s.Core.cNestings = 0;
        RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
        pCritSect->s.Core.fFlags &= ~PDMCRITSECT_FLAGS_PENDING_UNLOCK;
        STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);

        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
        if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
            return;

        /* darn, someone raced in on us. */
        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
        STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
        pCritSect->s.Core.cNestings = 1;
    }
    pCritSect->s.Core.fFlags |= PDMCRITSECT_FLAGS_PENDING_UNLOCK;

    /*
     * Queue the request.
     */
    PVM      pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
    PVMCPU   pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
    uint32_t i     = pVCpu->pdm.s.cQueuedCritSectLeaves++;
    LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
    AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectsLeaves));
    pVCpu->pdm.s.apQueuedCritSectsLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
    VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
    VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
    STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
#endif /* IN_RC */
}
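
/* Illustrative note, not part of the original source: in the raw-mode context a
 * contended leave cannot signal the ring-3 event semaphore directly.  Instead the
 * code above marks the section with PDMCRITSECT_FLAGS_PENDING_UNLOCK, queues its
 * ring-3 address in pVCpu->pdm.s.apQueuedCritSectsLeaves, and raises
 * VMCPU_FF_PDM_CRITSECT plus VMCPU_FF_TO_R3 so that PDMCritSectFF() below can
 * perform the real leave once execution returns to ring-3 (or ring-0).
 */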


#if defined(IN_RING3) || defined(IN_RING0)
/**
 * Process the critical sections queued for ring-3 'leave'.
 *
 * @param   pVCpu   The VMCPU handle.
 */
VMMDECL(void) PDMCritSectFF(PVMCPU pVCpu)
{
    Assert(pVCpu->pdm.s.cQueuedCritSectLeaves > 0);

    const RTUINT c = pVCpu->pdm.s.cQueuedCritSectLeaves;
    for (RTUINT i = 0; i < c; i++)
    {
# ifdef IN_RING3
        PPDMCRITSECT pCritSect = pVCpu->pdm.s.apQueuedCritSectsLeaves[i];
# else
        PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC(pVCpu->CTX_SUFF(pVM), pVCpu->pdm.s.apQueuedCritSectsLeaves[i]);
# endif

        PDMCritSectLeave(pCritSect);
        LogFlow(("PDMR3CritSectFF: %p\n", pCritSect));
    }

    pVCpu->pdm.s.cQueuedCritSectLeaves = 0;
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PDM_CRITSECT);
}
#endif /* IN_RING3 || IN_RING0 */


/**
 * Checks whether the caller is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsOwner(PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    PVM    pVM   = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
    PVMCPU pVCpu = VMMGetCpu(pVM);             AssertPtr(pVCpu);
    if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
        return false;
    return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
#endif
}


/**
 * Checks whether the specified VCPU is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 * @param   idCpu       The VCPU id.
 */
VMMDECL(bool) PDMCritSectIsOwnerEx(PCPDMCRITSECT pCritSect, VMCPUID idCpu)
{
#ifdef IN_RING3
    NOREF(idCpu);
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    PVM pVM = pCritSect->s.CTX_SUFF(pVM);
    AssertPtr(pVM);
    Assert(idCpu < pVM->cCPUs);
    return pCritSect->s.Core.NativeThreadOwner == pVM->aCpus[idCpu].hNativeThread
        && (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
#endif
}


/**
 * Checks if somebody currently owns the critical section.
 *
 * @returns true if locked.
 * @returns false if not locked.
 *
 * @param   pCritSect   The critical section.
 *
 * @remarks This doesn't prove that no deadlocks will occur later on; it's
 *          just a debugging tool.
 */
VMMDECL(bool) PDMCritSectIsOwned(PCPDMCRITSECT pCritSect)
{
    return pCritSect->s.Core.NativeThreadOwner != NIL_RTNATIVETHREAD
        && (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
}


/**
 * Checks if a critical section is initialized or not.
 *
 * @returns true if initialized.
 * @returns false if not initialized.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsInitialized(PCPDMCRITSECT pCritSect)
{
    return RTCritSectIsInitialized(&pCritSect->s.Core);
}


/**
 * Gets the recursion depth.
 *
 * @returns The recursion depth.
 * @param   pCritSect   The critical section.
 */
VMMDECL(uint32_t) PDMCritSectGetRecursion(PCPDMCRITSECT pCritSect)
{
    return RTCritSectGetRecursion(&pCritSect->s.Core);
}