VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSectRw.cpp@91815

Last change on this file since 91815 was 91815, checked in by vboxsync, 3 years ago

VMM/PDMCritSectRw: Don't preempt while on custom stack. [build fix] bugref:10124

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 85.3 KB
 
1/* $Id: PDMAllCritSectRw.cpp 91815 2021-10-18 09:47:29Z vboxsync $ */
2/** @file
3 * IPRT - Read/Write Critical Section, Generic.
4 */
5
6/*
7 * Copyright (C) 2009-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.alldomusa.eu.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PDM_CRITSECTRW
23#include "PDMInternal.h"
24#include <VBox/vmm/pdmcritsectrw.h>
25#include <VBox/vmm/mm.h>
26#include <VBox/vmm/vmm.h>
27#include <VBox/vmm/vmcc.h>
28#include <VBox/err.h>
29#include <VBox/vmm/hm.h>
30
31#include <VBox/log.h>
32#include <iprt/asm.h>
33#include <iprt/asm-amd64-x86.h>
34#include <iprt/assert.h>
35#ifdef IN_RING3
36# include <iprt/lockvalidator.h>
37#endif
38#if defined(IN_RING3) || defined(IN_RING0)
39# include <iprt/semaphore.h>
40# include <iprt/thread.h>
41#endif
42#ifdef IN_RING0
43# include <iprt/time.h>
44#endif
45#ifdef RT_ARCH_AMD64
46# include <iprt/x86.h>
47#endif
48
49
50/*********************************************************************************************************************************
51* Defined Constants And Macros *
52*********************************************************************************************************************************/
53#if 0 /* unused */
54/** The number of loops to spin for shared access in ring-3. */
55#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R3 20
56/** The number of loops to spin for shared access in ring-0. */
57#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R0 128
58/** The number of loops to spin for shared access in the raw-mode context. */
59#define PDMCRITSECTRW_SHRD_SPIN_COUNT_RC 128
60
61/** The number of loops to spin for exclusive access in ring-3. */
62#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R3 20
63/** The number of loops to spin for exclusive access in ring-0. */
64#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R0 256
65/** The number of loops to spin for exclusive access in the raw-mode context. */
66#define PDMCRITSECTRW_EXCL_SPIN_COUNT_RC 256
67#endif
68
69/** Max number of write or write/read recursions. */
70#define PDM_CRITSECTRW_MAX_RECURSIONS _1M
71
72/** Skips some of the overly paranoid atomic reads and updates.
73 * Makes some assumptions about cache coherence, though not brave enough to
74 * skip the concluding atomic update. */
75#define PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
76
77/** For reading RTCRITSECTRWSTATE::s::u64State. */
78#ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
79# define PDMCRITSECTRW_READ_STATE(a_pu64State) ASMAtomicUoReadU64(a_pu64State)
80#else
81# define PDMCRITSECTRW_READ_STATE(a_pu64State) ASMAtomicReadU64(a_pu64State)
82#endif
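
/* Every state update in this file follows the same optimistic
 * read/modify/compare-exchange loop over the packed 64-bit state word
 * (reader count, waiting-reader count, writer count and direction).  A
 * minimal sketch of a reader increment, assuming a PPDMCRITSECTRW pThis in
 * scope and the RTCSRW_* masks/shifts from the RTCRITSECTRW definition:
 *
 *   uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
 *   for (;;)
 *   {
 *       uint64_t const u64OldState = u64State;
 *       uint64_t cReads = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
 *       u64State &= ~RTCSRW_CNT_RD_MASK;
 *       u64State |= (cReads + 1) << RTCSRW_CNT_RD_SHIFT;
 *       if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
 *           break;                       // new reader count published
 *       ASMNopPause();                   // lost the race; re-read and retry
 *       u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
 *   }
 */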
83
84
85/* Undefine the automatic VBOX_STRICT API mappings. */
86#undef PDMCritSectRwEnterExcl
87#undef PDMCritSectRwTryEnterExcl
88#undef PDMCritSectRwEnterShared
89#undef PDMCritSectRwTryEnterShared
90
91
92/*********************************************************************************************************************************
93* Global Variables *
94*********************************************************************************************************************************/
95#if defined(RTASM_HAVE_CMP_WRITE_U128) && defined(RT_ARCH_AMD64)
96static int32_t g_fCmpWriteSupported = -1;
97#endif
98
99
100/*********************************************************************************************************************************
101* Internal Functions *
102*********************************************************************************************************************************/
103#if !defined(VMM_R0_SWITCH_STACK) || !defined(IN_RING0)
104static int pdmCritSectRwLeaveSharedWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal);
105#else
106DECLASM(int) pdmCritSectRwLeaveSharedWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal);
107DECLASM(int) StkBack_pdmCritSectRwLeaveSharedWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal);
108#endif
109
110
111#ifdef RTASM_HAVE_CMP_WRITE_U128
112
113# ifdef RT_ARCH_AMD64
114/**
115 * Called once to initialize g_fCmpWriteSupported.
116 */
117DECL_NO_INLINE(static, bool) pdmCritSectRwIsCmpWriteU128SupportedSlow(void)
118{
119 bool const fCmpWriteSupported = RT_BOOL(ASMCpuId_ECX(1) & X86_CPUID_FEATURE_ECX_CX16);
120 ASMAtomicWriteS32(&g_fCmpWriteSupported, fCmpWriteSupported);
121 return fCmpWriteSupported;
122}
123# endif
124
125
126/**
127 * Indicates whether hardware actually supports 128-bit compare & write.
128 */
129DECL_FORCE_INLINE(bool) pdmCritSectRwIsCmpWriteU128Supported(void)
130{
131# ifdef RT_ARCH_AMD64
132 int32_t const fCmpWriteSupported = g_fCmpWriteSupported;
133 if (RT_LIKELY(fCmpWriteSupported >= 0))
134 return fCmpWriteSupported != 0;
135 return pdmCritSectRwIsCmpWriteU128SupportedSlow();
136# else
137 return true;
138# endif
139}
140
141#endif /* RTASM_HAVE_CMP_WRITE_U128 */
142
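
/* The tri-state g_fCmpWriteSupported cache (-1 = unknown, 0 = no, 1 = yes)
 * is a lazy cache of the CPUID CMPXCHG16B feature bit, so the slow CPUID
 * probe is only taken while the value is still unknown.  A sketch of how the
 * exclusive-enter path further down branches on it:
 *
 *   if (pdmCritSectRwIsCmpWriteU128Supported())
 *   {
 *       // 128-bit path: claim the state word and the writer handle in a
 *       // single compare-and-write.
 *   }
 *   else
 *   {
 *       // 64-bit fallback: update the state word first, then cmpxchg the
 *       // writer handle separately.
 *   }
 */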
143/**
144 * Gets the ring-3 native thread handle of the calling thread.
145 *
146 * @returns native thread handle (ring-3).
147 * @param pVM The cross context VM structure.
148 * @param pThis The read/write critical section. This is only used in
149 * R0 and RC.
150 */
151DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectRwGetNativeSelf(PVMCC pVM, PCPDMCRITSECTRW pThis)
152{
153#ifdef IN_RING3
154 RT_NOREF(pVM, pThis);
155 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
156#else
157 AssertMsgReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, ("%RX32\n", pThis->s.Core.u32Magic),
158 NIL_RTNATIVETHREAD);
159 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
160 RTNATIVETHREAD hNativeSelf = pVCpu ? pVCpu->hNativeThread : NIL_RTNATIVETHREAD;
161 Assert(hNativeSelf != NIL_RTNATIVETHREAD);
162#endif
163 return hNativeSelf;
164}
165
166
167DECL_NO_INLINE(static, int) pdmCritSectRwCorrupted(PPDMCRITSECTRW pThis, const char *pszMsg)
168{
169 ASMAtomicWriteU32(&pThis->s.Core.u32Magic, PDMCRITSECTRW_MAGIC_CORRUPT);
170 LogRel(("PDMCritSect: %s pCritSect=%p\n", pszMsg, pThis));
171 return VERR_PDM_CRITSECTRW_IPE;
172}
173
174
175
176#ifdef IN_RING3
177/**
178 * Changes the lock validator sub-class of the read/write critical section.
179 *
180 * It is recommended to try to make sure that nobody is using this critical section
181 * while changing the value.
182 *
183 * @returns The old sub-class. RTLOCKVAL_SUB_CLASS_INVALID is returned if the
184 * lock validator isn't compiled in or either of the parameters is
185 * invalid.
186 * @param pThis Pointer to the read/write critical section.
187 * @param uSubClass The new sub-class value.
188 */
189VMMDECL(uint32_t) PDMR3CritSectRwSetSubClass(PPDMCRITSECTRW pThis, uint32_t uSubClass)
190{
191 AssertPtrReturn(pThis, RTLOCKVAL_SUB_CLASS_INVALID);
192 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
193# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
194 AssertReturn(!(pThis->s.Core.fFlags & RTCRITSECT_FLAGS_NOP), RTLOCKVAL_SUB_CLASS_INVALID);
195
196 RTLockValidatorRecSharedSetSubClass(pThis->s.Core.pValidatorRead, uSubClass);
197 return RTLockValidatorRecExclSetSubClass(pThis->s.Core.pValidatorWrite, uSubClass);
198# else
199 NOREF(uSubClass);
200 return RTLOCKVAL_SUB_CLASS_INVALID;
201# endif
202}
203#endif /* IN_RING3 */
204
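/* A minimal usage sketch, assuming a hypothetical pThis->CritSectRw member:
 *
 *   // Give the section its own lock-order class right after creation (R3 only).
 *   uint32_t uOld = PDMR3CritSectRwSetSubClass(&pThis->CritSectRw,
 *                                              RTLOCKVAL_SUB_CLASS_USER + 1);
 *   // uOld is RTLOCKVAL_SUB_CLASS_INVALID when the lock validator isn't
 *   // compiled in; that is fine to ignore.
 */
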
205
206/**
207 * Worker for pdmCritSectRwEnterShared returning with read-ownership of the CS.
208 */
209DECL_FORCE_INLINE(int) pdmCritSectRwEnterSharedGotIt(PPDMCRITSECTRW pThis, PCRTLOCKVALSRCPOS pSrcPos,
210 bool fNoVal, RTTHREAD hThreadSelf)
211{
212#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
213 if (!fNoVal)
214 RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
215#else
216 RT_NOREF(pSrcPos, fNoVal, hThreadSelf);
217#endif
218
219 /* got it! */
220 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared));
221 Assert((PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT));
222 return VINF_SUCCESS;
223}
224
225/**
226 * Worker for pdmCritSectRwEnterShared and pdmCritSectRwEnterSharedBailOut
227 * that decrements the wait count and maybe resets the semaphore.
228 */
229DECLINLINE(int) pdmCritSectRwEnterSharedGotItAfterWaiting(PVMCC pVM, PPDMCRITSECTRW pThis, uint64_t u64State,
230 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal, RTTHREAD hThreadSelf)
231{
232 for (;;)
233 {
234 uint64_t const u64OldState = u64State;
235 uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
236 AssertReturn(cWait > 0, pdmCritSectRwCorrupted(pThis, "Invalid waiting read count"));
237 AssertReturn((u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT > 0,
238 pdmCritSectRwCorrupted(pThis, "Invalid read count"));
239 cWait--;
240 u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
241 u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;
242
243 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
244 {
245 if (cWait == 0)
246 {
247 if (ASMAtomicXchgBool(&pThis->s.Core.fNeedReset, false))
248 {
249 int rc = SUPSemEventMultiReset(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
250 AssertRCReturn(rc, rc);
251 }
252 }
253 return pdmCritSectRwEnterSharedGotIt(pThis, pSrcPos, fNoVal, hThreadSelf);
254 }
255
256 ASMNopPause();
257 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
258 ASMNopPause();
259
260 u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
261 }
262 /* not reached */
263}
264
265
266#if defined(IN_RING0) || (defined(IN_RING3) && defined(PDMCRITSECTRW_STRICT))
267/**
268 * Worker for pdmCritSectRwEnterSharedContended that decrements both read counts
269 * and returns @a rc.
270 *
271 * @note May return VINF_SUCCESS if we race the exclusive leave function and
272 * come out on the bottom.
273 *
274 * Ring-3 only calls in a case where it is _not_ acceptable to take the
275 * lock, so even if we get the lock we'll have to leave. In the ring-0
276 * contexts, we can safely return VINF_SUCCESS in case of a race.
277 */
278DECL_NO_INLINE(static, int) pdmCritSectRwEnterSharedBailOut(PVMCC pVM, PPDMCRITSECTRW pThis, int rc,
279 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal, RTTHREAD hThreadSelf)
280{
281#ifdef IN_RING0
282 uint64_t const tsStart = RTTimeNanoTS();
283 uint64_t cNsElapsed = 0;
284#endif
285 for (;;)
286 {
287 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
288 uint64_t u64OldState = u64State;
289
290 uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
291 AssertReturn(cWait > 0, pdmCritSectRwCorrupted(pThis, "Invalid waiting read count on bailout"));
292 cWait--;
293
294 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
295 AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis, "Invalid read count on bailout"));
296
297 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
298 {
299 c--;
300 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
301 u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);
302 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
303 return rc;
304 }
305 else
306 {
307 /*
308 * The direction changed, so we can actually get the lock now.
309 *
310 * This means that we _have_ to wait on the semaphore to be signalled
311 * so we can properly reset it. Otherwise the stuff gets out of whack,
312 * because signalling and resetting will race one another. An
313 * exception would be if we're not the last reader waiting and don't
314 * need to worry about the resetting.
315 *
316 * An option would be to do the resetting in PDMCritSectRwEnterExcl,
317 * but that would still leave a racing PDMCritSectRwEnterShared
318 * spinning hard for a little bit, which isn't great...
319 */
320 if (cWait == 0)
321 {
322# ifdef IN_RING0
323 /* Do timeout processing first to avoid redoing the above. */
324 uint32_t cMsWait;
325 if (cNsElapsed <= RT_NS_10SEC)
326 cMsWait = 32;
327 else
328 {
329 u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
330 u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;
331 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
332 {
333 LogFunc(("%p: giving up\n", pThis));
334 return rc;
335 }
336 cMsWait = 2;
337 }
338
339 int rcWait = SUPSemEventMultiWait(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead, cMsWait);
340 Log11Func(("%p: rc=%Rrc %'RU64 ns (hNativeWriter=%p u64State=%#RX64)\n", pThis, rcWait,
341 RTTimeNanoTS() - tsStart, pThis->s.Core.u.s.hNativeWriter, pThis->s.Core.u.s.u64State));
342# else
343 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
344 int rcWait = SUPSemEventMultiWaitNoResume(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead, RT_MS_5SEC);
345 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
346# endif
347 if (rcWait == VINF_SUCCESS)
348 {
349# ifdef IN_RING0
350 return pdmCritSectRwEnterSharedGotItAfterWaiting(pVM, pThis, u64State, pSrcPos, fNoVal, hThreadSelf);
351# else
352 /* ring-3: Cannot return VINF_SUCCESS. */
353 Assert(RT_FAILURE_NP(rc));
354 int rc2 = pdmCritSectRwEnterSharedGotItAfterWaiting(pVM, pThis, u64State, pSrcPos, fNoVal, hThreadSelf);
355 if (RT_SUCCESS(rc2))
356 rc2 = pdmCritSectRwLeaveSharedWorker(pVM, pThis, fNoVal);
357 return rc;
358# endif
359 }
360 AssertMsgReturn(rcWait == VERR_TIMEOUT || rcWait == VERR_INTERRUPTED,
361 ("%p: rcWait=%Rrc rc=%Rrc", pThis, rcWait, rc),
362 RT_FAILURE_NP(rcWait) ? rcWait : -rcWait);
363 }
364 else
365 {
366 u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
367 u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;
368 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
369 return pdmCritSectRwEnterSharedGotIt(pThis, pSrcPos, fNoVal, hThreadSelf);
370 }
371
372# ifdef IN_RING0
373 /* Calculate the elapsed time here to avoid redoing state work. */
374 cNsElapsed = RTTimeNanoTS() - tsStart;
375# endif
376 }
377
378 ASMNopPause();
379 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
380 ASMNopPause();
381 }
382}
383#endif /* IN_RING0 || (IN_RING3 && PDMCRITSECTRW_STRICT) */
384
385
386/**
387 * Worker for pdmCritSectRwEnterShared that handles waiting for a contended CS.
388 * Caller has already added us to the read and read-wait counters.
389 */
390static int pdmCritSectRwEnterSharedContended(PVMCC pVM, PVMCPUCC pVCpu, PPDMCRITSECTRW pThis,
391 int rcBusy, PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal, RTTHREAD hThreadSelf)
392{
393 PSUPDRVSESSION const pSession = pVM->pSession;
394 SUPSEMEVENTMULTI const hEventMulti = (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead;
395# ifdef IN_RING0
396 uint64_t const tsStart = RTTimeNanoTS();
397 uint64_t const cNsMaxTotalDef = RT_NS_5MIN;
398 uint64_t cNsMaxTotal = cNsMaxTotalDef;
399 uint32_t cMsMaxOne = RT_MS_5SEC;
400 bool fNonInterruptible = false;
401# endif
402
403 for (uint32_t iLoop = 0; ; iLoop++)
404 {
405 /*
406 * Wait for the direction to switch.
407 */
408 int rc;
409# ifdef IN_RING3
410# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
411 rc = RTLockValidatorRecSharedCheckBlocking(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, true,
412 RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_READ, false);
413 if (RT_FAILURE(rc))
414 return pdmCritSectRwEnterSharedBailOut(pVM, pThis, rc, pSrcPos, fNoVal, hThreadSelf);
415# else
416 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
417# endif
418# endif
419
420 for (;;)
421 {
422 /*
423 * We always wait with a timeout so we can re-check the structure sanity
424 * and not get stuck waiting on a corrupt or deleted section.
425 */
426# ifdef IN_RING3
427 rc = SUPSemEventMultiWaitNoResume(pSession, hEventMulti, RT_MS_5SEC);
428# else
429 rc = !fNonInterruptible
430 ? SUPSemEventMultiWaitNoResume(pSession, hEventMulti, cMsMaxOne)
431 : SUPSemEventMultiWait(pSession, hEventMulti, cMsMaxOne);
432 Log11Func(("%p: rc=%Rrc %'RU64 ns (cMsMaxOne=%RU64 hNativeWriter=%p u64State=%#RX64)\n", pThis, rc,
433 RTTimeNanoTS() - tsStart, cMsMaxOne, pThis->s.Core.u.s.hNativeWriter, pThis->s.Core.u.s.u64State));
434# endif
435 if (RT_LIKELY(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC))
436 { /* likely */ }
437 else
438 {
439# ifdef IN_RING3
440 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
441# endif
442 return VERR_SEM_DESTROYED;
443 }
444 if (RT_LIKELY(rc == VINF_SUCCESS))
445 break;
446
447 /*
448 * Timeout and interrupted waits need careful handling in ring-0
449 * because we're cooperating with ring-3 on this critical section
450 * and thus need to make absolutely sure we won't get stuck here.
451 *
452 * The r0 interrupted case means something is pending (termination,
453 * signal, APC, debugger, whatever), so we must try our best to
454 * return to the caller and to ring-3 so it can be dealt with.
455 */
456 if (rc == VERR_TIMEOUT || rc == VERR_INTERRUPTED)
457 {
458# ifdef IN_RING0
459 uint64_t const cNsElapsed = RTTimeNanoTS() - tsStart;
460 int const rcTerm = RTThreadQueryTerminationStatus(NIL_RTTHREAD);
461 AssertMsg(rcTerm == VINF_SUCCESS || rcTerm == VERR_NOT_SUPPORTED || rcTerm == VINF_THREAD_IS_TERMINATING,
462 ("rcTerm=%Rrc\n", rcTerm));
463 if (rcTerm == VERR_NOT_SUPPORTED && cNsMaxTotal == cNsMaxTotalDef)
464 cNsMaxTotal = RT_NS_1MIN;
465
466 if (rc == VERR_TIMEOUT)
467 {
468 /* Try to get out of here with a non-VINF_SUCCESS status if
469 the thread is terminating or if the timeout has been exceeded. */
470 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwSharedVerrTimeout);
471 if ( rcTerm == VINF_THREAD_IS_TERMINATING
472 || cNsElapsed > cNsMaxTotal)
473 return pdmCritSectRwEnterSharedBailOut(pVM, pThis, rcBusy != VINF_SUCCESS ? rcBusy : rc,
474 pSrcPos, fNoVal, hThreadSelf);
475 }
476 else
477 {
478 /* For interrupt cases, we must return if we can. If rcBusy is VINF_SUCCESS,
479 we will try non-interruptible sleep for a while to help resolve the issue
480 w/o guru'ing. */
481 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwSharedVerrInterrupted);
482 if ( rcTerm != VINF_THREAD_IS_TERMINATING
483 && rcBusy == VINF_SUCCESS
484 && pVCpu != NULL
485 && cNsElapsed <= cNsMaxTotal)
486 {
487 if (!fNonInterruptible)
488 {
489 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwSharedNonInterruptibleWaits);
490 fNonInterruptible = true;
491 cMsMaxOne = 32;
492 uint64_t cNsLeft = cNsMaxTotal - cNsElapsed;
493 if (cNsLeft > RT_NS_10SEC)
494 cNsMaxTotal = cNsElapsed + RT_NS_10SEC;
495 }
496 }
497 else
498 return pdmCritSectRwEnterSharedBailOut(pVM, pThis, rcBusy != VINF_SUCCESS ? rcBusy : rc,
499 pSrcPos, fNoVal, hThreadSelf);
500 }
501# else /* IN_RING3 */
502 RT_NOREF(pVM, pVCpu, rcBusy);
503# endif /* IN_RING3 */
504 }
505 /*
506 * Any other return code is fatal.
507 */
508 else
509 {
510# ifdef IN_RING3
511 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
512# endif
513 AssertMsgFailed(("rc=%Rrc\n", rc));
514 return RT_FAILURE_NP(rc) ? rc : -rc;
515 }
516 }
517
518# ifdef IN_RING3
519 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
520# endif
521
522 /*
523 * Check the direction.
524 */
525 Assert(pThis->s.Core.fNeedReset);
526 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
527 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
528 {
529 /*
530 * Decrement the wait count and maybe reset the semaphore (if we're last).
531 */
532 return pdmCritSectRwEnterSharedGotItAfterWaiting(pVM, pThis, u64State, pSrcPos, fNoVal, hThreadSelf);
533 }
534
535 AssertMsg(iLoop < 1,
536 ("%p: %u u64State=%#RX64 hNativeWriter=%p\n", pThis, iLoop, u64State, pThis->s.Core.u.s.hNativeWriter));
537 RTThreadYield();
538 }
539
540 /* not reached */
541}
542
543
544/**
545 * Worker that enters a read/write critical section with shared access.
546 *
547 * @returns VBox status code.
548 * @param pVM The cross context VM structure.
549 * @param pThis Pointer to the read/write critical section.
550 * @param rcBusy The busy return code for ring-0 and ring-3.
551 * @param fTryOnly Only try enter it, don't wait.
552 * @param pSrcPos The source position. (Can be NULL.)
553 * @param fNoVal No validation records.
554 */
555#if !defined(VMM_R0_SWITCH_STACK) || !defined(IN_RING0)
556static int pdmCritSectRwEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly,
557 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
558#else
559DECLASM(int) StkBack_pdmCritSectRwEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly,
560 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
561#endif
562{
563 /*
564 * Validate input.
565 */
566 AssertPtr(pThis);
567 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
568
569#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
570 RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
571 if (!fTryOnly)
572 {
573 int rc9;
574 RTNATIVETHREAD hNativeWriter;
575 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
576 if (hNativeWriter != NIL_RTTHREAD && hNativeWriter == pdmCritSectRwGetNativeSelf(pVM, pThis))
577 rc9 = RTLockValidatorRecExclCheckOrder(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
578 else
579 rc9 = RTLockValidatorRecSharedCheckOrder(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
580 if (RT_FAILURE(rc9))
581 return rc9;
582 }
583#else
584 RTTHREAD hThreadSelf = NIL_RTTHREAD;
585#endif
586
587 /*
588 * Work the state.
589 */
590 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
591 uint64_t u64OldState = u64State;
592 for (;;)
593 {
594 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
595 {
596 /* It flows in the right direction, try to follow it before it changes. */
597 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
598 c++;
599 Assert(c < RTCSRW_CNT_MASK / 4);
600 AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_READERS);
601 u64State &= ~RTCSRW_CNT_RD_MASK;
602 u64State |= c << RTCSRW_CNT_RD_SHIFT;
603 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
604 return pdmCritSectRwEnterSharedGotIt(pThis, pSrcPos, fNoVal, hThreadSelf);
605 }
606 else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
607 {
608 /* Wrong direction, but we're alone here and can simply try to switch the direction. */
609 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
610 u64State |= (UINT64_C(1) << RTCSRW_CNT_RD_SHIFT) | (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT);
611 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
612 {
613 Assert(!pThis->s.Core.fNeedReset);
614 return pdmCritSectRwEnterSharedGotIt(pThis, pSrcPos, fNoVal, hThreadSelf);
615 }
616 }
617 else
618 {
619 /* Is the writer perhaps doing a read recursion? */
620 RTNATIVETHREAD hNativeWriter;
621 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
622 if (hNativeWriter != NIL_RTNATIVETHREAD)
623 {
624 RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
625 if (hNativeSelf == hNativeWriter)
626 {
627#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
628 if (!fNoVal)
629 {
630 int rc9 = RTLockValidatorRecExclRecursionMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core, pSrcPos);
631 if (RT_FAILURE(rc9))
632 return rc9;
633 }
634#endif
635 uint32_t const cReads = ASMAtomicIncU32(&pThis->s.Core.cWriterReads);
636 Assert(cReads < _16K);
637 AssertReturnStmt(cReads < PDM_CRITSECTRW_MAX_RECURSIONS, ASMAtomicDecU32(&pThis->s.Core.cWriterReads),
638 VERR_PDM_CRITSECTRW_TOO_MANY_RECURSIONS);
639 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared));
640 return VINF_SUCCESS; /* don't break! */
641 }
642 }
643
644 /*
645 * If we're only trying, return already.
646 */
647 if (fTryOnly)
648 {
649 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterShared));
650 return VERR_SEM_BUSY;
651 }
652
653#if defined(IN_RING3) || defined(IN_RING0)
654 /*
655 * Add ourselves to the queue and wait for the direction to change.
656 */
657 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
658 c++;
659 Assert(c < RTCSRW_CNT_MASK / 2);
660 AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_READERS);
661
662 uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
663 cWait++;
664 Assert(cWait <= c);
665 Assert(cWait < RTCSRW_CNT_MASK / 2);
666 AssertReturn(cWait < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_READERS);
667
668 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
669 u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);
670
671 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
672 {
673 /*
674 * In ring-3 it's straightforward, just optimize the RTThreadSelf() call.
675 */
676# if defined(IN_RING3) && defined(PDMCRITSECTRW_STRICT)
677 return pdmCritSectRwEnterSharedContended(pVM, NULL, pThis, rcBusy, pSrcPos, fNoVal, hThreadSelf);
678# elif defined(IN_RING3)
679 return pdmCritSectRwEnterSharedContended(pVM, NULL, pThis, rcBusy, pSrcPos, fNoVal, RTThreadSelf());
680# else /* IN_RING0 */
681 /*
682 * In ring-0 context we have to take the special VT-x/AMD-V HM context into
683 * account when waiting on contended locks.
684 */
685 PVMCPUCC pVCpu = VMMGetCpu(pVM);
686 if (pVCpu)
687 {
688 VMMR0EMTBLOCKCTX Ctx;
689 int rc = VMMR0EmtPrepareToBlock(pVCpu, rcBusy, __FUNCTION__, pThis, &Ctx);
690 if (rc == VINF_SUCCESS)
691 {
692 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
693
694 rc = pdmCritSectRwEnterSharedContended(pVM, pVCpu, pThis, rcBusy, pSrcPos, fNoVal, hThreadSelf);
695
696 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
697 }
698 else
699 {
700 //STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLockBusy);
701 rc = pdmCritSectRwEnterSharedBailOut(pVM, pThis, rc, pSrcPos, fNoVal, hThreadSelf);
702 }
703 return rc;
704 }
705
706 /* Non-EMT. */
707 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
708 return pdmCritSectRwEnterSharedContended(pVM, NULL, pThis, rcBusy, pSrcPos, fNoVal, hThreadSelf);
709# endif /* IN_RING0 */
710 }
711
712#else /* !IN_RING3 && !IN_RING0 */
713 /*
714 * We cannot call SUPSemEventMultiWaitNoResume in this context. Go
715 * back to ring-3 and do it there or return rcBusy.
716 */
717# error "Unused code."
718 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterShared));
719 if (rcBusy == VINF_SUCCESS)
720 {
721 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
722 /** @todo Should actually do this via VMMR0.cpp instead of going all the way
723 * back to ring-3. Goes for both kinds of crit sects. */
724 return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_SHARED, MMHyperCCToR3(pVM, pThis));
725 }
726 return rcBusy;
727#endif /* !IN_RING3 && !IN_RING0 */
728 }
729
730 ASMNopPause();
731 if (RT_LIKELY(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC))
732 { /* likely */ }
733 else
734 return VERR_SEM_DESTROYED;
735 ASMNopPause();
736
737 u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
738 u64OldState = u64State;
739 }
740 /* not reached */
741}
742#if defined(VMM_R0_SWITCH_STACK) && defined(IN_RING0)
743decltype(StkBack_pdmCritSectRwEnterShared) pdmCritSectRwEnterShared;
744#endif
745
746
747/**
748 * Enter a critical section with shared (read) access.
749 *
750 * @returns VBox status code.
751 * @retval VINF_SUCCESS on success.
752 * @retval rcBusy if in ring-0 or raw-mode context and it is busy.
753 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
754 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
755 * during the operation.
756 *
757 * @param pVM The cross context VM structure.
758 * @param pThis Pointer to the read/write critical section.
759 * @param rcBusy The status code to return when we're in RC or R0 and the
760 * section is busy. Pass VINF_SUCCESS to acquire the
761 * critical section thru a ring-3 call if necessary.
762 * @sa PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterShared,
763 * PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwLeaveShared,
764 * RTCritSectRwEnterShared.
765 */
766VMMDECL(int) PDMCritSectRwEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy)
767{
768#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
769 return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
770#else
771 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
772 return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
773#endif
774}
775
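/* A minimal usage sketch, assuming a hypothetical pThis->CritSectRw member:
 *
 *   int rc = PDMCritSectRwEnterShared(pVM, &pThis->CritSectRw, VERR_SEM_BUSY);
 *   if (RT_SUCCESS(rc))
 *   {
 *       // ... read the state this section protects ...
 *       PDMCritSectRwLeaveShared(pVM, &pThis->CritSectRw);
 *   }
 *   // On failure no lock is held: rc is either the rcBusy passed in
 *   // (ring-0/RC contention, VERR_SEM_BUSY here) or a fatal status such as
 *   // VERR_SEM_DESTROYED.
 */
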
776
777/**
778 * Enter a critical section with shared (read) access.
779 *
780 * @returns VBox status code.
781 * @retval VINF_SUCCESS on success.
782 * @retval rcBusy if in ring-0 or raw-mode context and it is busy.
783 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
784 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
785 * during the operation.
786 *
787 * @param pVM The cross context VM structure.
788 * @param pThis Pointer to the read/write critical section.
789 * @param rcBusy The status code to return when we're in RC or R0 and the
790 * section is busy. Pass VINF_SUCCESS to acquire the
791 * critical section thru a ring-3 call if necessary.
792 * @param uId Where we're entering the section.
793 * @param SRC_POS The source position.
794 * @sa PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
795 * PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwLeaveShared,
796 * RTCritSectRwEnterSharedDebug.
797 */
798VMMDECL(int) PDMCritSectRwEnterSharedDebug(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
799{
800 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
801#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
802 return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
803#else
804 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
805 return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
806#endif
807}
808
809
810/**
811 * Try enter a critical section with shared (read) access.
812 *
813 * @returns VBox status code.
814 * @retval VINF_SUCCESS on success.
815 * @retval VERR_SEM_BUSY if the critsect was owned.
816 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
817 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
818 * during the operation.
819 *
820 * @param pVM The cross context VM structure.
821 * @param pThis Pointer to the read/write critical section.
822 * @sa PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwEnterShared,
823 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwLeaveShared,
824 * RTCritSectRwTryEnterShared.
825 */
826VMMDECL(int) PDMCritSectRwTryEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis)
827{
828#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
829 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
830#else
831 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
832 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
833#endif
834}
835
836
837/**
838 * Try enter a critical section with shared (read) access.
839 *
840 * @returns VBox status code.
841 * @retval VINF_SUCCESS on success.
842 * @retval VERR_SEM_BUSY if the critsect was owned.
843 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
844 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
845 * during the operation.
846 *
847 * @param pVM The cross context VM structure.
848 * @param pThis Pointer to the read/write critical section.
849 * @param uId Where we're entering the section.
850 * @param SRC_POS The source position.
851 * @sa PDMCritSectRwTryEnterShared, PDMCritSectRwEnterShared,
852 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwLeaveShared,
853 * RTCritSectRwTryEnterSharedDebug.
854 */
855VMMDECL(int) PDMCritSectRwTryEnterSharedDebug(PVMCC pVM, PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
856{
857 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
858#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
859 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
860#else
861 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
862 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
863#endif
864}
865
866
867#ifdef IN_RING3
868/**
869 * Enters a PDM read/write critical section with shared (read) access.
870 *
871 * @returns VINF_SUCCESS if entered successfully.
872 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
873 * during the operation.
874 *
875 * @param pVM The cross context VM structure.
876 * @param pThis Pointer to the read/write critical section.
877 * @param fCallRing3 Whether this is a VMMRZCallRing3() request.
878 */
879VMMR3DECL(int) PDMR3CritSectRwEnterSharedEx(PVM pVM, PPDMCRITSECTRW pThis, bool fCallRing3)
880{
881 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, false /*fTryAgain*/, NULL, fCallRing3);
882}
883#endif
884
885
886/**
887 * Leave a critical section held with shared access.
888 *
889 * @returns VBox status code.
890 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
891 * during the operation.
892 * @param pVM The cross context VM structure.
893 * @param pThis Pointer to the read/write critical section.
894 * @param fNoVal No validation records (i.e. queued release).
895 * @sa PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
896 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterSharedDebug,
897 * PDMCritSectRwLeaveExcl, RTCritSectRwLeaveShared.
898 */
899#if !defined(VMM_R0_SWITCH_STACK) || !defined(IN_RING0)
900static int pdmCritSectRwLeaveSharedWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal)
901#else
902DECLASM(int) StkBack_pdmCritSectRwLeaveSharedWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal)
903#endif
904{
905 /*
906 * Validate handle.
907 */
908 AssertPtr(pThis);
909 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
910
911#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
912 NOREF(fNoVal);
913#endif
914
915 /*
916 * Check the direction and take action accordingly.
917 */
918#ifdef IN_RING0
919 PVMCPUCC pVCpu = NULL;
920#endif
921 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
922 uint64_t u64OldState = u64State;
923 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
924 {
925#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
926 if (fNoVal)
927 Assert(!RTLockValidatorRecSharedIsOwner(pThis->s.Core.pValidatorRead, NIL_RTTHREAD));
928 else
929 {
930 int rc9 = RTLockValidatorRecSharedCheckAndRelease(pThis->s.Core.pValidatorRead, NIL_RTTHREAD);
931 if (RT_FAILURE(rc9))
932 return rc9;
933 }
934#endif
935 for (;;)
936 {
937 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
938 AssertReturn(c > 0, VERR_NOT_OWNER);
939 c--;
940
941 if ( c > 0
942 || (u64State & RTCSRW_CNT_WR_MASK) == 0)
943 {
944 /* Don't change the direction. */
945 u64State &= ~RTCSRW_CNT_RD_MASK;
946 u64State |= c << RTCSRW_CNT_RD_SHIFT;
947 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
948 break;
949 }
950 else
951 {
952#if defined(IN_RING3) || defined(IN_RING0)
953# ifdef IN_RING0
954 Assert(RTSemEventIsSignalSafe() == RTSemEventMultiIsSignalSafe());
955 if (!pVCpu)
956 pVCpu = VMMGetCpu(pVM);
957 if ( pVCpu == NULL /* non-EMT access, if we implement it must be able to block */
958 || VMMRZCallRing3IsEnabled(pVCpu)
959 || RTSemEventIsSignalSafe()
960 || ( VMMR0ThreadCtxHookIsEnabled(pVCpu) /* Doesn't matter if Signal() blocks if we have hooks, ... */
961 && RTThreadPreemptIsEnabled(NIL_RTTHREAD) /* ... and preemption is still enabled, */
962 && ASMIntAreEnabled()) /* ... and interrupts haven't yet been disabled. Special pre-GC HM env. */
963 )
964# endif
965 {
966 /* Reverse the direction and signal the writer threads. */
967 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_DIR_MASK);
968 u64State |= RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT;
969 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
970 {
971 int rc;
972# ifdef IN_RING0
973 STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveShared);
974 if (!RTSemEventIsSignalSafe() && pVCpu != NULL)
975 {
976 VMMR0EMTBLOCKCTX Ctx;
977 rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pThis, &Ctx);
978 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
979
980 rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
981
982 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
983 }
984 else
985# endif
986 rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
987 AssertRC(rc);
988 return rc;
989 }
990 }
991#endif /* IN_RING3 || IN_RING0 */
992#ifndef IN_RING3
993# ifdef IN_RING0
994 else
995# endif
996 {
997 /* Queue the exit request (ring-3). */
998# ifndef IN_RING0
999 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
1000# endif
1001 uint32_t i = pVCpu->pdm.s.cQueuedCritSectRwShrdLeaves++;
1002 LogFlow(("PDMCritSectRwLeaveShared: [%d]=%p => R3 c=%d (%#llx)\n", i, pThis, c, u64State));
1003 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves),
1004 ("i=%u\n", i), VERR_PDM_CRITSECTRW_IPE);
1005 pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i] = pThis->s.pSelfR3;
1006 VMM_ASSERT_RELEASE_MSG_RETURN(pVM,
1007 RT_VALID_PTR(pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i])
1008 && ((uintptr_t)pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i] & PAGE_OFFSET_MASK)
1009 == ((uintptr_t)pThis & PAGE_OFFSET_MASK),
1010 ("%p vs %p\n", pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i], pThis),
1011 pdmCritSectRwCorrupted(pThis, "Invalid self pointer"));
1012 VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
1013 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
1014 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
1015 STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveShared);
1016 break;
1017 }
1018#endif
1019 }
1020
1021 ASMNopPause();
1022 if (RT_LIKELY(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC))
1023 { }
1024 else
1025 return VERR_SEM_DESTROYED;
1026 ASMNopPause();
1027
1028 u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1029 u64OldState = u64State;
1030 }
1031 }
1032 else
1033 {
1034 /*
1035 * Write direction. Check that it's the owner calling and that it has reads to undo.
1036 */
1037 RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
1038 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
1039
1040 RTNATIVETHREAD hNativeWriter;
1041 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
1042 AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
1043 AssertReturn(pThis->s.Core.cWriterReads > 0, VERR_NOT_OWNER);
1044#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1045 if (!fNoVal)
1046 {
1047 int rc = RTLockValidatorRecExclUnwindMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core);
1048 if (RT_FAILURE(rc))
1049 return rc;
1050 }
1051#endif
1052 uint32_t cDepth = ASMAtomicDecU32(&pThis->s.Core.cWriterReads);
1053 AssertReturn(cDepth < PDM_CRITSECTRW_MAX_RECURSIONS, pdmCritSectRwCorrupted(pThis, "too many writer-read recursions"));
1054 }
1055
1056 return VINF_SUCCESS;
1057}
1058
1059
1060/**
1061 * Leave a critical section held with shared access.
1062 *
1063 * @returns VBox status code.
1064 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1065 * during the operation.
1066 * @param pVM The cross context VM structure.
1067 * @param pThis Pointer to the read/write critical section.
1068 * @sa PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
1069 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterSharedDebug,
1070 * PDMCritSectRwLeaveExcl, RTCritSectRwLeaveShared.
1071 */
1072VMMDECL(int) PDMCritSectRwLeaveShared(PVMCC pVM, PPDMCRITSECTRW pThis)
1073{
1074 return pdmCritSectRwLeaveSharedWorker(pVM, pThis, false /*fNoVal*/);
1075}
1076
1077
1078#if defined(IN_RING3) || defined(IN_RING0)
1079/**
1080 * PDMCritSectBothFF interface.
1081 *
1082 * @param pVM The cross context VM structure.
1083 * @param pThis Pointer to the read/write critical section.
1084 */
1085void pdmCritSectRwLeaveSharedQueued(PVMCC pVM, PPDMCRITSECTRW pThis)
1086{
1087 pdmCritSectRwLeaveSharedWorker(pVM, pThis, true /*fNoVal*/);
1088}
1089#endif
1090
1091
1092/**
1093 * Worker for pdmCritSectRwEnterExcl that bails out on wait failure.
1094 *
1095 * @returns @a rc unless corrupted.
1096 * @param pThis Pointer to the read/write critical section.
1097 * @param rc The status to return.
1098 */
1099DECL_NO_INLINE(static, int) pdmCritSectRwEnterExclBailOut(PPDMCRITSECTRW pThis, int rc)
1100{
1101 /*
1102 * Decrement the counts and return the error.
1103 */
1104 for (;;)
1105 {
1106 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1107 uint64_t const u64OldState = u64State;
1108 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
1109 AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis, "Invalid write count on bailout"));
1110 c--;
1111 u64State &= ~RTCSRW_CNT_WR_MASK;
1112 u64State |= c << RTCSRW_CNT_WR_SHIFT;
1113 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1114 return rc;
1115
1116 ASMNopPause();
1117 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
1118 ASMNopPause();
1119 }
1120}
1121
1122
1123/**
1124 * Worker for pdmCritSectRwEnterExcl that handles the red tape after we've
1125 * gotten exclusive ownership of the critical section.
1126 */
1127DECL_FORCE_INLINE(int) pdmCritSectRwEnterExclFirst(PPDMCRITSECTRW pThis, PCRTLOCKVALSRCPOS pSrcPos,
1128 bool fNoVal, RTTHREAD hThreadSelf)
1129{
1130 RT_NOREF(hThreadSelf, fNoVal, pSrcPos);
1131 Assert((PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));
1132
1133#ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
1134 pThis->s.Core.cWriteRecursions = 1;
1135#else
1136 ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 1);
1137#endif
1138 Assert(pThis->s.Core.cWriterReads == 0);
1139
1140#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1141 if (!fNoVal)
1142 {
1143 if (hThreadSelf == NIL_RTTHREAD)
1144 hThreadSelf = RTThreadSelfAutoAdopt();
1145 RTLockValidatorRecExclSetOwner(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, true);
1146 }
1147#endif
1148 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl));
1149 STAM_PROFILE_ADV_START(&pThis->s.StatWriteLocked, swl);
1150 return VINF_SUCCESS;
1151}
1152
1153
1154#if defined(IN_RING3) || defined(IN_RING0)
1155/**
1156 * Worker for pdmCritSectRwEnterExcl that handles waiting when the section is
1157 * contended.
1158 */
1159static int pdmR3R0CritSectRwEnterExclContended(PVMCC pVM, PVMCPUCC pVCpu, PPDMCRITSECTRW pThis, RTNATIVETHREAD hNativeSelf,
1160 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal, int rcBusy, RTTHREAD hThreadSelf)
1161{
1162 RT_NOREF(hThreadSelf, rcBusy, pSrcPos, fNoVal, pVCpu);
1163
1164 PSUPDRVSESSION const pSession = pVM->pSession;
1165 SUPSEMEVENT const hEvent = (SUPSEMEVENT)pThis->s.Core.hEvtWrite;
1166# ifdef IN_RING0
1167 uint64_t const tsStart = RTTimeNanoTS();
1168 uint64_t const cNsMaxTotalDef = RT_NS_5MIN;
1169 uint64_t cNsMaxTotal = cNsMaxTotalDef;
1170 uint32_t cMsMaxOne = RT_MS_5SEC;
1171 bool fNonInterruptible = false;
1172# endif
1173
1174 for (uint32_t iLoop = 0; ; iLoop++)
1175 {
1176 /*
1177 * Wait for our turn.
1178 */
1179 int rc;
1180# ifdef IN_RING3
1181# ifdef PDMCRITSECTRW_STRICT
1182 rc = RTLockValidatorRecExclCheckBlocking(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, true,
1183 RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_WRITE, false);
1184 if (RT_SUCCESS(rc))
1185 { /* likely */ }
1186 else
1187 return pdmCritSectRwEnterExclBailOut(pThis, rc);
1188# else
1189 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, false);
1190# endif
1191# endif
1192
1193 for (;;)
1194 {
1195 /*
1196 * We always wait with a timeout so we can re-check the structure sanity
1197 * and not get stuck waiting on a corrupt or deleted section.
1198 */
1199# ifdef IN_RING3
1200 rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_MS_5SEC);
1201# else
1202 rc = !fNonInterruptible
1203 ? SUPSemEventWaitNoResume(pSession, hEvent, cMsMaxOne)
1204 : SUPSemEventWait(pSession, hEvent, cMsMaxOne);
1205 Log11Func(("%p: rc=%Rrc %'RU64 ns (cMsMaxOne=%RU64 hNativeWriter=%p)\n",
1206 pThis, rc, RTTimeNanoTS() - tsStart, cMsMaxOne, pThis->s.Core.u.s.hNativeWriter));
1207# endif
1208 if (RT_LIKELY(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC))
1209 { /* likely */ }
1210 else
1211 {
1212# ifdef IN_RING3
1213 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
1214# endif
1215 return VERR_SEM_DESTROYED;
1216 }
1217 if (RT_LIKELY(rc == VINF_SUCCESS))
1218 break;
1219
1220 /*
1221 * Timeout and interrupted waits need careful handling in ring-0
1222 * because we're cooperating with ring-3 on this critical section
1223 * and thus need to make absolutely sure we won't get stuck here.
1224 *
1225 * The r0 interrupted case means something is pending (termination,
1226 * signal, APC, debugger, whatever), so we must try our best to
1227 * return to the caller and to ring-3 so it can be dealt with.
1228 */
1229 if (rc == VERR_TIMEOUT || rc == VERR_INTERRUPTED)
1230 {
1231# ifdef IN_RING0
1232 uint64_t const cNsElapsed = RTTimeNanoTS() - tsStart;
1233 int const rcTerm = RTThreadQueryTerminationStatus(NIL_RTTHREAD);
1234 AssertMsg(rcTerm == VINF_SUCCESS || rcTerm == VERR_NOT_SUPPORTED || rcTerm == VINF_THREAD_IS_TERMINATING,
1235 ("rcTerm=%Rrc\n", rcTerm));
1236 if (rcTerm == VERR_NOT_SUPPORTED && cNsMaxTotal == cNsMaxTotalDef)
1237 cNsMaxTotal = RT_NS_1MIN;
1238
1239 if (rc == VERR_TIMEOUT)
1240 {
1241 /* Try to get out of here with a non-VINF_SUCCESS status if
1242 the thread is terminating or if the timeout has been exceeded. */
1243 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwExclVerrTimeout);
1244 if ( rcTerm == VINF_THREAD_IS_TERMINATING
1245 || cNsElapsed > cNsMaxTotal)
1246 return pdmCritSectRwEnterExclBailOut(pThis, rcBusy != VINF_SUCCESS ? rcBusy : rc);
1247 }
1248 else
1249 {
1250 /* For interrupt cases, we must return if we can. If rcBusy is VINF_SUCCESS,
1251 we will try non-interruptible sleep for a while to help resolve the issue
1252 w/o guru'ing. */
1253 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwExclVerrInterrupted);
1254 if ( rcTerm != VINF_THREAD_IS_TERMINATING
1255 && rcBusy == VINF_SUCCESS
1256 && pVCpu != NULL
1257 && cNsElapsed <= cNsMaxTotal)
1258 {
1259 if (!fNonInterruptible)
1260 {
1261 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwExclNonInterruptibleWaits);
1262 fNonInterruptible = true;
1263 cMsMaxOne = 32;
1264 uint64_t cNsLeft = cNsMaxTotal - cNsElapsed;
1265 if (cNsLeft > RT_NS_10SEC)
1266 cNsMaxTotal = cNsElapsed + RT_NS_10SEC;
1267 }
1268 }
1269 else
1270 return pdmCritSectRwEnterExclBailOut(pThis, rcBusy != VINF_SUCCESS ? rcBusy : rc);
1271 }
1272# else /* IN_RING3 */
1273 RT_NOREF(pVM, pVCpu, rcBusy);
1274# endif /* IN_RING3 */
1275 }
1276 /*
1277 * Any other return code is fatal.
1278 */
1279 else
1280 {
1281# ifdef IN_RING3
1282 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
1283# endif
1284 AssertMsgFailed(("rc=%Rrc\n", rc));
1285 return RT_FAILURE_NP(rc) ? rc : -rc;
1286 }
1287 }
1288
1289# ifdef IN_RING3
1290 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
1291# endif
1292
1293 /*
1294 * Try to take exclusive write ownership.
1295 */
1296 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1297 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
1298 {
1299 bool fDone;
1300 ASMAtomicCmpXchgHandle(&pThis->s.Core.u.s.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
1301 if (fDone)
1302 return pdmCritSectRwEnterExclFirst(pThis, pSrcPos, fNoVal, hThreadSelf);
1303 }
1304 AssertMsg(iLoop < 1000, ("%u\n", iLoop)); /* may loop a few times here... */
1305 }
1306}
1307#endif /* IN_RING3 || IN_RING0 */
1308
1309
1310/**
1311 * Worker that enters a read/write critical section with exclusive access.
1312 *
1313 * @returns VBox status code.
1314 * @param pVM The cross context VM structure.
1315 * @param pThis Pointer to the read/write critical section.
1316 * @param rcBusy The busy return code for ring-0 and ring-3.
1317 * @param fTryOnly Only try enter it, don't wait.
1318 * @param pSrcPos The source position. (Can be NULL.)
1319 * @param fNoVal No validation records.
1320 */
1321#if !defined(VMM_R0_SWITCH_STACK) || !defined(IN_RING0)
1322static int pdmCritSectRwEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly,
1323 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
1324#else
1325DECLASM(int) StkBack_pdmCritSectRwEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly,
1326 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
1327#endif
1328{
1329 /*
1330 * Validate input.
1331 */
1332 AssertPtr(pThis);
1333 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
1334
1335 RTTHREAD hThreadSelf = NIL_RTTHREAD;
1336#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1337 if (!fTryOnly)
1338 {
1339 hThreadSelf = RTThreadSelfAutoAdopt();
1340 int rc9 = RTLockValidatorRecExclCheckOrder(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
1341 if (RT_FAILURE(rc9))
1342 return rc9;
1343 }
1344#endif
1345
1346 /*
1347 * Check if we're already the owner and just recursing.
1348 */
1349 RTNATIVETHREAD const hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
1350 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
1351 RTNATIVETHREAD hNativeWriter;
1352 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
1353 if (hNativeSelf == hNativeWriter)
1354 {
1355 Assert((PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));
1356#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1357 if (!fNoVal)
1358 {
1359 int rc9 = RTLockValidatorRecExclRecursion(pThis->s.Core.pValidatorWrite, pSrcPos);
1360 if (RT_FAILURE(rc9))
1361 return rc9;
1362 }
1363#endif
1364 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl));
1365#ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
1366 uint32_t const cDepth = ++pThis->s.Core.cWriteRecursions;
1367#else
1368 uint32_t const cDepth = ASMAtomicIncU32(&pThis->s.Core.cWriteRecursions);
1369#endif
1370 AssertReturnStmt(cDepth > 1 && cDepth <= PDM_CRITSECTRW_MAX_RECURSIONS,
1371 ASMAtomicDecU32(&pThis->s.Core.cWriteRecursions),
1372 VERR_PDM_CRITSECTRW_TOO_MANY_RECURSIONS);
1373 return VINF_SUCCESS;
1374 }
1375
1376 /*
1377 * First we try grab an idle critical section using 128-bit atomics.
1378 */
1379 /** @todo This could be moved up before the recursion check. */
1380 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1381#ifdef RTASM_HAVE_CMP_WRITE_U128
1382 if ( (u64State & ~RTCSRW_DIR_MASK) == 0
1383 && pdmCritSectRwIsCmpWriteU128Supported())
1384 {
1385 RTCRITSECTRWSTATE OldState;
1386 OldState.s.u64State = u64State;
1387 OldState.s.hNativeWriter = NIL_RTNATIVETHREAD;
1388 AssertCompile(sizeof(OldState.s.hNativeWriter) == sizeof(OldState.u128.s.Lo));
1389
1390 RTCRITSECTRWSTATE NewState;
1391 NewState.s.u64State = (UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
1392 NewState.s.hNativeWriter = hNativeSelf;
1393
1394 if (ASMAtomicCmpWriteU128U(&pThis->s.Core.u.u128, NewState.u128, OldState.u128))
1395 return pdmCritSectRwEnterExclFirst(pThis, pSrcPos, fNoVal, hThreadSelf);
1396
1397 u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1398 }
1399#endif
1400
1401 /*
1402 * Do it step by step. Update the state to reflect our desire.
1403 */
1404 uint64_t u64OldState = u64State;
1405
1406 for (;;)
1407 {
1408 if ( (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
1409 || (u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) != 0)
1410 {
1411 /* It flows in the right direction, try to follow it before it changes. */
1412 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
1413 AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_WRITERS);
1414 c++;
1415 Assert(c < RTCSRW_CNT_WR_MASK / 4);
1416 u64State &= ~RTCSRW_CNT_WR_MASK;
1417 u64State |= c << RTCSRW_CNT_WR_SHIFT;
1418 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1419 break;
1420 }
1421 else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
1422 {
1423 /* Wrong direction, but we're alone here and can simply try to switch the direction. */
1424 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
1425 u64State |= (UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
1426 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1427 break;
1428 }
1429 else if (fTryOnly)
1430 {
1431 /* Wrong direction and we're not supposed to wait, just return. */
1432 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl));
1433 return VERR_SEM_BUSY;
1434 }
1435 else
1436 {
1437 /* Add ourselves to the write count and break out to do the wait. */
1438 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
1439 AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_WRITERS);
1440 c++;
1441 Assert(c < RTCSRW_CNT_WR_MASK / 4);
1442 u64State &= ~RTCSRW_CNT_WR_MASK;
1443 u64State |= c << RTCSRW_CNT_WR_SHIFT;
1444 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1445 break;
1446 }
1447
1448 ASMNopPause();
1449
1450 if (pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC)
1451 { /* likely */ }
1452 else
1453 return VERR_SEM_DESTROYED;
1454
1455 ASMNopPause();
1456 u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1457 u64OldState = u64State;
1458 }
1459
1460 /*
1461 * If we're in write mode now, try to grab the ownership. Play fair if there
1462 * are threads already waiting.
1463 */
1464 bool fDone = (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
1465 && ( ((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT) == 1
1466 || fTryOnly);
1467 if (fDone)
1468 {
1469 ASMAtomicCmpXchgHandle(&pThis->s.Core.u.s.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
1470 if (fDone)
1471 return pdmCritSectRwEnterExclFirst(pThis, pSrcPos, fNoVal, hThreadSelf);
1472 }
1473
1474 /*
1475 * Okay, we have contention and will have to wait unless we're just trying.
1476 */
1477 if (fTryOnly)
1478 {
1479 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl)); /** @todo different statistics for this */
1480 return pdmCritSectRwEnterExclBailOut(pThis, VERR_SEM_BUSY);
1481 }
1482
1483 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl));
1484
1485 /*
1486 * Ring-3 is pretty straightforward.
1487 */
1488#if defined(IN_RING3) && defined(PDMCRITSECTRW_STRICT)
1489 return pdmR3R0CritSectRwEnterExclContended(pVM, NULL, pThis, hNativeSelf, pSrcPos, fNoVal, rcBusy, hThreadSelf);
1490#elif defined(IN_RING3)
1491 return pdmR3R0CritSectRwEnterExclContended(pVM, NULL, pThis, hNativeSelf, pSrcPos, fNoVal, rcBusy, RTThreadSelf());
1492
1493#elif defined(IN_RING0)
1494 /*
1495 * In ring-0 context we have to take the special VT-x/AMD-V HM context into
1496 * account when waiting on contended locks.
1497 */
1498 PVMCPUCC pVCpu = VMMGetCpu(pVM);
1499 if (pVCpu)
1500 {
1501 VMMR0EMTBLOCKCTX Ctx;
1502 int rc = VMMR0EmtPrepareToBlock(pVCpu, rcBusy, __FUNCTION__, pThis, &Ctx);
1503 if (rc == VINF_SUCCESS)
1504 {
1505 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1506
1507 rc = pdmR3R0CritSectRwEnterExclContended(pVM, pVCpu, pThis, hNativeSelf, pSrcPos, fNoVal, rcBusy, NIL_RTTHREAD);
1508
1509 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
1510 }
1511 else
1512 {
1513 //STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLockBusy);
1514 rc = pdmCritSectRwEnterExclBailOut(pThis, rc);
1515 }
1516 return rc;
1517 }
1518
1519 /* Non-EMT. */
1520 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1521 return pdmR3R0CritSectRwEnterExclContended(pVM, NULL, pThis, hNativeSelf, pSrcPos, fNoVal, rcBusy, NIL_RTTHREAD);
1522
1523#else
1524# error "Unused."
1525 /*
1526 * Raw-mode: Call host and take it there if rcBusy is VINF_SUCCESS.
1527 */
1528 rcBusy = pdmCritSectRwEnterExclBailOut(pThis, rcBusy);
1529 if (rcBusy == VINF_SUCCESS)
1530 {
1531 Assert(!fTryOnly);
1532 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
1533 /** @todo Should actually do this via VMMR0.cpp instead of going all the way
1534 * back to ring-3. Goes for both kinds of crit sects. */
1535 return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_EXCL, MMHyperCCToR3(pVM, pThis));
1536 }
1537 return rcBusy;
1538#endif
1539}
1540#if defined(VMM_R0_SWITCH_STACK) && defined(IN_RING0)
1541decltype(StkBack_pdmCritSectRwEnterExcl) pdmCritSectRwEnterExcl;
1542#endif
1543
1544
1545/**
1546 * Try to enter a critical section with exclusive (write) access.
1547 *
1548 * @returns VBox status code.
1549 * @retval VINF_SUCCESS on success.
1550 * @retval rcBusy if in ring-0 or raw-mode context and it is busy.
1551 * @retval VERR_SEM_NESTED if a nested enter is attempted on a non-nesting section. (Asserted.)
1552 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1553 * during the operation.
1554 *
1555 * @param pVM The cross context VM structure.
1556 * @param pThis Pointer to the read/write critical section.
1557 * @param rcBusy The status code to return when we're in RC or R0 and the
1558 * section is busy. Pass VINF_SUCCESS to acquire the
1559 * critical section thru a ring-3 call if necessary.
1560 * @sa PDMCritSectRwEnterExclDebug, PDMCritSectRwTryEnterExcl,
1561 * PDMCritSectRwTryEnterExclDebug,
1562 * PDMCritSectEnterDebug, PDMCritSectEnter,
1563 * RTCritSectRwEnterExcl.
1564 */
1565VMMDECL(int) PDMCritSectRwEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy)
1566{
1567#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1568 return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
1569#else
1570 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
1571 return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
1572#endif
1573}
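
/*
 * Usage sketch (illustrative only, not part of the original file): the basic
 * blocking enter/leave pattern for exclusive access.  It assumes a PVMCC pVM
 * and a PPDMCRITSECTRW pCritSect are already in scope; both names are
 * hypothetical here.
 *
 *     int rc = PDMCritSectRwEnterExcl(pVM, pCritSect, VERR_SEM_BUSY);
 *     if (rc == VINF_SUCCESS)
 *     {
 *         // ... modify the data the section protects ...
 *         PDMCritSectRwLeaveExcl(pVM, pCritSect);
 *     }
 *     else
 *     {
 *         // rc is rcBusy (ring-0/RC contention) or VERR_SEM_DESTROYED;
 *         // the protected data must not be touched in this case.
 *     }
 */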
1574
1575
1576/**
1577 * Try to enter a critical section with exclusive (write) access.
1578 *
1579 * @returns VBox status code.
1580 * @retval VINF_SUCCESS on success.
1581 * @retval rcBusy if in ring-0 or raw-mode context and it is busy.
1582 * @retval VERR_SEM_NESTED if a nested enter is attempted on a non-nesting section. (Asserted.)
1583 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1584 * during the operation.
1585 *
1586 * @param pVM The cross context VM structure.
1587 * @param pThis Pointer to the read/write critical section.
1588 * @param rcBusy The status code to return when we're in RC or R0 and the
1589 * section is busy. Pass VINF_SUCCESS to acquire the
1590 * critical section thru a ring-3 call if necessary.
1591 * @param uId Where we're entering the section.
1592 * @param SRC_POS The source position.
1593 * @sa PDMCritSectRwEnterExcl, PDMCritSectRwTryEnterExcl,
1594 * PDMCritSectRwTryEnterExclDebug,
1595 * PDMCritSectEnterDebug, PDMCritSectEnter,
1596 * RTCritSectRwEnterExclDebug.
1597 */
1598VMMDECL(int) PDMCritSectRwEnterExclDebug(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
1599{
1600 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
1601#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1602 return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
1603#else
1604 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
1605 return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
1606#endif
1607}
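
/*
 * Usage sketch (illustrative only): the debug variant takes a caller id and a
 * source position for the lock validator; RT_SRC_POS supplies the latter.
 * pVM and pCritSect are assumed to be in scope, and 0 is used as a dummy uId.
 *
 *     int rc = PDMCritSectRwEnterExclDebug(pVM, pCritSect, VERR_SEM_BUSY, 0 /*uId*/, RT_SRC_POS);
 *     if (rc == VINF_SUCCESS)
 *     {
 *         // ... exclusive work ...
 *         PDMCritSectRwLeaveExcl(pVM, pCritSect);
 *     }
 */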
1608
1609
1610/**
1611 * Try to enter a critical section with exclusive (write) access.
1612 *
1613 * @retval VINF_SUCCESS on success.
1614 * @retval VERR_SEM_BUSY if the critsect was owned.
1615 * @retval VERR_SEM_NESTED if a nested enter is attempted on a non-nesting section. (Asserted.)
1616 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1617 * during the operation.
1618 *
1619 * @param pVM The cross context VM structure.
1620 * @param pThis Pointer to the read/write critical section.
1621 * @sa PDMCritSectRwEnterExcl, PDMCritSectRwTryEnterExclDebug,
1622 * PDMCritSectRwEnterExclDebug,
1623 * PDMCritSectTryEnter, PDMCritSectTryEnterDebug,
1624 * RTCritSectRwTryEnterExcl.
1625 */
1626VMMDECL(int) PDMCritSectRwTryEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis)
1627{
1628#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1629 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
1630#else
1631 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
1632 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
1633#endif
1634}
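
/*
 * Usage sketch (illustrative only): the try variant never waits, so it suits
 * opportunistic paths that have a fallback.  pVM and pCritSect are assumed to
 * be in scope.
 *
 *     if (PDMCritSectRwTryEnterExcl(pVM, pCritSect) == VINF_SUCCESS)
 *     {
 *         // ... fast path with the write lock held ...
 *         PDMCritSectRwLeaveExcl(pVM, pCritSect);
 *     }
 *     else
 *     {
 *         // VERR_SEM_BUSY (or VERR_SEM_DESTROYED): take the slow/fallback path.
 *     }
 */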
1635
1636
1637/**
1638 * Try to enter a critical section with exclusive (write) access.
1639 *
1640 * @retval VINF_SUCCESS on success.
1641 * @retval VERR_SEM_BUSY if the critsect was owned.
1642 * @retval VERR_SEM_NESTED if a nested enter is attempted on a non-nesting section. (Asserted.)
1643 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1644 * during the operation.
1645 *
1646 * @param pVM The cross context VM structure.
1647 * @param pThis Pointer to the read/write critical section.
1648 * @param uId Where we're entering the section.
1649 * @param SRC_POS The source position.
1650 * @sa PDMCritSectRwTryEnterExcl, PDMCritSectRwEnterExcl,
1651 * PDMCritSectRwEnterExclDebug,
1652 * PDMCritSectTryEnterDebug, PDMCritSectTryEnter,
1653 * RTCritSectRwTryEnterExclDebug.
1654 */
1655VMMDECL(int) PDMCritSectRwTryEnterExclDebug(PVMCC pVM, PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
1656{
1657 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
1658#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1659 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
1660#else
1661 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
1662 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
1663#endif
1664}
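
/*
 * Usage sketch (illustrative only): same non-blocking pattern as above, but
 * with lock-validator source information.  pVM and pCritSect are assumed.
 *
 *     int rc = PDMCritSectRwTryEnterExclDebug(pVM, pCritSect, 0 /*uId*/, RT_SRC_POS);
 *     if (rc == VINF_SUCCESS)
 *     {
 *         // ... work ...
 *         PDMCritSectRwLeaveExcl(pVM, pCritSect);
 *     }
 */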
1665
1666
1667#ifdef IN_RING3
1668/**
1669 * Enters a PDM read/write critical section with exclusive (write) access.
1670 *
1671 * @returns VINF_SUCCESS if entered successfully.
1672 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1673 * during the operation.
1674 *
1675 * @param pVM The cross context VM structure.
1676 * @param pThis Pointer to the read/write critical section.
1677 * @param fCallRing3 Whether this is a VMMRZCallRing3() request.
1678 */
1679VMMR3DECL(int) PDMR3CritSectRwEnterExclEx(PVM pVM, PPDMCRITSECTRW pThis, bool fCallRing3)
1680{
1681 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, false /*fTryOnly*/, NULL, fCallRing3 /*fNoVal*/);
1682}
1683#endif /* IN_RING3 */
1684
1685
1686/**
1687 * Leave a critical section held exclusively.
1688 *
1689 * @returns VBox status code.
1690 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1691 * during the operation.
1692 * @param pVM The cross context VM structure.
1693 * @param pThis Pointer to the read/write critical section.
1694 * @param fNoVal No validation records (i.e. queued release).
1695 * @sa PDMCritSectRwLeaveShared, RTCritSectRwLeaveExcl.
1696 */
1697#if !defined(VMM_R0_SWITCH_STACK) || !defined(IN_RING0)
1698static int pdmCritSectRwLeaveExclWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal)
1699#else
1700DECLASM(int) StkBack_pdmCritSectRwLeaveExclWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal)
1701#endif
1702{
1703 /*
1704 * Validate handle.
1705 */
1706 AssertPtr(pThis);
1707 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
1708
1709#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1710 NOREF(fNoVal);
1711#endif
1712
1713 /*
1714 * Check ownership.
1715 */
1716 RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
1717 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
1718
1719 RTNATIVETHREAD hNativeWriter;
1720 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
1721 AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
1722
1723
1724 /*
1725 * Unwind one recursion. Not the last?
1726 */
1727 if (pThis->s.Core.cWriteRecursions != 1)
1728 {
1729#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1730 if (fNoVal)
1731 Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD);
1732 else
1733 {
1734 int rc9 = RTLockValidatorRecExclUnwind(pThis->s.Core.pValidatorWrite);
1735 if (RT_FAILURE(rc9))
1736 return rc9;
1737 }
1738#endif
1739#ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
1740 uint32_t const cDepth = --pThis->s.Core.cWriteRecursions;
1741#else
1742 uint32_t const cDepth = ASMAtomicDecU32(&pThis->s.Core.cWriteRecursions);
1743#endif
1744 AssertReturn(cDepth != 0 && cDepth < UINT32_MAX, pdmCritSectRwCorrupted(pThis, "Invalid write recursion value on leave"));
1745 return VINF_SUCCESS;
1746 }
1747
1748
1749 /*
1750 * Final recursion.
1751 */
1752 AssertReturn(pThis->s.Core.cWriterReads == 0, VERR_WRONG_ORDER); /* (must release all read recursions before the final write.) */
1753#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1754 if (fNoVal)
1755 Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD);
1756 else
1757 {
1758 int rc9 = RTLockValidatorRecExclReleaseOwner(pThis->s.Core.pValidatorWrite, true);
1759 if (RT_FAILURE(rc9))
1760 return rc9;
1761 }
1762#endif
1763
1764
1765#ifdef RTASM_HAVE_CMP_WRITE_U128
1766 /*
1767 * See if we can get out w/o any signalling as this is a common case.
1768 */
1769 if (pdmCritSectRwIsCmpWriteU128Supported())
1770 {
1771 RTCRITSECTRWSTATE OldState;
1772 OldState.s.u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1773 if (OldState.s.u64State == ((UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)))
1774 {
1775 OldState.s.hNativeWriter = hNativeSelf;
1776 AssertCompile(sizeof(OldState.s.hNativeWriter) == sizeof(OldState.u128.s.Lo));
1777
1778 RTCRITSECTRWSTATE NewState;
1779 NewState.s.u64State = RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT;
1780 NewState.s.hNativeWriter = NIL_RTNATIVETHREAD;
1781
1782# ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
1783 pThis->s.Core.cWriteRecursions = 0;
1784# else
1785 ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 0);
1786# endif
1787 STAM_PROFILE_ADV_STOP(&pThis->s.StatWriteLocked, swl);
1788
1789 if (ASMAtomicCmpWriteU128U(&pThis->s.Core.u.u128, NewState.u128, OldState.u128))
1790 return VINF_SUCCESS;
1791
1792 /* bail out. */
1793 pThis->s.Core.cWriteRecursions = 1;
1794 }
1795 }
1796#endif /* RTASM_HAVE_CMP_WRITE_U128 */
1797
1798
1799#if defined(IN_RING3) || defined(IN_RING0)
1800 /*
1801 * Ring-3: Straightforward, just update the state and if necessary signal waiters.
1802 * Ring-0: Try to leave for real; depends on host and context.
1803 */
1804# ifdef IN_RING0
1805 Assert(RTSemEventIsSignalSafe() == RTSemEventMultiIsSignalSafe());
1806 PVMCPUCC pVCpu = VMMGetCpu(pVM);
1807 if ( pVCpu == NULL /* non-EMT access; if we implement it, it must be able to block */
1808 || VMMRZCallRing3IsEnabled(pVCpu)
1809 || RTSemEventIsSignalSafe()
1810 || ( VMMR0ThreadCtxHookIsEnabled(pVCpu) /* Doesn't matter if Signal() blocks if we have hooks, ... */
1811 && RTThreadPreemptIsEnabled(NIL_RTTHREAD) /* ... and preemption is still enabled, */
1812 && ASMIntAreEnabled()) /* ... and interrupts haven't yet been disabled. Special pre-GC HM env. */
1813 )
1814# endif
1815 {
1816# ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
1817 pThis->s.Core.cWriteRecursions = 0;
1818# else
1819 ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 0);
1820# endif
1821 STAM_PROFILE_ADV_STOP(&pThis->s.StatWriteLocked, swl);
1822 ASMAtomicWriteHandle(&pThis->s.Core.u.s.hNativeWriter, NIL_RTNATIVETHREAD);
1823
1824 for (;;)
1825 {
1826 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1827 uint64_t u64OldState = u64State;
1828
1829 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
1830 AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis, "Invalid write count on leave"));
1831 c--;
1832
1833 if ( c > 0
1834 || (u64State & RTCSRW_CNT_RD_MASK) == 0)
1835 {
1836 /*
1837 * Don't change the direction, wake up the next writer if any.
1838 */
1839 u64State &= ~RTCSRW_CNT_WR_MASK;
1840 u64State |= c << RTCSRW_CNT_WR_SHIFT;
1841 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1842 {
1843 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,LeaveExcl));
1844 int rc;
1845 if (c == 0)
1846 rc = VINF_SUCCESS;
1847# ifdef IN_RING0
1848 else if (!RTSemEventIsSignalSafe() && pVCpu != NULL)
1849 {
1850 VMMR0EMTBLOCKCTX Ctx;
1851 rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pThis, &Ctx);
1852 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
1853
1854 rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
1855
1856 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
1857 }
1858# endif
1859 else
1860 rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
1861 AssertRC(rc);
1862 return rc;
1863 }
1864 }
1865 else
1866 {
1867 /*
1868 * Reverse the direction and signal the reader threads.
1869 */
1870 u64State &= ~(RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
1871 u64State |= RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT;
1872 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1873 {
1874 Assert(!pThis->s.Core.fNeedReset);
1875 ASMAtomicWriteBool(&pThis->s.Core.fNeedReset, true);
1876 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,LeaveExcl));
1877
1878 int rc;
1879# ifdef IN_RING0
1880 if (!RTSemEventMultiIsSignalSafe() && pVCpu != NULL)
1881 {
1882 VMMR0EMTBLOCKCTX Ctx;
1883 rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pThis, &Ctx);
1884 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
1885
1886 rc = SUPSemEventMultiSignal(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
1887
1888 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
1889 }
1890 else
1891# endif
1892 rc = SUPSemEventMultiSignal(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
1893 AssertRC(rc);
1894 return rc;
1895 }
1896 }
1897
1898 ASMNopPause();
1899 if (pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC)
1900 { /*likely*/ }
1901 else
1902 return VERR_SEM_DESTROYED;
1903 ASMNopPause();
1904 }
1905 /* not reached! */
1906 }
1907#endif /* IN_RING3 || IN_RING0 */
1908
1909
1910#ifndef IN_RING3
1911 /*
1912 * Queue the requested exit for ring-3 execution.
1913 */
1914# ifndef IN_RING0
1915 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
1916# endif
1917 uint32_t i = pVCpu->pdm.s.cQueuedCritSectRwExclLeaves++;
1918 LogFlow(("PDMCritSectRwLeaveExcl: [%d]=%p => R3\n", i, pThis));
1919 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwExclLeaves),
1920 ("i=%u\n", i), VERR_PDM_CRITSECTRW_IPE);
1921 pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i] = pThis->s.pSelfR3;
1922 VMM_ASSERT_RELEASE_MSG_RETURN(pVM,
1923 RT_VALID_PTR(pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i])
1924 && ((uintptr_t)pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i] & PAGE_OFFSET_MASK)
1925 == ((uintptr_t)pThis & PAGE_OFFSET_MASK),
1926 ("%p vs %p\n", pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i], pThis),
1927 pdmCritSectRwCorrupted(pThis, "Invalid self pointer on queue (excl)"));
1928 VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
1929 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
1930 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
1931 STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveExcl);
1932 return VINF_SUCCESS;
1933#endif
1934}
1935#if defined(VMM_R0_SWITCH_STACK) && defined(IN_RING0)
1936decltype(StkBack_pdmCritSectRwLeaveExclWorker) pdmCritSectRwLeaveExclWorker;
1937#endif
1938
1939
1940/**
1941 * Leave a critical section held exclusively.
1942 *
1943 * @returns VBox status code.
1944 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1945 * during the operation.
1946 * @param pVM The cross context VM structure.
1947 * @param pThis Pointer to the read/write critical section.
1948 * @sa PDMCritSectRwLeaveShared, RTCritSectRwLeaveExcl.
1949 */
1950VMMDECL(int) PDMCritSectRwLeaveExcl(PVMCC pVM, PPDMCRITSECTRW pThis)
1951{
1952 return pdmCritSectRwLeaveExclWorker(pVM, pThis, false /*fNoVal*/);
1953}
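
/*
 * Usage sketch (illustrative only): exclusive ownership recurses, so every
 * successful enter must be paired with exactly one leave; only the final
 * leave releases the section.  pVM and pCritSect are assumed to be in scope.
 *
 *     int rc = PDMCritSectRwEnterExcl(pVM, pCritSect, VERR_SEM_BUSY);
 *     if (rc == VINF_SUCCESS)
 *     {
 *         rc = PDMCritSectRwEnterExcl(pVM, pCritSect, VERR_SEM_BUSY); // recursive enter by the owner
 *         if (rc == VINF_SUCCESS)
 *             PDMCritSectRwLeaveExcl(pVM, pCritSect);                 // unwinds one recursion
 *         PDMCritSectRwLeaveExcl(pVM, pCritSect);                     // final leave, releases the lock
 *     }
 */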
1954
1955
1956#if defined(IN_RING3) || defined(IN_RING0)
1957/**
1958 * PDMCritSectBothFF interface.
1959 *
1960 * @param pVM The cross context VM structure.
1961 * @param pThis Pointer to the read/write critical section.
1962 */
1963void pdmCritSectRwLeaveExclQueued(PVMCC pVM, PPDMCRITSECTRW pThis)
1964{
1965 pdmCritSectRwLeaveExclWorker(pVM, pThis, true /*fNoVal*/);
1966}
1967#endif
1968
1969
1970/**
1971 * Checks whether the caller is the exclusive (write) owner of the critical section.
1972 *
1973 * @retval true if owner.
1974 * @retval false if not owner.
1975 * @param pVM The cross context VM structure.
1976 * @param pThis Pointer to the read/write critical section.
1977 * @sa PDMCritSectRwIsReadOwner, PDMCritSectIsOwner,
1978 * RTCritSectRwIsWriteOwner.
1979 */
1980VMMDECL(bool) PDMCritSectRwIsWriteOwner(PVMCC pVM, PPDMCRITSECTRW pThis)
1981{
1982 /*
1983 * Validate handle.
1984 */
1985 AssertPtr(pThis);
1986 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, false);
1987
1988 /*
1989 * Check ownership.
1990 */
1991 RTNATIVETHREAD hNativeWriter;
1992 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
1993 if (hNativeWriter == NIL_RTNATIVETHREAD)
1994 return false;
1995 return hNativeWriter == pdmCritSectRwGetNativeSelf(pVM, pThis);
1996}
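
/*
 * Usage sketch (illustrative only): typical use is asserting ownership at the
 * top of helpers that require the write lock to be held by the caller.
 * pVM and pCritSect are assumed to be in scope.
 *
 *     Assert(PDMCritSectRwIsWriteOwner(pVM, pCritSect));
 *     // ... code that relies on exclusive access ...
 */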
1997
1998
1999/**
2000 * Checks if the caller is one of the read owners of the critical section.
2001 *
2002 * @note !CAUTION! This API doesn't work reliably if lock validation isn't
2003 * enabled. Meaning, the answer is not trustworthy unless
2004 * RT_LOCK_STRICT or PDMCRITSECTRW_STRICT was defined at build time.
2005 * Also, make sure you do not use RTCRITSECTRW_FLAGS_NO_LOCK_VAL when
2006 * creating the semaphore. And finally, if you used a locking class,
2007 * don't disable deadlock detection by setting cMsMinDeadlock to
2008 * RT_INDEFINITE_WAIT.
2009 *
2010 * In short, only use this for assertions.
2011 *
2012 * @returns @c true if reader, @c false if not.
2013 * @param pVM The cross context VM structure.
2014 * @param pThis Pointer to the read/write critical section.
2015 * @param fWannaHear What you'd like to hear when lock validation is not
2016 * available. (For avoiding asserting all over the place.)
2017 * @sa PDMCritSectRwIsWriteOwner, RTCritSectRwIsReadOwner.
2018 */
2019VMMDECL(bool) PDMCritSectRwIsReadOwner(PVMCC pVM, PPDMCRITSECTRW pThis, bool fWannaHear)
2020{
2021 /*
2022 * Validate handle.
2023 */
2024 AssertPtr(pThis);
2025 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, false);
2026
2027 /*
2028 * Inspect the state.
2029 */
2030 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
2031 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
2032 {
2033 /*
2034 * It's in write mode, so we can only be a reader if we're also the
2035 * current writer.
2036 */
2037 RTNATIVETHREAD hWriter;
2038 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hWriter);
2039 if (hWriter == NIL_RTNATIVETHREAD)
2040 return false;
2041 return hWriter == pdmCritSectRwGetNativeSelf(pVM, pThis);
2042 }
2043
2044 /*
2045 * Read mode. If there are no current readers, then we cannot be a reader.
2046 */
2047 if (!(u64State & RTCSRW_CNT_RD_MASK))
2048 return false;
2049
2050#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
2051 /*
2052 * Ask the lock validator.
2053 * Note! It doesn't know everything, let's deal with that if it becomes an issue...
2054 */
2055 NOREF(fWannaHear);
2056 return RTLockValidatorRecSharedIsOwner(pThis->s.Core.pValidatorRead, NIL_RTTHREAD);
2057#else
2058 /*
2059 * Ok, we don't know, just tell the caller what they want to hear.
2060 */
2061 return fWannaHear;
2062#endif
2063}
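
/*
 * Usage sketch (illustrative only): as the note above says, the answer is only
 * dependable with lock validation, so this is best kept to assertions, with
 * fWannaHear chosen so the assertion stays quiet when validation is absent.
 * pVM and pCritSect are assumed to be in scope.
 *
 *     Assert(PDMCritSectRwIsReadOwner(pVM, pCritSect, true /*fWannaHear*/));
 *     // ... code that expects at least shared access ...
 */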
2064
2065
2066/**
2067 * Gets the write recursion count.
2068 *
2069 * @returns The write recursion count (0 if bad critsect).
2070 * @param pThis Pointer to the read/write critical section.
2071 * @sa PDMCritSectRwGetWriterReadRecursion, PDMCritSectRwGetReadCount,
2072 * RTCritSectRwGetWriteRecursion.
2073 */
2074VMMDECL(uint32_t) PDMCritSectRwGetWriteRecursion(PPDMCRITSECTRW pThis)
2075{
2076 /*
2077 * Validate handle.
2078 */
2079 AssertPtr(pThis);
2080 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);
2081
2082 /*
2083 * Return the requested data.
2084 */
2085 return pThis->s.Core.cWriteRecursions;
2086}
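
/*
 * Usage sketch (illustrative only): the recursion count is handy in debug
 * logging or sanity checks around re-entrant code.  pCritSect is assumed to
 * be in scope.
 *
 *     Log(("critsect-rw: write recursion depth %u\n", PDMCritSectRwGetWriteRecursion(pCritSect)));
 */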
2087
2088
2089/**
2090 * Gets the read recursion count of the current writer.
2091 *
2092 * @returns The read recursion count (0 if bad critsect).
2093 * @param pThis Pointer to the read/write critical section.
2094 * @sa PDMCritSectRwGetWriteRecursion, PDMCritSectRwGetReadCount,
2095 * RTCritSectRwGetWriterReadRecursion.
2096 */
2097VMMDECL(uint32_t) PDMCritSectRwGetWriterReadRecursion(PPDMCRITSECTRW pThis)
2098{
2099 /*
2100 * Validate handle.
2101 */
2102 AssertPtr(pThis);
2103 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);
2104
2105 /*
2106 * Return the requested data.
2107 */
2108 return pThis->s.Core.cWriterReads;
2109}
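
/*
 * Usage sketch (illustrative only): a writer that also entered the section in
 * shared mode can check that those nested shared enters are balanced before
 * the final exclusive leave (which requires the writer-read count to be zero,
 * see the leave worker above).  pVM and pCritSect are assumed to be in scope.
 *
 *     Assert(PDMCritSectRwGetWriterReadRecursion(pCritSect) == 0);
 *     PDMCritSectRwLeaveExcl(pVM, pCritSect);
 */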
2110
2111
2112/**
2113 * Gets the current number of reads.
2114 *
2115 * This includes all read recursions, so it might be higher than the number of
2116 * read owners. It does not include reads done by the current writer.
2117 *
2118 * @returns The read count (0 if bad critsect).
2119 * @param pThis Pointer to the read/write critical section.
2120 * @sa PDMCritSectRwGetWriteRecursion, PDMCritSectRwGetWriterReadRecursion,
2121 * RTCritSectRwGetReadCount.
2122 */
2123VMMDECL(uint32_t) PDMCritSectRwGetReadCount(PPDMCRITSECTRW pThis)
2124{
2125 /*
2126 * Validate input.
2127 */
2128 AssertPtr(pThis);
2129 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);
2130
2131 /*
2132 * Return the requested data.
2133 */
2134 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
2135 if ((u64State & RTCSRW_DIR_MASK) != (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
2136 return 0;
2137 return (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
2138}
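
/*
 * Usage sketch (illustrative only): the read count is informational (it can
 * change the moment it has been read), so it is mostly useful for statistics
 * or release logging.  pCritSect is assumed to be in scope.
 *
 *     LogRel(("critsect-rw: %u shared holders (incl. recursions)\n", PDMCritSectRwGetReadCount(pCritSect)));
 */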
2139
2140
2141/**
2142 * Checks if the read/write critical section is initialized or not.
2143 *
2144 * @retval true if initialized.
2145 * @retval false if not initialized.
2146 * @param pThis Pointer to the read/write critical section.
2147 * @sa PDMCritSectIsInitialized, RTCritSectRwIsInitialized.
2148 */
2149VMMDECL(bool) PDMCritSectRwIsInitialized(PCPDMCRITSECTRW pThis)
2150{
2151 AssertPtr(pThis);
2152 return pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC;
2153}
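
/*
 * Usage sketch (illustrative only): a cheap guard for teardown or optional
 * paths where the section may not have been created yet.  pVM and pCritSect
 * are assumed to be in scope.
 *
 *     if (PDMCritSectRwIsInitialized(pCritSect))
 *     {
 *         PDMCritSectRwEnterExcl(pVM, pCritSect, VERR_SEM_BUSY);
 *         // ... cleanup of the protected data ...
 *         PDMCritSectRwLeaveExcl(pVM, pCritSect);
 *     }
 */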
2154