VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSectRw.cpp@ 90784

Last change on this file since 90784 was 90677, checked in by vboxsync, 4 years ago

VMM/PDMCritSectRwEnterShared: Implemented waiting in ring-0/HM context. bugref:6695

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 83.7 KB
 
1/* $Id: PDMAllCritSectRw.cpp 90677 2021-08-13 10:30:37Z vboxsync $ */
2/** @file
3 * IPRT - Read/Write Critical Section, Generic.
4 */
5
6/*
7 * Copyright (C) 2009-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PDM_CRITSECTRW
23#include "PDMInternal.h"
24#include <VBox/vmm/pdmcritsectrw.h>
25#include <VBox/vmm/mm.h>
26#include <VBox/vmm/vmm.h>
27#include <VBox/vmm/vmcc.h>
28#include <VBox/err.h>
29#include <VBox/vmm/hm.h>
30
31#include <VBox/log.h>
32#include <iprt/asm.h>
33#include <iprt/asm-amd64-x86.h>
34#include <iprt/assert.h>
35#ifdef IN_RING3
36# include <iprt/lockvalidator.h>
37#endif
38#if defined(IN_RING3) || defined(IN_RING0)
39# include <iprt/semaphore.h>
40# include <iprt/thread.h>
41#endif
42#ifdef IN_RING0
43# include <iprt/time.h>
44#endif
45#ifdef RT_ARCH_AMD64
46# include <iprt/x86.h>
47#endif
48
49
50/*********************************************************************************************************************************
51* Defined Constants And Macros *
52*********************************************************************************************************************************/
53#if 0 /* unused */
54/** The number of loops to spin for shared access in ring-3. */
55#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R3 20
56/** The number of loops to spin for shared access in ring-0. */
57#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R0 128
58/** The number of loops to spin for shared access in the raw-mode context. */
59#define PDMCRITSECTRW_SHRD_SPIN_COUNT_RC 128
60
61/** The number of loops to spin for exclusive access in ring-3. */
62#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R3 20
63/** The number of loops to spin for exclusive access in ring-0. */
64#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R0 256
65/** The number of loops to spin for exclusive access in the raw-mode context. */
66#define PDMCRITSECTRW_EXCL_SPIN_COUNT_RC 256
67#endif
68
69/** Max number of write or write/read recursions. */
70#define PDM_CRITSECTRW_MAX_RECURSIONS _1M
71
72/** Skips some of the overly paranoid atomic reads and updates.
73 * Makes some assumptions about cache coherence, though not brave enough not to
74 * always end with an atomic update. */
75#define PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
76
77/** For reading RTCRITSECTRWSTATE::s::u64State. */
78#ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
79# define PDMCRITSECTRW_READ_STATE(a_pu64State) ASMAtomicUoReadU64(a_pu64State)
80#else
81# define PDMCRITSECTRW_READ_STATE(a_pu64State) ASMAtomicReadU64(a_pu64State)
82#endif
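
For orientation: RTCRITSECTRWSTATE::s::u64State packs the reader count, the writer count, the waiting-reader count and the current direction into a single 64-bit word, and every enter/leave path below manipulates it with compare-exchange loops. A minimal sketch, assuming only the RTCSRW_* masks and shifts already used in this file, of how such a snapshot is decoded:

/* Illustrative sketch, not part of PDMAllCritSectRw.cpp: decoding a u64State
   snapshot with the same masks and shifts used throughout this file. */
#if 0
static void pdmCritSectRwSketchDumpState(uint64_t u64State)
{
    uint64_t const cReaders        = (u64State & RTCSRW_CNT_RD_MASK)      >> RTCSRW_CNT_RD_SHIFT;
    uint64_t const cWriters        = (u64State & RTCSRW_CNT_WR_MASK)      >> RTCSRW_CNT_WR_SHIFT;
    uint64_t const cWaitingReaders = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
    bool     const fWriteDir       = (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
    Log(("readers=%RU64 waiting-readers=%RU64 writers=%RU64 direction=%s\n",
         cReaders, cWaitingReaders, cWriters, fWriteDir ? "write" : "read"));
}
#endif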
83
84
85/* Undefine the automatic VBOX_STRICT API mappings. */
86#undef PDMCritSectRwEnterExcl
87#undef PDMCritSectRwTryEnterExcl
88#undef PDMCritSectRwEnterShared
89#undef PDMCritSectRwTryEnterShared
90
91
92/*********************************************************************************************************************************
93* Global Variables *
94*********************************************************************************************************************************/
95#if defined(RTASM_HAVE_CMP_WRITE_U128) && defined(RT_ARCH_AMD64)
96static int32_t g_fCmpWriteSupported = -1;
97#endif
98
99
100/*********************************************************************************************************************************
101* Internal Functions *
102*********************************************************************************************************************************/
103static int pdmCritSectRwLeaveSharedWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal);
104
105
106#ifdef RTASM_HAVE_CMP_WRITE_U128
107
108# ifdef RT_ARCH_AMD64
109/**
110 * Called once to initialize g_fCmpWriteSupported.
111 */
112DECL_NO_INLINE(static, bool) pdmCritSectRwIsCmpWriteU128SupportedSlow(void)
113{
114 bool const fCmpWriteSupported = RT_BOOL(ASMCpuId_ECX(1) & X86_CPUID_FEATURE_ECX_CX16);
115 ASMAtomicWriteS32(&g_fCmpWriteSupported, fCmpWriteSupported);
116 return fCmpWriteSupported;
117}
118# endif
119
120
121/**
122 * Indicates whether hardware actually supports 128-bit compare & write.
123 */
124DECL_FORCE_INLINE(bool) pdmCritSectRwIsCmpWriteU128Supported(void)
125{
126# ifdef RT_ARCH_AMD64
127 int32_t const fCmpWriteSupported = g_fCmpWriteSupported;
128 if (RT_LIKELY(fCmpWriteSupported >= 0))
129 return fCmpWriteSupported != 0;
130 return pdmCritSectRwIsCmpWriteU128SupportedSlow();
131# else
132 return true;
133# endif
134}
135
136#endif /* RTASM_HAVE_CMP_WRITE_U128 */
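
The cached detection above exists so that pdmCritSectRwEnterExcl (further down) can claim a completely idle section with one 128-bit compare-and-write, updating the state word and the writer handle atomically as a pair. A condensed sketch of that fast path, mirroring the real code below rather than adding anything new:

/* Illustrative sketch, not part of the original file: the 128-bit "claim an idle
   section" fast path as pdmCritSectRwEnterExcl uses it further down. */
#if 0
static bool pdmCritSectRwSketchTryClaimIdle(PPDMCRITSECTRW pThis, RTNATIVETHREAD hNativeSelf)
{
    uint64_t const u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
    if (   (u64State & ~RTCSRW_DIR_MASK) != 0          /* readers or writers present */
        || !pdmCritSectRwIsCmpWriteU128Supported())    /* no cmpxchg16b */
        return false;

    RTCRITSECTRWSTATE OldState;
    OldState.s.u64State      = u64State;
    OldState.s.hNativeWriter = NIL_RTNATIVETHREAD;

    RTCRITSECTRWSTATE NewState;
    NewState.s.u64State      = (UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
    NewState.s.hNativeWriter = hNativeSelf;

    /* State word and writer handle are updated together or not at all. */
    return ASMAtomicCmpWriteU128U(&pThis->s.Core.u.u128, NewState.u128, OldState.u128);
}
#endif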
137
138/**
139 * Gets the ring-3 native thread handle of the calling thread.
140 *
141 * @returns native thread handle (ring-3).
142 * @param pVM The cross context VM structure.
143 * @param pThis The read/write critical section. This is only used in
144 * R0 and RC.
145 */
146DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectRwGetNativeSelf(PVMCC pVM, PCPDMCRITSECTRW pThis)
147{
148#ifdef IN_RING3
149 RT_NOREF(pVM, pThis);
150 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
151#else
152 AssertMsgReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, ("%RX32\n", pThis->s.Core.u32Magic),
153 NIL_RTNATIVETHREAD);
154 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
155 RTNATIVETHREAD hNativeSelf = pVCpu ? pVCpu->hNativeThread : NIL_RTNATIVETHREAD;
156 Assert(hNativeSelf != NIL_RTNATIVETHREAD);
157#endif
158 return hNativeSelf;
159}
160
161
162DECL_NO_INLINE(static, int) pdmCritSectRwCorrupted(PPDMCRITSECTRW pThis, const char *pszMsg)
163{
164 ASMAtomicWriteU32(&pThis->s.Core.u32Magic, PDMCRITSECTRW_MAGIC_CORRUPT);
165 LogRel(("PDMCritSect: %s pCritSect=%p\n", pszMsg, pThis));
166 return VERR_PDM_CRITSECTRW_IPE;
167}
168
169
170
171#ifdef IN_RING3
172/**
173 * Changes the lock validator sub-class of the read/write critical section.
174 *
175 * It is recommended to try to make sure that nobody is using this critical section
176 * while changing the value.
177 *
178 * @returns The old sub-class. RTLOCKVAL_SUB_CLASS_INVALID is returned if the
179 * lock validator isn't compiled in or either of the parameters is
180 * invalid.
181 * @param pThis Pointer to the read/write critical section.
182 * @param uSubClass The new sub-class value.
183 */
184VMMDECL(uint32_t) PDMR3CritSectRwSetSubClass(PPDMCRITSECTRW pThis, uint32_t uSubClass)
185{
186 AssertPtrReturn(pThis, RTLOCKVAL_SUB_CLASS_INVALID);
187 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
188# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
189 AssertReturn(!(pThis->s.Core.fFlags & RTCRITSECT_FLAGS_NOP), RTLOCKVAL_SUB_CLASS_INVALID);
190
191 RTLockValidatorRecSharedSetSubClass(pThis->s.Core.pValidatorRead, uSubClass);
192 return RTLockValidatorRecExclSetSubClass(pThis->s.Core.pValidatorWrite, uSubClass);
193# else
194 NOREF(uSubClass);
195 return RTLOCKVAL_SUB_CLASS_INVALID;
196# endif
197}
198#endif /* IN_RING3 */
199
200
201/**
202 * Worker for pdmCritSectRwEnterShared returning with read-ownership of the CS.
203 */
204DECL_FORCE_INLINE(int) pdmCritSectRwEnterSharedGotIt(PPDMCRITSECTRW pThis, PCRTLOCKVALSRCPOS pSrcPos,
205 bool fNoVal, RTTHREAD hThreadSelf)
206{
207#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
208 if (!fNoVal)
209 RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
210#else
211 RT_NOREF(pSrcPos, fNoVal, hThreadSelf);
212#endif
213
214 /* got it! */
215 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared));
216 Assert((PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT));
217 return VINF_SUCCESS;
218}
219
220/**
221 * Worker for pdmCritSectRwEnterShared and pdmCritSectRwEnterSharedBailOut
222 * that decrements the wait count and maybe resets the semaphore.
223 */
224DECLINLINE(int) pdmCritSectRwEnterSharedGotItAfterWaiting(PVMCC pVM, PPDMCRITSECTRW pThis, uint64_t u64State,
225 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal, RTTHREAD hThreadSelf)
226{
227 for (;;)
228 {
229 uint64_t const u64OldState = u64State;
230 uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
231 AssertReturn(cWait > 0, pdmCritSectRwCorrupted(pThis, "Invalid waiting read count"));
232 AssertReturn((u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT > 0,
233 pdmCritSectRwCorrupted(pThis, "Invalid read count"));
234 cWait--;
235 u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
236 u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;
237
238 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
239 {
240 if (cWait == 0)
241 {
242 if (ASMAtomicXchgBool(&pThis->s.Core.fNeedReset, false))
243 {
244 int rc = SUPSemEventMultiReset(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
245 AssertRCReturn(rc, rc);
246 }
247 }
248 return pdmCritSectRwEnterSharedGotIt(pThis, pSrcPos, fNoVal, hThreadSelf);
249 }
250
251 ASMNopPause();
252 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
253 ASMNopPause();
254
255 u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
256 }
257 /* not reached */
258}
259
260
261#if defined(IN_RING0) || (defined(IN_RING3) && defined(PDMCRITSECTRW_STRICT))
262/**
263 * Worker for pdmCritSectRwEnterSharedContended that decrements both read counts
264 * and returns @a rc.
265 *
266 * @note May return VINF_SUCCESS if we race the exclusive leave function and
267 * come out on the bottom.
268 *
269 * Ring-3 only calls in a case where it is _not_ acceptable to take the
270 * lock, so even if we get the lock we'll have to leave. In the ring-0
271 * contexts, we can safely return VINF_SUCCESS in case of a race.
272 */
273DECL_NO_INLINE(static, int) pdmCritSectRwEnterSharedBailOut(PVMCC pVM, PPDMCRITSECTRW pThis, int rc,
274 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal, RTTHREAD hThreadSelf)
275{
276#ifdef IN_RING0
277 uint64_t const tsStart = RTTimeNanoTS();
278 uint64_t cNsElapsed = 0;
279#endif
280 for (;;)
281 {
282 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
283 uint64_t u64OldState = u64State;
284
285 uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
286 AssertReturn(cWait > 0, pdmCritSectRwCorrupted(pThis, "Invalid waiting read count on bailout"));
287 cWait--;
288
289 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
290 AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis, "Invalid read count on bailout"));
291
292 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
293 {
294 c--;
295 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
296 u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);
297 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
298 return rc;
299 }
300 else
301 {
302 /*
303 * The direction changed, so we can actually get the lock now.
304 *
305 * This means that we _have_ to wait on the semaphore to be signalled
306 * so we can properly reset it. Otherwise the stuff gets out of whack,
307 * because signalling and resetting will race one another. An
308 * exception would be if we're not the last reader waiting and don't
309 * need to worry about the resetting.
310 *
311 * An option would be to do the resetting in PDMCritSectRwEnterExcl,
312 * but that would still leave a racing PDMCritSectRwEnterShared
313 * spinning hard for a little bit, which isn't great...
314 */
315 if (cWait == 0)
316 {
317# ifdef IN_RING0
318 /* Do timeout processing first to avoid redoing the above. */
319 uint32_t cMsWait;
320 if (cNsElapsed <= RT_NS_10SEC)
321 cMsWait = 32;
322 else
323 {
324 u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
325 u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;
326 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
327 {
328 LogFunc(("%p: giving up\n", pThis));
329 return rc;
330 }
331 cMsWait = 2;
332 }
333
334 int rcWait = SUPSemEventMultiWait(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead, cMsWait);
335 Log11Func(("%p: rc=%Rrc %'RU64 ns (hNativeWriter=%p u64State=%#RX64)\n", pThis, rcWait,
336 RTTimeNanoTS() - tsStart, pThis->s.Core.u.s.hNativeWriter, pThis->s.Core.u.s.u64State));
337# else
338 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
339 int rcWait = SUPSemEventMultiWaitNoResume(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead, RT_MS_5SEC);
340 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
341# endif
342 if (rcWait == VINF_SUCCESS)
343 {
344# ifdef IN_RING0
345 return pdmCritSectRwEnterSharedGotItAfterWaiting(pVM, pThis, u64State, pSrcPos, fNoVal, hThreadSelf);
346# else
347 /* ring-3: Cannot return VINF_SUCCESS. */
348 Assert(RT_FAILURE_NP(rc));
349 int rc2 = pdmCritSectRwEnterSharedGotItAfterWaiting(pVM, pThis, u64State, pSrcPos, fNoVal, hThreadSelf);
350 if (RT_SUCCESS(rc2))
351 rc2 = pdmCritSectRwLeaveSharedWorker(pVM, pThis, fNoVal);
352 return rc;
353# endif
354 }
355 AssertMsgReturn(rcWait == VERR_TIMEOUT || rcWait == VERR_INTERRUPTED,
356 ("%p: rcWait=%Rrc rc=%Rrc", pThis, rcWait, rc),
357 RT_FAILURE_NP(rcWait) ? rcWait : -rcWait);
358 }
359 else
360 {
361 u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
362 u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;
363 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
364 return pdmCritSectRwEnterSharedGotIt(pThis, pSrcPos, fNoVal, hThreadSelf);
365 }
366
367# ifdef IN_RING0
368 /* Calculate the elapsed time here to avoid redoing state work. */
369 cNsElapsed = RTTimeNanoTS() - tsStart;
370# endif
371 }
372
373 ASMNopPause();
374 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
375 ASMNopPause();
376 }
377}
378#endif /* IN_RING0 || (IN_RING3 && PDMCRITSECTRW_STRICT) */
379
380
381/**
382 * Worker for pdmCritSectRwEnterShared that handles waiting for a contended CS.
383 * Caller has already added us to the read and read-wait counters.
384 */
385static int pdmCritSectRwEnterSharedContended(PVMCC pVM, PVMCPUCC pVCpu, PPDMCRITSECTRW pThis,
386 int rcBusy, PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal, RTTHREAD hThreadSelf)
387{
388 PSUPDRVSESSION const pSession = pVM->pSession;
389 SUPSEMEVENTMULTI const hEventMulti = (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead;
390# ifdef IN_RING0
391 uint64_t const tsStart = RTTimeNanoTS();
392 uint64_t cNsMaxTotal = RT_NS_5MIN;
393 uint32_t cMsMaxOne = RT_MS_5SEC;
394 bool fNonInterruptible = false;
395# endif
396
397 for (uint32_t iLoop = 0; ; iLoop++)
398 {
399 /*
400 * Wait for the direction to switch.
401 */
402 int rc;
403# ifdef IN_RING3
404# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
405 rc = RTLockValidatorRecSharedCheckBlocking(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, true,
406 RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_READ, false);
407 if (RT_FAILURE(rc))
408 return pdmCritSectRwEnterSharedBailOut(pVM, pThis, rc, pSrcPos, fNoVal, hThreadSelf);
409# else
410 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
411# endif
412# endif
413
414 for (;;)
415 {
416 /*
417 * We always wait with a timeout so we can re-check the structure sanity
418 * and not get stuck waiting on a corrupt or deleted section.
419 */
420# ifdef IN_RING3
421 rc = SUPSemEventMultiWaitNoResume(pSession, hEventMulti, RT_MS_5SEC);
422# else
423 rc = !fNonInterruptible
424 ? SUPSemEventMultiWaitNoResume(pSession, hEventMulti, cMsMaxOne)
425 : SUPSemEventMultiWait(pSession, hEventMulti, cMsMaxOne);
426 Log11Func(("%p: rc=%Rrc %'RU64 ns (cMsMaxOne=%RU64 hNativeWriter=%p u64State=%#RX64)\n", pThis, rc,
427 RTTimeNanoTS() - tsStart, cMsMaxOne, pThis->s.Core.u.s.hNativeWriter, pThis->s.Core.u.s.u64State));
428# endif
429 if (RT_LIKELY(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC))
430 { /* likely */ }
431 else
432 {
433# ifdef IN_RING3
434 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
435# endif
436 return VERR_SEM_DESTROYED;
437 }
438 if (RT_LIKELY(rc == VINF_SUCCESS))
439 break;
440
441 /*
442 * Timeout and interrupted waits need careful handling in ring-0
443 * because we're cooperating with ring-3 on this critical section
444 * and thus need to make absolutely sure we won't get stuck here.
445 *
446 * The r0 interrupted case means something is pending (termination,
447 * signal, APC, debugger, whatever), so we must try our best to
448 * return to the caller and to ring-3 so it can be dealt with.
449 */
450 if (rc == VERR_TIMEOUT || rc == VERR_INTERRUPTED)
451 {
452# ifdef IN_RING0
453 uint64_t const cNsElapsed = RTTimeNanoTS() - tsStart;
454 int const rcTerm = RTThreadQueryTerminationStatus(NIL_RTTHREAD);
455 AssertMsg(rcTerm == VINF_SUCCESS || rcTerm == VERR_NOT_SUPPORTED || rcTerm == VINF_THREAD_IS_TERMINATING,
456 ("rcTerm=%Rrc\n", rcTerm));
457 if (rcTerm == VERR_NOT_SUPPORTED)
458 cNsMaxTotal = RT_NS_1MIN;
459
460 if (rc == VERR_TIMEOUT)
461 {
462 /* Try to get out of here with a non-VINF_SUCCESS status if
463 the thread is terminating or if the timeout has been exceeded. */
464 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwSharedVerrTimeout);
465 if ( rcTerm == VINF_THREAD_IS_TERMINATING
466 || cNsElapsed > cNsMaxTotal)
467 return pdmCritSectRwEnterSharedBailOut(pVM, pThis, rcBusy != VINF_SUCCESS ? rcBusy : rc,
468 pSrcPos, fNoVal, hThreadSelf);
469 }
470 else
471 {
472 /* For interrupt cases, we must return if we can. If rcBusy is VINF_SUCCESS,
473 we will try non-interruptible sleep for a while to help resolve the issue
474 w/o guru'ing. */
475 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwSharedVerrInterrupted);
476 if ( rcTerm != VINF_THREAD_IS_TERMINATING
477 && rcBusy == VINF_SUCCESS
478 && pVCpu != NULL
479 && cNsElapsed <= cNsMaxTotal)
480 {
481 if (!fNonInterruptible)
482 {
483 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwSharedNonInterruptibleWaits);
484 fNonInterruptible = true;
485 cMsMaxOne = 32;
486 uint64_t cNsLeft = cNsMaxTotal - cNsElapsed;
487 if (cNsLeft > RT_NS_10SEC)
488 cNsMaxTotal = cNsElapsed + RT_NS_10SEC;
489 }
490 }
491 else
492 return pdmCritSectRwEnterSharedBailOut(pVM, pThis, rcBusy != VINF_SUCCESS ? rcBusy : rc,
493 pSrcPos, fNoVal, hThreadSelf);
494 }
495# else /* IN_RING3 */
496 RT_NOREF(pVM, pVCpu, rcBusy);
497# endif /* IN_RING3 */
498 }
499 /*
500 * Any other return code is fatal.
501 */
502 else
503 {
504# ifdef IN_RING3
505 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
506# endif
507 AssertMsgFailed(("rc=%Rrc\n", rc));
508 return RT_FAILURE_NP(rc) ? rc : -rc;
509 }
510 }
511
512# ifdef IN_RING3
513 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
514# endif
515
516 /*
517 * Check the direction.
518 */
519 Assert(pThis->s.Core.fNeedReset);
520 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
521 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
522 {
523 /*
524 * Decrement the wait count and maybe reset the semaphore (if we're last).
525 */
526 return pdmCritSectRwEnterSharedGotItAfterWaiting(pVM, pThis, u64State, pSrcPos, fNoVal, hThreadSelf);
527 }
528
529 AssertMsg(iLoop < 1,
530 ("%p: %u u64State=%#RX64 hNativeWriter=%p\n", pThis, iLoop, u64State, pThis->s.Core.u.s.hNativeWriter));
531 RTThreadYield();
532 }
533
534 /* not reached */
535}
536
537
538/**
539 * Worker that enters a read/write critical section with shared access.
540 *
541 * @returns VBox status code.
542 * @param pVM The cross context VM structure.
543 * @param pThis Pointer to the read/write critical section.
544 * @param rcBusy The busy return code for ring-0 and ring-3.
545 * @param fTryOnly Only try enter it, don't wait.
546 * @param pSrcPos The source position. (Can be NULL.)
547 * @param fNoVal No validation records.
548 */
549static int pdmCritSectRwEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly,
550 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
551{
552 /*
553 * Validate input.
554 */
555 AssertPtr(pThis);
556 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
557
558#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
559 RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
560 if (!fTryOnly)
561 {
562 int rc9;
563 RTNATIVETHREAD hNativeWriter;
564 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
565 if (hNativeWriter != NIL_RTTHREAD && hNativeWriter == pdmCritSectRwGetNativeSelf(pVM, pThis))
566 rc9 = RTLockValidatorRecExclCheckOrder(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
567 else
568 rc9 = RTLockValidatorRecSharedCheckOrder(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
569 if (RT_FAILURE(rc9))
570 return rc9;
571 }
572#else
573 RTTHREAD hThreadSelf = NIL_RTTHREAD;
574#endif
575
576 /*
577 * Work the state.
578 */
579 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
580 uint64_t u64OldState = u64State;
581 for (;;)
582 {
583 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
584 {
585 /* It flows in the right direction, try follow it before it changes. */
586 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
587 c++;
588 Assert(c < RTCSRW_CNT_MASK / 4);
589 AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_READERS);
590 u64State &= ~RTCSRW_CNT_RD_MASK;
591 u64State |= c << RTCSRW_CNT_RD_SHIFT;
592 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
593 return pdmCritSectRwEnterSharedGotIt(pThis, pSrcPos, fNoVal, hThreadSelf);
594 }
595 else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
596 {
597 /* Wrong direction, but we're alone here and can simply try switch the direction. */
598 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
599 u64State |= (UINT64_C(1) << RTCSRW_CNT_RD_SHIFT) | (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT);
600 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
601 {
602 Assert(!pThis->s.Core.fNeedReset);
603 return pdmCritSectRwEnterSharedGotIt(pThis, pSrcPos, fNoVal, hThreadSelf);
604 }
605 }
606 else
607 {
608 /* Is the writer perhaps doing a read recursion? */
609 RTNATIVETHREAD hNativeWriter;
610 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
611 if (hNativeWriter != NIL_RTNATIVETHREAD)
612 {
613 RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
614 if (hNativeSelf == hNativeWriter)
615 {
616#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
617 if (!fNoVal)
618 {
619 int rc9 = RTLockValidatorRecExclRecursionMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core, pSrcPos);
620 if (RT_FAILURE(rc9))
621 return rc9;
622 }
623#endif
624 uint32_t const cReads = ASMAtomicIncU32(&pThis->s.Core.cWriterReads);
625 Assert(cReads < _16K);
626 AssertReturnStmt(cReads < PDM_CRITSECTRW_MAX_RECURSIONS, ASMAtomicDecU32(&pThis->s.Core.cWriterReads),
627 VERR_PDM_CRITSECTRW_TOO_MANY_RECURSIONS);
628 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared));
629 return VINF_SUCCESS; /* don't break! */
630 }
631 }
632
633 /*
634 * If we're only trying, return already.
635 */
636 if (fTryOnly)
637 {
638 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterShared));
639 return VERR_SEM_BUSY;
640 }
641
642#if defined(IN_RING3) || defined(IN_RING0)
643 /*
644 * Add ourselves to the queue and wait for the direction to change.
645 */
646 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
647 c++;
648 Assert(c < RTCSRW_CNT_MASK / 2);
649 AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_READERS);
650
651 uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
652 cWait++;
653 Assert(cWait <= c);
654 Assert(cWait < RTCSRW_CNT_MASK / 2);
655 AssertReturn(cWait < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_READERS);
656
657 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
658 u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);
659
660 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
661 {
662 /*
663 * In ring-3 it's straightforward, just optimize the RTThreadSelf() call.
664 */
665# if defined(IN_RING3) && defined(PDMCRITSECTRW_STRICT)
666 return pdmCritSectRwEnterSharedContended(pVM, NULL, pThis, rcBusy, pSrcPos, fNoVal, hThreadSelf);
667# elif defined(IN_RING3)
668 return pdmCritSectRwEnterSharedContended(pVM, NULL, pThis, rcBusy, pSrcPos, fNoVal, RTThreadSelf());
669# else /* IN_RING0 */
670 /*
671 * In ring-0 context we have to take the special VT-x/AMD-V HM context into
672 * account when waiting on contended locks.
673 */
674 PVMCPUCC pVCpu = VMMGetCpu(pVM);
675 if (pVCpu)
676 {
677 VMMR0EMTBLOCKCTX Ctx;
678 int rc = VMMR0EmtPrepareToBlock(pVCpu, rcBusy, __FUNCTION__, pThis, &Ctx);
679 if (rc == VINF_SUCCESS)
680 {
681 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
682
683 rc = pdmCritSectRwEnterSharedContended(pVM, pVCpu, pThis, rcBusy, pSrcPos, fNoVal, hThreadSelf);
684
685 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
686 }
687 else
688 {
689 //STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLockBusy);
690 rc = pdmCritSectRwEnterSharedBailOut(pVM, pThis, rc, pSrcPos, fNoVal, hThreadSelf);
691 }
692 return rc;
693 }
694
695 /* Non-EMT. */
696 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
697 return pdmCritSectRwEnterSharedContended(pVM, NULL, pThis, rcBusy, pSrcPos, fNoVal, hThreadSelf);
698# endif /* IN_RING0 */
699 }
700
701#else /* !IN_RING3 && !IN_RING0 */
702 /*
703 * We cannot call SUPSemEventMultiWaitNoResume in this context. Go
704 * back to ring-3 and do it there or return rcBusy.
705 */
706# error "Unused code."
707 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterShared));
708 if (rcBusy == VINF_SUCCESS)
709 {
710 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
711 /** @todo Should actually do this via VMMR0.cpp instead of going all the way
712 * back to ring-3. Goes for both kinds of crit sects. */
713 return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_SHARED, MMHyperCCToR3(pVM, pThis));
714 }
715 return rcBusy;
716#endif /* !IN_RING3 && !IN_RING0 */
717 }
718
719 ASMNopPause();
720 if (RT_LIKELY(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC))
721 { /* likely */ }
722 else
723 return VERR_SEM_DESTROYED;
724 ASMNopPause();
725
726 u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
727 u64OldState = u64State;
728 }
729 /* not reached */
730}
731
732
733/**
734 * Enter a critical section with shared (read) access.
735 *
736 * @returns VBox status code.
737 * @retval VINF_SUCCESS on success.
738 * @retval rcBusy if in ring-0 or raw-mode context and it is busy.
739 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
740 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
741 * during the operation.
742 *
743 * @param pVM The cross context VM structure.
744 * @param pThis Pointer to the read/write critical section.
745 * @param rcBusy The status code to return when we're in RC or R0 and the
746 * section is busy. Pass VINF_SUCCESS to acquire the
747 * critical section thru a ring-3 call if necessary.
748 * @sa PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterShared,
749 * PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwLeaveShared,
750 * RTCritSectRwEnterShared.
751 */
752VMMDECL(int) PDMCritSectRwEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy)
753{
754#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
755 return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
756#else
757 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
758 return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
759#endif
760}
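
As a hedged usage illustration (not part of this file), a caller in any context would typically bracket read-only access as sketched below; the function and critical-section names are invented, and passing VERR_SEM_BUSY as rcBusy makes an R0/RC caller that cannot wait fail fast rather than detour through ring-3:

/* Illustrative caller sketch, not part of the file; names are hypothetical. */
#if 0
static int sketchReadSomeDeviceState(PVMCC pVM, PPDMCRITSECTRW pCritSectRw)
{
    int rc = PDMCritSectRwEnterShared(pVM, pCritSectRw, VERR_SEM_BUSY /* rcBusy for R0/RC */);
    if (RT_SUCCESS(rc))
    {
        /* ... read-only access; other readers may be inside concurrently ... */
        rc = PDMCritSectRwLeaveShared(pVM, pCritSectRw);
    }
    /* rc == VERR_SEM_BUSY: a writer owns or is waiting for the section and this
       context chose not to block. */
    return rc;
}
#endif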
761
762
763/**
764 * Enter a critical section with shared (read) access.
765 *
766 * @returns VBox status code.
767 * @retval VINF_SUCCESS on success.
768 * @retval rcBusy if in ring-0 or raw-mode context and it is busy.
769 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
770 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
771 * during the operation.
772 *
773 * @param pVM The cross context VM structure.
774 * @param pThis Pointer to the read/write critical section.
775 * @param rcBusy The status code to return when we're in RC or R0 and the
776 * section is busy. Pass VINF_SUCCESS to acquire the
777 * critical section thru a ring-3 call if necessary.
778 * @param uId Where we're entering the section.
779 * @param SRC_POS The source position.
780 * @sa PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
781 * PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwLeaveShared,
782 * RTCritSectRwEnterSharedDebug.
783 */
784VMMDECL(int) PDMCritSectRwEnterSharedDebug(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
785{
786 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
787#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
788 return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
789#else
790 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
791 return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
792#endif
793}
794
795
796/**
797 * Try enter a critical section with shared (read) access.
798 *
799 * @returns VBox status code.
800 * @retval VINF_SUCCESS on success.
801 * @retval VERR_SEM_BUSY if the critsect was owned.
802 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
803 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
804 * during the operation.
805 *
806 * @param pVM The cross context VM structure.
807 * @param pThis Pointer to the read/write critical section.
808 * @sa PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwEnterShared,
809 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwLeaveShared,
810 * RTCritSectRwTryEnterShared.
811 */
812VMMDECL(int) PDMCritSectRwTryEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis)
813{
814#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
815 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
816#else
817 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
818 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
819#endif
820}
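
A corresponding try-style sketch (again invented for illustration): the caller never waits and handles VERR_SEM_BUSY itself:

/* Illustrative caller sketch, not part of the file; names are hypothetical. */
#if 0
static bool sketchTryPeekDeviceState(PVMCC pVM, PPDMCRITSECTRW pCritSectRw)
{
    if (RT_SUCCESS(PDMCritSectRwTryEnterShared(pVM, pCritSectRw)))
    {
        /* ... quick read-only peek ... */
        PDMCritSectRwLeaveShared(pVM, pCritSectRw);
        return true;
    }
    return false; /* typically VERR_SEM_BUSY: fall back to a slower path */
}
#endif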
821
822
823/**
824 * Try enter a critical section with shared (read) access.
825 *
826 * @returns VBox status code.
827 * @retval VINF_SUCCESS on success.
828 * @retval VERR_SEM_BUSY if the critsect was owned.
829 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
830 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
831 * during the operation.
832 *
833 * @param pVM The cross context VM structure.
834 * @param pThis Pointer to the read/write critical section.
835 * @param uId Where we're entering the section.
836 * @param SRC_POS The source position.
837 * @sa PDMCritSectRwTryEnterShared, PDMCritSectRwEnterShared,
838 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwLeaveShared,
839 * RTCritSectRwTryEnterSharedDebug.
840 */
841VMMDECL(int) PDMCritSectRwTryEnterSharedDebug(PVMCC pVM, PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
842{
843 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
844#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
845 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
846#else
847 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
848 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
849#endif
850}
851
852
853#ifdef IN_RING3
854/**
855 * Enters a PDM read/write critical section with shared (read) access.
856 *
857 * @returns VINF_SUCCESS if entered successfully.
858 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
859 * during the operation.
860 *
861 * @param pVM The cross context VM structure.
862 * @param pThis Pointer to the read/write critical section.
863 * @param fCallRing3 Whether this is a VMMRZCallRing3() request.
864 */
865VMMR3DECL(int) PDMR3CritSectRwEnterSharedEx(PVM pVM, PPDMCRITSECTRW pThis, bool fCallRing3)
866{
867 return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, false /*fTryAgain*/, NULL, fCallRing3);
868}
869#endif
870
871
872/**
873 * Leave a critical section held with shared access.
874 *
875 * @returns VBox status code.
876 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
877 * during the operation.
878 * @param pVM The cross context VM structure.
879 * @param pThis Pointer to the read/write critical section.
880 * @param fNoVal No validation records (i.e. queued release).
881 * @sa PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
882 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterSharedDebug,
883 * PDMCritSectRwLeaveExcl, RTCritSectRwLeaveShared.
884 */
885static int pdmCritSectRwLeaveSharedWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal)
886{
887 /*
888 * Validate handle.
889 */
890 AssertPtr(pThis);
891 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
892
893#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
894 NOREF(fNoVal);
895#endif
896
897 /*
898 * Check the direction and take action accordingly.
899 */
900#ifdef IN_RING0
901 PVMCPUCC pVCpu = NULL;
902#endif
903 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
904 uint64_t u64OldState = u64State;
905 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
906 {
907#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
908 if (fNoVal)
909 Assert(!RTLockValidatorRecSharedIsOwner(pThis->s.Core.pValidatorRead, NIL_RTTHREAD));
910 else
911 {
912 int rc9 = RTLockValidatorRecSharedCheckAndRelease(pThis->s.Core.pValidatorRead, NIL_RTTHREAD);
913 if (RT_FAILURE(rc9))
914 return rc9;
915 }
916#endif
917 for (;;)
918 {
919 uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
920 AssertReturn(c > 0, VERR_NOT_OWNER);
921 c--;
922
923 if ( c > 0
924 || (u64State & RTCSRW_CNT_WR_MASK) == 0)
925 {
926 /* Don't change the direction. */
927 u64State &= ~RTCSRW_CNT_RD_MASK;
928 u64State |= c << RTCSRW_CNT_RD_SHIFT;
929 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
930 break;
931 }
932 else
933 {
934#if defined(IN_RING3) || defined(IN_RING0)
935# ifdef IN_RING0
936 Assert(RTSemEventIsSignalSafe() == RTSemEventMultiIsSignalSafe());
937 if (!pVCpu)
938 pVCpu = VMMGetCpu(pVM);
939 if ( pVCpu == NULL /* non-EMT access, if we implement it must be able to block */
940 || VMMRZCallRing3IsEnabled(pVCpu)
941 || RTSemEventIsSignalSafe()
942 || ( VMMR0ThreadCtxHookIsEnabled(pVCpu) /* Doesn't matter if Signal() blocks if we have hooks, ... */
943 && RTThreadPreemptIsEnabled(NIL_RTTHREAD) /* ... and preemption is still enabled, */
944 && ASMIntAreEnabled()) /* ... and interrupts haven't yet been disabled. Special pre-GC HM env. */
945 )
946# endif
947 {
948 /* Reverse the direction and signal the writer threads. */
949 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_DIR_MASK);
950 u64State |= RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT;
951 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
952 {
953 int rc;
954# ifdef IN_RING0
955 STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveShared);
956 if (!RTSemEventIsSignalSafe() && pVCpu != NULL)
957 {
958 VMMR0EMTBLOCKCTX Ctx;
959 rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pThis, &Ctx);
960 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
961
962 rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
963
964 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
965 }
966 else
967# endif
968 rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
969 AssertRC(rc);
970 return rc;
971 }
972 }
973#endif /* IN_RING3 || IN_RING0 */
974#ifndef IN_RING3
975# ifdef IN_RING0
976 else
977# endif
978 {
979 /* Queue the exit request (ring-3). */
980# ifndef IN_RING0
981 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
982# endif
983 uint32_t i = pVCpu->pdm.s.cQueuedCritSectRwShrdLeaves++;
984 LogFlow(("PDMCritSectRwLeaveShared: [%d]=%p => R3 c=%d (%#llx)\n", i, pThis, c, u64State));
985 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves),
986 ("i=%u\n", i), VERR_PDM_CRITSECTRW_IPE);
987 pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i] = pThis->s.pSelfR3;
988 VMM_ASSERT_RELEASE_MSG_RETURN(pVM,
989 RT_VALID_PTR(pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i])
990 && ((uintptr_t)pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i] & PAGE_OFFSET_MASK)
991 == ((uintptr_t)pThis & PAGE_OFFSET_MASK),
992 ("%p vs %p\n", pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i], pThis),
993 pdmCritSectRwCorrupted(pThis, "Invalid self pointer"));
994 VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
995 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
996 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
997 STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveShared);
998 break;
999 }
1000#endif
1001 }
1002
1003 ASMNopPause();
1004 if (RT_LIKELY(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC))
1005 { }
1006 else
1007 return VERR_SEM_DESTROYED;
1008 ASMNopPause();
1009
1010 u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1011 u64OldState = u64State;
1012 }
1013 }
1014 else
1015 {
1016 /*
1017 * Write direction. Check that it's the owner calling and that it has reads to undo.
1018 */
1019 RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
1020 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
1021
1022 RTNATIVETHREAD hNativeWriter;
1023 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
1024 AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
1025 AssertReturn(pThis->s.Core.cWriterReads > 0, VERR_NOT_OWNER);
1026#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1027 if (!fNoVal)
1028 {
1029 int rc = RTLockValidatorRecExclUnwindMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core);
1030 if (RT_FAILURE(rc))
1031 return rc;
1032 }
1033#endif
1034 uint32_t cDepth = ASMAtomicDecU32(&pThis->s.Core.cWriterReads);
1035 AssertReturn(cDepth < PDM_CRITSECTRW_MAX_RECURSIONS, pdmCritSectRwCorrupted(pThis, "too many writer-read recursions"));
1036 }
1037
1038 return VINF_SUCCESS;
1039}
1040
1041
1042/**
1043 * Leave a critical section held with shared access.
1044 *
1045 * @returns VBox status code.
1046 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1047 * during the operation.
1048 * @param pVM The cross context VM structure.
1049 * @param pThis Pointer to the read/write critical section.
1050 * @sa PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
1051 * PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterSharedDebug,
1052 * PDMCritSectRwLeaveExcl, RTCritSectRwLeaveShared.
1053 */
1054VMMDECL(int) PDMCritSectRwLeaveShared(PVMCC pVM, PPDMCRITSECTRW pThis)
1055{
1056 return pdmCritSectRwLeaveSharedWorker(pVM, pThis, false /*fNoVal*/);
1057}
1058
1059
1060#if defined(IN_RING3) || defined(IN_RING0)
1061/**
1062 * PDMCritSectBothFF interface.
1063 *
1064 * @param pVM The cross context VM structure.
1065 * @param pThis Pointer to the read/write critical section.
1066 */
1067void pdmCritSectRwLeaveSharedQueued(PVMCC pVM, PPDMCRITSECTRW pThis)
1068{
1069 pdmCritSectRwLeaveSharedWorker(pVM, pThis, true /*fNoVal*/);
1070}
1071#endif
1072
1073
1074/**
1075 * Worker for pdmCritSectRwEnterExcl that bails out on wait failure.
1076 *
1077 * @returns @a rc unless corrupted.
1078 * @param pThis Pointer to the read/write critical section.
1079 * @param rc The status to return.
1080 */
1081DECL_NO_INLINE(static, int) pdmCritSectRwEnterExclBailOut(PPDMCRITSECTRW pThis, int rc)
1082{
1083 /*
1084 * Decrement the counts and return the error.
1085 */
1086 for (;;)
1087 {
1088 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1089 uint64_t const u64OldState = u64State;
1090 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
1091 AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis, "Invalid write count on bailout"));
1092 c--;
1093 u64State &= ~RTCSRW_CNT_WR_MASK;
1094 u64State |= c << RTCSRW_CNT_WR_SHIFT;
1095 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1096 return rc;
1097
1098 ASMNopPause();
1099 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
1100 ASMNopPause();
1101 }
1102}
1103
1104
1105/**
1106 * Worker for pdmCritSectRwEnterExcl that handles the red tape after we've
1107 * gotten exclusive ownership of the critical section.
1108 */
1109DECL_FORCE_INLINE(int) pdmCritSectRwEnterExclFirst(PPDMCRITSECTRW pThis, PCRTLOCKVALSRCPOS pSrcPos,
1110 bool fNoVal, RTTHREAD hThreadSelf)
1111{
1112 RT_NOREF(hThreadSelf, fNoVal, pSrcPos);
1113 Assert((PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));
1114
1115#ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
1116 pThis->s.Core.cWriteRecursions = 1;
1117#else
1118 ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 1);
1119#endif
1120 Assert(pThis->s.Core.cWriterReads == 0);
1121
1122#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1123 if (!fNoVal)
1124 {
1125 if (hThreadSelf == NIL_RTTHREAD)
1126 hThreadSelf = RTThreadSelfAutoAdopt();
1127 RTLockValidatorRecExclSetOwner(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, true);
1128 }
1129#endif
1130 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl));
1131 STAM_PROFILE_ADV_START(&pThis->s.StatWriteLocked, swl);
1132 return VINF_SUCCESS;
1133}
1134
1135
1136#if defined(IN_RING3) || defined(IN_RING0)
1137/**
1138 * Worker for pdmCritSectRwEnterExcl that handles waiting when the section is
1139 * contended.
1140 */
1141static int pdmR3R0CritSectRwEnterExclContended(PVMCC pVM, PVMCPUCC pVCpu, PPDMCRITSECTRW pThis, RTNATIVETHREAD hNativeSelf,
1142 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal, int rcBusy, RTTHREAD hThreadSelf)
1143{
1144 RT_NOREF(hThreadSelf, rcBusy, pSrcPos, fNoVal, pVCpu);
1145
1146 PSUPDRVSESSION const pSession = pVM->pSession;
1147 SUPSEMEVENT const hEvent = (SUPSEMEVENT)pThis->s.Core.hEvtWrite;
1148# ifdef IN_RING0
1149 uint64_t const tsStart = RTTimeNanoTS();
1150 uint64_t cNsMaxTotal = RT_NS_5MIN;
1151 uint32_t cMsMaxOne = RT_MS_5SEC;
1152 bool fNonInterruptible = false;
1153# endif
1154
1155 for (uint32_t iLoop = 0; ; iLoop++)
1156 {
1157 /*
1158 * Wait for our turn.
1159 */
1160 int rc;
1161# ifdef IN_RING3
1162# ifdef PDMCRITSECTRW_STRICT
1163 rc = RTLockValidatorRecExclCheckBlocking(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, true,
1164 RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_WRITE, false);
1165 if (RT_SUCCESS(rc))
1166 { /* likely */ }
1167 else
1168 return pdmCritSectRwEnterExclBailOut(pThis, rc);
1169# else
1170 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, false);
1171# endif
1172# endif
1173
1174 for (;;)
1175 {
1176 /*
1177 * We always wait with a timeout so we can re-check the structure sanity
1178 * and not get stuck waiting on a corrupt or deleted section.
1179 */
1180# ifdef IN_RING3
1181 rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_MS_5SEC);
1182# else
1183 rc = !fNonInterruptible
1184 ? SUPSemEventWaitNoResume(pSession, hEvent, cMsMaxOne)
1185 : SUPSemEventWait(pSession, hEvent, cMsMaxOne);
1186 Log11Func(("%p: rc=%Rrc %'RU64 ns (cMsMaxOne=%RU64 hNativeWriter=%p)\n",
1187 pThis, rc, RTTimeNanoTS() - tsStart, cMsMaxOne, pThis->s.Core.u.s.hNativeWriter));
1188# endif
1189 if (RT_LIKELY(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC))
1190 { /* likely */ }
1191 else
1192 {
1193# ifdef IN_RING3
1194 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
1195# endif
1196 return VERR_SEM_DESTROYED;
1197 }
1198 if (RT_LIKELY(rc == VINF_SUCCESS))
1199 break;
1200
1201 /*
1202 * Timeout and interrupted waits need careful handling in ring-0
1203 * because we're cooperating with ring-3 on this critical section
1204 * and thus need to make absolutely sure we won't get stuck here.
1205 *
1206 * The r0 interrupted case means something is pending (termination,
1207 * signal, APC, debugger, whatever), so we must try our best to
1208 * return to the caller and to ring-3 so it can be dealt with.
1209 */
1210 if (rc == VERR_TIMEOUT || rc == VERR_INTERRUPTED)
1211 {
1212# ifdef IN_RING0
1213 uint64_t const cNsElapsed = RTTimeNanoTS() - tsStart;
1214 int const rcTerm = RTThreadQueryTerminationStatus(NIL_RTTHREAD);
1215 AssertMsg(rcTerm == VINF_SUCCESS || rcTerm == VERR_NOT_SUPPORTED || rcTerm == VINF_THREAD_IS_TERMINATING,
1216 ("rcTerm=%Rrc\n", rcTerm));
1217 if (rcTerm == VERR_NOT_SUPPORTED)
1218 cNsMaxTotal = RT_NS_1MIN;
1219
1220 if (rc == VERR_TIMEOUT)
1221 {
1222 /* Try to get out of here with a non-VINF_SUCCESS status if
1223 the thread is terminating or if the timeout has been exceeded. */
1224 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwExclVerrTimeout);
1225 if ( rcTerm == VINF_THREAD_IS_TERMINATING
1226 || cNsElapsed > cNsMaxTotal)
1227 return pdmCritSectRwEnterExclBailOut(pThis, rcBusy != VINF_SUCCESS ? rcBusy : rc);
1228 }
1229 else
1230 {
1231 /* For interrupt cases, we must return if we can. If rcBusy is VINF_SUCCESS,
1232 we will try non-interruptible sleep for a while to help resolve the issue
1233 w/o guru'ing. */
1234 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwExclVerrInterrupted);
1235 if ( rcTerm != VINF_THREAD_IS_TERMINATING
1236 && rcBusy == VINF_SUCCESS
1237 && pVCpu != NULL
1238 && cNsElapsed <= cNsMaxTotal)
1239 {
1240 if (!fNonInterruptible)
1241 {
1242 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectRwExclNonInterruptibleWaits);
1243 fNonInterruptible = true;
1244 cMsMaxOne = 32;
1245 uint64_t cNsLeft = cNsMaxTotal - cNsElapsed;
1246 if (cNsLeft > RT_NS_10SEC)
1247 cNsMaxTotal = cNsElapsed + RT_NS_10SEC;
1248 }
1249 }
1250 else
1251 return pdmCritSectRwEnterExclBailOut(pThis, rcBusy != VINF_SUCCESS ? rcBusy : rc);
1252
1253 }
1254# else /* IN_RING3 */
1255 RT_NOREF(pVM, pVCpu, rcBusy);
1256# endif /* IN_RING3 */
1257 }
1258 /*
1259 * Any other return code is fatal.
1260 */
1261 else
1262 {
1263# ifdef IN_RING3
1264 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
1265# endif
1266 AssertMsgFailed(("rc=%Rrc\n", rc));
1267 return RT_FAILURE_NP(rc) ? rc : -rc;
1268 }
1269 }
1270
1271# ifdef IN_RING3
1272 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
1273# endif
1274
1275 /*
1276 * Try take exclusive write ownership.
1277 */
1278 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1279 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
1280 {
1281 bool fDone;
1282 ASMAtomicCmpXchgHandle(&pThis->s.Core.u.s.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
1283 if (fDone)
1284 return pdmCritSectRwEnterExclFirst(pThis, pSrcPos, fNoVal, hThreadSelf);
1285 }
1286 AssertMsg(iLoop < 1000, ("%u\n", iLoop)); /* may loop a few times here... */
1287 }
1288}
1289#endif /* IN_RING3 || IN_RING0 */
1290
1291
1292/**
1293 * Worker that enters a read/write critical section with exclusive access.
1294 *
1295 * @returns VBox status code.
1296 * @param pVM The cross context VM structure.
1297 * @param pThis Pointer to the read/write critical section.
1298 * @param rcBusy The busy return code for ring-0 and ring-3.
1299 * @param fTryOnly Only try enter it, don't wait.
1300 * @param pSrcPos The source position. (Can be NULL.)
1301 * @param fNoVal No validation records.
1302 */
1303static int pdmCritSectRwEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly,
1304 PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
1305{
1306 /*
1307 * Validate input.
1308 */
1309 AssertPtr(pThis);
1310 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
1311
1312 RTTHREAD hThreadSelf = NIL_RTTHREAD;
1313#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1314 if (!fTryOnly)
1315 {
1316 hThreadSelf = RTThreadSelfAutoAdopt();
1317 int rc9 = RTLockValidatorRecExclCheckOrder(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
1318 if (RT_FAILURE(rc9))
1319 return rc9;
1320 }
1321#endif
1322
1323 /*
1324 * Check if we're already the owner and just recursing.
1325 */
1326 RTNATIVETHREAD const hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
1327 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
1328 RTNATIVETHREAD hNativeWriter;
1329 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
1330 if (hNativeSelf == hNativeWriter)
1331 {
1332 Assert((PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));
1333#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1334 if (!fNoVal)
1335 {
1336 int rc9 = RTLockValidatorRecExclRecursion(pThis->s.Core.pValidatorWrite, pSrcPos);
1337 if (RT_FAILURE(rc9))
1338 return rc9;
1339 }
1340#endif
1341 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl));
1342#ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
1343 uint32_t const cDepth = ++pThis->s.Core.cWriteRecursions;
1344#else
1345 uint32_t const cDepth = ASMAtomicIncU32(&pThis->s.Core.cWriteRecursions);
1346#endif
1347 AssertReturnStmt(cDepth > 1 && cDepth <= PDM_CRITSECTRW_MAX_RECURSIONS,
1348 ASMAtomicDecU32(&pThis->s.Core.cWriteRecursions),
1349 VERR_PDM_CRITSECTRW_TOO_MANY_RECURSIONS);
1350 return VINF_SUCCESS;
1351 }
1352
1353 /*
1354 * First we try grab an idle critical section using 128-bit atomics.
1355 */
1356 /** @todo This could be moved up before the recursion check. */
1357 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1358#ifdef RTASM_HAVE_CMP_WRITE_U128
1359 if ( (u64State & ~RTCSRW_DIR_MASK) == 0
1360 && pdmCritSectRwIsCmpWriteU128Supported())
1361 {
1362 RTCRITSECTRWSTATE OldState;
1363 OldState.s.u64State = u64State;
1364 OldState.s.hNativeWriter = NIL_RTNATIVETHREAD;
1365 AssertCompile(sizeof(OldState.s.hNativeWriter) == sizeof(OldState.u128.s.Lo));
1366
1367 RTCRITSECTRWSTATE NewState;
1368 NewState.s.u64State = (UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
1369 NewState.s.hNativeWriter = hNativeSelf;
1370
1371 if (ASMAtomicCmpWriteU128U(&pThis->s.Core.u.u128, NewState.u128, OldState.u128))
1372 return pdmCritSectRwEnterExclFirst(pThis, pSrcPos, fNoVal, hThreadSelf);
1373
1374 u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1375 }
1376#endif
1377
1378 /*
1379 * Do it step by step. Update the state to reflect our desire.
1380 */
1381 uint64_t u64OldState = u64State;
1382
1383 for (;;)
1384 {
1385 if ( (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
1386 || (u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) != 0)
1387 {
1388 /* It flows in the right direction, try follow it before it changes. */
1389 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
1390 AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_WRITERS);
1391 c++;
1392 Assert(c < RTCSRW_CNT_WR_MASK / 4);
1393 u64State &= ~RTCSRW_CNT_WR_MASK;
1394 u64State |= c << RTCSRW_CNT_WR_SHIFT;
1395 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1396 break;
1397 }
1398 else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
1399 {
1400 /* Wrong direction, but we're alone here and can simply try switch the direction. */
1401 u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
1402 u64State |= (UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
1403 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1404 break;
1405 }
1406 else if (fTryOnly)
1407 {
1408 /* Wrong direction and we're not supposed to wait, just return. */
1409 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl));
1410 return VERR_SEM_BUSY;
1411 }
1412 else
1413 {
1414 /* Add ourselves to the write count and break out to do the wait. */
1415 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
1416 AssertReturn(c < RTCSRW_CNT_MASK, VERR_PDM_CRITSECTRW_TOO_MANY_WRITERS);
1417 c++;
1418 Assert(c < RTCSRW_CNT_WR_MASK / 4);
1419 u64State &= ~RTCSRW_CNT_WR_MASK;
1420 u64State |= c << RTCSRW_CNT_WR_SHIFT;
1421 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1422 break;
1423 }
1424
1425 ASMNopPause();
1426
1427 if (pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC)
1428 { /* likely */ }
1429 else
1430 return VERR_SEM_DESTROYED;
1431
1432 ASMNopPause();
1433 u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1434 u64OldState = u64State;
1435 }
1436
1437 /*
1438 * If we're in write mode now try grab the ownership. Play fair if there
1439 * are threads already waiting.
1440 */
1441 bool fDone = (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
1442 && ( ((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT) == 1
1443 || fTryOnly);
1444 if (fDone)
1445 {
1446 ASMAtomicCmpXchgHandle(&pThis->s.Core.u.s.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
1447 if (fDone)
1448 return pdmCritSectRwEnterExclFirst(pThis, pSrcPos, fNoVal, hThreadSelf);
1449 }
1450
1451 /*
1452 * Okay, we have contention and will have to wait unless we're just trying.
1453 */
1454 if (fTryOnly)
1455 {
1456 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl)); /** @todo different statistics for this */
1457 return pdmCritSectRwEnterExclBailOut(pThis, VERR_SEM_BUSY);
1458 }
1459
1460 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl));
1461
1462 /*
1463 * Ring-3 is pretty straightforward.
1464 */
1465#if defined(IN_RING3) && defined(PDMCRITSECTRW_STRICT)
1466 return pdmR3R0CritSectRwEnterExclContended(pVM, NULL, pThis, hNativeSelf, pSrcPos, fNoVal, rcBusy, hThreadSelf);
1467#elif defined(IN_RING3)
1468 return pdmR3R0CritSectRwEnterExclContended(pVM, NULL, pThis, hNativeSelf, pSrcPos, fNoVal, rcBusy, RTThreadSelf());
1469
1470#elif defined(IN_RING0)
1471 /*
1472 * In ring-0 context we have to take the special VT-x/AMD-V HM context into
1473 * account when waiting on contended locks.
1474 */
1475 PVMCPUCC pVCpu = VMMGetCpu(pVM);
1476 if (pVCpu)
1477 {
1478 VMMR0EMTBLOCKCTX Ctx;
1479 int rc = VMMR0EmtPrepareToBlock(pVCpu, rcBusy, __FUNCTION__, pThis, &Ctx);
1480 if (rc == VINF_SUCCESS)
1481 {
1482 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1483
1484 rc = pdmR3R0CritSectRwEnterExclContended(pVM, pVCpu, pThis, hNativeSelf, pSrcPos, fNoVal, rcBusy, NIL_RTTHREAD);
1485
1486 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
1487 }
1488 else
1489 {
1490 //STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLockBusy);
1491 rc = pdmCritSectRwEnterExclBailOut(pThis, rc);
1492 }
1493 return rc;
1494 }
1495
1496 /* Non-EMT. */
1497 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1498 return pdmR3R0CritSectRwEnterExclContended(pVM, NULL, pThis, hNativeSelf, pSrcPos, fNoVal, rcBusy, NIL_RTTHREAD);
1499
1500#else
1501# error "Unused."
1502 /*
1503 * Raw-mode: Call host and take it there if rcBusy is VINF_SUCCESS.
1504 */
1505 rcBusy = pdmCritSectRwEnterExclBailOut(pThis, rcBusy);
1506 if (rcBusy == VINF_SUCCESS)
1507 {
1508 Assert(!fTryOnly);
1509 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
1510 /** @todo Should actually do this via VMMR0.cpp instead of going all the way
1511 * back to ring-3. Goes for both kinds of crit sects. */
1512 return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_EXCL, MMHyperCCToR3(pVM, pThis));
1513 }
1514 return rcBusy;
1515#endif
1516}
1517
1518
1519/**
1520 * Enter a critical section with exclusive (write) access.
1521 *
1522 * @returns VBox status code.
1523 * @retval VINF_SUCCESS on success.
1524 * @retval rcBusy if in ring-0 or raw-mode context and it is busy.
1525 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
1526 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1527 * during the operation.
1528 *
1529 * @param pVM The cross context VM structure.
1530 * @param pThis Pointer to the read/write critical section.
1531 * @param rcBusy The status code to return when we're in RC or R0 and the
1532 * section is busy. Pass VINF_SUCCESS to acquire the
1533 * critical section through a ring-3 call if necessary.
1534 * @sa PDMCritSectRwEnterExclDebug, PDMCritSectRwTryEnterExcl,
1535 * PDMCritSectRwTryEnterExclDebug,
1536 * PDMCritSectEnterDebug, PDMCritSectEnter,
1537 * RTCritSectRwEnterExcl.
1538 */
1539VMMDECL(int) PDMCritSectRwEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy)
1540{
1541#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1542 return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryAgain*/, NULL, false /*fNoVal*/);
1543#else
1544 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
1545 return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryAgain*/, &SrcPos, false /*fNoVal*/);
1546#endif
1547}
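
/*
 * Usage sketch: the typical pairing of PDMCritSectRwEnterExcl() with
 * PDMCritSectRwLeaveExcl().  The MYDEVSTATE structure and pdmExampleBumpCounter()
 * are hypothetical illustrations; only the PDMCritSectRw* calls are taken from
 * this API.
 */
typedef struct MYDEVSTATE
{
    PDMCRITSECTRW   CritSectRw;     /**< Protects uCounter (hypothetical). */
    uint32_t        uCounter;       /**< Data guarded by the section (hypothetical). */
} MYDEVSTATE;

static int pdmExampleBumpCounter(PVMCC pVM, MYDEVSTATE *pDevState)
{
    /* Ring-3 waits for the section; in ring-0/raw-mode a busy section may instead
       return the rcBusy value passed here (VERR_SEM_BUSY). */
    int rc = PDMCritSectRwEnterExcl(pVM, &pDevState->CritSectRw, VERR_SEM_BUSY);
    if (RT_SUCCESS(rc))
    {
        pDevState->uCounter++;      /* exclusive (write) access to the guarded data */
        rc = PDMCritSectRwLeaveExcl(pVM, &pDevState->CritSectRw);
        AssertRC(rc);
    }
    return rc;
}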
1548
1549
1550/**
1551 * Try to enter a critical section with exclusive (write) access.
1552 *
1553 * @returns VBox status code.
1554 * @retval VINF_SUCCESS on success.
1555 * @retval rcBusy if in ring-0 or raw-mode context and it is busy.
1556 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
1557 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1558 * during the operation.
1559 *
1560 * @param pVM The cross context VM structure.
1561 * @param pThis Pointer to the read/write critical section.
1562 * @param rcBusy The status code to return when we're in RC or R0 and the
1563 * section is busy. Pass VINF_SUCCESS to acquire the
1564 * critical section through a ring-3 call if necessary.
1565 * @param uId Where we're entering the section.
1566 * @param SRC_POS The source position.
1567 * @sa PDMCritSectRwEnterExcl, PDMCritSectRwTryEnterExcl,
1568 * PDMCritSectRwTryEnterExclDebug,
1569 * PDMCritSectEnterDebug, PDMCritSectEnter,
1570 * RTCritSectRwEnterExclDebug.
1571 */
1572VMMDECL(int) PDMCritSectRwEnterExclDebug(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
1573{
1574 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
1575#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1576 return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryAgain*/, NULL, false /*fNoVal*/);
1577#else
1578 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
1579 return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryAgain*/, &SrcPos, false /*fNoVal*/);
1580#endif
1581}
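
/*
 * Usage sketch: the debug variant feeds the lock validator a source position via
 * RT_SRC_POS plus an arbitrary caller tag (uId, 0 below).  pdmExampleLockedDebug()
 * and its parameters are hypothetical; only the PDMCritSectRw* calls are real.
 */
static int pdmExampleLockedDebug(PVMCC pVM, PPDMCRITSECTRW pCritSectRw)
{
    int rc = PDMCritSectRwEnterExclDebug(pVM, pCritSectRw, VERR_SEM_BUSY, 0 /*uId*/, RT_SRC_POS);
    if (RT_SUCCESS(rc))
    {
        /* ...work on the data the section protects... */
        rc = PDMCritSectRwLeaveExcl(pVM, pCritSectRw);
    }
    return rc;
}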
1582
1583
1584/**
1585 * Try to enter a critical section with exclusive (write) access.
1586 *
1587 * @retval VINF_SUCCESS on success.
1588 * @retval VERR_SEM_BUSY if the critsect was owned.
1589 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
1590 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1591 * during the operation.
1592 *
1593 * @param pVM The cross context VM structure.
1594 * @param pThis Pointer to the read/write critical section.
1595 * @sa PDMCritSectRwEnterExcl, PDMCritSectRwTryEnterExclDebug,
1596 * PDMCritSectRwEnterExclDebug,
1597 * PDMCritSectTryEnter, PDMCritSectTryEnterDebug,
1598 * RTCritSectRwTryEnterExcl.
1599 */
1600VMMDECL(int) PDMCritSectRwTryEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis)
1601{
1602#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1603 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryAgain*/, NULL, false /*fNoVal*/);
1604#else
1605 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
1606 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryAgain*/, &SrcPos, false /*fNoVal*/);
1607#endif
1608}
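
/*
 * Usage sketch: a non-blocking attempt with PDMCritSectRwTryEnterExcl(), deferring
 * the work when the section is busy.  pdmExampleTryUpdate() and its parameters are
 * hypothetical; only the PDMCritSectRw* calls are real.
 */
static bool pdmExampleTryUpdate(PVMCC pVM, PPDMCRITSECTRW pCritSectRw, uint32_t volatile *puValue, uint32_t uNew)
{
    int rc = PDMCritSectRwTryEnterExcl(pVM, pCritSectRw);
    if (RT_SUCCESS(rc))
    {
        *puValue = uNew;                        /* we own the section exclusively */
        PDMCritSectRwLeaveExcl(pVM, pCritSectRw);
        return true;
    }
    /* rc is typically VERR_SEM_BUSY here; the caller retries or defers the work. */
    return false;
}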
1609
1610
1611/**
1612 * Try to enter a critical section with exclusive (write) access.
1613 *
1614 * @retval VINF_SUCCESS on success.
1615 * @retval VERR_SEM_BUSY if the critsect was owned.
1616 * @retval VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
1617 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1618 * during the operation.
1619 *
1620 * @param pVM The cross context VM structure.
1621 * @param pThis Pointer to the read/write critical section.
1622 * @param uId Where we're entering the section.
1623 * @param SRC_POS The source position.
1624 * @sa PDMCritSectRwTryEnterExcl, PDMCritSectRwEnterExcl,
1625 * PDMCritSectRwEnterExclDebug,
1626 * PDMCritSectTryEnterDebug, PDMCritSectTryEnter,
1627 * RTCritSectRwTryEnterExclDebug.
1628 */
1629VMMDECL(int) PDMCritSectRwTryEnterExclDebug(PVMCC pVM, PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
1630{
1631 NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
1632#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1633 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryAgain*/, NULL, false /*fNoVal*/);
1634#else
1635 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
1636 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryAgain*/, &SrcPos, false /*fNoVal*/);
1637#endif
1638}
1639
1640
1641#ifdef IN_RING3
1642/**
1643 * Enters a PDM read/write critical section with exclusive (write) access.
1644 *
1645 * @returns VINF_SUCCESS if entered successfully.
1646 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1647 * during the operation.
1648 *
1649 * @param pVM The cross context VM structure.
1650 * @param pThis Pointer to the read/write critical section.
1651 * @param fCallRing3 Whether this is a VMMRZCallRing3() request.
1652 */
1653VMMR3DECL(int) PDMR3CritSectRwEnterExclEx(PVM pVM, PPDMCRITSECTRW pThis, bool fCallRing3)
1654{
1655 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, false /*fTryAgain*/, NULL, fCallRing3 /*fNoVal*/);
1656}
1657#endif /* IN_RING3 */
1658
1659
1660/**
1661 * Leave a critical section held exclusively.
1662 *
1663 * @returns VBox status code.
1664 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1665 * during the operation.
1666 * @param pVM The cross context VM structure.
1667 * @param pThis Pointer to the read/write critical section.
1668 * @param fNoVal No validation records (i.e. queued release).
1669 * @sa PDMCritSectRwLeaveShared, RTCritSectRwLeaveExcl.
1670 */
1671static int pdmCritSectRwLeaveExclWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal)
1672{
1673 /*
1674 * Validate handle.
1675 */
1676 AssertPtr(pThis);
1677 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
1678
1679#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
1680 NOREF(fNoVal);
1681#endif
1682
1683 /*
1684 * Check ownership.
1685 */
1686 RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
1687 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
1688
1689 RTNATIVETHREAD hNativeWriter;
1690 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
1691 AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
1692
1693
1694 /*
1695 * Unwind one recursion. Not the last?
1696 */
1697 if (pThis->s.Core.cWriteRecursions != 1)
1698 {
1699#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1700 if (fNoVal)
1701 Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD);
1702 else
1703 {
1704 int rc9 = RTLockValidatorRecExclUnwind(pThis->s.Core.pValidatorWrite);
1705 if (RT_FAILURE(rc9))
1706 return rc9;
1707 }
1708#endif
1709#ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
1710 uint32_t const cDepth = --pThis->s.Core.cWriteRecursions;
1711#else
1712 uint32_t const cDepth = ASMAtomicDecU32(&pThis->s.Core.cWriteRecursions);
1713#endif
1714 AssertReturn(cDepth != 0 && cDepth < UINT32_MAX, pdmCritSectRwCorrupted(pThis, "Invalid write recursion value on leave"));
1715 return VINF_SUCCESS;
1716 }
1717
1718
1719 /*
1720 * Final recursion.
1721 */
1722 AssertReturn(pThis->s.Core.cWriterReads == 0, VERR_WRONG_ORDER); /* (must release all read recursions before the final write.) */
1723#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
1724 if (fNoVal)
1725 Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD);
1726 else
1727 {
1728 int rc9 = RTLockValidatorRecExclReleaseOwner(pThis->s.Core.pValidatorWrite, true);
1729 if (RT_FAILURE(rc9))
1730 return rc9;
1731 }
1732#endif
1733
1734
1735#ifdef RTASM_HAVE_CMP_WRITE_U128
1736 /*
1737 * See if we can get out w/o any signalling as this is a common case.
1738 */
1739 if (pdmCritSectRwIsCmpWriteU128Supported())
1740 {
1741 RTCRITSECTRWSTATE OldState;
1742 OldState.s.u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1743 if (OldState.s.u64State == ((UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)))
1744 {
1745 OldState.s.hNativeWriter = hNativeSelf;
1746 AssertCompile(sizeof(OldState.s.hNativeWriter) == sizeof(OldState.u128.s.Lo));
1747
1748 RTCRITSECTRWSTATE NewState;
1749 NewState.s.u64State = RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT;
1750 NewState.s.hNativeWriter = NIL_RTNATIVETHREAD;
1751
1752# ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
1753 pThis->s.Core.cWriteRecursions = 0;
1754# else
1755 ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 0);
1756# endif
1757 STAM_PROFILE_ADV_STOP(&pThis->s.StatWriteLocked, swl);
1758
1759 if (ASMAtomicCmpWriteU128U(&pThis->s.Core.u.u128, NewState.u128, OldState.u128))
1760 return VINF_SUCCESS;
1761
1762 /* bail out. */
1763 pThis->s.Core.cWriteRecursions = 1;
1764 }
1765 }
1766#endif /* RTASM_HAVE_CMP_WRITE_U128 */
1767
1768
1769#if defined(IN_RING3) || defined(IN_RING0)
1770 /*
1771 * Ring-3: Straightforward, just update the state and signal waiters if necessary.
1772 * Ring-0: Try to leave for real; depends on the host and context.
1773 */
1774# ifdef IN_RING0
1775 Assert(RTSemEventIsSignalSafe() == RTSemEventMultiIsSignalSafe());
1776 PVMCPUCC pVCpu = VMMGetCpu(pVM);
1777 if ( pVCpu == NULL /* non-EMT access, if we implement it must be able to block */
1778 || VMMRZCallRing3IsEnabled(pVCpu)
1779 || RTSemEventIsSignalSafe()
1780 || ( VMMR0ThreadCtxHookIsEnabled(pVCpu) /* Doesn't matter if Signal() blocks if we have hooks, ... */
1781 && RTThreadPreemptIsEnabled(NIL_RTTHREAD) /* ... and preemption is still enabled, */
1782 && ASMIntAreEnabled()) /* ... and interrupts haven't yet been disabled. Special pre-GC HM env. */
1783 )
1784# endif
1785 {
1786# ifdef PDMCRITSECTRW_WITH_LESS_ATOMIC_STUFF
1787 pThis->s.Core.cWriteRecursions = 0;
1788# else
1789 ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 0);
1790# endif
1791 STAM_PROFILE_ADV_STOP(&pThis->s.StatWriteLocked, swl);
1792 ASMAtomicWriteHandle(&pThis->s.Core.u.s.hNativeWriter, NIL_RTNATIVETHREAD);
1793
1794 for (;;)
1795 {
1796 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1797 uint64_t u64OldState = u64State;
1798
1799 uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
1800 AssertReturn(c > 0, pdmCritSectRwCorrupted(pThis, "Invalid write count on leave"));
1801 c--;
1802
1803 if ( c > 0
1804 || (u64State & RTCSRW_CNT_RD_MASK) == 0)
1805 {
1806 /*
1807 * Don't change the direction, wake up the next writer if any.
1808 */
1809 u64State &= ~RTCSRW_CNT_WR_MASK;
1810 u64State |= c << RTCSRW_CNT_WR_SHIFT;
1811 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1812 {
1813 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,LeaveExcl));
1814 int rc;
1815 if (c == 0)
1816 rc = VINF_SUCCESS;
1817# ifdef IN_RING0
1818 else if (!RTSemEventIsSignalSafe() && pVCpu != NULL)
1819 {
1820 VMMR0EMTBLOCKCTX Ctx;
1821 rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pThis, &Ctx);
1822 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
1823
1824 rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
1825
1826 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
1827 }
1828# endif
1829 else
1830 rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
1831 AssertRC(rc);
1832 return rc;
1833 }
1834 }
1835 else
1836 {
1837 /*
1838 * Reverse the direction and signal the reader threads.
1839 */
1840 u64State &= ~(RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
1841 u64State |= RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT;
1842 if (ASMAtomicCmpXchgU64(&pThis->s.Core.u.s.u64State, u64State, u64OldState))
1843 {
1844 Assert(!pThis->s.Core.fNeedReset);
1845 ASMAtomicWriteBool(&pThis->s.Core.fNeedReset, true);
1846 STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,LeaveExcl));
1847
1848 int rc;
1849# ifdef IN_RING0
1850 if (!RTSemEventMultiIsSignalSafe() && pVCpu != NULL)
1851 {
1852 VMMR0EMTBLOCKCTX Ctx;
1853 rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pThis, &Ctx);
1854 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
1855
1856 rc = SUPSemEventMultiSignal(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
1857
1858 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
1859 }
1860 else
1861# endif
1862 rc = SUPSemEventMultiSignal(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
1863 AssertRC(rc);
1864 return rc;
1865 }
1866 }
1867
1868 ASMNopPause();
1869 if (pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC)
1870 { /*likely*/ }
1871 else
1872 return VERR_SEM_DESTROYED;
1873 ASMNopPause();
1874 }
1875 /* not reached! */
1876 }
1877#endif /* IN_RING3 || IN_RING0 */
1878
1879
1880#ifndef IN_RING3
1881 /*
1882 * Queue the requested exit for ring-3 execution.
1883 */
1884# ifndef IN_RING0
1885 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
1886# endif
1887 uint32_t i = pVCpu->pdm.s.cQueuedCritSectRwExclLeaves++;
1888 LogFlow(("PDMCritSectRwLeaveExcl: [%d]=%p => R3\n", i, pThis));
1889 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwExclLeaves),
1890 ("i=%u\n", i), VERR_PDM_CRITSECTRW_IPE);
1891 pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i] = pThis->s.pSelfR3;
1892 VMM_ASSERT_RELEASE_MSG_RETURN(pVM,
1893 RT_VALID_PTR(pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i])
1894 && ((uintptr_t)pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i] & PAGE_OFFSET_MASK)
1895 == ((uintptr_t)pThis & PAGE_OFFSET_MASK),
1896 ("%p vs %p\n", pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i], pThis),
1897 pdmCritSectRwCorrupted(pThis, "Invalid self pointer on queue (excl)"));
1898 VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
1899 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
1900 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
1901 STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveExcl);
1902 return VINF_SUCCESS;
1903#endif
1904}
1905
1906
1907/**
1908 * Leave a critical section held exclusively.
1909 *
1910 * @returns VBox status code.
1911 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
1912 * during the operation.
1913 * @param pVM The cross context VM structure.
1914 * @param pThis Pointer to the read/write critical section.
1915 * @sa PDMCritSectRwLeaveShared, RTCritSectRwLeaveExcl.
1916 */
1917VMMDECL(int) PDMCritSectRwLeaveExcl(PVMCC pVM, PPDMCRITSECTRW pThis)
1918{
1919 return pdmCritSectRwLeaveExclWorker(pVM, pThis, false /*fNoVal*/);
1920}
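
/*
 * Usage sketch: exclusive ownership is recursive, so each successful enter must be
 * matched by exactly one PDMCritSectRwLeaveExcl().  pdmExampleRecursiveWrite() and
 * pCritSectRw are hypothetical; only the PDMCritSectRw* calls are real.
 */
static void pdmExampleRecursiveWrite(PVMCC pVM, PPDMCRITSECTRW pCritSectRw)
{
    int rc = PDMCritSectRwEnterExcl(pVM, pCritSectRw, VERR_SEM_BUSY);
    if (RT_SUCCESS(rc))
    {
        /* A nested enter by the current owner just bumps the recursion count. */
        rc = PDMCritSectRwEnterExcl(pVM, pCritSectRw, VERR_SEM_BUSY);
        if (RT_SUCCESS(rc))
        {
            Assert(PDMCritSectRwGetWriteRecursion(pCritSectRw) == 2);
            PDMCritSectRwLeaveExcl(pVM, pCritSectRw);   /* unwinds the nested enter */
        }
        PDMCritSectRwLeaveExcl(pVM, pCritSectRw);       /* releases the section */
    }
}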
1921
1922
1923#if defined(IN_RING3) || defined(IN_RING0)
1924/**
1925 * PDMCritSectBothFF interface.
1926 *
1927 * @param pVM The cross context VM structure.
1928 * @param pThis Pointer to the read/write critical section.
1929 */
1930void pdmCritSectRwLeaveExclQueued(PVMCC pVM, PPDMCRITSECTRW pThis)
1931{
1932 pdmCritSectRwLeaveExclWorker(pVM, pThis, true /*fNoVal*/);
1933}
1934#endif
1935
1936
1937/**
1938 * Checks whether the caller is the exclusive (write) owner of the critical section.
1939 *
1940 * @retval true if owner.
1941 * @retval false if not owner.
1942 * @param pVM The cross context VM structure.
1943 * @param pThis Pointer to the read/write critical section.
1944 * @sa PDMCritSectRwIsReadOwner, PDMCritSectIsOwner,
1945 * RTCritSectRwIsWriteOwner.
1946 */
1947VMMDECL(bool) PDMCritSectRwIsWriteOwner(PVMCC pVM, PPDMCRITSECTRW pThis)
1948{
1949 /*
1950 * Validate handle.
1951 */
1952 AssertPtr(pThis);
1953 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, false);
1954
1955 /*
1956 * Check ownership.
1957 */
1958 RTNATIVETHREAD hNativeWriter;
1959 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hNativeWriter);
1960 if (hNativeWriter == NIL_RTNATIVETHREAD)
1961 return false;
1962 return hNativeWriter == pdmCritSectRwGetNativeSelf(pVM, pThis);
1963}
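
/*
 * Usage sketch: PDMCritSectRwIsWriteOwner() is primarily meant for asserting locking
 * preconditions in code whose caller must already hold the section exclusively.
 * pdmExampleNeedsWriteLock() is hypothetical; only the PDMCritSectRw* call is real.
 */
static void pdmExampleNeedsWriteLock(PVMCC pVM, PPDMCRITSECTRW pCritSectRw)
{
    Assert(PDMCritSectRwIsWriteOwner(pVM, pCritSectRw));
    /* ...modify state documented as being protected by pCritSectRw... */
}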
1964
1965
1966/**
1967 * Checks if the caller is one of the read owners of the critical section.
1968 *
1969 * @note !CAUTION! This API doesn't work reliably if lock validation isn't
1970 * enabled. Meaning, the answer is not trustworthy unless
1971 * RT_LOCK_STRICT or PDMCRITSECTRW_STRICT was defined at build time.
1972 * Also, make sure you do not use RTCRITSECTRW_FLAGS_NO_LOCK_VAL when
1973 * creating the semaphore. And finally, if you used a locking class,
1974 * don't disable deadlock detection by setting cMsMinDeadlock to
1975 * RT_INDEFINITE_WAIT.
1976 *
1977 * In short, only use this for assertions.
1978 *
1979 * @returns @c true if reader, @c false if not.
1980 * @param pVM The cross context VM structure.
1981 * @param pThis Pointer to the read/write critical section.
1982 * @param fWannaHear What you'd like to hear when lock validation is not
1983 * available. (For avoiding asserting all over the place.)
1984 * @sa PDMCritSectRwIsWriteOwner, RTCritSectRwIsReadOwner.
1985 */
1986VMMDECL(bool) PDMCritSectRwIsReadOwner(PVMCC pVM, PPDMCRITSECTRW pThis, bool fWannaHear)
1987{
1988 /*
1989 * Validate handle.
1990 */
1991 AssertPtr(pThis);
1992 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, false);
1993
1994 /*
1995 * Inspect the state.
1996 */
1997 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
1998 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
1999 {
2000 /*
2001 * It's in write mode, so we can only be a reader if we're also the
2002 * current writer.
2003 */
2004 RTNATIVETHREAD hWriter;
2005 ASMAtomicUoReadHandle(&pThis->s.Core.u.s.hNativeWriter, &hWriter);
2006 if (hWriter == NIL_RTNATIVETHREAD)
2007 return false;
2008 return hWriter == pdmCritSectRwGetNativeSelf(pVM, pThis);
2009 }
2010
2011 /*
2012 * Read mode. If there are no current readers, then we cannot be a reader.
2013 */
2014 if (!(u64State & RTCSRW_CNT_RD_MASK))
2015 return false;
2016
2017#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
2018 /*
2019 * Ask the lock validator.
2020 * Note! It doesn't know everything, let's deal with that if it becomes an issue...
2021 */
2022 NOREF(fWannaHear);
2023 return RTLockValidatorRecSharedIsOwner(pThis->s.Core.pValidatorRead, NIL_RTTHREAD);
2024#else
2025 /*
2026 * OK, we don't know, so just tell the caller what they want to hear.
2027 */
2028 return fWannaHear;
2029#endif
2030}
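
/*
 * Usage sketch: since the read-owner check is only reliable with lock validation
 * enabled, it belongs in assertions; the fWannaHear argument (true below) is what
 * the function returns when it simply cannot tell.  pdmExampleNeedsReadLock() is
 * hypothetical; only the PDMCritSectRw* call is real.
 */
static void pdmExampleNeedsReadLock(PVMCC pVM, PPDMCRITSECTRW pCritSectRw)
{
    Assert(PDMCritSectRwIsReadOwner(pVM, pCritSectRw, true /*fWannaHear*/));
    /* ...read state documented as being protected by pCritSectRw... */
}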
2031
2032
2033/**
2034 * Gets the write recursion count.
2035 *
2036 * @returns The write recursion count (0 if bad critsect).
2037 * @param pThis Pointer to the read/write critical section.
2038 * @sa PDMCritSectRwGetWriterReadRecursion, PDMCritSectRwGetReadCount,
2039 * RTCritSectRwGetWriteRecursion.
2040 */
2041VMMDECL(uint32_t) PDMCritSectRwGetWriteRecursion(PPDMCRITSECTRW pThis)
2042{
2043 /*
2044 * Validate handle.
2045 */
2046 AssertPtr(pThis);
2047 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);
2048
2049 /*
2050 * Return the requested data.
2051 */
2052 return pThis->s.Core.cWriteRecursions;
2053}
2054
2055
2056/**
2057 * Gets the read recursion count of the current writer.
2058 *
2059 * @returns The read recursion count (0 if bad critsect).
2060 * @param pThis Pointer to the read/write critical section.
2061 * @sa PDMCritSectRwGetWriteRecursion, PDMCritSectRwGetReadCount,
2062 * RTCritSectRwGetWriterReadRecursion.
2063 */
2064VMMDECL(uint32_t) PDMCritSectRwGetWriterReadRecursion(PPDMCRITSECTRW pThis)
2065{
2066 /*
2067 * Validate handle.
2068 */
2069 AssertPtr(pThis);
2070 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);
2071
2072 /*
2073 * Return the requested data.
2074 */
2075 return pThis->s.Core.cWriterReads;
2076}
2077
2078
2079/**
2080 * Gets the current number of reads.
2081 *
2082 * This includes all read recursions, so it might be higher than the number of
2083 * read owners. It does not include reads done by the current writer.
2084 *
2085 * @returns The read count (0 if bad critsect).
2086 * @param pThis Pointer to the read/write critical section.
2087 * @sa PDMCritSectRwGetWriteRecursion, PDMCritSectRwGetWriterReadRecursion,
2088 * RTCritSectRwGetReadCount.
2089 */
2090VMMDECL(uint32_t) PDMCritSectRwGetReadCount(PPDMCRITSECTRW pThis)
2091{
2092 /*
2093 * Validate input.
2094 */
2095 AssertPtr(pThis);
2096 AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);
2097
2098 /*
2099 * Return the requested data.
2100 */
2101 uint64_t u64State = PDMCRITSECTRW_READ_STATE(&pThis->s.Core.u.s.u64State);
2102 if ((u64State & RTCSRW_DIR_MASK) != (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
2103 return 0;
2104 return (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
2105}
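
/*
 * Usage sketch: the three getters above are informational and typically end up in
 * debug/info output.  pdmExampleDumpRwState() and the log format are hypothetical;
 * only the PDMCritSectRw* calls are real.
 */
static void pdmExampleDumpRwState(PPDMCRITSECTRW pCritSectRw)
{
    LogRel(("critsect-rw: %u readers, write recursion %u, writer read recursion %u\n",
            PDMCritSectRwGetReadCount(pCritSectRw),
            PDMCritSectRwGetWriteRecursion(pCritSectRw),
            PDMCritSectRwGetWriterReadRecursion(pCritSectRw)));
}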
2106
2107
2108/**
2109 * Checks if the read/write critical section is initialized or not.
2110 *
2111 * @retval true if initialized.
2112 * @retval false if not initialized.
2113 * @param pThis Pointer to the read/write critical section.
2114 * @sa PDMCritSectIsInitialized, RTCritSectRwIsInitialized.
2115 */
2116VMMDECL(bool) PDMCritSectRwIsInitialized(PCPDMCRITSECTRW pThis)
2117{
2118 AssertPtr(pThis);
2119 return pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC;
2120}
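
/*
 * Usage sketch: PDMCritSectRwIsInitialized() is handy as a guard in teardown or
 * init-failure paths where the section may not have been set up yet.
 * pdmExampleSafeDump() is hypothetical; only the PDMCritSectRw* calls are real.
 */
static void pdmExampleSafeDump(PPDMCRITSECTRW pCritSectRw)
{
    if (PDMCritSectRwIsInitialized(pCritSectRw))
        LogRel(("critsect-rw: %u readers\n", PDMCritSectRwGetReadCount(pCritSectRw)));
    else
        LogRel(("critsect-rw: not initialized\n"));
}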
2121