VirtualBox

source: vbox/trunk/src/VBox/Runtime/generic/critsectrw-generic.cpp@ 107675

最後變更 在這個檔案從107675是 107609,由 vboxsync 提交於 2 月 前

VMM/PDMCritSectRw,IPRT/RTCritSectRw: Fixed harmless issue in the enter-exclusive path. bugref:3409

  • 屬性 svn:eol-style 設為 native
  • 屬性 svn:keywords 設為 Author Date Id Revision
檔案大小: 41.1 KB
 
1/* $Id: critsectrw-generic.cpp 107609 2025-01-09 20:19:40Z vboxsync $ */
2/** @file
3 * IPRT - Read/Write Critical Section, Generic.
4 */
5
6/*
7 * Copyright (C) 2009-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
11 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * The contents of this file may alternatively be used under the terms
26 * of the Common Development and Distribution License Version 1.0
27 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
28 * in the VirtualBox distribution, in which case the provisions of the
29 * CDDL are applicable instead of those of the GPL.
30 *
31 * You may elect to license modified versions of this file under the
32 * terms and conditions of either the GPL or the CDDL or both.
33 *
34 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
35 */
36
37
38/*********************************************************************************************************************************
39* Header Files *
40*********************************************************************************************************************************/
41#define RTCRITSECTRW_WITHOUT_REMAPPING
42#define RTASSERT_QUIET
43#include <iprt/critsect.h>
44#include "internal/iprt.h"
45
46#include <iprt/asm.h>
47#include <iprt/assert.h>
48#include <iprt/err.h>
49#include <iprt/lockvalidator.h>
50#include <iprt/mem.h>
51#include <iprt/semaphore.h>
52#include <iprt/thread.h>
53
54#include "internal/magics.h"
55#include "internal/strict.h"
56
57/* Two issues here, (1) the tracepoint generator uses IPRT, and (2) only one .d
58 file per module. */
59#ifdef IPRT_WITH_DTRACE
60# include IPRT_DTRACE_INCLUDE
61# ifdef IPRT_DTRACE_PREFIX
62# define IPRT_CRITSECTRW_EXCL_ENTERED RT_CONCAT(IPRT_DTRACE_PREFIX,IPRT_CRITSECTRW_EXCL_ENTERED)
63# define IPRT_CRITSECTRW_EXCL_ENTERED_ENABLED RT_CONCAT(IPRT_DTRACE_PREFIX,IPRT_CRITSECTRW_EXCL_ENTERED_ENABLED)
64# define IPRT_CRITSECTRW_EXCL_LEAVING RT_CONCAT(IPRT_DTRACE_PREFIX,IPRT_CRITSECTRW_EXCL_LEAVING)
65# define IPRT_CRITSECTRW_EXCL_LEAVING_ENABLED RT_CONCAT(IPRT_DTRACE_PREFIX,IPRT_CRITSECTRW_EXCL_LEAVING_ENABLED)
66# define IPRT_CRITSECTRW_EXCL_BUSY RT_CONCAT(IPRT_DTRACE_PREFIX,IPRT_CRITSECTRW_EXCL_BUSY)
67# define IPRT_CRITSECTRW_EXCL_WAITING RT_CONCAT(IPRT_DTRACE_PREFIX,IPRT_CRITSECTRW_EXCL_WAITING)
68# define IPRT_CRITSECTRW_EXCL_ENTERED_SHARED RT_CONCAT(IPRT_DTRACE_PREFIX,IPRT_CRITSECTRW_EXCL_ENTERED_SHARED)
69# define IPRT_CRITSECTRW_EXCL_LEAVING_SHARED RT_CONCAT(IPRT_DTRACE_PREFIX,IPRT_CRITSECTRW_EXCL_LEAVING_SHARED)
70# define IPRT_CRITSECTRW_SHARED_ENTERED RT_CONCAT(IPRT_DTRACE_PREFIX,IPRT_CRITSECTRW_SHARED_ENTERED)
71# define IPRT_CRITSECTRW_SHARED_LEAVING RT_CONCAT(IPRT_DTRACE_PREFIX,IPRT_CRITSECTRW_SHARED_LEAVING)
72# define IPRT_CRITSECTRW_SHARED_BUSY RT_CONCAT(IPRT_DTRACE_PREFIX,IPRT_CRITSECTRW_SHARED_BUSY)
73# define IPRT_CRITSECTRW_SHARED_WAITING RT_CONCAT(IPRT_DTRACE_PREFIX,IPRT_CRITSECTRW_SHARED_WAITING)
74# endif
75#else
76# define IPRT_CRITSECTRW_EXCL_ENTERED(a_pvCritSect, a_pszName, a_cNestings, a_cWaitingReaders, a_cWriters) do {} while (0)
77# define IPRT_CRITSECTRW_EXCL_ENTERED_ENABLED() (false)
78# define IPRT_CRITSECTRW_EXCL_LEAVING(a_pvCritSect, a_pszName, a_cNestings, a_cWaitingReaders, a_cWriters) do {} while (0)
79# define IPRT_CRITSECTRW_EXCL_LEAVING_ENABLED() (false)
80# define IPRT_CRITSECTRW_EXCL_BUSY( a_pvCritSect, a_pszName, a_fWriteMode, a_cWaitingReaders, a_cReaders, cWriters, a_pvNativeOwnerThread) do {} while (0)
81# define IPRT_CRITSECTRW_EXCL_WAITING(a_pvCritSect, a_pszName, a_fWriteMode, a_cWaitingReaders, a_cReaders, cWriters, a_pvNativeOwnerThread) do {} while (0)
82# define IPRT_CRITSECTRW_EXCL_ENTERED_SHARED(a_pvCritSect, a_pszName, a_cNestings, a_cWaitingReaders, a_cWriters) do {} while (0)
83# define IPRT_CRITSECTRW_EXCL_LEAVING_SHARED(a_pvCritSect, a_pszName, a_cNestings, a_cWaitingReaders, a_cWriters) do {} while (0)
84# define IPRT_CRITSECTRW_SHARED_ENTERED(a_pvCritSect, a_pszName, a_cReaders, a_cWaitingWriters) do {} while (0)
85# define IPRT_CRITSECTRW_SHARED_LEAVING(a_pvCritSect, a_pszName, a_cReaders, a_cWaitingWriters) do {} while (0)
86# define IPRT_CRITSECTRW_SHARED_BUSY( a_pvCritSect, a_pszName, a_pvNativeOwnerThread, a_cWaitingReaders, a_cWriters) do {} while (0)
87# define IPRT_CRITSECTRW_SHARED_WAITING(a_pvCritSect, a_pszName, a_pvNativeOwnerThread, a_cWaitingReaders, a_cWriters) do {} while (0)
88#endif
89
90
91
/**
 * Initializes a read/write critical section with default settings and the
 * generic name "RTCritSectRw".
 *
 * @returns IPRT status code (forwarded from RTCritSectRwInitEx).
 * @param   pThis   Pointer to the uninitialized read/write critical section.
 */
RTDECL(int) RTCritSectRwInit(PRTCRITSECTRW pThis)
{
    return RTCritSectRwInitEx(pThis, 0, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, "RTCritSectRw");
}
RT_EXPORT_SYMBOL(RTCritSectRwInit);
97
98
/**
 * Initializes a read/write critical section with default settings and a
 * caller supplied name (used by the lock validator in strict builds).
 *
 * @returns IPRT status code (forwarded from RTCritSectRwInitEx).
 * @param   pThis       Pointer to the uninitialized read/write critical section.
 * @param   pszName     The name of the critical section.
 */
RTDECL(int) RTCritSectRwInitNamed(PRTCRITSECTRW pThis, const char *pszName)
{
    return RTCritSectRwInitEx(pThis, 0, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, "%s", pszName);
}
RT_EXPORT_SYMBOL(RTCritSectRwInitNamed);
104
105
106RTDECL(int) RTCritSectRwInitEx(PRTCRITSECTRW pThis, uint32_t fFlags,
107 RTLOCKVALCLASS hClass, uint32_t uSubClass, const char *pszNameFmt, ...)
108{
109 int rc;
110 AssertReturn(!(fFlags & ~( RTCRITSECT_FLAGS_NO_NESTING | RTCRITSECT_FLAGS_NO_LOCK_VAL | RTCRITSECT_FLAGS_BOOTSTRAP_HACK
111 | RTCRITSECT_FLAGS_NOP )),
112 VERR_INVALID_PARAMETER);
113 RT_NOREF_PV(hClass); RT_NOREF_PV(uSubClass); RT_NOREF_PV(pszNameFmt);
114
115
116 /*
117 * Initialize the structure, allocate the lock validator stuff and sems.
118 */
119 pThis->u32Magic = RTCRITSECTRW_MAGIC_DEAD;
120 pThis->fNeedReset = false;
121#ifdef IN_RING0
122 pThis->fFlags = (uint16_t)(fFlags | RTCRITSECT_FLAGS_RING0);
123#else
124 pThis->fFlags = (uint16_t)(fFlags & ~RTCRITSECT_FLAGS_RING0);
125#endif
126 pThis->u.u128.s.Hi = 0;
127 pThis->u.u128.s.Lo = 0;
128 pThis->u.s.hNativeWriter= NIL_RTNATIVETHREAD;
129 AssertCompile(sizeof(pThis->u.u128) >= sizeof(pThis->u.s));
130 pThis->cWriterReads = 0;
131 pThis->cWriteRecursions = 0;
132 pThis->hEvtWrite = NIL_RTSEMEVENT;
133 pThis->hEvtRead = NIL_RTSEMEVENTMULTI;
134 pThis->pValidatorWrite = NULL;
135 pThis->pValidatorRead = NULL;
136
137#ifdef RTCRITSECTRW_STRICT
138 bool const fLVEnabled = !(fFlags & RTCRITSECT_FLAGS_NO_LOCK_VAL);
139 if (!pszNameFmt)
140 {
141 static uint32_t volatile s_iAnon = 0;
142 uint32_t i = ASMAtomicIncU32(&s_iAnon) - 1;
143 rc = RTLockValidatorRecExclCreate(&pThis->pValidatorWrite, hClass, uSubClass, pThis,
144 fLVEnabled, "RTCritSectRw-%u", i);
145 if (RT_SUCCESS(rc))
146 rc = RTLockValidatorRecSharedCreate(&pThis->pValidatorRead, hClass, uSubClass, pThis,
147 false /*fSignaller*/, fLVEnabled, "RTCritSectRw-%u", i);
148 }
149 else
150 {
151 va_list va;
152 va_start(va, pszNameFmt);
153 rc = RTLockValidatorRecExclCreateV(&pThis->pValidatorWrite, hClass, uSubClass, pThis,
154 fLVEnabled, pszNameFmt, va);
155 va_end(va);
156 if (RT_SUCCESS(rc))
157 {
158 va_start(va, pszNameFmt);
159 RTLockValidatorRecSharedCreateV(&pThis->pValidatorRead, hClass, uSubClass, pThis,
160 false /*fSignaller*/, fLVEnabled, pszNameFmt, va);
161 va_end(va);
162 }
163 }
164 if (RT_SUCCESS(rc))
165 rc = RTLockValidatorRecMakeSiblings(&pThis->pValidatorWrite->Core, &pThis->pValidatorRead->Core);
166
167 if (RT_SUCCESS(rc))
168#endif
169 {
170 rc = RTSemEventMultiCreate(&pThis->hEvtRead);
171 if (RT_SUCCESS(rc))
172 {
173 rc = RTSemEventCreate(&pThis->hEvtWrite);
174 if (RT_SUCCESS(rc))
175 {
176 pThis->u32Magic = RTCRITSECTRW_MAGIC;
177 return VINF_SUCCESS;
178 }
179 RTSemEventMultiDestroy(pThis->hEvtRead);
180 }
181 }
182
183#ifdef RTCRITSECTRW_STRICT
184 RTLockValidatorRecSharedDestroy(&pThis->pValidatorRead);
185 RTLockValidatorRecExclDestroy(&pThis->pValidatorWrite);
186#endif
187 return rc;
188}
189RT_EXPORT_SYMBOL(RTCritSectRwInitEx);
190
191
192RTDECL(uint32_t) RTCritSectRwSetSubClass(PRTCRITSECTRW pThis, uint32_t uSubClass)
193{
194 AssertPtrReturn(pThis, RTLOCKVAL_SUB_CLASS_INVALID);
195 AssertReturn(pThis->u32Magic == RTCRITSECTRW_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
196#ifdef IN_RING0
197 Assert(pThis->fFlags & RTCRITSECT_FLAGS_RING0);
198#else
199 Assert(!(pThis->fFlags & RTCRITSECT_FLAGS_RING0));
200#endif
201#ifdef RTCRITSECTRW_STRICT
202 AssertReturn(!(pThis->fFlags & RTCRITSECT_FLAGS_NOP), RTLOCKVAL_SUB_CLASS_INVALID);
203
204 RTLockValidatorRecSharedSetSubClass(pThis->pValidatorRead, uSubClass);
205 return RTLockValidatorRecExclSetSubClass(pThis->pValidatorWrite, uSubClass);
206#else
207 NOREF(uSubClass);
208 return RTLOCKVAL_SUB_CLASS_INVALID;
209#endif
210}
211RT_EXPORT_SYMBOL(RTCritSectRwSetSubClass);
212
213
/**
 * Worker for acquiring the critical section in shared (read) mode.
 *
 * The 64-bit state word packs the current direction, the reader count, the
 * waiting-reader count and the writer count; all transitions are done with
 * compare-and-exchange loops on that word.
 *
 * @returns VINF_SUCCESS on success, VERR_SEM_BUSY when fTryOnly and the
 *          section is held in write mode by another thread, VERR_SEM_DESTROYED
 *          if destroyed while (or before) waiting, or a lock validator status
 *          code in strict builds.
 * @param   pThis       The read/write critical section.
 * @param   pSrcPos     Source position for the lock validator (strict builds
 *                      only, may be NULL otherwise).
 * @param   fTryOnly    Whether to fail rather than block.
 */
static int rtCritSectRwEnterShared(PRTCRITSECTRW pThis, PCRTLOCKVALSRCPOS pSrcPos, bool fTryOnly)
{
    /*
     * Validate input.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
#ifdef IN_RING0
    Assert(pThis->fFlags & RTCRITSECT_FLAGS_RING0);
#else
    Assert(!(pThis->fFlags & RTCRITSECT_FLAGS_RING0));
#endif
    RT_NOREF_PV(pSrcPos);

#ifdef RTCRITSECTRW_STRICT
    RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
    if (!fTryOnly)
    {
        /* Check lock order against the record matching how we will actually
           take the lock: exclusive record if this is a writer doing a read
           recursion, shared record otherwise. */
        int rc9;
        RTNATIVETHREAD hNativeWriter;
        ASMAtomicUoReadHandle(&pThis->u.s.hNativeWriter, &hNativeWriter);
        if (hNativeWriter != NIL_RTTHREAD && hNativeWriter == RTThreadNativeSelf())
            rc9 = RTLockValidatorRecExclCheckOrder(pThis->pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
        else
            rc9 = RTLockValidatorRecSharedCheckOrder(pThis->pValidatorRead, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Get cracking...
     */
    uint64_t u64State = ASMAtomicReadU64(&pThis->u.s.u64State);
    uint64_t u64OldState = u64State;

    for (;;)
    {
        if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
        {
            /* It flows in the right direction, try follow it before it changes. */
            uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
            c++;
            Assert(c < RTCSRW_CNT_MASK / 2);
            u64State &= ~RTCSRW_CNT_RD_MASK;
            u64State |= c << RTCSRW_CNT_RD_SHIFT;
            if (ASMAtomicCmpXchgU64(&pThis->u.s.u64State, u64State, u64OldState))
            {
#ifdef RTCRITSECTRW_STRICT
                RTLockValidatorRecSharedAddOwner(pThis->pValidatorRead, hThreadSelf, pSrcPos);
#endif
                break;
            }
        }
        else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
        {
            /* Wrong direction, but we're alone here and can simply try switch the direction. */
            u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
            u64State |= (UINT64_C(1) << RTCSRW_CNT_RD_SHIFT) | (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT);
            if (ASMAtomicCmpXchgU64(&pThis->u.s.u64State, u64State, u64OldState))
            {
                Assert(!pThis->fNeedReset);
#ifdef RTCRITSECTRW_STRICT
                RTLockValidatorRecSharedAddOwner(pThis->pValidatorRead, hThreadSelf, pSrcPos);
#endif
                break;
            }
        }
        else
        {
            /* Is the writer perhaps doing a read recursion? */
            RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
            RTNATIVETHREAD hNativeWriter;
            ASMAtomicUoReadHandle(&pThis->u.s.hNativeWriter, &hNativeWriter);
            if (hNativeSelf == hNativeWriter)
            {
                /* Yes: track the read recursion in cWriterReads instead of
                   touching the shared state word. */
#ifdef RTCRITSECTRW_STRICT
                int rc9 = RTLockValidatorRecExclRecursionMixed(pThis->pValidatorWrite, &pThis->pValidatorRead->Core, pSrcPos);
                if (RT_FAILURE(rc9))
                    return rc9;
#endif
                Assert(pThis->cWriterReads < UINT32_MAX / 2);
                uint32_t const cReads = ASMAtomicIncU32(&pThis->cWriterReads); NOREF(cReads);
                IPRT_CRITSECTRW_EXCL_ENTERED_SHARED(pThis, NULL,
                                                    cReads + pThis->cWriteRecursions,
                                                    (uint32_t)((u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT),
                                                    (uint32_t)((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT));

                return VINF_SUCCESS; /* don't break! */
            }

            /* If we're only trying, return already. */
            if (fTryOnly)
            {
                IPRT_CRITSECTRW_SHARED_BUSY(pThis, NULL,
                                            (void *)pThis->u.s.hNativeWriter,
                                            (uint32_t)((u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT),
                                            (uint32_t)((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT));
                return VERR_SEM_BUSY;
            }

            /* Add ourselves to the queue and wait for the direction to change. */
            uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
            c++;
            Assert(c < RTCSRW_CNT_MASK / 2);

            uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
            cWait++;
            Assert(cWait <= c);
            Assert(cWait < RTCSRW_CNT_MASK / 2);

            u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
            u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);

            if (ASMAtomicCmpXchgU64(&pThis->u.s.u64State, u64State, u64OldState))
            {
                IPRT_CRITSECTRW_SHARED_WAITING(pThis, NULL,
                                               (void *)pThis->u.s.hNativeWriter,
                                               (uint32_t)((u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT),
                                               (uint32_t)((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT));
                for (uint32_t iLoop = 0; ; iLoop++)
                {
                    int rc;
#ifdef RTCRITSECTRW_STRICT
                    rc = RTLockValidatorRecSharedCheckBlocking(pThis->pValidatorRead, hThreadSelf, pSrcPos, true,
                                                               RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_READ, false);
                    if (RT_SUCCESS(rc))
#elif defined(IN_RING3)
                    RTTHREAD hThreadSelf = RTThreadSelf();
                    RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
#endif
                    {
                        /* Block on the multi-release event until a leaving
                           writer flips the direction to read. */
                        rc = RTSemEventMultiWait(pThis->hEvtRead, RT_INDEFINITE_WAIT);
#ifdef IN_RING3
                        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
#endif
                        if (pThis->u32Magic != RTCRITSECTRW_MAGIC)
                            return VERR_SEM_DESTROYED;
                    }
                    if (RT_FAILURE(rc))
                    {
                        /* Decrement the counts and return the error. */
                        for (;;)
                        {
                            u64OldState = u64State = ASMAtomicReadU64(&pThis->u.s.u64State);
                            c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT; Assert(c > 0);
                            c--;
                            cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT; Assert(cWait > 0);
                            cWait--;
                            u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
                            u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);
                            if (ASMAtomicCmpXchgU64(&pThis->u.s.u64State, u64State, u64OldState))
                                break;
                        }
                        return rc;
                    }

                    Assert(pThis->fNeedReset);
                    u64State = ASMAtomicReadU64(&pThis->u.s.u64State);
                    if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
                        break;
                    AssertMsg(iLoop < 1, ("%u\n", iLoop));
                }

                /* Decrement the wait count and maybe reset the semaphore (if we're last). */
                for (;;)
                {
                    u64OldState = u64State;

                    cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
                    Assert(cWait > 0);
                    cWait--;
                    u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
                    u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;

                    if (ASMAtomicCmpXchgU64(&pThis->u.s.u64State, u64State, u64OldState))
                    {
                        if (cWait == 0)
                        {
                            /* Last waiter out resets the event for the next
                               read-to-write-to-read cycle. */
                            if (ASMAtomicXchgBool(&pThis->fNeedReset, false))
                            {
                                int rc = RTSemEventMultiReset(pThis->hEvtRead);
                                AssertRCReturn(rc, rc);
                            }
                        }
                        break;
                    }
                    u64State = ASMAtomicReadU64(&pThis->u.s.u64State);
                }

#ifdef RTCRITSECTRW_STRICT
                RTLockValidatorRecSharedAddOwner(pThis->pValidatorRead, hThreadSelf, pSrcPos);
#endif
                break;
            }
        }

        if (pThis->u32Magic != RTCRITSECTRW_MAGIC)
            return VERR_SEM_DESTROYED;

        /* CAS lost the race; reread the state and retry. */
        ASMNopPause();
        u64State = ASMAtomicReadU64(&pThis->u.s.u64State);
        u64OldState = u64State;
    }

    /* got it! */
    Assert((ASMAtomicReadU64(&pThis->u.s.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT));
    IPRT_CRITSECTRW_SHARED_ENTERED(pThis, NULL,
                                   (uint32_t)((u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT),
                                   (uint32_t)((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT));
    return VINF_SUCCESS;
}
426
427
428RTDECL(int) RTCritSectRwEnterShared(PRTCRITSECTRW pThis)
429{
430#ifndef RTCRITSECTRW_STRICT
431 return rtCritSectRwEnterShared(pThis, NULL, false /*fTryOnly*/);
432#else
433 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
434 return rtCritSectRwEnterShared(pThis, &SrcPos, false /*fTryOnly*/);
435#endif
436}
437RT_EXPORT_SYMBOL(RTCritSectRwEnterShared);
438
439
/**
 * Enters the read/write critical section in shared (read) mode, recording
 * the caller's source position for the lock validator.
 *
 * @returns IPRT status code (see rtCritSectRwEnterShared).
 * @param   pThis   The read/write critical section.
 * @param   uId     Caller identifier, typically the return address.
 * @param   RT_SRC_POS_DECL     The caller's source position.
 */
RTDECL(int) RTCritSectRwEnterSharedDebug(PRTCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return rtCritSectRwEnterShared(pThis, &SrcPos, false /*fTryOnly*/);
}
RT_EXPORT_SYMBOL(RTCritSectRwEnterSharedDebug);
446
447
448RTDECL(int) RTCritSectRwTryEnterShared(PRTCRITSECTRW pThis)
449{
450#ifndef RTCRITSECTRW_STRICT
451 return rtCritSectRwEnterShared(pThis, NULL, true /*fTryOnly*/);
452#else
453 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
454 return rtCritSectRwEnterShared(pThis, &SrcPos, true /*fTryOnly*/);
455#endif
456}
457RT_EXPORT_SYMBOL(RTCritSectRwEnterShared);
458
459
460RTDECL(int) RTCritSectRwTryEnterSharedDebug(PRTCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
461{
462 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
463 return rtCritSectRwEnterShared(pThis, &SrcPos, true /*fTryOnly*/);
464}
465RT_EXPORT_SYMBOL(RTCritSectRwEnterSharedDebug);
466
467
468
/**
 * Leaves the read/write critical section previously entered in shared (read)
 * mode.
 *
 * Handles both a genuine shared release (read direction) and the unwinding
 * of a writer's read recursion (write direction, tracked via cWriterReads).
 *
 * @returns VINF_SUCCESS, VERR_NOT_OWNER if the caller doesn't hold the
 *          section appropriately, or a lock validator status code in strict
 *          builds.
 * @param   pThis   The read/write critical section.
 */
RTDECL(int) RTCritSectRwLeaveShared(PRTCRITSECTRW pThis)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
#ifdef IN_RING0
    Assert(pThis->fFlags & RTCRITSECT_FLAGS_RING0);
#else
    Assert(!(pThis->fFlags & RTCRITSECT_FLAGS_RING0));
#endif

    /*
     * Check the direction and take action accordingly.
     */
    uint64_t u64State = ASMAtomicReadU64(&pThis->u.s.u64State);
    uint64_t u64OldState = u64State;
    if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
    {
#ifdef RTCRITSECTRW_STRICT
        int rc9 = RTLockValidatorRecSharedCheckAndRelease(pThis->pValidatorRead, NIL_RTTHREAD);
        if (RT_FAILURE(rc9))
            return rc9;
#endif
        IPRT_CRITSECTRW_SHARED_LEAVING(pThis, NULL,
                                       (uint32_t)((u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT) - 1,
                                       (uint32_t)((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT));

        for (;;)
        {
            uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
            AssertReturn(c > 0, VERR_NOT_OWNER);
            c--;

            if (   c > 0
                || (u64State & RTCSRW_CNT_WR_MASK) == 0)
            {
                /* Don't change the direction. */
                u64State &= ~RTCSRW_CNT_RD_MASK;
                u64State |= c << RTCSRW_CNT_RD_SHIFT;
                if (ASMAtomicCmpXchgU64(&pThis->u.s.u64State, u64State, u64OldState))
                    break;
            }
            else
            {
                /* Last reader out with writers queued: reverse the direction
                   and signal the waiting writer (one-shot event). */
                u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_DIR_MASK);
                u64State |= RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT;
                if (ASMAtomicCmpXchgU64(&pThis->u.s.u64State, u64State, u64OldState))
                {
                    int rc = RTSemEventSignal(pThis->hEvtWrite);
                    AssertRC(rc);
                    break;
                }
            }

            /* CAS lost the race; reread and retry. */
            ASMNopPause();
            u64State = ASMAtomicReadU64(&pThis->u.s.u64State);
            u64OldState = u64State;
        }
    }
    else
    {
        /* Write direction: this must be the owning writer unwinding a read
           recursion made while holding the section exclusively. */
        RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
        RTNATIVETHREAD hNativeWriter;
        ASMAtomicUoReadHandle(&pThis->u.s.hNativeWriter, &hNativeWriter);
        AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
        AssertReturn(pThis->cWriterReads > 0, VERR_NOT_OWNER);
#ifdef RTCRITSECTRW_STRICT
        int rc = RTLockValidatorRecExclUnwindMixed(pThis->pValidatorWrite, &pThis->pValidatorRead->Core);
        if (RT_FAILURE(rc))
            return rc;
#endif
        uint32_t cReads = ASMAtomicDecU32(&pThis->cWriterReads); NOREF(cReads);
        IPRT_CRITSECTRW_EXCL_LEAVING_SHARED(pThis, NULL,
                                            cReads + pThis->cWriteRecursions,
                                            (uint32_t)((u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT),
                                            (uint32_t)((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT));
    }

    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTCritSectRwLeaveShared);
553
554
/**
 * Worker for acquiring the critical section in exclusive (write) mode.
 *
 * Increments the writer count in the packed state word, then competes for
 * ownership by CAS'ing its native thread handle into hNativeWriter; losers
 * block on the write event semaphore until the owner leaves.
 *
 * @returns VINF_SUCCESS on success, VERR_SEM_BUSY when fTryOnly and the
 *          section can't be claimed immediately, VERR_SEM_DESTROYED if
 *          destroyed while waiting, or a lock validator status code in
 *          strict builds.
 * @param   pThis       The read/write critical section.
 * @param   pSrcPos     Source position for the lock validator (strict builds
 *                      only, may be NULL otherwise).
 * @param   fTryOnly    Whether to fail rather than block.
 */
static int rtCritSectRwEnterExcl(PRTCRITSECTRW pThis, PCRTLOCKVALSRCPOS pSrcPos, bool fTryOnly)
{
    /*
     * Validate input.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
#ifdef IN_RING0
    Assert(pThis->fFlags & RTCRITSECT_FLAGS_RING0);
#else
    Assert(!(pThis->fFlags & RTCRITSECT_FLAGS_RING0));
#endif
    RT_NOREF_PV(pSrcPos);

#ifdef RTCRITSECTRW_STRICT
    RTTHREAD hThreadSelf = NIL_RTTHREAD;
    if (!fTryOnly)
    {
        hThreadSelf = RTThreadSelfAutoAdopt();
        int rc9 = RTLockValidatorRecExclCheckOrder(pThis->pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Check if we're already the owner and just recursing.
     */
    RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
    RTNATIVETHREAD hNativeWriter;
    ASMAtomicUoReadHandle(&pThis->u.s.hNativeWriter, &hNativeWriter);
    if (hNativeSelf == hNativeWriter)
    {
        Assert((ASMAtomicReadU64(&pThis->u.s.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));
#ifdef RTCRITSECTRW_STRICT
        int rc9 = RTLockValidatorRecExclRecursion(pThis->pValidatorWrite, pSrcPos);
        if (RT_FAILURE(rc9))
            return rc9;
#endif
        /* Recursion is tracked in cWriteRecursions; the state word is untouched. */
        Assert(pThis->cWriteRecursions < UINT32_MAX / 2);
        uint32_t cNestings = ASMAtomicIncU32(&pThis->cWriteRecursions); NOREF(cNestings);

#ifdef IPRT_WITH_DTRACE
        if (IPRT_CRITSECTRW_EXCL_ENTERED_ENABLED())
        {
            uint64_t u64State = ASMAtomicReadU64(&pThis->u.s.u64State);
            IPRT_CRITSECTRW_EXCL_ENTERED(pThis, NULL, cNestings + pThis->cWriterReads,
                                         (uint32_t)((u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT),
                                         (uint32_t)((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT));
        }
#endif
        return VINF_SUCCESS;
    }

    /*
     * Get cracking.
     */
    uint64_t u64State = ASMAtomicReadU64(&pThis->u.s.u64State);
    uint64_t u64OldState = u64State;

    for (;;)
    {
        if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
        {
            /* It flows in the right direction, try follow it before it changes. */
            uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
            c++;
            Assert(c < RTCSRW_CNT_MASK / 2);
            u64State &= ~RTCSRW_CNT_WR_MASK;
            u64State |= c << RTCSRW_CNT_WR_SHIFT;
            if (ASMAtomicCmpXchgU64(&pThis->u.s.u64State, u64State, u64OldState))
                break;
        }
        else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
        {
            /* Wrong direction, but we're alone here and can simply try switch the direction. */
            u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
            u64State |= (UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
            if (ASMAtomicCmpXchgU64(&pThis->u.s.u64State, u64State, u64OldState))
                break;
        }
        else if (fTryOnly)
            /* Wrong direction and we're not supposed to wait, just return. */
            return VERR_SEM_BUSY;
        else
        {
            /* Add ourselves to the write count and break out to do the wait. */
            uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
            c++;
            Assert(c < RTCSRW_CNT_MASK / 2);
            u64State &= ~RTCSRW_CNT_WR_MASK;
            u64State |= c << RTCSRW_CNT_WR_SHIFT;
            if (ASMAtomicCmpXchgU64(&pThis->u.s.u64State, u64State, u64OldState))
                break;
        }

        if (pThis->u32Magic != RTCRITSECTRW_MAGIC)
            return VERR_SEM_DESTROYED;

        /* CAS lost the race; reread and retry. */
        ASMNopPause();
        u64State = ASMAtomicReadU64(&pThis->u.s.u64State);
        u64OldState = u64State;
    }

    /*
     * If we're in write mode now try grab the ownership. Play fair if there
     * are threads already waiting.
     */
    bool fDone = (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
              && (   ((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT) == 1
                  || fTryOnly);
    if (fDone)
        ASMAtomicCmpXchgHandle(&pThis->u.s.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
    if (!fDone)
    {
        /*
         * If only trying, undo the above writer incrementation and return.
         */
        if (fTryOnly)
        {
            for (;;)
            {
                u64OldState = u64State = ASMAtomicReadU64(&pThis->u.s.u64State);
                uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT; Assert(c > 0);
                c--;
                u64State &= ~RTCSRW_CNT_WR_MASK;
                u64State |= c << RTCSRW_CNT_WR_SHIFT;
                if (ASMAtomicCmpXchgU64(&pThis->u.s.u64State, u64State, u64OldState))
                    break;
            }
            IPRT_CRITSECTRW_EXCL_BUSY(pThis, NULL,
                                      (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT) /*fWrite*/,
                                      (uint32_t)((u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT),
                                      (uint32_t)((u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT),
                                      (uint32_t)((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT),
                                      (void *)pThis->u.s.hNativeWriter);
            return VERR_SEM_BUSY;
        }

        /*
         * Wait for our turn.
         */
        IPRT_CRITSECTRW_EXCL_WAITING(pThis, NULL,
                                     (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT) /*fWrite*/,
                                     (uint32_t)((u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT),
                                     (uint32_t)((u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT),
                                     (uint32_t)((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT),
                                     (void *)pThis->u.s.hNativeWriter);
        for (uint32_t iLoop = 0; ; iLoop++)
        {
            int rc;
#ifdef RTCRITSECTRW_STRICT
            if (hThreadSelf == NIL_RTTHREAD)
                hThreadSelf = RTThreadSelfAutoAdopt();
            rc = RTLockValidatorRecExclCheckBlocking(pThis->pValidatorWrite, hThreadSelf, pSrcPos, true,
                                                     RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_WRITE, false);
            if (RT_SUCCESS(rc))
#elif defined(IN_RING3)
            RTTHREAD hThreadSelf = RTThreadSelf();
            RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, false);
#endif
            {
                /* Block until the current owner (reader group or writer)
                   leaves and signals the write event. */
                rc = RTSemEventWait(pThis->hEvtWrite, RT_INDEFINITE_WAIT);
#ifdef IN_RING3
                RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
#endif
                if (pThis->u32Magic != RTCRITSECTRW_MAGIC)
                    return VERR_SEM_DESTROYED;
            }
            if (RT_FAILURE(rc))
            {
                /* Decrement the counts and return the error. */
                for (;;)
                {
                    u64OldState = u64State = ASMAtomicReadU64(&pThis->u.s.u64State);
                    uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT; Assert(c > 0);
                    c--;
                    u64State &= ~RTCSRW_CNT_WR_MASK;
                    u64State |= c << RTCSRW_CNT_WR_SHIFT;
                    if (ASMAtomicCmpXchgU64(&pThis->u.s.u64State, u64State, u64OldState))
                        break;
                }
                return rc;
            }

            /* Woken up: try claim ownership if still in write direction. */
            u64State = ASMAtomicReadU64(&pThis->u.s.u64State);
            if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
            {
                ASMAtomicCmpXchgHandle(&pThis->u.s.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
                if (fDone)
                    break;
            }
            AssertMsg(iLoop < 1000, ("%u\n", iLoop)); /* may loop a few times here... */
        }
    }

    /*
     * Got it!
     */
    Assert((ASMAtomicReadU64(&pThis->u.s.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));
    ASMAtomicWriteU32(&pThis->cWriteRecursions, 1);
    Assert(pThis->cWriterReads == 0);
#ifdef RTCRITSECTRW_STRICT
    RTLockValidatorRecExclSetOwner(pThis->pValidatorWrite, hThreadSelf, pSrcPos, true);
#endif
    IPRT_CRITSECTRW_EXCL_ENTERED(pThis, NULL, 1,
                                 (uint32_t)((u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT),
                                 (uint32_t)((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT));

    return VINF_SUCCESS;
}
766
767
768RTDECL(int) RTCritSectRwEnterExcl(PRTCRITSECTRW pThis)
769{
770#ifndef RTCRITSECTRW_STRICT
771 return rtCritSectRwEnterExcl(pThis, NULL, false /*fTryAgain*/);
772#else
773 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
774 return rtCritSectRwEnterExcl(pThis, &SrcPos, false /*fTryAgain*/);
775#endif
776}
777RT_EXPORT_SYMBOL(RTCritSectRwEnterExcl);
778
779
/**
 * Enters the read/write critical section in exclusive (write) mode,
 * recording the caller's source position for the lock validator.
 *
 * @returns IPRT status code (see rtCritSectRwEnterExcl).
 * @param   pThis   The read/write critical section.
 * @param   uId     Caller identifier, typically the return address.
 * @param   RT_SRC_POS_DECL     The caller's source position.
 */
RTDECL(int) RTCritSectRwEnterExclDebug(PRTCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return rtCritSectRwEnterExcl(pThis, &SrcPos, false /*fTryAgain*/);
}
RT_EXPORT_SYMBOL(RTCritSectRwEnterExclDebug);
786
787
788RTDECL(int) RTCritSectRwTryEnterExcl(PRTCRITSECTRW pThis)
789{
790#ifndef RTCRITSECTRW_STRICT
791 return rtCritSectRwEnterExcl(pThis, NULL, true /*fTryAgain*/);
792#else
793 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
794 return rtCritSectRwEnterExcl(pThis, &SrcPos, true /*fTryAgain*/);
795#endif
796}
797RT_EXPORT_SYMBOL(RTCritSectRwTryEnterExcl);
798
799
/**
 * Tries to enter the read/write critical section in exclusive (write) mode
 * without blocking, recording the caller's source position for the lock
 * validator.
 *
 * @returns VINF_SUCCESS on success, VERR_SEM_BUSY if owned by others, or
 *          another IPRT status code (see rtCritSectRwEnterExcl).
 * @param   pThis   The read/write critical section.
 * @param   uId     Caller identifier, typically the return address.
 * @param   RT_SRC_POS_DECL     The caller's source position.
 */
RTDECL(int) RTCritSectRwTryEnterExclDebug(PRTCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return rtCritSectRwEnterExcl(pThis, &SrcPos, true /*fTryAgain*/);
}
RT_EXPORT_SYMBOL(RTCritSectRwTryEnterExclDebug);
806
807
/**
 * Leaves the read/write critical section previously entered in exclusive
 * (write) mode.
 *
 * Unwinds one recursion level; on the final release it clears the owner,
 * then either hands the section to the next queued writer or reverses the
 * direction and releases all waiting readers.
 *
 * @returns VINF_SUCCESS, VERR_NOT_OWNER if the caller is not the writer,
 *          VERR_WRONG_ORDER if read recursions are still outstanding,
 *          VERR_SEM_DESTROYED if destroyed under us, or a lock validator
 *          status code in strict builds.
 * @param   pThis   The read/write critical section.
 */
RTDECL(int) RTCritSectRwLeaveExcl(PRTCRITSECTRW pThis)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
#ifdef IN_RING0
    Assert(pThis->fFlags & RTCRITSECT_FLAGS_RING0);
#else
    Assert(!(pThis->fFlags & RTCRITSECT_FLAGS_RING0));
#endif

    RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
    RTNATIVETHREAD hNativeWriter;
    ASMAtomicUoReadHandle(&pThis->u.s.hNativeWriter, &hNativeWriter);
    AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);

    /*
     * Unwind a recursion.
     */
    if (pThis->cWriteRecursions == 1)
    {
        AssertReturn(pThis->cWriterReads == 0, VERR_WRONG_ORDER); /* (must release all read recursions before the final write.) */
#ifdef RTCRITSECTRW_STRICT
        int rc9 = RTLockValidatorRecExclReleaseOwner(pThis->pValidatorWrite, true);
        if (RT_FAILURE(rc9))
            return rc9;
#endif
        /*
         * Update the state.
         */
        ASMAtomicWriteU32(&pThis->cWriteRecursions, 0);
        ASMAtomicWriteHandle(&pThis->u.s.hNativeWriter, NIL_RTNATIVETHREAD);

        uint64_t u64State = ASMAtomicReadU64(&pThis->u.s.u64State);
        IPRT_CRITSECTRW_EXCL_LEAVING(pThis, NULL, 0,
                                     (uint32_t)((u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT),
                                     (uint32_t)((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT));

        for (;;)
        {
            uint64_t u64OldState = u64State;

            uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
            Assert(c > 0);
            c--;

            if (   c > 0
                || (u64State & RTCSRW_CNT_RD_MASK) == 0)
            {
                /* Don't change the direction, wake up the next writer if any. */
                u64State &= ~RTCSRW_CNT_WR_MASK;
                u64State |= c << RTCSRW_CNT_WR_SHIFT;
                if (ASMAtomicCmpXchgU64(&pThis->u.s.u64State, u64State, u64OldState))
                {
                    if (c > 0)
                    {
                        int rc = RTSemEventSignal(pThis->hEvtWrite);
                        AssertRC(rc);
                    }
                    break;
                }
            }
            else
            {
                /* Reverse the direction and signal the reader threads. */
                u64State &= ~(RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
                u64State |= RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT;
                if (ASMAtomicCmpXchgU64(&pThis->u.s.u64State, u64State, u64OldState))
                {
                    /* The last reader to wake up will reset hEvtRead again
                       (see fNeedReset handling in rtCritSectRwEnterShared). */
                    Assert(!pThis->fNeedReset);
                    ASMAtomicWriteBool(&pThis->fNeedReset, true);
                    int rc = RTSemEventMultiSignal(pThis->hEvtRead);
                    AssertRC(rc);
                    break;
                }
            }

            /* CAS lost the race; reread and retry. */
            ASMNopPause();
            if (pThis->u32Magic != RTCRITSECTRW_MAGIC)
                return VERR_SEM_DESTROYED;
            u64State = ASMAtomicReadU64(&pThis->u.s.u64State);
        }
    }
    else
    {
        /* Just pop one recursion level; ownership is retained. */
        Assert(pThis->cWriteRecursions != 0);
#ifdef RTCRITSECTRW_STRICT
        int rc9 = RTLockValidatorRecExclUnwind(pThis->pValidatorWrite);
        if (RT_FAILURE(rc9))
            return rc9;
#endif
        uint32_t cNestings = ASMAtomicDecU32(&pThis->cWriteRecursions); NOREF(cNestings);
#ifdef IPRT_WITH_DTRACE
        if (IPRT_CRITSECTRW_EXCL_LEAVING_ENABLED())
        {
            uint64_t u64State = ASMAtomicReadU64(&pThis->u.s.u64State);
            IPRT_CRITSECTRW_EXCL_LEAVING(pThis, NULL, cNestings + pThis->cWriterReads,
                                         (uint32_t)((u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT),
                                         (uint32_t)((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT));
        }
#endif
    }

    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTCritSectRwLeaveExcl);
916
917
918RTDECL(bool) RTCritSectRwIsWriteOwner(PRTCRITSECTRW pThis)
919{
920 /*
921 * Validate handle.
922 */
923 AssertPtr(pThis);
924 AssertReturn(pThis->u32Magic == RTCRITSECTRW_MAGIC, false);
925#ifdef IN_RING0
926 Assert(pThis->fFlags & RTCRITSECT_FLAGS_RING0);
927#else
928 Assert(!(pThis->fFlags & RTCRITSECT_FLAGS_RING0));
929#endif
930
931 /*
932 * Check ownership.
933 */
934 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
935 RTNATIVETHREAD hNativeWriter;
936 ASMAtomicUoReadHandle(&pThis->u.s.hNativeWriter, &hNativeWriter);
937 return hNativeWriter == hNativeSelf;
938}
939RT_EXPORT_SYMBOL(RTCritSectRwIsWriteOwner);
940
941
942RTDECL(bool) RTCritSectRwIsReadOwner(PRTCRITSECTRW pThis, bool fWannaHear)
943{
944 RT_NOREF_PV(fWannaHear);
945
946 /*
947 * Validate handle.
948 */
949 AssertPtr(pThis);
950 AssertReturn(pThis->u32Magic == RTCRITSECTRW_MAGIC, false);
951#ifdef IN_RING0
952 Assert(pThis->fFlags & RTCRITSECT_FLAGS_RING0);
953#else
954 Assert(!(pThis->fFlags & RTCRITSECT_FLAGS_RING0));
955#endif
956
957 /*
958 * Inspect the state.
959 */
960 uint64_t u64State = ASMAtomicReadU64(&pThis->u.s.u64State);
961 if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
962 {
963 /*
964 * It's in write mode, so we can only be a reader if we're also the
965 * current writer.
966 */
967 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
968 RTNATIVETHREAD hWriter;
969 ASMAtomicUoReadHandle(&pThis->u.s.hNativeWriter, &hWriter);
970 return hWriter == hNativeSelf;
971 }
972
973 /*
974 * Read mode. If there are no current readers, then we cannot be a reader.
975 */
976 if (!(u64State & RTCSRW_CNT_RD_MASK))
977 return false;
978
979#ifdef RTCRITSECTRW_STRICT
980 /*
981 * Ask the lock validator.
982 */
983 return RTLockValidatorRecSharedIsOwner(pThis->pValidatorRead, NIL_RTTHREAD);
984#else
985 /*
986 * Ok, we don't know, just tell the caller what he want to hear.
987 */
988 return fWannaHear;
989#endif
990}
991RT_EXPORT_SYMBOL(RTCritSectRwIsReadOwner);
992
993
994RTDECL(uint32_t) RTCritSectRwGetWriteRecursion(PRTCRITSECTRW pThis)
995{
996 /*
997 * Validate handle.
998 */
999 AssertPtr(pThis);
1000 AssertReturn(pThis->u32Magic == RTCRITSECTRW_MAGIC, 0);
1001
1002 /*
1003 * Return the requested data.
1004 */
1005 return pThis->cWriteRecursions;
1006}
1007RT_EXPORT_SYMBOL(RTCritSectRwGetWriteRecursion);
1008
1009
1010RTDECL(uint32_t) RTCritSectRwGetWriterReadRecursion(PRTCRITSECTRW pThis)
1011{
1012 /*
1013 * Validate handle.
1014 */
1015 AssertPtr(pThis);
1016 AssertReturn(pThis->u32Magic == RTCRITSECTRW_MAGIC, 0);
1017
1018 /*
1019 * Return the requested data.
1020 */
1021 return pThis->cWriterReads;
1022}
1023RT_EXPORT_SYMBOL(RTCritSectRwGetWriterReadRecursion);
1024
1025
1026RTDECL(uint32_t) RTCritSectRwGetReadCount(PRTCRITSECTRW pThis)
1027{
1028 /*
1029 * Validate input.
1030 */
1031 AssertPtr(pThis);
1032 AssertReturn(pThis->u32Magic == RTCRITSECTRW_MAGIC, 0);
1033
1034 /*
1035 * Return the requested data.
1036 */
1037 uint64_t u64State = ASMAtomicReadU64(&pThis->u.s.u64State);
1038 if ((u64State & RTCSRW_DIR_MASK) != (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
1039 return 0;
1040 return (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
1041}
1042RT_EXPORT_SYMBOL(RTCritSectRwGetReadCount);
1043
1044
1045RTDECL(int) RTCritSectRwDelete(PRTCRITSECTRW pThis)
1046{
1047 /*
1048 * Assert free waiters and so on.
1049 */
1050 AssertPtr(pThis);
1051 Assert(pThis->u32Magic == RTCRITSECTRW_MAGIC);
1052 //Assert(pThis->cNestings == 0);
1053 //Assert(pThis->cLockers == -1);
1054 Assert(pThis->u.s.hNativeWriter == NIL_RTNATIVETHREAD);
1055#ifdef IN_RING0
1056 Assert(pThis->fFlags & RTCRITSECT_FLAGS_RING0);
1057#else
1058 Assert(!(pThis->fFlags & RTCRITSECT_FLAGS_RING0));
1059#endif
1060
1061 /*
1062 * Invalidate the structure and free the semaphores.
1063 */
1064 if (!ASMAtomicCmpXchgU32(&pThis->u32Magic, RTCRITSECTRW_MAGIC_DEAD, RTCRITSECTRW_MAGIC))
1065 return VERR_INVALID_PARAMETER;
1066
1067 pThis->fFlags = 0;
1068 pThis->u.s.u64State = 0;
1069
1070 RTSEMEVENT hEvtWrite = pThis->hEvtWrite;
1071 pThis->hEvtWrite = NIL_RTSEMEVENT;
1072 RTSEMEVENTMULTI hEvtRead = pThis->hEvtRead;
1073 pThis->hEvtRead = NIL_RTSEMEVENTMULTI;
1074
1075 int rc1 = RTSemEventDestroy(hEvtWrite); AssertRC(rc1);
1076 int rc2 = RTSemEventMultiDestroy(hEvtRead); AssertRC(rc2);
1077
1078#ifndef IN_RING0
1079 RTLockValidatorRecSharedDestroy(&pThis->pValidatorRead);
1080 RTLockValidatorRecExclDestroy(&pThis->pValidatorWrite);
1081#endif
1082
1083 return RT_SUCCESS(rc1) ? rc2 : rc1;
1084}
1085RT_EXPORT_SYMBOL(RTCritSectRwDelete);
1086
注意: 瀏覽 TracBrowser 來幫助您使用儲存庫瀏覽器

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette