VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSectRw.cpp@90380

Last change on this file since 90380 was 90348, checked in by vboxsync, 4 years ago

VMM: Removed the VM pointers from the internal critsect structures. bugref:9218 bugref:10074

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 55.3 KB

/* $Id: PDMAllCritSectRw.cpp 90348 2021-07-26 21:01:38Z vboxsync $ */
/** @file
 * IPRT - Read/Write Critical Section, Generic.
 */

/*
 * Copyright (C) 2009-2020 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_PDM_CRITSECT
#include "PDMInternal.h"
#include <VBox/vmm/pdmcritsectrw.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/vmcc.h>
#include <VBox/err.h>
#include <VBox/vmm/hm.h>

#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/asm-amd64-x86.h>
#include <iprt/assert.h>
#ifdef IN_RING3
# include <iprt/lockvalidator.h>
# include <iprt/semaphore.h>
#endif
#if defined(IN_RING3) || defined(IN_RING0)
# include <iprt/thread.h>
#endif


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/** The number of loops to spin for shared access in ring-3. */
#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R3        20
/** The number of loops to spin for shared access in ring-0. */
#define PDMCRITSECTRW_SHRD_SPIN_COUNT_R0        128
/** The number of loops to spin for shared access in the raw-mode context. */
#define PDMCRITSECTRW_SHRD_SPIN_COUNT_RC        128

/** The number of loops to spin for exclusive access in ring-3. */
#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R3        20
/** The number of loops to spin for exclusive access in ring-0. */
#define PDMCRITSECTRW_EXCL_SPIN_COUNT_R0        256
/** The number of loops to spin for exclusive access in the raw-mode context. */
#define PDMCRITSECTRW_EXCL_SPIN_COUNT_RC        256


/* Undefine the automatic VBOX_STRICT API mappings. */
#undef PDMCritSectRwEnterExcl
#undef PDMCritSectRwTryEnterExcl
#undef PDMCritSectRwEnterShared
#undef PDMCritSectRwTryEnterShared


/**
 * Gets the ring-3 native thread handle of the calling thread.
 *
 * @returns native thread handle (ring-3).
 * @param   pVM     The cross context VM structure.
 * @param   pThis   The read/write critical section.  This is only used in
 *                  R0 and RC.
 */
DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectRwGetNativeSelf(PVMCC pVM, PCPDMCRITSECTRW pThis)
{
#ifdef IN_RING3
    RT_NOREF(pVM, pThis);
    RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
#else
    AssertMsgReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, ("%RX32\n", pThis->s.Core.u32Magic),
                    NIL_RTNATIVETHREAD);
    PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
    RTNATIVETHREAD hNativeSelf = pVCpu ? pVCpu->hNativeThread : NIL_RTNATIVETHREAD;
    Assert(hNativeSelf != NIL_RTNATIVETHREAD);
#endif
    return hNativeSelf;
}





#ifdef IN_RING3
/**
 * Changes the lock validator sub-class of the read/write critical section.
 *
 * It is recommended to try to make sure that nobody is using this critical
 * section while changing the value.
 *
 * @returns The old sub-class.  RTLOCKVAL_SUB_CLASS_INVALID is returned if the
 *          lock validator isn't compiled in or either of the parameters are
 *          invalid.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   uSubClass   The new sub-class value.
 */
VMMDECL(uint32_t) PDMR3CritSectRwSetSubClass(PPDMCRITSECTRW pThis, uint32_t uSubClass)
{
    AssertPtrReturn(pThis, RTLOCKVAL_SUB_CLASS_INVALID);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, RTLOCKVAL_SUB_CLASS_INVALID);
# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    AssertReturn(!(pThis->s.Core.fFlags & RTCRITSECT_FLAGS_NOP), RTLOCKVAL_SUB_CLASS_INVALID);

    RTLockValidatorRecSharedSetSubClass(pThis->s.Core.pValidatorRead, uSubClass);
    return RTLockValidatorRecExclSetSubClass(pThis->s.Core.pValidatorWrite, uSubClass);
# else
    NOREF(uSubClass);
    return RTLOCKVAL_SUB_CLASS_INVALID;
# endif
}
#endif /* IN_RING3 */
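
/*
 * Editor's note: an illustrative use of PDMR3CritSectRwSetSubClass during
 * instance construction, sketch only.  The pThisCC name and the choice of
 * RTLOCKVAL_SUB_CLASS_USER + 1 are assumptions for the example, not taken
 * from this file:
 *
 *     // Raise the sub-class so the lock validator permits taking this
 *     // section while holding another section of the default sub-class.
 *     uint32_t uOldSubClass = PDMR3CritSectRwSetSubClass(&pThisCC->CritSectRw,
 *                                                        RTLOCKVAL_SUB_CLASS_USER + 1);
 *     // uOldSubClass is RTLOCKVAL_SUB_CLASS_INVALID when the validator
 *     // is not compiled in; callers typically ignore that case.
 */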


#ifdef IN_RING0
/**
 * Go back to ring-3 so the kernel can do signals, APCs and other fun things.
 *
 * @param   pVM     The cross context VM structure.
 */
static void pdmR0CritSectRwYieldToRing3(PVMCC pVM)
{
    PVMCPUCC pVCpu = VMMGetCpu(pVM);
    AssertPtrReturnVoid(pVCpu);
    int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_PREEMPT, NULL);
    AssertRC(rc);
}
#endif /* IN_RING0 */


/**
 * Worker that enters a read/write critical section with shared access.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The busy return code for ring-0 and ring-3.
 * @param   fTryOnly    Only try enter it, don't wait.
 * @param   pSrcPos     The source position. (Can be NULL.)
 * @param   fNoVal      No validation records.
 */
static int pdmCritSectRwEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly,
                                    PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
{
    /*
     * Validate input.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);

#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    NOREF(pSrcPos);
    NOREF(fNoVal);
#endif
#ifdef IN_RING3
    NOREF(rcBusy);
    NOREF(pVM);
#endif

#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
    if (!fTryOnly)
    {
        int rc9;
        RTNATIVETHREAD hNativeWriter;
        ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
        if (hNativeWriter != NIL_RTTHREAD && hNativeWriter == pdmCritSectRwGetNativeSelf(pVM, pThis))
            rc9 = RTLockValidatorRecExclCheckOrder(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
        else
            rc9 = RTLockValidatorRecSharedCheckOrder(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Get cracking...
     */
    uint64_t u64State    = ASMAtomicReadU64(&pThis->s.Core.u64State);
    uint64_t u64OldState = u64State;

    for (;;)
    {
        if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
        {
            /* It flows in the right direction, try follow it before it changes. */
            uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
            c++;
            Assert(c < RTCSRW_CNT_MASK / 2);
            u64State &= ~RTCSRW_CNT_RD_MASK;
            u64State |= c << RTCSRW_CNT_RD_SHIFT;
            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
            {
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
                if (!fNoVal)
                    RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
#endif
                break;
            }
        }
        else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
        {
            /* Wrong direction, but we're alone here and can simply try switch the direction. */
            u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
            u64State |= (UINT64_C(1) << RTCSRW_CNT_RD_SHIFT) | (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT);
            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
            {
                Assert(!pThis->s.Core.fNeedReset);
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
                if (!fNoVal)
                    RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
#endif
                break;
            }
        }
        else
        {
            /* Is the writer perhaps doing a read recursion? */
            RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
            RTNATIVETHREAD hNativeWriter;
            ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
            if (hNativeSelf == hNativeWriter)
            {
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
                if (!fNoVal)
                {
                    int rc9 = RTLockValidatorRecExclRecursionMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core, pSrcPos);
                    if (RT_FAILURE(rc9))
                        return rc9;
                }
#endif
                Assert(pThis->s.Core.cWriterReads < UINT32_MAX / 2);
                ASMAtomicIncU32(&pThis->s.Core.cWriterReads);
                STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared));
                return VINF_SUCCESS; /* don't break! */
            }

            /*
             * If we're only trying, return already.
             */
            if (fTryOnly)
            {
                STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterShared));
                return VERR_SEM_BUSY;
            }

#if defined(IN_RING3) || defined(IN_RING0)
# ifdef IN_RING0
            if (   RTThreadPreemptIsEnabled(NIL_RTTHREAD)
                && ASMIntAreEnabled())
# endif
            {
                /*
                 * Add ourselves to the queue and wait for the direction to change.
                 */
                uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
                c++;
                Assert(c < RTCSRW_CNT_MASK / 2);

                uint64_t cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
                cWait++;
                Assert(cWait <= c);
                Assert(cWait < RTCSRW_CNT_MASK / 2);

                u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
                u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);

                if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                {
                    for (uint32_t iLoop = 0; ; iLoop++)
                    {
                        int rc;
# ifdef IN_RING3
#  if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
                        rc = RTLockValidatorRecSharedCheckBlocking(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos, true,
                                                                   RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_READ, false);
                        if (RT_SUCCESS(rc))
#  else
                        RTTHREAD hThreadSelf = RTThreadSelf();
                        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_READ, false);
#  endif
# endif
                        {
                            for (;;)
                            {
                                rc = SUPSemEventMultiWaitNoResume(pVM->pSession,
                                                                  (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead,
                                                                  RT_INDEFINITE_WAIT);
                                if (   rc != VERR_INTERRUPTED
                                    || pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
                                    break;
# ifdef IN_RING0
                                pdmR0CritSectRwYieldToRing3(pVM);
# endif
                            }
# ifdef IN_RING3
                            RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_READ);
# endif
                            if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
                                return VERR_SEM_DESTROYED;
                        }
                        if (RT_FAILURE(rc))
                        {
                            /* Decrement the counts and return the error. */
                            for (;;)
                            {
                                u64OldState = u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                                c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT; Assert(c > 0);
                                c--;
                                cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT; Assert(cWait > 0);
                                cWait--;
                                u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_WAIT_CNT_RD_MASK);
                                u64State |= (c << RTCSRW_CNT_RD_SHIFT) | (cWait << RTCSRW_WAIT_CNT_RD_SHIFT);
                                if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                                    break;
                            }
                            return rc;
                        }

                        Assert(pThis->s.Core.fNeedReset);
                        u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                        if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
                            break;
                        AssertMsg(iLoop < 1, ("%u\n", iLoop));
                    }

                    /* Decrement the wait count and maybe reset the semaphore (if we're last). */
                    for (;;)
                    {
                        u64OldState = u64State;

                        cWait = (u64State & RTCSRW_WAIT_CNT_RD_MASK) >> RTCSRW_WAIT_CNT_RD_SHIFT;
                        Assert(cWait > 0);
                        cWait--;
                        u64State &= ~RTCSRW_WAIT_CNT_RD_MASK;
                        u64State |= cWait << RTCSRW_WAIT_CNT_RD_SHIFT;

                        if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                        {
                            if (cWait == 0)
                            {
                                if (ASMAtomicXchgBool(&pThis->s.Core.fNeedReset, false))
                                {
                                    int rc = SUPSemEventMultiReset(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
                                    AssertRCReturn(rc, rc);
                                }
                            }
                            break;
                        }
                        u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                    }

# if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
                    if (!fNoVal)
                        RTLockValidatorRecSharedAddOwner(pThis->s.Core.pValidatorRead, hThreadSelf, pSrcPos);
# endif
                    break;
                }
            }
#endif /* IN_RING3 || IN_RING0 */
#ifndef IN_RING3
# ifdef IN_RING0
            else
# endif
            {
                /*
                 * We cannot call SUPSemEventMultiWaitNoResume in this context.  Go
                 * back to ring-3 and do it there or return rcBusy.
                 */
                STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterShared));
                if (rcBusy == VINF_SUCCESS)
                {
                    PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
                    /** @todo Should actually do this via VMMR0.cpp instead of going all
                     *        the way back to ring-3.  Goes for both kinds of crit sects. */
                    return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_SHARED, MMHyperCCToR3(pVM, pThis));
                }
                return rcBusy;
            }
#endif /* !IN_RING3 */
        }

        if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
            return VERR_SEM_DESTROYED;

        ASMNopPause();
        u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
        u64OldState = u64State;
    }

    /* got it! */
    STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterShared));
    Assert((ASMAtomicReadU64(&pThis->s.Core.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT));
    return VINF_SUCCESS;
}
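
/*
 * Editor's note: a compact sketch of the shared-enter fast path above, for
 * orientation.  It manipulates the packed u64State word using the RTCSRW_*
 * masks and shifts from iprt/critsect.h; treat the exact bit layout as an
 * implementation detail:
 *
 *     uint64_t u64OldState = ASMAtomicReadU64(&pThis->s.Core.u64State);
 *     if ((u64OldState & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
 *     {
 *         // Read direction: bump the reader count field...
 *         uint64_t c = ((u64OldState & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT) + 1;
 *         uint64_t u64NewState = (u64OldState & ~RTCSRW_CNT_RD_MASK)
 *                              | (c << RTCSRW_CNT_RD_SHIFT);
 *         // ...and publish it with a CAS.  On failure another thread changed
 *         // the word first; the enter loop re-reads the state and retries.
 *         if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64NewState, u64OldState))
 *             return VINF_SUCCESS; // now held in shared mode
 *     }
 */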


/**
 * Enter a critical section with shared (read) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  rcBusy if in ring-0 or raw-mode context and it is busy.
 * @retval  VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The status code to return when we're in RC or R0 and the
 *                      section is busy.  Pass VINF_SUCCESS to acquire the
 *                      critical section through a ring-3 call if necessary.
 * @sa      PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterShared,
 *          PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwLeaveShared,
 *          RTCritSectRwEnterShared.
 */
VMMDECL(int) PDMCritSectRwEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy)
{
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}
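
/*
 * Editor's note: typical shared (read) usage, sketch only.  The pThis device
 * state, its CritSectRw and uSharedState members, and the choice of
 * VINF_IOM_R3_MMIO_READ as the busy status are illustrative assumptions:
 *
 *     int rc = PDMCritSectRwEnterShared(pVM, &pThis->CritSectRw, VINF_IOM_R3_MMIO_READ);
 *     if (rc == VINF_SUCCESS)
 *     {
 *         uint32_t uValue = pThis->uSharedState;      // read-only access
 *         PDMCritSectRwLeaveShared(pVM, &pThis->CritSectRw);
 *     }
 *     else
 *         return rc; // VERR_SEM_DESTROYED, or rcBusy when contended in R0/RC
 */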


/**
 * Enter a critical section with shared (read) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  rcBusy if in ring-0 or raw-mode context and it is busy.
 * @retval  VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The status code to return when we're in RC or R0 and the
 *                      section is busy.  Pass VINF_SUCCESS to acquire the
 *                      critical section through a ring-3 call if necessary.
 * @param   uId         Where we're entering the section.
 * @param   SRC_POS     The source position.
 * @sa      PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
 *          PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwLeaveShared,
 *          RTCritSectRwEnterSharedDebug.
 */
VMMDECL(int) PDMCritSectRwEnterSharedDebug(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectRwEnterShared(pVM, pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}


/**
 * Try enter a critical section with shared (read) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwTryEnterSharedDebug, PDMCritSectRwEnterShared,
 *          PDMCritSectRwEnterSharedDebug, PDMCritSectRwLeaveShared,
 *          RTCritSectRwTryEnterShared.
 */
VMMDECL(int) PDMCritSectRwTryEnterShared(PVMCC pVM, PPDMCRITSECTRW pThis)
{
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}
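
/*
 * Editor's note: the try variant never blocks and never goes to ring-3, so
 * it suits paths that can simply skip the work when contended.  Sketch; the
 * pThis names are illustrative:
 *
 *     if (PDMCritSectRwTryEnterShared(pVM, &pThis->CritSectRw) == VINF_SUCCESS)
 *     {
 *         // ... inspect shared state ...
 *         PDMCritSectRwLeaveShared(pVM, &pThis->CritSectRw);
 *     }
 *     // else: VERR_SEM_BUSY (or VERR_SEM_DESTROYED); fall through and
 *     // retry on the next invocation instead of waiting.
 */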


/**
 * Try enter a critical section with shared (read) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   uId         Where we're entering the section.
 * @param   SRC_POS     The source position.
 * @sa      PDMCritSectRwTryEnterShared, PDMCritSectRwEnterShared,
 *          PDMCritSectRwEnterSharedDebug, PDMCritSectRwLeaveShared,
 *          RTCritSectRwTryEnterSharedDebug.
 */
VMMDECL(int) PDMCritSectRwTryEnterSharedDebug(PVMCC pVM, PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}


#ifdef IN_RING3
/**
 * Enters a PDM read/write critical section with shared (read) access.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   fCallRing3  Whether this is a VMMRZCallRing3() request.
 */
VMMR3DECL(int) PDMR3CritSectRwEnterSharedEx(PVM pVM, PPDMCRITSECTRW pThis, bool fCallRing3)
{
    return pdmCritSectRwEnterShared(pVM, pThis, VERR_SEM_BUSY, false /*fTryOnly*/, NULL, fCallRing3 /*fNoVal*/);
}
#endif


/**
 * Leave a critical section held with shared access.
 *
 * @returns VBox status code.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 * @param   pVM         The cross context VM structure.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   fNoVal      No validation records (i.e. queued release).
 * @sa      PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
 *          PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterSharedDebug,
 *          PDMCritSectRwLeaveExcl, RTCritSectRwLeaveShared.
 */
static int pdmCritSectRwLeaveSharedWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);

#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    NOREF(fNoVal);
#endif

    /*
     * Check the direction and take action accordingly.
     */
    uint64_t u64State    = ASMAtomicReadU64(&pThis->s.Core.u64State);
    uint64_t u64OldState = u64State;
    if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
    {
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
        if (fNoVal)
            Assert(!RTLockValidatorRecSharedIsOwner(pThis->s.Core.pValidatorRead, NIL_RTTHREAD));
        else
        {
            int rc9 = RTLockValidatorRecSharedCheckAndRelease(pThis->s.Core.pValidatorRead, NIL_RTTHREAD);
            if (RT_FAILURE(rc9))
                return rc9;
        }
#endif
        for (;;)
        {
            uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
            AssertReturn(c > 0, VERR_NOT_OWNER);
            c--;

            if (   c > 0
                || (u64State & RTCSRW_CNT_WR_MASK) == 0)
            {
                /* Don't change the direction. */
                u64State &= ~RTCSRW_CNT_RD_MASK;
                u64State |= c << RTCSRW_CNT_RD_SHIFT;
                if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                    break;
            }
            else
            {
#if defined(IN_RING3) || defined(IN_RING0)
# ifdef IN_RING0
                if (   RTThreadPreemptIsEnabled(NIL_RTTHREAD)
                    && ASMIntAreEnabled())
# endif
                {
                    /* Reverse the direction and signal the writer threads. */
                    u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_DIR_MASK);
                    u64State |= RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT;
                    if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                    {
                        int rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
                        AssertRC(rc);
                        break;
                    }
                }
#endif /* IN_RING3 || IN_RING0 */
#ifndef IN_RING3
# ifdef IN_RING0
                else
# endif
                {
                    /* Queue the exit request (ring-3). */
                    PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
                    uint32_t i = pVCpu->pdm.s.cQueuedCritSectRwShrdLeaves++;
                    LogFlow(("PDMCritSectRwLeaveShared: [%d]=%p => R3 c=%d (%#llx)\n", i, pThis, c, u64State));
                    AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves));
                    pVCpu->pdm.s.apQueuedCritSectRwShrdLeaves[i] = MMHyperCCToR3(pVM, pThis);
                    VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
                    VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
                    STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
                    STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveShared);
                    break;
                }
#endif
            }

            ASMNopPause();
            u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
            u64OldState = u64State;
        }
    }
    else
    {
        RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
        RTNATIVETHREAD hNativeWriter;
        ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
        AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);
        AssertReturn(pThis->s.Core.cWriterReads > 0, VERR_NOT_OWNER);
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
        if (!fNoVal)
        {
            int rc = RTLockValidatorRecExclUnwindMixed(pThis->s.Core.pValidatorWrite, &pThis->s.Core.pValidatorRead->Core);
            if (RT_FAILURE(rc))
                return rc;
        }
#endif
        ASMAtomicDecU32(&pThis->s.Core.cWriterReads);
    }

    return VINF_SUCCESS;
}

/**
 * Leave a critical section held with shared access.
 *
 * @returns VBox status code.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 * @param   pVM         The cross context VM structure.
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwEnterShared, PDMCritSectRwTryEnterShared,
 *          PDMCritSectRwEnterSharedDebug, PDMCritSectRwTryEnterSharedDebug,
 *          PDMCritSectRwLeaveExcl, RTCritSectRwLeaveShared.
 */
VMMDECL(int) PDMCritSectRwLeaveShared(PVMCC pVM, PPDMCRITSECTRW pThis)
{
    return pdmCritSectRwLeaveSharedWorker(pVM, pThis, false /*fNoVal*/);
}


#if defined(IN_RING3) || defined(IN_RING0)
/**
 * PDMCritSectBothFF interface.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pThis       Pointer to the read/write critical section.
 */
void pdmCritSectRwLeaveSharedQueued(PVMCC pVM, PPDMCRITSECTRW pThis)
{
    pdmCritSectRwLeaveSharedWorker(pVM, pThis, true /*fNoVal*/);
}
#endif


/**
 * Worker that enters a read/write critical section with exclusive access.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The busy return code for ring-0 and ring-3.
 * @param   fTryOnly    Only try enter it, don't wait.
 * @param   pSrcPos     The source position. (Can be NULL.)
 * @param   fNoVal      No validation records.
 */
static int pdmCritSectRwEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, bool fTryOnly,
                                  PCRTLOCKVALSRCPOS pSrcPos, bool fNoVal)
{
    /*
     * Validate input.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);

#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    NOREF(pSrcPos);
    NOREF(fNoVal);
#endif
#ifdef IN_RING3
    NOREF(rcBusy);
    NOREF(pVM);
#endif

#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    RTTHREAD hThreadSelf = NIL_RTTHREAD;
    if (!fTryOnly)
    {
        hThreadSelf = RTThreadSelfAutoAdopt();
        int rc9 = RTLockValidatorRecExclCheckOrder(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Check if we're already the owner and just recursing.
     */
    RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
    RTNATIVETHREAD hNativeWriter;
    ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
    if (hNativeSelf == hNativeWriter)
    {
        Assert((ASMAtomicReadU64(&pThis->s.Core.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
        if (!fNoVal)
        {
            int rc9 = RTLockValidatorRecExclRecursion(pThis->s.Core.pValidatorWrite, pSrcPos);
            if (RT_FAILURE(rc9))
                return rc9;
        }
#endif
        Assert(pThis->s.Core.cWriteRecursions < UINT32_MAX / 2);
        STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl));
        ASMAtomicIncU32(&pThis->s.Core.cWriteRecursions);
        return VINF_SUCCESS;
    }

    /*
     * Get cracking.
     */
    uint64_t u64State    = ASMAtomicReadU64(&pThis->s.Core.u64State);
    uint64_t u64OldState = u64State;

    for (;;)
    {
        if (   (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
            || (u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) != 0)
        {
            /* It flows in the right direction, try follow it before it changes. */
            uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
            c++;
            Assert(c < RTCSRW_CNT_MASK / 2);
            u64State &= ~RTCSRW_CNT_WR_MASK;
            u64State |= c << RTCSRW_CNT_WR_SHIFT;
            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                break;
        }
        else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0)
        {
            /* Wrong direction, but we're alone here and can simply try switch the direction. */
            u64State &= ~(RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
            u64State |= (UINT64_C(1) << RTCSRW_CNT_WR_SHIFT) | (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT);
            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                break;
        }
        else if (fTryOnly)
        {
            /* Wrong direction and we're not supposed to wait, just return. */
            STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl));
            return VERR_SEM_BUSY;
        }
        else
        {
            /* Add ourselves to the write count and break out to do the wait. */
            uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
            c++;
            Assert(c < RTCSRW_CNT_MASK / 2);
            u64State &= ~RTCSRW_CNT_WR_MASK;
            u64State |= c << RTCSRW_CNT_WR_SHIFT;
            if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                break;
        }

        if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
            return VERR_SEM_DESTROYED;

        ASMNopPause();
        u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
        u64OldState = u64State;
    }

    /*
     * If we're in write mode now try grab the ownership.  Play fair if there
     * are threads already waiting.
     */
    bool fDone = (u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT)
#if defined(IN_RING3)
              && (   ((u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT) == 1
                  || fTryOnly)
#endif
               ;
    if (fDone)
        ASMAtomicCmpXchgHandle(&pThis->s.Core.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
    if (!fDone)
    {
        STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(StatContention,EnterExcl));

#if defined(IN_RING3) || defined(IN_RING0)
        if (   !fTryOnly
# ifdef IN_RING0
            && RTThreadPreemptIsEnabled(NIL_RTTHREAD)
            && ASMIntAreEnabled()
# endif
           )
        {
            /*
             * Wait for our turn.
             */
            for (uint32_t iLoop = 0; ; iLoop++)
            {
                int rc;
# ifdef IN_RING3
#  ifdef PDMCRITSECTRW_STRICT
                if (hThreadSelf == NIL_RTTHREAD)
                    hThreadSelf = RTThreadSelfAutoAdopt();
                rc = RTLockValidatorRecExclCheckBlocking(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, true,
                                                         RT_INDEFINITE_WAIT, RTTHREADSTATE_RW_WRITE, false);
                if (RT_SUCCESS(rc))
#  else
                RTTHREAD hThreadSelf = RTThreadSelf();
                RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, false);
#  endif
# endif
                {
                    for (;;)
                    {
                        rc = SUPSemEventWaitNoResume(pVM->pSession,
                                                     (SUPSEMEVENT)pThis->s.Core.hEvtWrite,
                                                     RT_INDEFINITE_WAIT);
                        if (   rc != VERR_INTERRUPTED
                            || pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
                            break;
# ifdef IN_RING0
                        pdmR0CritSectRwYieldToRing3(pVM);
# endif
                    }
# ifdef IN_RING3
                    RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE);
# endif
                    if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
                        return VERR_SEM_DESTROYED;
                }
                if (RT_FAILURE(rc))
                {
                    /* Decrement the counts and return the error. */
                    for (;;)
                    {
                        u64OldState = u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                        uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT; Assert(c > 0);
                        c--;
                        u64State &= ~RTCSRW_CNT_WR_MASK;
                        u64State |= c << RTCSRW_CNT_WR_SHIFT;
                        if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                            break;
                    }
                    return rc;
                }

                u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
                {
                    ASMAtomicCmpXchgHandle(&pThis->s.Core.hNativeWriter, hNativeSelf, NIL_RTNATIVETHREAD, fDone);
                    if (fDone)
                        break;
                }
                AssertMsg(iLoop < 1000, ("%u\n", iLoop)); /* may loop a few times here... */
            }
        }
        else
#endif /* IN_RING3 || IN_RING0 */
        {
#ifdef IN_RING3
            /* TryEnter call - decrement the number of (waiting) writers. */
#else
            /* We cannot call SUPSemEventWaitNoResume in this context.  Go back to
               ring-3 and do it there or return rcBusy. */
#endif

            for (;;)
            {
                u64OldState = u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
                uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT; Assert(c > 0);
                c--;
                u64State &= ~RTCSRW_CNT_WR_MASK;
                u64State |= c << RTCSRW_CNT_WR_SHIFT;
                if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                    break;
            }

#ifdef IN_RING3
            return VERR_SEM_BUSY;
#else
            if (rcBusy == VINF_SUCCESS)
            {
                Assert(!fTryOnly);
                PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
                /** @todo Should actually do this via VMMR0.cpp instead of going all
                 *        the way back to ring-3.  Goes for both kinds of crit sects. */
                return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_RW_ENTER_EXCL, MMHyperCCToR3(pVM, pThis));
            }
            return rcBusy;
#endif
        }
    }

    /*
     * Got it!
     */
    Assert((ASMAtomicReadU64(&pThis->s.Core.u64State) & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT));
    ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 1);
    Assert(pThis->s.Core.cWriterReads == 0);
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    if (!fNoVal)
        RTLockValidatorRecExclSetOwner(pThis->s.Core.pValidatorWrite, hThreadSelf, pSrcPos, true);
#endif
    STAM_REL_COUNTER_INC(&pThis->s.CTX_MID_Z(Stat,EnterExcl));
    STAM_PROFILE_ADV_START(&pThis->s.StatWriteLocked, swl);

    return VINF_SUCCESS;
}
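
/*
 * Editor's note: the exclusive-enter worker above acquires the lock in two
 * phases, roughly (sketch using the file's own primitives):
 *
 *     // Phase 1: register a writer in the packed state word, either by
 *     // bumping the write count (already in write direction) or by flipping
 *     // the direction when the section is completely idle.
 *
 *     // Phase 2: claim actual ownership by CAS'ing our native thread handle
 *     // into hNativeWriter, but only while it is NIL:
 *     bool fDone;
 *     ASMAtomicCmpXchgHandle(&pThis->s.Core.hNativeWriter, hNativeSelf,
 *                            NIL_RTNATIVETHREAD, fDone);
 *     // If fDone is false another writer won the race; block on hEvtWrite
 *     // (ring-3/ring-0 only) and retry when the current owner leaves.
 */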


/**
 * Enter a critical section with exclusive (write) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  rcBusy if in ring-0 or raw-mode context and it is busy.
 * @retval  VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The status code to return when we're in RC or R0 and the
 *                      section is busy.  Pass VINF_SUCCESS to acquire the
 *                      critical section through a ring-3 call if necessary.
 * @sa      PDMCritSectRwEnterExclDebug, PDMCritSectRwTryEnterExcl,
 *          PDMCritSectRwTryEnterExclDebug,
 *          PDMCritSectEnterDebug, PDMCritSectEnter,
 *          RTCritSectRwEnterExcl.
 */
VMMDECL(int) PDMCritSectRwEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy)
{
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}
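
/*
 * Editor's note: typical exclusive (write) usage, sketch only; the pThis
 * member names are illustrative.  Passing VINF_SUCCESS as rcBusy makes R0/RC
 * callers take the slow ring-3 path when contended instead of failing:
 *
 *     int rc = PDMCritSectRwEnterExcl(pVM, &pThis->CritSectRw, VINF_SUCCESS);
 *     AssertRCReturn(rc, rc);             // only fails if destroyed
 *     pThis->uSharedState = uNewValue;    // exclusive access
 *     PDMCritSectRwLeaveExcl(pVM, &pThis->CritSectRw);
 */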


/**
 * Enter a critical section with exclusive (write) access.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  rcBusy if in ring-0 or raw-mode context and it is busy.
 * @retval  VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   rcBusy      The status code to return when we're in RC or R0 and the
 *                      section is busy.  Pass VINF_SUCCESS to acquire the
 *                      critical section through a ring-3 call if necessary.
 * @param   uId         Where we're entering the section.
 * @param   SRC_POS     The source position.
 * @sa      PDMCritSectRwEnterExcl, PDMCritSectRwTryEnterExcl,
 *          PDMCritSectRwTryEnterExclDebug,
 *          PDMCritSectEnterDebug, PDMCritSectEnter,
 *          RTCritSectRwEnterExclDebug.
 */
VMMDECL(int) PDMCritSectRwEnterExclDebug(PVMCC pVM, PPDMCRITSECTRW pThis, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectRwEnterExcl(pVM, pThis, rcBusy, false /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}


/**
 * Try enter a critical section with exclusive (write) access.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwEnterExcl, PDMCritSectRwTryEnterExclDebug,
 *          PDMCritSectRwEnterExclDebug,
 *          PDMCritSectTryEnter, PDMCritSectTryEnterDebug,
 *          RTCritSectRwTryEnterExcl.
 */
VMMDECL(int) PDMCritSectRwTryEnterExcl(PVMCC pVM, PPDMCRITSECTRW pThis)
{
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
    return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}


/**
 * Try enter a critical section with exclusive (write) access.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no-nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   uId         Where we're entering the section.
 * @param   SRC_POS     The source position.
 * @sa      PDMCritSectRwTryEnterExcl, PDMCritSectRwEnterExcl,
 *          PDMCritSectRwEnterExclDebug,
 *          PDMCritSectTryEnterDebug, PDMCritSectTryEnter,
 *          RTCritSectRwTryEnterExclDebug.
 */
VMMDECL(int) PDMCritSectRwTryEnterExclDebug(PVMCC pVM, PPDMCRITSECTRW pThis, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    NOREF(uId); NOREF(pszFile); NOREF(iLine); NOREF(pszFunction);
#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, NULL, false /*fNoVal*/);
#else
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, true /*fTryOnly*/, &SrcPos, false /*fNoVal*/);
#endif
}
1082
1083
1084#ifdef IN_RING3
1085/**
1086 * Enters a PDM read/write critical section with exclusive (write) access.
1087 *
1088 * @returns VINF_SUCCESS if entered successfully.
1089 * @retval VERR_SEM_DESTROYED if the critical section is delete before or
1090 * during the operation.
1091 *
1092 * @param pVM The cross context VM structure.
1093 * @param pThis Pointer to the read/write critical section.
1094 * @param fCallRing3 Whether this is a VMMRZCallRing3()request.
1095 */
1096VMMR3DECL(int) PDMR3CritSectRwEnterExclEx(PVM pVM, PPDMCRITSECTRW pThis, bool fCallRing3)
1097{
1098 return pdmCritSectRwEnterExcl(pVM, pThis, VERR_SEM_BUSY, false /*fTryAgain*/, NULL, fCallRing3 /*fNoVal*/);
1099}
1100#endif /* IN_RING3 */


/**
 * Leave a critical section held exclusively.
 *
 * @returns VBox status code.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 * @param   pVM         The cross context VM structure.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   fNoVal      No validation records (i.e. queued release).
 * @sa      PDMCritSectRwLeaveShared, RTCritSectRwLeaveExcl.
 */
static int pdmCritSectRwLeaveExclWorker(PVMCC pVM, PPDMCRITSECTRW pThis, bool fNoVal)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);

#if !defined(PDMCRITSECTRW_STRICT) || !defined(IN_RING3)
    NOREF(fNoVal);
#endif

    RTNATIVETHREAD hNativeSelf = pdmCritSectRwGetNativeSelf(pVM, pThis);
    RTNATIVETHREAD hNativeWriter;
    ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
    AssertReturn(hNativeSelf == hNativeWriter, VERR_NOT_OWNER);

    /*
     * Unwind one recursion.  Is it the final one?
     */
    if (pThis->s.Core.cWriteRecursions == 1)
    {
        AssertReturn(pThis->s.Core.cWriterReads == 0, VERR_WRONG_ORDER); /* (must release all read recursions before the final write.) */
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
        if (fNoVal)
            Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD);
        else
        {
            int rc9 = RTLockValidatorRecExclReleaseOwner(pThis->s.Core.pValidatorWrite, true);
            if (RT_FAILURE(rc9))
                return rc9;
        }
#endif
        /*
         * Update the state.
         */
#if defined(IN_RING3) || defined(IN_RING0)
# ifdef IN_RING0
        if (   RTThreadPreemptIsEnabled(NIL_RTTHREAD)
            && ASMIntAreEnabled())
# endif
        {
            ASMAtomicWriteU32(&pThis->s.Core.cWriteRecursions, 0);
            STAM_PROFILE_ADV_STOP(&pThis->s.StatWriteLocked, swl);
            ASMAtomicWriteHandle(&pThis->s.Core.hNativeWriter, NIL_RTNATIVETHREAD);

            for (;;)
            {
                uint64_t u64State    = ASMAtomicReadU64(&pThis->s.Core.u64State);
                uint64_t u64OldState = u64State;

                uint64_t c = (u64State & RTCSRW_CNT_WR_MASK) >> RTCSRW_CNT_WR_SHIFT;
                Assert(c > 0);
                c--;

                if (   c > 0
                    || (u64State & RTCSRW_CNT_RD_MASK) == 0)
                {
                    /* Don't change the direction, wake up the next writer if any. */
                    u64State &= ~RTCSRW_CNT_WR_MASK;
                    u64State |= c << RTCSRW_CNT_WR_SHIFT;
                    if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                    {
                        if (c > 0)
                        {
                            int rc = SUPSemEventSignal(pVM->pSession, (SUPSEMEVENT)pThis->s.Core.hEvtWrite);
                            AssertRC(rc);
                        }
                        break;
                    }
                }
                else
                {
                    /* Reverse the direction and signal the reader threads. */
                    u64State &= ~(RTCSRW_CNT_WR_MASK | RTCSRW_DIR_MASK);
                    u64State |= RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT;
                    if (ASMAtomicCmpXchgU64(&pThis->s.Core.u64State, u64State, u64OldState))
                    {
                        Assert(!pThis->s.Core.fNeedReset);
                        ASMAtomicWriteBool(&pThis->s.Core.fNeedReset, true);
                        int rc = SUPSemEventMultiSignal(pVM->pSession, (SUPSEMEVENTMULTI)pThis->s.Core.hEvtRead);
                        AssertRC(rc);
                        break;
                    }
                }

                ASMNopPause();
                if (pThis->s.Core.u32Magic != RTCRITSECTRW_MAGIC)
                    return VERR_SEM_DESTROYED;
            }
        }
#endif /* IN_RING3 || IN_RING0 */
#ifndef IN_RING3
# ifdef IN_RING0
        else
# endif
        {
            /*
             * We can call neither SUPSemEventSignal nor SUPSemEventMultiSignal
             * in this context, so queue the exit request (ring-3).
             */
            PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
            uint32_t i = pVCpu->pdm.s.cQueuedCritSectRwExclLeaves++;
            LogFlow(("PDMCritSectRwLeaveExcl: [%d]=%p => R3\n", i, pThis));
            AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectRwExclLeaves));
            pVCpu->pdm.s.apQueuedCritSectRwExclLeaves[i] = MMHyperCCToR3(pVM, pThis);
            VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
            VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
            STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
            STAM_REL_COUNTER_INC(&pThis->s.StatContentionRZLeaveExcl);
        }
#endif
    }
    else
    {
        /*
         * Not the final recursion.
         */
        Assert(pThis->s.Core.cWriteRecursions != 0);
#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
        if (fNoVal)
            Assert(pThis->s.Core.pValidatorWrite->hThread == NIL_RTTHREAD);
        else
        {
            int rc9 = RTLockValidatorRecExclUnwind(pThis->s.Core.pValidatorWrite);
            if (RT_FAILURE(rc9))
                return rc9;
        }
#endif
        ASMAtomicDecU32(&pThis->s.Core.cWriteRecursions);
    }

    return VINF_SUCCESS;
}


/**
 * Leave a critical section held exclusively.
 *
 * @returns VBox status code.
 * @retval  VERR_SEM_DESTROYED if the critical section is deleted before or
 *          during the operation.
 * @param   pVM         The cross context VM structure.
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwLeaveShared, RTCritSectRwLeaveExcl.
 */
VMMDECL(int) PDMCritSectRwLeaveExcl(PVMCC pVM, PPDMCRITSECTRW pThis)
{
    return pdmCritSectRwLeaveExclWorker(pVM, pThis, false /*fNoVal*/);
}
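
/*
 * Editor's note: the writer may recurse both exclusively and in shared mode
 * (the cWriterReads path in the enter/leave workers above); leaves must
 * unwind in reverse order.  Sketch, names illustrative:
 *
 *     PDMCritSectRwEnterExcl(pVM, &pThis->CritSectRw, VERR_SEM_BUSY);
 *     PDMCritSectRwEnterShared(pVM, &pThis->CritSectRw, VERR_SEM_BUSY); // cWriterReads++
 *     // ...
 *     PDMCritSectRwLeaveShared(pVM, &pThis->CritSectRw);  // cWriterReads--
 *     PDMCritSectRwLeaveExcl(pVM, &pThis->CritSectRw);    // final write release
 */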


#if defined(IN_RING3) || defined(IN_RING0)
/**
 * PDMCritSectBothFF interface.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pThis       Pointer to the read/write critical section.
 */
void pdmCritSectRwLeaveExclQueued(PVMCC pVM, PPDMCRITSECTRW pThis)
{
    pdmCritSectRwLeaveExclWorker(pVM, pThis, true /*fNoVal*/);
}
#endif


/**
 * Checks if the caller is the exclusive (write) owner of the critical section.
 *
 * @retval  true if owner.
 * @retval  false if not owner.
 * @param   pVM         The cross context VM structure.
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwIsReadOwner, PDMCritSectIsOwner,
 *          RTCritSectRwIsWriteOwner.
 */
VMMDECL(bool) PDMCritSectRwIsWriteOwner(PVMCC pVM, PPDMCRITSECTRW pThis)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, false);

    /*
     * Check ownership.
     */
    RTNATIVETHREAD hNativeWriter;
    ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hNativeWriter);
    if (hNativeWriter == NIL_RTNATIVETHREAD)
        return false;
    return hNativeWriter == pdmCritSectRwGetNativeSelf(pVM, pThis);
}


/**
 * Checks if the caller is one of the read owners of the critical section.
 *
 * @note    !CAUTION!  This API doesn't work reliably if lock validation isn't
 *          enabled.  Meaning, the answer is not trustworthy unless
 *          RT_LOCK_STRICT or PDMCRITSECTRW_STRICT was defined at build time.
 *          Also, make sure you do not use RTCRITSECTRW_FLAGS_NO_LOCK_VAL when
 *          creating the semaphore.  And finally, if you used a locking class,
 *          don't disable deadlock detection by setting cMsMinDeadlock to
 *          RT_INDEFINITE_WAIT.
 *
 *          In short, only use this for assertions.
 *
 * @returns @c true if reader, @c false if not.
 * @param   pVM         The cross context VM structure.
 * @param   pThis       Pointer to the read/write critical section.
 * @param   fWannaHear  What you'd like to hear when lock validation is not
 *                      available.  (For avoiding asserting all over the place.)
 * @sa      PDMCritSectRwIsWriteOwner, RTCritSectRwIsReadOwner.
 */
VMMDECL(bool) PDMCritSectRwIsReadOwner(PVMCC pVM, PPDMCRITSECTRW pThis, bool fWannaHear)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, false);

    /*
     * Inspect the state.
     */
    uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
    if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_WRITE << RTCSRW_DIR_SHIFT))
    {
        /*
         * It's in write mode, so we can only be a reader if we're also the
         * current writer.
         */
        RTNATIVETHREAD hWriter;
        ASMAtomicUoReadHandle(&pThis->s.Core.hNativeWriter, &hWriter);
        if (hWriter == NIL_RTNATIVETHREAD)
            return false;
        return hWriter == pdmCritSectRwGetNativeSelf(pVM, pThis);
    }

    /*
     * Read mode.  If there are no current readers, then we cannot be a reader.
     */
    if (!(u64State & RTCSRW_CNT_RD_MASK))
        return false;

#if defined(PDMCRITSECTRW_STRICT) && defined(IN_RING3)
    /*
     * Ask the lock validator.
     * Note! It doesn't know everything, let's deal with that if it becomes an issue...
     */
    NOREF(fWannaHear);
    return RTLockValidatorRecSharedIsOwner(pThis->s.Core.pValidatorRead, NIL_RTTHREAD);
#else
    /*
     * Ok, we don't know, just tell the caller what they want to hear.
     */
    return fWannaHear;
#endif
}
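
/*
 * Editor's note: per the warnings above, the ownership checks are meant for
 * assertions only, e.g. (sketch; the pThis name is illustrative):
 *
 *     Assert(PDMCritSectRwIsWriteOwner(pVM, &pThis->CritSectRw));
 *     Assert(PDMCritSectRwIsReadOwner(pVM, &pThis->CritSectRw, true /*fWannaHear*/));
 *
 * Passing true for fWannaHear keeps the read-owner assertion from firing in
 * builds where lock validation is unavailable.
 */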


/**
 * Gets the write recursion count.
 *
 * @returns The write recursion count (0 if bad critsect).
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwGetWriterReadRecursion, PDMCritSectRwGetReadCount,
 *          RTCritSectRwGetWriteRecursion.
 */
VMMDECL(uint32_t) PDMCritSectRwGetWriteRecursion(PPDMCRITSECTRW pThis)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);

    /*
     * Return the requested data.
     */
    return pThis->s.Core.cWriteRecursions;
}


/**
 * Gets the read recursion count of the current writer.
 *
 * @returns The read recursion count (0 if bad critsect).
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwGetWriteRecursion, PDMCritSectRwGetReadCount,
 *          RTCritSectRwGetWriterReadRecursion.
 */
VMMDECL(uint32_t) PDMCritSectRwGetWriterReadRecursion(PPDMCRITSECTRW pThis)
{
    /*
     * Validate handle.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);

    /*
     * Return the requested data.
     */
    return pThis->s.Core.cWriterReads;
}


/**
 * Gets the current number of reads.
 *
 * This includes all read recursions, so it might be higher than the number of
 * read owners.  It does not include reads done by the current writer.
 *
 * @returns The read count (0 if bad critsect).
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectRwGetWriteRecursion, PDMCritSectRwGetWriterReadRecursion,
 *          RTCritSectRwGetReadCount.
 */
VMMDECL(uint32_t) PDMCritSectRwGetReadCount(PPDMCRITSECTRW pThis)
{
    /*
     * Validate input.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC, 0);

    /*
     * Return the requested data.
     */
    uint64_t u64State = ASMAtomicReadU64(&pThis->s.Core.u64State);
    if ((u64State & RTCSRW_DIR_MASK) != (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
        return 0;
    return (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
}


/**
 * Checks if the read/write critical section is initialized or not.
 *
 * @retval  true if initialized.
 * @retval  false if not initialized.
 * @param   pThis       Pointer to the read/write critical section.
 * @sa      PDMCritSectIsInitialized, RTCritSectRwIsInitialized.
 */
VMMDECL(bool) PDMCritSectRwIsInitialized(PCPDMCRITSECTRW pThis)
{
    AssertPtr(pThis);
    return pThis->s.Core.u32Magic == RTCRITSECTRW_MAGIC;
}