VirtualBox

source: vbox/trunk/src/VBox/Runtime/r3/linux/semevent-linux.cpp@ 92777

Last change on this file since 92777 was 92777, checked in by vboxsync, 3 years ago

IPRT/semevent-linux: Implemented missing RTSemEventWaitEx and made use of FUTEX_WAIT_BITSET to avoid absolute to relative time conversion on kernels >= 2.6.25. bugref:10138

  • 屬性 svn:eol-style 設為 native
  • 屬性 svn:keywords 設為 Id Revision
檔案大小: 22.9 KB
 
1/* $Id: semevent-linux.cpp 92777 2021-12-07 01:25:16Z vboxsync $ */
2/** @file
3 * IPRT - Event Semaphore, Linux (2.6.0 and later).
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27#include <features.h>
28#if __GLIBC_PREREQ(2,6) && !defined(IPRT_WITH_FUTEX_BASED_SEMS)
29
30/*
31 * glibc 2.6 fixed a serious bug in the mutex implementation. We wrote this
32 * linux specific event semaphores code in order to work around the bug. We
33 * will fall back on the pthread-based implementation if glibc is known to
34 * contain the bug fix.
35 *
36 * The external reference to epoll_pwait is a hack which prevents that we link
37 * against glibc < 2.6.
38 */
39# include "../posix/semevent-posix.cpp"
40__asm__ (".global epoll_pwait");
41
42#else /* glibc < 2.6 */
43
44
45/*********************************************************************************************************************************
46* Header Files *
47*********************************************************************************************************************************/
48#include <iprt/semaphore.h>
49#include "internal/iprt.h"
50
51#include <iprt/asm.h>
52#include <iprt/assert.h>
53#include <iprt/err.h>
54#include <iprt/lockvalidator.h>
55#include <iprt/mem.h>
56#include <iprt/time.h>
57#include "internal/magics.h"
58#include "internal/mem.h"
59#include "internal/strict.h"
60
61#include <errno.h>
62#include <limits.h>
63#include <pthread.h>
64#include <unistd.h>
65#include <sys/time.h>
66#include <sys/syscall.h>
67#if 0 /* With 2.6.17 futex.h has become C++ unfriendly. */
68# include <linux/futex.h>
69#else
70# define FUTEX_WAIT 0
71# define FUTEX_WAKE 1
72# define FUTEX_WAIT_BITSET 9 /**< @since 2.6.25 - uses absolute timeout. */
73#endif
74
75
76/*********************************************************************************************************************************
77* Structures and Typedefs *
78*********************************************************************************************************************************/
/**
 * Linux (single wakeup) event semaphore.
 *
 * Implemented directly on top of the futex syscall; fSignalled doubles as the
 * futex word that waiters block on.
 */
struct RTSEMEVENTINTERNAL
{
    /** Magic value (RTSEMEVENT_MAGIC); top bit is set on destruction to fail
     *  late waiters with VERR_SEM_DESTROYED. */
    intptr_t volatile   iMagic;
    /** The futex state variable.
     * 0 means not signalled.
       1 means signalled. */
    uint32_t volatile   fSignalled;
    /** The number of waiting threads.  Set to INT32_MIN/2 on destruction so
     *  signalers can tell nobody should be woken any more. */
    int32_t volatile    cWaiters;
#ifdef RTSEMEVENT_STRICT
    /** Signallers. (Lock validator record of threads allowed to signal.) */
    RTLOCKVALRECSHRD    Signallers;
    /** Indicates that lock validation should be performed. */
    bool volatile       fEverHadSignallers;
#endif
    /** The creation flags (RTSEMEVENT_FLAGS_XXX). */
    uint32_t            fFlags;
};
101
102
/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
/** Whether FUTEX_WAIT_BITSET is available: -1 = not yet probed (lazily
 * determined on first RTSemEventCreateEx call), 0 = unavailable (ENOSYS),
 * 1 = available (kernel >= 2.6.25). */
static int volatile g_fCanUseWaitBitSet = -1;
107
108
109/**
110 * Wrapper for the futex syscall.
111 */
112static long sys_futex(uint32_t volatile *uaddr, int op, int val, struct timespec *utime, int32_t *uaddr2, int val3)
113{
114 errno = 0;
115 long rc = syscall(__NR_futex, uaddr, op, val, utime, uaddr2, val3);
116 if (rc < 0)
117 {
118 Assert(rc == -1);
119 rc = -errno;
120 }
121 return rc;
122}
123
124
125DECLINLINE(void) rtSemLinuxCheckForFutexWaitBitSetSlow(int volatile *pfCanUseWaitBitSet)
126{
127 uint32_t uTestVar = UINT32_MAX;
128 long rc = sys_futex(&uTestVar, FUTEX_WAIT_BITSET, UINT32_C(0xf0f0f0f0), NULL, NULL, UINT32_MAX);
129 *pfCanUseWaitBitSet = rc == -EAGAIN;
130 AssertMsg(rc == -ENOSYS || rc == -EAGAIN, ("%d\n", rc));
131}
132
133
134DECLINLINE(void) rtSemLinuxCheckForFutexWaitBitSet(int volatile *pfCanUseWaitBitSet)
135{
136 if (*pfCanUseWaitBitSet != -1)
137 { /* likely */ }
138 else
139 rtSemLinuxCheckForFutexWaitBitSetSlow(pfCanUseWaitBitSet);
140}
141
142
143
/**
 * Creates an event semaphore with default flags and no lock validator class.
 * Simply forwards to RTSemEventCreateEx.
 */
RTDECL(int) RTSemEventCreate(PRTSEMEVENT phEventSem)
{
    return RTSemEventCreateEx(phEventSem, 0 /*fFlags*/, NIL_RTLOCKVALCLASS, NULL);
}
148
149
/**
 * Creates an event semaphore.
 *
 * @returns VINF_SUCCESS or VERR_NO_MEMORY / VERR_INVALID_PARAMETER.
 * @param   phEventSem  Where to store the handle on success.
 * @param   fFlags      RTSEMEVENT_FLAGS_NO_LOCK_VAL and/or
 *                      RTSEMEVENT_FLAGS_BOOTSTRAP_HACK; no other bits allowed.
 * @param   hClass      Lock validator class (strict builds).
 * @param   pszNameFmt  Optional name format string for the validator record.
 */
RTDECL(int) RTSemEventCreateEx(PRTSEMEVENT phEventSem, uint32_t fFlags, RTLOCKVALCLASS hClass, const char *pszNameFmt, ...)
{
    AssertReturn(!(fFlags & ~(RTSEMEVENT_FLAGS_NO_LOCK_VAL | RTSEMEVENT_FLAGS_BOOTSTRAP_HACK)), VERR_INVALID_PARAMETER);
    /* The bootstrap hack implies no lock validation. */
    Assert(!(fFlags & RTSEMEVENT_FLAGS_BOOTSTRAP_HACK) || (fFlags & RTSEMEVENT_FLAGS_NO_LOCK_VAL));

    /*
     * Make sure we know whether FUTEX_WAIT_BITSET works.
     */
    rtSemLinuxCheckForFutexWaitBitSet(&g_fCanUseWaitBitSet);
#if defined(DEBUG_bird) && !defined(IN_GUEST)
    Assert(g_fCanUseWaitBitSet == true);
#endif

    /*
     * Allocate semaphore handle.
     * The bootstrap hack uses the early base allocator since the regular heap
     * may not be usable yet at that point.
     */
    struct RTSEMEVENTINTERNAL *pThis;
    if (!(fFlags & RTSEMEVENT_FLAGS_BOOTSTRAP_HACK))
        pThis = (struct RTSEMEVENTINTERNAL *)RTMemAlloc(sizeof(struct RTSEMEVENTINTERNAL));
    else
        pThis = (struct RTSEMEVENTINTERNAL *)rtMemBaseAlloc(sizeof(struct RTSEMEVENTINTERNAL));
    if (pThis)
    {
        pThis->iMagic     = RTSEMEVENT_MAGIC;
        pThis->cWaiters   = 0;
        pThis->fSignalled = 0;
        pThis->fFlags     = fFlags;
#ifdef RTSEMEVENT_STRICT
        if (!pszNameFmt)
        {
            /* Anonymous semaphores get sequentially numbered validator names. */
            static uint32_t volatile s_iSemEventAnon = 0;
            RTLockValidatorRecSharedInit(&pThis->Signallers, hClass, RTLOCKVAL_SUB_CLASS_ANY, pThis,
                                         true /*fSignaller*/, !(fFlags & RTSEMEVENT_FLAGS_NO_LOCK_VAL),
                                         "RTSemEvent-%u", ASMAtomicIncU32(&s_iSemEventAnon) - 1);
        }
        else
        {
            va_list va;
            va_start(va, pszNameFmt);
            RTLockValidatorRecSharedInitV(&pThis->Signallers, hClass, RTLOCKVAL_SUB_CLASS_ANY, pThis,
                                          true /*fSignaller*/, !(fFlags & RTSEMEVENT_FLAGS_NO_LOCK_VAL),
                                          pszNameFmt, va);
            va_end(va);
        }
        pThis->fEverHadSignallers = false;
#else
        RT_NOREF(hClass, pszNameFmt);
#endif

        *phEventSem = pThis;
        return VINF_SUCCESS;
    }
    return VERR_NO_MEMORY;
}
204
205
/**
 * Destroys an event semaphore.
 *
 * @returns VINF_SUCCESS (also for NIL handles) or VERR_INVALID_HANDLE.
 * @param   hEventSem   The semaphore to destroy.  NIL is tolerated.
 */
RTDECL(int) RTSemEventDestroy(RTSEMEVENT hEventSem)
{
    /*
     * Validate input.
     */
    struct RTSEMEVENTINTERNAL *pThis = hEventSem;
    if (pThis == NIL_RTSEMEVENT)
        return VINF_SUCCESS;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->iMagic == RTSEMEVENT_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Invalidate the semaphore and wake up anyone waiting on it.
     * Flipping the top magic bit makes the waiters return VERR_SEM_DESTROYED;
     * poisoning cWaiters with INT32_MIN/2 stops further wakeup attempts.
     */
    ASMAtomicXchgSize(&pThis->iMagic, RTSEMEVENT_MAGIC | UINT32_C(0x80000000));
    if (ASMAtomicXchgS32(&pThis->cWaiters, INT32_MIN / 2) > 0)
    {
        sys_futex(&pThis->fSignalled, FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
        /* Best-effort grace period so woken threads can leave the futex word
           before the memory is freed below. */
        usleep(1000);
    }

    /*
     * Free the semaphore memory and be gone.
     */
#ifdef RTSEMEVENT_STRICT
    RTLockValidatorRecSharedDelete(&pThis->Signallers);
#endif
    if (!(pThis->fFlags & RTSEMEVENT_FLAGS_BOOTSTRAP_HACK))
        RTMemFree(pThis);
    else
        rtMemBaseFree(pThis);
    return VINF_SUCCESS;
}
239
240
/**
 * Signals the event semaphore, waking at most one waiting thread.
 *
 * @returns VINF_SUCCESS, VERR_INVALID_HANDLE, VERR_SEM_DESTROYED, or a lock
 *          validator status code (strict builds).
 * @param   hEventSem   The semaphore to signal.
 */
RTDECL(int) RTSemEventSignal(RTSEMEVENT hEventSem)
{
    /*
     * Validate input.
     */
    struct RTSEMEVENTINTERNAL *pThis = hEventSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->iMagic == RTSEMEVENT_MAGIC, VERR_INVALID_HANDLE);

#ifdef RTSEMEVENT_STRICT
    if (pThis->fEverHadSignallers)
    {
        int rc9 = RTLockValidatorRecSharedCheckSignaller(&pThis->Signallers, NIL_RTTHREAD);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /* Set the futex word first; a concurrent waiter doing FUTEX_WAIT with
       expected value 0 will then fail with EWOULDBLOCK and pick it up. */
    ASMAtomicWriteU32(&pThis->fSignalled, 1);
    if (ASMAtomicReadS32(&pThis->cWaiters) < 1)
        return VINF_SUCCESS;

    /* somebody is waiting, try wake up one of them. */
    long cWoken = sys_futex(&pThis->fSignalled, FUTEX_WAKE, 1, NULL, NULL, 0);
    if (RT_LIKELY(cWoken >= 0))
        return VINF_SUCCESS;

    if (RT_UNLIKELY(pThis->iMagic != RTSEMEVENT_MAGIC))
        return VERR_SEM_DESTROYED;

    return VERR_INVALID_PARAMETER;
}
273
274
/**
 * Performs an indefinite wait on the event.
 *
 * @returns VINF_SUCCESS on wakeup, VERR_INTERRUPTED (NORESUME waits only),
 *          VERR_SEM_DESTROYED, a lock validator status, or a converted errno.
 * @param   pThis       The event semaphore instance.
 * @param   fFlags      RTSEMWAIT_FLAGS_XXX; only RTSEMWAIT_FLAGS_NORESUME is
 *                      relevant here.
 * @param   pSrcPos     Caller position for the lock validator (strict builds).
 */
static int rtSemEventLinuxWaitIndefinite(struct RTSEMEVENTINTERNAL *pThis, uint32_t fFlags, PCRTLOCKVALSRCPOS pSrcPos)
{
    RT_NOREF_PV(pSrcPos);

    /*
     * Quickly check whether it's signaled and there are no other waiters.
     * (The waiter count is bumped before checking so a concurrent signaller
     * sees us and issues the FUTEX_WAKE.)
     */
    uint32_t cWaiters = ASMAtomicIncS32(&pThis->cWaiters);
    if (    cWaiters == 1
        &&  ASMAtomicCmpXchgU32(&pThis->fSignalled, 0, 1))
    {
        ASMAtomicDecS32(&pThis->cWaiters);
        return VINF_SUCCESS;
    }

    /*
     * The wait loop.
     */
#ifdef RTSEMEVENT_STRICT
    RTTHREAD hThreadSelf = !(pThis->fFlags & RTSEMEVENT_FLAGS_BOOTSTRAP_HACK)
                         ? RTThreadSelfAutoAdopt()
                         : RTThreadSelf();
#else
    RTTHREAD hThreadSelf = RTThreadSelf();
#endif
    int rc = VINF_SUCCESS;
    for (;;)
    {
#ifdef RTSEMEVENT_STRICT
        if (pThis->fEverHadSignallers)
        {
            rc = RTLockValidatorRecSharedCheckBlocking(&pThis->Signallers, hThreadSelf, pSrcPos, false,
                                                       RT_INDEFINITE_WAIT, RTTHREADSTATE_EVENT, true);
            if (RT_FAILURE(rc))
                break;
        }
#endif
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_EVENT, true);
        /* Block until fSignalled leaves 0 or we are woken/interrupted. */
        long lrc = sys_futex(&pThis->fSignalled, FUTEX_WAIT, 0, NULL /*pTimeout*/, NULL, 0);
        RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_EVENT);
        if (RT_UNLIKELY(pThis->iMagic != RTSEMEVENT_MAGIC))
        {
            rc = VERR_SEM_DESTROYED;
            break;
        }

        if (RT_LIKELY(lrc == 0 || lrc == -EWOULDBLOCK))
        {
            /* successful wakeup or fSignalled > 0 in the meantime */
            if (ASMAtomicCmpXchgU32(&pThis->fSignalled, 0, 1))
                break;
            /* Someone else grabbed the signal first - go back to sleep. */
        }
        else if (lrc == -ETIMEDOUT)
        {
            /* NOTE(review): shouldn't be reachable with a NULL timeout - kept
               as defensive handling. */
            rc = VERR_TIMEOUT;
            break;
        }
        else if (lrc == -EINTR)
        {
            if (fFlags & RTSEMWAIT_FLAGS_NORESUME)
            {
                rc = VERR_INTERRUPTED;
                break;
            }
            /* RESUME semantics: just retry the wait. */
        }
        else
        {
            /* this shouldn't happen! */
            AssertMsgFailed(("rc=%ld errno=%d\n", lrc, errno));
            rc = RTErrConvertFromErrno(lrc);
            break;
        }
    }

    ASMAtomicDecS32(&pThis->cWaiters);
    return rc;
}
355
356
357static int rtSemEventLinuxWaitPoll(struct RTSEMEVENTINTERNAL *pThis)
358{
359 /*
360 * What we do here is isn't quite fair to anyone else waiting on it, however
361 * it might not be as bad as all that for callers making repeated poll calls
362 * because they cannot block, as that would be a virtual wait but without the
363 * chance of a permanept queue position. So, I hope we can live with this.
364 */
365 if (ASMAtomicCmpXchgU32(&pThis->fSignalled, 0, 1))
366 return VINF_SUCCESS;
367 return VERR_TIMEOUT;
368}
369
370
371static int rtSemEventLinuxWaitTimed(struct RTSEMEVENTINTERNAL *pThis, uint32_t fFlags,
372 uint64_t uTimeout, PCRTLOCKVALSRCPOS pSrcPos)
373{
374 RT_NOREF_PV(pSrcPos);
375
376 /*
377 * Convert the timeout value.
378 */
379 int iWaitOp;
380 uint32_t uWaitVal3;
381 timespec TsTimeout;
382 uint64_t uAbsTimeout = uTimeout; /* Note! only relevant for relative waits (FUTEX_WAIT). */
383 if (fFlags & RTSEMWAIT_FLAGS_RELATIVE)
384 {
385 if (!uTimeout)
386 return rtSemEventLinuxWaitPoll(pThis);
387
388 if (fFlags & RTSEMWAIT_FLAGS_MILLISECS)
389 {
390 if ( sizeof(TsTimeout.tv_sec) >= sizeof(uint64_t)
391 || uTimeout < (uint64_t)UINT32_MAX * RT_MS_1SEC)
392 {
393 TsTimeout.tv_sec = uTimeout / RT_MS_1SEC;
394 TsTimeout.tv_nsec = (uTimeout % RT_MS_1SEC) & RT_NS_1MS;
395 uAbsTimeout *= RT_NS_1MS;
396 }
397 else
398 return rtSemEventLinuxWaitIndefinite(pThis, fFlags, pSrcPos);
399 }
400 else
401 {
402 Assert(fFlags & RTSEMWAIT_FLAGS_NANOSECS);
403 if ( sizeof(TsTimeout.tv_sec) >= sizeof(uint64_t)
404 || uTimeout < (uint64_t)UINT32_MAX * RT_NS_1SEC)
405 {
406 TsTimeout.tv_sec = uTimeout / RT_NS_1SEC;
407 TsTimeout.tv_nsec = uTimeout % RT_NS_1SEC;
408 }
409 else
410 return rtSemEventLinuxWaitIndefinite(pThis, fFlags, pSrcPos);
411 }
412
413 if (fFlags & RTSEMWAIT_FLAGS_RESUME)
414 uAbsTimeout += RTTimeNanoTS();
415
416 iWaitOp = FUTEX_WAIT;
417 uWaitVal3 = 0;
418 }
419 else
420 {
421 /* Absolute deadline: */
422 Assert(fFlags & RTSEMWAIT_FLAGS_ABSOLUTE);
423 if (g_fCanUseWaitBitSet == true)
424 {
425 if (fFlags & RTSEMWAIT_FLAGS_MILLISECS)
426 {
427 if ( sizeof(TsTimeout.tv_sec) >= sizeof(uint64_t)
428 || uTimeout < (uint64_t)UINT32_MAX * RT_MS_1SEC)
429 {
430 TsTimeout.tv_sec = uTimeout / RT_MS_1SEC;
431 TsTimeout.tv_nsec = (uTimeout % RT_MS_1SEC) & RT_NS_1MS;
432 }
433 else
434 return rtSemEventLinuxWaitIndefinite(pThis, fFlags, pSrcPos);
435 }
436 else
437 {
438 Assert(fFlags & RTSEMWAIT_FLAGS_NANOSECS);
439 if ( sizeof(TsTimeout.tv_sec) >= sizeof(uint64_t)
440 || uTimeout < (uint64_t)UINT32_MAX * RT_NS_1SEC)
441 {
442 TsTimeout.tv_sec = uTimeout / RT_NS_1SEC;
443 TsTimeout.tv_nsec = uTimeout % RT_NS_1SEC;
444 }
445 else
446 return rtSemEventLinuxWaitIndefinite(pThis, fFlags, pSrcPos);
447 }
448 iWaitOp = FUTEX_WAIT_BITSET;
449 uWaitVal3 = UINT32_MAX;
450 }
451 else
452 {
453 /* Recalculate it as an relative timeout: */
454 if (fFlags & RTSEMWAIT_FLAGS_MILLISECS)
455 {
456 if (uTimeout < UINT64_MAX / RT_NS_1MS)
457 uAbsTimeout = uTimeout *= RT_NS_1MS;
458 else
459 return rtSemEventLinuxWaitIndefinite(pThis, fFlags, pSrcPos);
460 }
461
462 uint64_t const u64Now = RTTimeNanoTS();
463 if (u64Now < uTimeout)
464 uTimeout -= u64Now;
465 else
466 return rtSemEventLinuxWaitPoll(pThis);
467
468 if ( sizeof(TsTimeout.tv_sec) >= sizeof(uint64_t)
469 || uTimeout < (uint64_t)UINT32_MAX * RT_NS_1SEC)
470 {
471 TsTimeout.tv_sec = uTimeout / RT_NS_1SEC;
472 TsTimeout.tv_nsec = uTimeout % RT_NS_1SEC;
473 }
474 else
475 return rtSemEventLinuxWaitIndefinite(pThis, fFlags, pSrcPos);
476
477 iWaitOp = FUTEX_WAIT;
478 uWaitVal3 = 0;
479 }
480 }
481
482 /*
483 * Quickly check whether it's signaled and there are no other waiters.
484 */
485 uint32_t cWaiters = ASMAtomicIncS32(&pThis->cWaiters);
486 if ( cWaiters == 1
487 && ASMAtomicCmpXchgU32(&pThis->fSignalled, 0, 1))
488 {
489 ASMAtomicDecS32(&pThis->cWaiters);
490 return VINF_SUCCESS;
491 }
492
493 /*
494 * The wait loop.
495 */
496#ifdef RTSEMEVENT_STRICT
497 RTTHREAD hThreadSelf = !(pThis->fFlags & RTSEMEVENT_FLAGS_BOOTSTRAP_HACK)
498 ? RTThreadSelfAutoAdopt()
499 : RTThreadSelf();
500#else
501 RTTHREAD hThreadSelf = RTThreadSelf();
502#endif
503 int rc = VINF_SUCCESS;
504 for (;;)
505 {
506#ifdef RTSEMEVENT_STRICT
507 if (pThis->fEverHadSignallers)
508 {
509 rc = RTLockValidatorRecSharedCheckBlocking(&pThis->Signallers, hThreadSelf, pSrcPos, false,
510 iWaitOp == FUTEX_WAIT ? uTimeout / RT_NS_1MS : RT_MS_1HOUR /*whatever*/,
511 RTTHREADSTATE_EVENT, true);
512 if (RT_FAILURE(rc))
513 break;
514 }
515#endif
516 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_EVENT, true);
517 long lrc = sys_futex(&pThis->fSignalled, iWaitOp, 0, &TsTimeout, NULL, uWaitVal3);
518 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_EVENT);
519 if (RT_UNLIKELY(pThis->iMagic != RTSEMEVENT_MAGIC))
520 {
521 rc = VERR_SEM_DESTROYED;
522 break;
523 }
524
525 if (RT_LIKELY(lrc == 0 || lrc == -EWOULDBLOCK))
526 {
527 /* successful wakeup or fSignalled > 0 in the meantime */
528 if (ASMAtomicCmpXchgU32(&pThis->fSignalled, 0, 1))
529 break;
530 }
531 else if (lrc == -ETIMEDOUT)
532 {
533 rc = VERR_TIMEOUT;
534 break;
535 }
536 else if (lrc == -EINTR)
537 {
538 if (fFlags & RTSEMWAIT_FLAGS_NORESUME)
539 {
540 rc = VERR_INTERRUPTED;
541 break;
542 }
543 }
544 else
545 {
546 /* this shouldn't happen! */
547 AssertMsgFailed(("rc=%ld errno=%d\n", lrc, errno));
548 rc = RTErrConvertFromErrno(lrc);
549 break;
550 }
551
552 /* adjust the relative timeout */
553 if (iWaitOp == FUTEX_WAIT)
554 {
555 int64_t i64Diff = uAbsTimeout - RTTimeSystemNanoTS();
556 if (i64Diff < 1000)
557 {
558 rc = VERR_TIMEOUT;
559 break;
560 }
561 TsTimeout.tv_sec = (uint64_t)i64Diff / RT_NS_1SEC;
562 TsTimeout.tv_nsec = (uint64_t)i64Diff % RT_NS_1SEC;
563 }
564 }
565
566 ASMAtomicDecS32(&pThis->cWaiters);
567 return rc;
568}
569
570
571/**
572 * Internal wait worker function.
573 */
574DECLINLINE(int) rtSemEventLinuxWait(RTSEMEVENT hEventSem, uint32_t fFlags, uint64_t uTimeout, PCRTLOCKVALSRCPOS pSrcPos)
575{
576 /*
577 * Validate input.
578 */
579 struct RTSEMEVENTINTERNAL *pThis = hEventSem;
580 AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
581 uint32_t fSignalled = pThis->fSignalled;
582 AssertReturn(fSignalled == false || fSignalled == true, VERR_INVALID_HANDLE);
583 AssertReturn(RTSEMWAIT_FLAGS_ARE_VALID(fFlags), VERR_INVALID_PARAMETER);
584
585 /*
586 * Timed or indefinite wait?
587 */
588 if (fFlags & RTSEMWAIT_FLAGS_INDEFINITE)
589 return rtSemEventLinuxWaitIndefinite(pThis, fFlags, pSrcPos);
590 return rtSemEventLinuxWaitTimed(hEventSem, fFlags, uTimeout, pSrcPos);
591}
592
593
594RTDECL(int) RTSemEventWait(RTSEMEVENT hEventSem, RTMSINTERVAL cMillies)
595{
596 int rc;
597#ifndef RTSEMEVENT_STRICT
598 if (cMillies == RT_INDEFINITE_WAIT)
599 rc = rtSemEventLinuxWait(hEventSem, RTSEMWAIT_FLAGS_RESUME | RTSEMWAIT_FLAGS_INDEFINITE, 0, NULL);
600 else
601 rc = rtSemEventLinuxWait(hEventSem, RTSEMWAIT_FLAGS_RESUME | RTSEMWAIT_FLAGS_RELATIVE | RTSEMWAIT_FLAGS_MILLISECS,
602 cMillies, NULL);
603#else
604 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
605 if (cMillies == RT_INDEFINITE_WAIT)
606 rc = rtSemEventLinuxWait(hEventSem, RTSEMWAIT_FLAGS_RESUME | RTSEMWAIT_FLAGS_INDEFINITE, 0, &SrcPos);
607 else
608 rc = rtSemEventLinuxWait(hEventSem, RTSEMWAIT_FLAGS_RESUME | RTSEMWAIT_FLAGS_RELATIVE | RTSEMWAIT_FLAGS_MILLISECS,
609 cMillies, &SrcPos);
610#endif
611 Assert(rc != VERR_INTERRUPTED);
612 return rc;
613}
614
615
616RTDECL(int) RTSemEventWaitNoResume(RTSEMEVENT hEventSem, RTMSINTERVAL cMillies)
617{
618 int rc;
619#ifndef RTSEMEVENT_STRICT
620 if (cMillies == RT_INDEFINITE_WAIT)
621 rc = rtSemEventLinuxWait(hEventSem, RTSEMWAIT_FLAGS_NORESUME | RTSEMWAIT_FLAGS_INDEFINITE, 0, NULL);
622 else
623 rc = rtSemEventLinuxWait(hEventSem, RTSEMWAIT_FLAGS_NORESUME | RTSEMWAIT_FLAGS_RELATIVE | RTSEMWAIT_FLAGS_MILLISECS,
624 cMillies, NULL);
625#else
626 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
627 if (cMillies == RT_INDEFINITE_WAIT)
628 rc = rtSemEventLinuxWait(hEventSem, RTSEMWAIT_FLAGS_NORESUME | RTSEMWAIT_FLAGS_INDEFINITE, 0, &SrcPos);
629 else
630 rc = rtSemEventLinuxWait(hEventSem, RTSEMWAIT_FLAGS_NORESUME | RTSEMWAIT_FLAGS_RELATIVE | RTSEMWAIT_FLAGS_MILLISECS,
631 cMillies, &SrcPos);
632#endif
633 Assert(rc != VERR_INTERRUPTED);
634 return rc;
635}
636
637
638RTDECL(int) RTSemEventWaitEx(RTSEMEVENT hEventSem, uint32_t fFlags, uint64_t uTimeout)
639{
640#ifndef RTSEMEVENT_STRICT
641 return rtSemEventLinuxWait(hEventSem, fFlags, uTimeout, NULL);
642#else
643 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
644 return rtSemEventLinuxWait(hEventSem, fFlags, uTimeout, &SrcPos);
645#endif
646}
647
648
/**
 * Debug variant of RTSemEventWaitEx that records the caller's source position
 * for the lock validator.
 */
RTDECL(int) RTSemEventWaitExDebug(RTSEMEVENT hEventSem, uint32_t fFlags, uint64_t uTimeout,
                                  RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
    RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
    return rtSemEventLinuxWait(hEventSem, fFlags, uTimeout, &SrcPos);
}
655
656
/**
 * Returns the timeout resolution of the wait APIs in nanoseconds.
 */
RTDECL(uint32_t) RTSemEventGetResolution(void)
{
    /** @todo we have 1ns parameter resolution, but need to verify that this is what
     *        the kernel actually will use when setting the timer.  Most likely
     *        it's rounded a little, but hopefully not to a multiple of HZ. */
    return 1;
}
664
665
/**
 * Declares hThread the sole thread allowed to signal the semaphore (lock
 * validator bookkeeping; no-op in non-strict builds).
 */
RTDECL(void) RTSemEventSetSignaller(RTSEMEVENT hEventSem, RTTHREAD hThread)
{
#ifdef RTSEMEVENT_STRICT
    struct RTSEMEVENTINTERNAL *pThis = hEventSem;
    AssertPtrReturnVoid(pThis);
    AssertReturnVoid(pThis->iMagic == RTSEMEVENT_MAGIC);

    /* Enables the signaller checks in the signal/wait paths. */
    ASMAtomicWriteBool(&pThis->fEverHadSignallers, true);
    RTLockValidatorRecSharedResetOwner(&pThis->Signallers, hThread, NULL);
#else
    RT_NOREF(hEventSem, hThread);
#endif
}
679
680
/**
 * Adds hThread to the set of threads allowed to signal the semaphore (lock
 * validator bookkeeping; no-op in non-strict builds).
 */
RTDECL(void) RTSemEventAddSignaller(RTSEMEVENT hEventSem, RTTHREAD hThread)
{
#ifdef RTSEMEVENT_STRICT
    struct RTSEMEVENTINTERNAL *pThis = hEventSem;
    AssertPtrReturnVoid(pThis);
    AssertReturnVoid(pThis->iMagic == RTSEMEVENT_MAGIC);

    /* Enables the signaller checks in the signal/wait paths. */
    ASMAtomicWriteBool(&pThis->fEverHadSignallers, true);
    RTLockValidatorRecSharedAddOwner(&pThis->Signallers, hThread, NULL);
#else
    RT_NOREF(hEventSem, hThread);
#endif
}
694
695
/**
 * Removes hThread from the set of threads allowed to signal the semaphore
 * (lock validator bookkeeping; no-op in non-strict builds).
 */
RTDECL(void) RTSemEventRemoveSignaller(RTSEMEVENT hEventSem, RTTHREAD hThread)
{
#ifdef RTSEMEVENT_STRICT
    struct RTSEMEVENTINTERNAL *pThis = hEventSem;
    AssertPtrReturnVoid(pThis);
    AssertReturnVoid(pThis->iMagic == RTSEMEVENT_MAGIC);

    RTLockValidatorRecSharedRemoveOwner(&pThis->Signallers, hThread);
#else
    RT_NOREF(hEventSem, hThread);
#endif
}
708
709#endif /* glibc < 2.6 || IPRT_WITH_FUTEX_BASED_SEMS */
710
注意: 瀏覽 TracBrowser 來幫助您使用儲存庫瀏覽器

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette