VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/nt/timer-r0drv-nt.cpp@ 92857

最後變更 在這個檔案從92857是 92857,由 vboxsync 提交於 3 年 前

IPRT/timer-r0drv-nt.cpp: Implemented RTTimerChangeInterval, improved high-res accuracy by using KeQueryInterruptTimePrecise, fixed potentially (unlikely) incorrect master DPC pointer when rescheduling omni timers, and adjusted the catchup logic.

  • 屬性 svn:eol-style 設為 native
  • 屬性 svn:keywords 設為 Author Date Id Revision
檔案大小: 34.8 KB
 
1/* $Id: timer-r0drv-nt.cpp 92857 2021-12-10 00:43:03Z vboxsync $ */
2/** @file
3 * IPRT - Timers, Ring-0 Driver, NT.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#include "the-nt-kernel.h"
32
33#include <iprt/timer.h>
34#include <iprt/mp.h>
35#include <iprt/cpuset.h>
36#include <iprt/err.h>
37#include <iprt/asm.h>
38#include <iprt/assert.h>
39#include <iprt/mem.h>
40#include <iprt/thread.h>
41
42#include "internal-r0drv-nt.h"
43#include "internal/magics.h"
44
45
46/*********************************************************************************************************************************
47* Defined Constants And Macros *
48*********************************************************************************************************************************/
49/** This seems to provide better accuracy. */
50#define RTR0TIMER_NT_MANUAL_RE_ARM 1
51
52#if !defined(IN_GUEST) || defined(DOXYGEN_RUNNING)
/** Use the high resolution timer API introduced with Windows 8.1. */
54# define RTR0TIMER_NT_HIGH_RES 1
55#endif
56
57
58/*********************************************************************************************************************************
59* Structures and Typedefs *
60*********************************************************************************************************************************/
/**
 * A sub timer structure.
 *
 * This is used for keeping the per-cpu tick count and DPC object.  A plain
 * timer has exactly one of these; an omni timer has one per possible CPU.
 */
typedef struct RTTIMERNTSUBTIMER
{
    /** The tick counter (incremented each time the callback fires on this CPU). */
    uint64_t                iTick;
    /** Pointer to the parent timer. */
    PRTTIMER                pParent;
    /** Thread actively executing the worker function, NIL if inactive. */
    RTNATIVETHREAD volatile hActiveThread;
    /** The NT DPC object used to run the callback on the right CPU. */
    KDPC                    NtDpc;
} RTTIMERNTSUBTIMER;
/** Pointer to a NT sub-timer structure. */
typedef RTTIMERNTSUBTIMER *PRTTIMERNTSUBTIMER;
79
/**
 * The internal representation of an NT timer handle.
 */
typedef struct RTTIMER
{
    /** Magic.
     * This is RTTIMER_MAGIC, but changes to something else before the timer
     * is destroyed to indicate clearly that thread should exit. */
    uint32_t volatile       u32Magic;
    /** Suspend count down for single shot omni timers. */
    int32_t volatile        cOmniSuspendCountDown;
    /** Flag indicating the timer is suspended. */
    bool volatile           fSuspended;
    /** Whether the timer must run on one specific CPU or not. */
    bool                    fSpecificCpu;
    /** Whether the timer must run on all CPUs or not. */
    bool                    fOmniTimer;
    /** The CPU it must run on if fSpecificCpu is set.
     * The master CPU for an omni-timer. */
    RTCPUID                 idCpu;
    /** Callback. */
    PFNRTTIMER              pfnTimer;
    /** User argument. */
    void                   *pvUser;

    /** @name Periodic scheduling / RTTimerChangeInterval.
     * @{ */
    /** Spinlock protecting the u64NanoInterval, iMasterTick, uNtStartTime,
     * uNtDueTime and (at least for updating) fSuspended. */
    KSPIN_LOCK              Spinlock;
    /** The timer interval in nanoseconds.  0 if one-shot. */
    uint64_t volatile       u64NanoInterval;
    /** The current master tick.  This does not necessarily follow that of
     * the subtimer, as RTTimerChangeInterval may cause it to reset. */
    uint64_t volatile       iMasterTick;
#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
    /** The desired NT time of the first tick. */
    uint64_t volatile       uNtStartTime;
    /** The current due time (absolute interrupt time). */
    uint64_t volatile       uNtDueTime;
#endif
    /** @} */

    /** The NT timer object. */
    KTIMER                  NtTimer;
#ifdef RTR0TIMER_NT_HIGH_RES
    /** High resolution timer.  If not NULL, this must be used instead of NtTimer. */
    PEX_TIMER               pHighResTimer;
#endif
    /** The number of sub-timers. */
    RTCPUID                 cSubTimers;
    /** Sub-timers.
     * Normally there is just one, but for RTTIMER_FLAGS_CPU_ALL this will contain
     * an entry for all possible cpus.  In that case the index will be the same as
     * for the RTCpuSet. */
    RTTIMERNTSUBTIMER       aSubTimers[1];
} RTTIMER;
137
138
139#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
140
141/**
142 * Get current NT interrupt time.
143 * @return NT interrupt time
144 */
145static uint64_t rtTimerNtQueryInterruptTime(void)
146{
147# ifdef RT_ARCH_AMD64
148 return KeQueryInterruptTime(); /* macro */
149# else
150 if (g_pfnrtKeQueryInterruptTime)
151 return g_pfnrtKeQueryInterruptTime();
152
153 /* NT4 */
154 ULARGE_INTEGER InterruptTime;
155 do
156 {
157 InterruptTime.HighPart = ((KUSER_SHARED_DATA volatile *)SharedUserData)->InterruptTime.High1Time;
158 InterruptTime.LowPart = ((KUSER_SHARED_DATA volatile *)SharedUserData)->InterruptTime.LowPart;
159 } while (((KUSER_SHARED_DATA volatile *)SharedUserData)->InterruptTime.High2Time != (LONG)InterruptTime.HighPart);
160 return InterruptTime.QuadPart;
161# endif
162}
163
164/**
165 * Get current NT interrupt time, high resolution variant.
166 * @return High resolution NT interrupt time
167 */
168static uint64_t rtTimerNtQueryInterruptTimeHighRes(void)
169{
170 if (g_pfnrtKeQueryInterruptTimePrecise)
171 {
172 ULONG64 uQpcIgnored;
173 return g_pfnrtKeQueryInterruptTimePrecise(&uQpcIgnored);
174 }
175 return rtTimerNtQueryInterruptTime();
176}
177
178#endif /* RTR0TIMER_NT_MANUAL_RE_ARM */
179
180
181/**
182 * Worker for rtTimerNtRearmInternval that calculates the next due time.
183 *
184 * @returns The next due time (relative, so always negative).
185 * @param uNtNow The current time.
186 * @param uNtStartTime The start time of the timer.
187 * @param iTick The next tick number (zero being @a uNtStartTime).
188 * @param cNtInterval The timer interval in NT ticks.
189 * @param cNtNegDueSaftyMargin The due time safety margin in negative NT
190 * ticks.
191 * @param cNtMinNegInterval The minium interval to use when in catchup
192 * mode, also negative NT ticks.
193 */
194DECLINLINE(int64_t) rtTimerNtCalcNextDueTime(uint64_t uNtNow, uint64_t uNtStartTime, uint64_t iTick, uint64_t cNtInterval,
195 int32_t const cNtNegDueSaftyMargin, int32_t const cNtMinNegInterval)
196{
197 /* Calculate the actual time elapsed since timer start: */
198 int64_t iDueTime = uNtNow - uNtStartTime;
199 if (iDueTime < 0)
200 iDueTime = 0;
201
202 /* Now calculate the nominal time since timer start for the next tick: */
203 uint64_t const uNtNextRelStart = iTick * cNtInterval;
204
205 /* Calulate now much time we have to the next tick: */
206 iDueTime -= uNtNextRelStart;
207
208 /* If we haven't already overshot the due time, including some safety margin, we're good: */
209 if (iDueTime < cNtNegDueSaftyMargin)
210 return iDueTime;
211
212 /* Okay, we've overshot it and are in catchup mode: */
213 if (iDueTime < (int64_t)cNtInterval)
214 iDueTime = -(int64_t)(cNtInterval / 2); /* double time */
215 else if (iDueTime < (int64_t)(cNtInterval * 4))
216 iDueTime = -(int64_t)(cNtInterval / 4); /* quadruple time */
217 else
218 return cNtMinNegInterval;
219
220 /* Make sure we don't try intervals smaller than the minimum specified by the caller: */
221 if (iDueTime > cNtMinNegInterval)
222 iDueTime = cNtMinNegInterval;
223 return iDueTime;
224}
225
226/**
227 * Manually re-arms an internval timer.
228 *
229 * Turns out NT doesn't necessarily do a very good job at re-arming timers
230 * accurately, this is in part due to KeSetTimerEx API taking the interval in
231 * milliseconds.
232 *
233 * @param pTimer The timer.
234 * @param pMasterDpc The master timer DPC for passing to KeSetTimerEx
235 * in low-resolution mode. Ignored for high-res.
236 */
237static void rtTimerNtRearmInternval(PRTTIMER pTimer, PKDPC pMasterDpc)
238{
239#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
240 Assert(pTimer->u64NanoInterval);
241
242 /*
243 * For simplicity we acquire the spinlock for the whole operation.
244 * This should be perfectly fine as it doesn't change the IRQL.
245 */
246 Assert(KeGetCurrentIrql() >= DISPATCH_LEVEL);
247 KeAcquireSpinLockAtDpcLevel(&pTimer->Spinlock);
248
249 /*
250 * Make sure it wasn't suspended
251 */
252 if (!ASMAtomicUoReadBool(&pTimer->fSuspended))
253 {
254 uint64_t const cNtInterval = ASMAtomicUoReadU64(&pTimer->u64NanoInterval) / 100;
255 uint64_t const uNtStartTime = ASMAtomicUoReadU64(&pTimer->uNtStartTime);
256 uint64_t const iTick = ++pTimer->iMasterTick;
257
258 /*
259 * Calculate the deadline for the next timer tick and arm the timer.
260 * We always use a relative tick, i.e. negative DueTime value. This is
261 * crucial for the the high resolution API as it will bugcheck otherwise.
262 */
263 int64_t iDueTime;
264 uint64_t uNtNow;
265# ifdef RTR0TIMER_NT_HIGH_RES
266 if (pTimer->pHighResTimer)
267 {
268 /* Must use highres time here. */
269 uNtNow = rtTimerNtQueryInterruptTimeHighRes();
270 iDueTime = rtTimerNtCalcNextDueTime(uNtNow, uNtStartTime, iTick, cNtInterval,
271 -100 /* 10us safety */, -2000 /* 200us min interval*/);
272 g_pfnrtExSetTimer(pTimer->pHighResTimer, iDueTime, 0, NULL);
273 }
274 else
275# endif
276 {
277 /* Expect interrupt time and timers to expire at the same time, so
278 don't use high res time api here. */
279 uNtNow = rtTimerNtQueryInterruptTime();
280 iDueTime = rtTimerNtCalcNextDueTime(uNtNow, uNtStartTime, iTick, cNtInterval,
281 -100 /* 10us safety */, -2500 /* 250us min interval*/); /** @todo use max interval here */
282 LARGE_INTEGER DueTime;
283 DueTime.QuadPart = iDueTime;
284 KeSetTimerEx(&pTimer->NtTimer, DueTime, 0, pMasterDpc);
285 }
286
287 pTimer->uNtDueTime = uNtNow + -iDueTime;
288 }
289
290 KeReleaseSpinLockFromDpcLevel(&pTimer->Spinlock);
291#else
292 RT_NOREF(pTimer, iTick, pMasterDpc);
293#endif
294}
295
296
/**
 * Common timer callback worker for the non-omni timers.
 *
 * Does the user callout, maintains the tick counter and suspended state, and
 * re-arms the timer when it is periodic.  (The original doc comment claiming
 * HRTIMER_NORESTART/HRTIMER_RESTART returns was a Linux leftover - this
 * returns nothing.)
 *
 * @param   pTimer  The timer.
 */
static void rtTimerNtSimpleCallbackWorker(PRTTIMER pTimer)
{
    /*
     * Check that we haven't been suspended before doing the callout.
     */
    if (   !ASMAtomicUoReadBool(&pTimer->fSuspended)
        && pTimer->u32Magic == RTTIMER_MAGIC)
    {
        /* Record which thread is running the callback. */
        ASMAtomicWriteHandle(&pTimer->aSubTimers[0].hActiveThread, RTThreadNativeSelf());

        /* A zero interval means one-shot: mark it suspended before the callout. */
        if (!pTimer->u64NanoInterval)
            ASMAtomicWriteBool(&pTimer->fSuspended, true);
        uint64_t iTick = ++pTimer->aSubTimers[0].iTick;

        pTimer->pfnTimer(pTimer, pTimer->pvUser, iTick);

        /* We re-arm the timer after calling pfnTimer, as it may stop the timer
           or change the interval, which would mean doing extra work. */
        if (!pTimer->fSuspended && pTimer->u64NanoInterval)
            rtTimerNtRearmInternval(pTimer, &pTimer->aSubTimers[0].NtDpc);

        ASMAtomicWriteHandle(&pTimer->aSubTimers[0].hActiveThread, NIL_RTNATIVETHREAD);
    }
}
327
328
329/**
330 * Timer callback function for the low-resolution non-omni timers.
331 *
332 * @param pDpc Pointer to the DPC.
333 * @param pvUser Pointer to our internal timer structure.
334 * @param SystemArgument1 Some system argument.
335 * @param SystemArgument2 Some system argument.
336 */
337static void _stdcall rtTimerNtSimpleCallback(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
338{
339 PRTTIMER pTimer = (PRTTIMER)pvUser;
340 AssertPtr(pTimer);
341#ifdef RT_STRICT
342 if (KeGetCurrentIrql() < DISPATCH_LEVEL)
343 RTAssertMsg2Weak("rtTimerNtSimpleCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
344#endif
345
346 rtTimerNtSimpleCallbackWorker(pTimer);
347
348 RT_NOREF(pDpc, SystemArgument1, SystemArgument2);
349}
350
351
#ifdef RTR0TIMER_NT_HIGH_RES
/**
 * Timer callback function for the high-resolution non-omni timers.
 *
 * Runs the common worker when on the right CPU, otherwise queues the DPC so
 * the work (including re-arming) happens on the desired CPU.
 *
 * @param   pExTimer    The windows timer.
 * @param   pvUser      Pointer to our internal timer structure.
 */
static void _stdcall rtTimerNtHighResSimpleCallback(PEX_TIMER pExTimer, void *pvUser)
{
    RT_NOREF(pExTimer);

    PRTTIMER const pThis = (PRTTIMER)pvUser;
    AssertPtr(pThis);
    Assert(pThis->pHighResTimer == pExTimer);
# ifdef RT_STRICT
    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
        RTAssertMsg2Weak("rtTimerNtHighResSimpleCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
# endif

    /* If we're not on the desired CPU, trigger the DPC instead; it will
       re-arm the timer and such. */
    if (   pThis->fSpecificCpu
        && pThis->idCpu != RTMpCpuId())
        KeInsertQueueDpc(&pThis->aSubTimers[0].NtDpc, 0, 0);
    else
        rtTimerNtSimpleCallbackWorker(pThis);
}
#endif /* RTR0TIMER_NT_HIGH_RES */
380
381
/**
 * The slave DPC callback for an omni timer.
 *
 * Runs the user callback on this CPU and bumps the per-CPU tick counter.
 * For one-shot omni timers it also decrements cOmniSuspendCountDown, marking
 * the whole timer suspended when the last CPU finishes.
 *
 * @param   pDpc                The DPC object.
 * @param   pvUser              Pointer to the sub-timer.
 * @param   SystemArgument1     Some system stuff.
 * @param   SystemArgument2     Some system stuff.
 */
static void _stdcall rtTimerNtOmniSlaveCallback(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
{
    PRTTIMERNTSUBTIMER pSubTimer = (PRTTIMERNTSUBTIMER)pvUser;
    PRTTIMER pTimer = pSubTimer->pParent;

    AssertPtr(pTimer);
#ifdef RT_STRICT
    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
        RTAssertMsg2Weak("rtTimerNtOmniSlaveCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
    /* Each sub-timer's DPC is targeted at its own CPU; complain if mismatched. */
    int iCpuSelf = RTMpCpuIdToSetIndex(RTMpCpuId());
    if (pSubTimer - &pTimer->aSubTimers[0] != iCpuSelf)
        RTAssertMsg2Weak("rtTimerNtOmniSlaveCallback: iCpuSelf=%d pSubTimer=%p / %d\n", iCpuSelf, pSubTimer, pSubTimer - &pTimer->aSubTimers[0]);
#endif

    /*
     * Check that we haven't been suspended before doing the callout.
     */
    if (   !ASMAtomicUoReadBool(&pTimer->fSuspended)
        && pTimer->u32Magic == RTTIMER_MAGIC)
    {
        ASMAtomicWriteHandle(&pSubTimer->hActiveThread, RTThreadNativeSelf());

        /* One-shot (zero interval): the last CPU to get here flags the timer suspended. */
        if (!pTimer->u64NanoInterval)
            if (ASMAtomicDecS32(&pTimer->cOmniSuspendCountDown) <= 0)
                ASMAtomicWriteBool(&pTimer->fSuspended, true);

        pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);

        ASMAtomicWriteHandle(&pSubTimer->hActiveThread, NIL_RTNATIVETHREAD);
    }

    NOREF(pDpc); NOREF(SystemArgument1); NOREF(SystemArgument2);
}
423
424
/**
 * Common timer callback worker for omni-timers.
 *
 * This is responsible for queueing the DPCs for the other CPUs and
 * performing the callback on the CPU on which it is called.
 *
 * @param   pTimer      The timer.
 * @param   pSubTimer   The sub-timer of the calling CPU.
 * @param   iCpuSelf    The set index of the CPU we're running on.
 */
static void rtTimerNtOmniMasterCallbackWorker(PRTTIMER pTimer, PRTTIMERNTSUBTIMER pSubTimer, int iCpuSelf)
{
    /*
     * Check that we haven't been suspended before scheduling the other DPCs
     * and doing the callout.
     */
    if (   !ASMAtomicUoReadBool(&pTimer->fSuspended)
        && pTimer->u32Magic == RTTIMER_MAGIC)
    {
        RTCPUSET OnlineSet;
        RTMpGetOnlineSet(&OnlineSet);

        ASMAtomicWriteHandle(&pSubTimer->hActiveThread, RTThreadNativeSelf());

        if (pTimer->u64NanoInterval)
        {
            /*
             * Recurring timer: fan the slave DPCs out to every other online
             * CPU, then do our own callout.
             */
            for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
                if (   RTCpuSetIsMemberByIndex(&OnlineSet, iCpu)
                    && iCpuSelf != iCpu)
                    KeInsertQueueDpc(&pTimer->aSubTimers[iCpu].NtDpc, 0, 0);

            pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);

            /* We re-arm the timer after calling pfnTimer, as it may stop the timer
               or change the interval, which would mean doing extra work. */
            if (!pTimer->fSuspended && pTimer->u64NanoInterval)
                rtTimerNtRearmInternval(pTimer, &pSubTimer->NtDpc);
        }
        else
        {
            /*
             * Single shot timers get complicated wrt fSuspended maintenance:
             * count the online CPUs into cOmniSuspendCountDown up front, then
             * un-count any CPU whose DPC could not be queued.  Each CPU
             * (including us, below) decrements it once done; whoever reaches
             * zero flags the timer suspended.
             */
            uint32_t cCpus = 0;
            for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
                if (RTCpuSetIsMemberByIndex(&OnlineSet, iCpu))
                    cCpus++;
            ASMAtomicAddS32(&pTimer->cOmniSuspendCountDown, cCpus);

            for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
                if (   RTCpuSetIsMemberByIndex(&OnlineSet, iCpu)
                    && iCpuSelf != iCpu)
                    if (!KeInsertQueueDpc(&pTimer->aSubTimers[iCpu].NtDpc, 0, 0))
                        ASMAtomicDecS32(&pTimer->cOmniSuspendCountDown); /* already queued and counted. */

            if (ASMAtomicDecS32(&pTimer->cOmniSuspendCountDown) <= 0)
                ASMAtomicWriteBool(&pTimer->fSuspended, true);

            pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
        }

        ASMAtomicWriteHandle(&pSubTimer->hActiveThread, NIL_RTNATIVETHREAD);
    }
}
492
493
/**
 * The timer callback for an omni-timer, low-resolution.
 *
 * Validates the context (IRQL, CPU) and defers to
 * rtTimerNtOmniMasterCallbackWorker.
 *
 * @param   pDpc                The DPC object.
 * @param   pvUser              Pointer to the sub-timer.
 * @param   SystemArgument1     Some system stuff.
 * @param   SystemArgument2     Some system stuff.
 */
static void _stdcall rtTimerNtOmniMasterCallback(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
{
    PRTTIMERNTSUBTIMER const pSubTimer = (PRTTIMERNTSUBTIMER)pvUser;
    PRTTIMER const           pTimer    = pSubTimer->pParent;
    RTCPUID                  idCpu     = RTMpCpuId();
    int const                iCpuSelf  = RTMpCpuIdToSetIndex(idCpu);

    AssertPtr(pTimer);
#ifdef RT_STRICT
    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
        RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
    /* We must be called on the master CPU or the tick variable goes south. */
    if (pSubTimer - &pTimer->aSubTimers[0] != iCpuSelf)
        RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: iCpuSelf=%d pSubTimer=%p / %d\n", iCpuSelf, pSubTimer, pSubTimer - &pTimer->aSubTimers[0]);
    if (pTimer->idCpu != idCpu)
        RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: pTimer->idCpu=%d vs idCpu=%d\n", pTimer->idCpu, idCpu);
#endif

    rtTimerNtOmniMasterCallbackWorker(pTimer, pSubTimer, iCpuSelf);

    RT_NOREF(pDpc, SystemArgument1, SystemArgument2);
}
524
525
#ifdef RTR0TIMER_NT_HIGH_RES
/**
 * The timer callback for a high-resolution omni-timer.
 *
 * Unlike the low-resolution case, the OS decides which CPU this is invoked
 * on; the sub-timer is picked by the current CPU's set index before deferring
 * to rtTimerNtOmniMasterCallbackWorker.
 *
 * @param   pExTimer    The windows timer.
 * @param   pvUser      Pointer to our internal timer structure.
 */
static void __stdcall rtTimerNtHighResOmniCallback(PEX_TIMER pExTimer, void *pvUser)
{
    PRTTIMER const           pTimer    = (PRTTIMER)pvUser;
    int const                iCpuSelf  = RTMpCpuIdToSetIndex(RTMpCpuId());
    PRTTIMERNTSUBTIMER const pSubTimer = &pTimer->aSubTimers[iCpuSelf];

    AssertPtr(pTimer);
    Assert(pTimer->pHighResTimer == pExTimer);
# ifdef RT_STRICT
    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
        RTAssertMsg2Weak("rtTimerNtHighResOmniCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
# endif

    rtTimerNtOmniMasterCallbackWorker(pTimer, pSubTimer, iCpuSelf);

    RT_NOREF(pExTimer);
}
#endif /* RTR0TIMER_NT_HIGH_RES */
551
552
RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
{
    /*
     * Validate.
     */
    AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
    AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);

    /*
     * The operation is protected by the spinlock.
     */
    KIRQL bSavedIrql;
    KeAcquireSpinLock(&pTimer->Spinlock, &bSavedIrql);

    /*
     * Check the state: must currently be suspended, and a CPU-specific timer
     * needs its CPU online.
     */
    if (ASMAtomicUoReadBool(&pTimer->fSuspended))
    { /* likely */ }
    else
    {
        KeReleaseSpinLock(&pTimer->Spinlock, bSavedIrql);
        return VERR_TIMER_ACTIVE;
    }
    if (   !pTimer->fSpecificCpu
        || RTMpIsCpuOnline(pTimer->idCpu))
    { /* likely */ }
    else
    {
        KeReleaseSpinLock(&pTimer->Spinlock, bSavedIrql);
        return VERR_CPU_OFFLINE;
    }

    /*
     * Do the starting.
     */
#ifndef RTR0TIMER_NT_MANUAL_RE_ARM
    /* Calculate the interval time: */
    uint64_t u64Interval = pTimer->u64NanoInterval / 1000000; /* This is ms, believe it or not. */
    ULONG ulInterval = (ULONG)u64Interval;
    if (ulInterval != u64Interval)
        ulInterval = MAXLONG;               /* clamp overflowing intervals */
    else if (!ulInterval && pTimer->u64NanoInterval)
        ulInterval = 1;                     /* sub-millisecond intervals round up to 1 ms */
#endif

    /* Translate u64First (nanoseconds) to a DueTime (negative => relative NT time): */
    LARGE_INTEGER DueTime;
    DueTime.QuadPart = -(int64_t)(u64First / 100); /* Relative, NT time. */
    if (!DueTime.QuadPart)
        DueTime.QuadPart = -10; /* 1us */

    /* Reset tick counters: */
    unsigned cSubTimers = pTimer->fOmniTimer ? pTimer->cSubTimers : 1;
    for (unsigned iCpu = 0; iCpu < cSubTimers; iCpu++)
        pTimer->aSubTimers[iCpu].iTick = 0;
    pTimer->iMasterTick = 0;

    /* Update timer state: */
#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
    /* Nominal time of the first tick, used by the manual re-arm logic. */
    pTimer->uNtStartTime = rtTimerNtQueryInterruptTime() + -DueTime.QuadPart;
#endif
    ASMAtomicWriteS32(&pTimer->cOmniSuspendCountDown, 0);
    ASMAtomicWriteBool(&pTimer->fSuspended, false);

    /*
     * Finally start the NT timer.
     *
     * We do this without holding the spinlock to err on the side of
     * caution in case ExSetTimer or KeSetTimerEx ever should have the idea
     * of running the callback before returning.
     */
    KeReleaseSpinLock(&pTimer->Spinlock, bSavedIrql);

#ifdef RTR0TIMER_NT_HIGH_RES
    if (pTimer->pHighResTimer)
    {
# ifdef RTR0TIMER_NT_MANUAL_RE_ARM
        g_pfnrtExSetTimer(pTimer->pHighResTimer, DueTime.QuadPart, 0, NULL);
# else
        g_pfnrtExSetTimer(pTimer->pHighResTimer, DueTime.QuadPart, RT_MIN(pTimer->u64NanoInterval / 100, MAXLONG), NULL);
# endif
    }
    else
#endif
    {
        /* For an omni timer the DPC of the master CPU drives the timer. */
        PKDPC const pMasterDpc = &pTimer->aSubTimers[pTimer->fOmniTimer ? RTMpCpuIdToSetIndex(pTimer->idCpu) : 0].NtDpc;
#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
        KeSetTimerEx(&pTimer->NtTimer, DueTime, 0, pMasterDpc);
#else
        KeSetTimerEx(&pTimer->NtTimer, DueTime, ulInterval, pMasterDpc);
#endif
    }
    return VINF_SUCCESS;
}
648
649
650/**
651 * Worker function that stops an active timer.
652 *
653 * Shared by RTTimerStop and RTTimerDestroy.
654 *
655 * @param pTimer The active timer.
656 */
657static int rtTimerNtStopWorker(PRTTIMER pTimer)
658{
659 /*
660 * Update the state from with the spinlock context.
661 */
662 KIRQL bSavedIrql;
663 KeAcquireSpinLock(&pTimer->Spinlock, &bSavedIrql);
664
665 bool const fWasSuspended = ASMAtomicXchgBool(&pTimer->fSuspended, true);
666
667 KeReleaseSpinLock(&pTimer->Spinlock, bSavedIrql);
668 if (!fWasSuspended)
669 {
670 /*
671 * We should cacnel the timer and dequeue DPCs.
672 */
673#ifdef RTR0TIMER_NT_HIGH_RES
674 if (pTimer->pHighResTimer)
675 g_pfnrtExCancelTimer(pTimer->pHighResTimer, NULL);
676 else
677#endif
678 KeCancelTimer(&pTimer->NtTimer);
679
680 for (RTCPUID iCpu = 0; iCpu < pTimer->cSubTimers; iCpu++)
681 KeRemoveQueueDpc(&pTimer->aSubTimers[iCpu].NtDpc);
682 return VINF_SUCCESS;
683 }
684 return VERR_TIMER_SUSPENDED;
685}
686
687
688RTDECL(int) RTTimerStop(PRTTIMER pTimer)
689{
690 /*
691 * Validate.
692 */
693 AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
694 AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
695
696 /*
697 * Call the worker we share with RTTimerDestroy.
698 */
699 return rtTimerNtStopWorker(pTimer);
700}
701
702
703RTDECL(int) RTTimerChangeInterval(PRTTIMER pTimer, uint64_t u64NanoInterval)
704{
705 AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
706 AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);
707
708 /*
709 * We do all the state changes while holding the spinlock.
710 */
711 int rc = VINF_SUCCESS;
712 KIRQL bSavedIrql;
713 KeAcquireSpinLock(&pTimer->Spinlock, &bSavedIrql);
714
715 /*
716 * When the timer isn't running, this is an simple job:
717 */
718 if (!ASMAtomicUoReadBool(&pTimer->fSuspended))
719 pTimer->u64NanoInterval = u64NanoInterval;
720 else
721 {
722 /*
723 * We only implement changing the interval in RTR0TIMER_NT_MANUAL_RE_ARM
724 * mode right now. We typically let the new interval take effect after
725 * the next timer callback, unless that's too far ahead.
726 */
727#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
728 pTimer->u64NanoInterval = u64NanoInterval;
729 pTimer->iMasterTick = 0;
730# ifdef RTR0TIMER_NT_HIGH_RES
731 uint64_t const uNtNow = pTimer->pHighResTimer ? rtTimerNtQueryInterruptTimeHighRes() : rtTimerNtQueryInterruptTime();
732# else
733 uint64_t const uNtNow = rtTimerNtQueryInterruptTime();
734# endif
735 if (uNtNow >= pTimer->uNtDueTime)
736 pTimer->uNtStartTime = uNtNow;
737 else
738 {
739 pTimer->uNtStartTime = pTimer->uNtDueTime;
740
741 /*
742 * Re-arm the timer if the next DueTime is both more than 1.25 new
743 * intervals and at least 0.5 ms ahead.
744 */
745 uint64_t cNtToNext = pTimer->uNtDueTime - uNtNow;
746 if ( cNtToNext >= RT_NS_1MS / 2 / 100 /* 0.5 ms */
747 && cNtToNext * 100 > u64NanoInterval + u64NanoInterval / 4)
748 {
749 pTimer->uNtStartTime = pTimer->uNtDueTime = uNtNow + u64NanoInterval / 100;
750# ifdef RTR0TIMER_NT_HIGH_RES
751 if (pTimer->pHighResTimer)
752 g_pfnrtExSetTimer(pTimer->pHighResTimer, -(int64_t)u64NanoInterval / 100, 0, NULL);
753 else
754# endif
755 {
756 LARGE_INTEGER DueTime;
757 DueTime.QuadPart = -(int64_t)u64NanoInterval / 100;
758 KeSetTimerEx(&pTimer->NtTimer, DueTime, 0,
759 &pTimer->aSubTimers[pTimer->fOmniTimer ? RTMpCpuIdToSetIndex(pTimer->idCpu) : 0].NtDpc);
760 }
761 }
762 }
763#else
764 rc = VERR_NOT_SUPPORTED;
765#endif
766 }
767
768 KeReleaseSpinLock(&pTimer->Spinlock, bSavedIrql);
769
770 return rc;
771}
772
773
RTDECL(int) RTTimerDestroy(PRTTIMER pTimer)
{
    /* It's ok to pass NULL pointer. */
    if (pTimer == /*NIL_RTTIMER*/ NULL)
        return VINF_SUCCESS;
    AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
    AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);

    /*
     * We do not support destroying a timer from the callback because it is
     * not 101% safe since we cannot flush DPCs.  Solaris has the same restriction.
     */
    AssertReturn(KeGetCurrentIrql() == PASSIVE_LEVEL, VERR_INVALID_CONTEXT);

    /*
     * Invalidate the timer, stop it if it's running and finally free up the memory.
     */
    ASMAtomicWriteU32(&pTimer->u32Magic, ~RTTIMER_MAGIC);
    rtTimerNtStopWorker(pTimer);

#ifdef RTR0TIMER_NT_HIGH_RES
    /*
     * Destroy the high-resolution timer before flushing DPCs.
     * (Waits for a pending callback to complete - fCancel & fWait.)
     */
    if (pTimer->pHighResTimer)
    {
        g_pfnrtExDeleteTimer(pTimer->pHighResTimer, TRUE /*fCancel*/, TRUE /*fWait*/, NULL);
        pTimer->pHighResTimer = NULL;
    }
#endif

    /*
     * Flush DPCs to be on the safe side.
     */
    if (g_pfnrtNtKeFlushQueuedDpcs)
        g_pfnrtNtKeFlushQueuedDpcs();

    RTMemFree(pTimer);

    return VINF_SUCCESS;
}
815
816
RTDECL(int) RTTimerCreateEx(PRTTIMER *ppTimer, uint64_t u64NanoInterval, uint32_t fFlags, PFNRTTIMER pfnTimer, void *pvUser)
{
    *ppTimer = NULL;

    /*
     * Validate flags.
     */
    if (!RTTIMER_FLAGS_ARE_VALID(fFlags))
        return VERR_INVALID_PARAMETER;
    if (   (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC)
        && (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL
        && !RTMpIsCpuPossible(RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK)))
        return VERR_CPU_NOT_FOUND;

    /*
     * Allocate the timer handler.  Omni timers get one sub-timer slot per
     * possible CPU.
     */
    RTCPUID cSubTimers = 1;
    if ((fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL)
    {
        cSubTimers = RTMpGetMaxCpuId() + 1;
        Assert(cSubTimers <= RTCPUSET_MAX_CPUS); /* On Windows we have a 1:1 relationship between cpuid and set index. */
    }

    PRTTIMER pTimer = (PRTTIMER)RTMemAllocZ(RT_UOFFSETOF_DYN(RTTIMER, aSubTimers[cSubTimers]));
    if (!pTimer)
        return VERR_NO_MEMORY;

    /*
     * Initialize it.
     *
     * Note! The difference between a SynchronizationTimer and a NotificationTimer
     *       (KeInitializeTimer) is, as far as I can gather, only that the former
     *       will wake up exactly one waiting thread and the latter will wake up
     *       everyone.  Since we don't do any waiting on the NtTimer, that is not
     *       relevant to us.
     */
    pTimer->u32Magic = RTTIMER_MAGIC;
    pTimer->cOmniSuspendCountDown = 0;
    pTimer->fSuspended = true;
    pTimer->fSpecificCpu = (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC) && (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL;
    pTimer->fOmniTimer = (fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL;
    pTimer->idCpu = pTimer->fSpecificCpu ? RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK) : NIL_RTCPUID;
    pTimer->cSubTimers = cSubTimers;
    pTimer->pfnTimer = pfnTimer;
    pTimer->pvUser = pvUser;
    KeInitializeSpinLock(&pTimer->Spinlock);
    pTimer->u64NanoInterval = u64NanoInterval;

    int rc = VINF_SUCCESS;
#ifdef RTR0TIMER_NT_HIGH_RES
    if (   (fFlags & RTTIMER_FLAGS_HIGH_RES)
        && RTTimerCanDoHighResolution())
    {
        pTimer->pHighResTimer = g_pfnrtExAllocateTimer(pTimer->fOmniTimer ? rtTimerNtHighResOmniCallback
                                                       : rtTimerNtHighResSimpleCallback, pTimer,
                                                       EX_TIMER_HIGH_RESOLUTION | EX_TIMER_NOTIFICATION);
        if (!pTimer->pHighResTimer)
            rc = VERR_OUT_OF_RESOURCES;
    }
    else
#endif
    {
        if (g_pfnrtKeInitializeTimerEx) /** @todo just call KeInitializeTimer. */
            g_pfnrtKeInitializeTimerEx(&pTimer->NtTimer, SynchronizationTimer);
        else
            KeInitializeTimer(&pTimer->NtTimer);
    }
    if (RT_SUCCESS(rc))
    {
        if (pTimer->fOmniTimer)
        {
            /*
             * Initialize the per-cpu "sub-timers", select the first online cpu to be
             * the master.  This ASSUMES that no cpus will ever go offline.
             *
             * Note! For the high-resolution scenario, all DPC callbacks are slaves as
             *       we have a dedicated timer callback, set above during allocation,
             *       and don't control which CPU it (rtTimerNtHighResOmniCallback) is
             *       called on.
             */
            pTimer->iMasterTick = 0;
            pTimer->idCpu       = NIL_RTCPUID;
            for (unsigned iCpu = 0; iCpu < cSubTimers && RT_SUCCESS(rc); iCpu++)
            {
                pTimer->aSubTimers[iCpu].iTick   = 0;
                pTimer->aSubTimers[iCpu].pParent = pTimer;

                if (   pTimer->idCpu == NIL_RTCPUID
                    && RTMpIsCpuOnline(RTMpCpuIdFromSetIndex(iCpu)))
                {
                    /* First online CPU becomes the master. */
                    pTimer->idCpu = RTMpCpuIdFromSetIndex(iCpu);
#ifdef RTR0TIMER_NT_HIGH_RES
                    if (pTimer->pHighResTimer)
                        KeInitializeDpc(&pTimer->aSubTimers[iCpu].NtDpc, rtTimerNtOmniSlaveCallback, &pTimer->aSubTimers[iCpu]);
                    else
#endif
                        KeInitializeDpc(&pTimer->aSubTimers[iCpu].NtDpc, rtTimerNtOmniMasterCallback, &pTimer->aSubTimers[iCpu]);
                }
                else
                    KeInitializeDpc(&pTimer->aSubTimers[iCpu].NtDpc, rtTimerNtOmniSlaveCallback, &pTimer->aSubTimers[iCpu]);
                if (g_pfnrtKeSetImportanceDpc)
                    g_pfnrtKeSetImportanceDpc(&pTimer->aSubTimers[iCpu].NtDpc, HighImportance);
                /* Pin each DPC to its CPU so the callbacks run where expected. */
                rc = rtMpNtSetTargetProcessorDpc(&pTimer->aSubTimers[iCpu].NtDpc, iCpu);
            }
            Assert(pTimer->idCpu != NIL_RTCPUID);
        }
        else
        {
            /*
             * Initialize the first "sub-timer", target the DPC on a specific processor
             * if requested to do so.
             */
            pTimer->iMasterTick            = 0;
            pTimer->aSubTimers[0].iTick    = 0;
            pTimer->aSubTimers[0].pParent  = pTimer;

            KeInitializeDpc(&pTimer->aSubTimers[0].NtDpc, rtTimerNtSimpleCallback, pTimer);
            if (g_pfnrtKeSetImportanceDpc)
                g_pfnrtKeSetImportanceDpc(&pTimer->aSubTimers[0].NtDpc, HighImportance);
            if (pTimer->fSpecificCpu)
                rc = rtMpNtSetTargetProcessorDpc(&pTimer->aSubTimers[0].NtDpc, (int)pTimer->idCpu);
        }
        if (RT_SUCCESS(rc))
        {
            *ppTimer = pTimer;
            return VINF_SUCCESS;
        }

#ifdef RTR0TIMER_NT_HIGH_RES
        /* Failure: undo the high-res timer allocation before freeing. */
        if (pTimer->pHighResTimer)
        {
            g_pfnrtExDeleteTimer(pTimer->pHighResTimer, FALSE, FALSE, NULL);
            pTimer->pHighResTimer = NULL;
        }
#endif
    }

    RTMemFree(pTimer);
    return rc;
}
958
959
960RTDECL(int) RTTimerRequestSystemGranularity(uint32_t u32Request, uint32_t *pu32Granted)
961{
962 if (!g_pfnrtNtExSetTimerResolution)
963 return VERR_NOT_SUPPORTED;
964
965 ULONG ulGranted = g_pfnrtNtExSetTimerResolution(u32Request / 100, TRUE);
966 if (pu32Granted)
967 *pu32Granted = ulGranted * 100; /* NT -> ns */
968 return VINF_SUCCESS;
969}
970
971
972RTDECL(int) RTTimerReleaseSystemGranularity(uint32_t u32Granted)
973{
974 if (!g_pfnrtNtExSetTimerResolution)
975 return VERR_NOT_SUPPORTED;
976
977 g_pfnrtNtExSetTimerResolution(0 /* ignored */, FALSE);
978 NOREF(u32Granted);
979 return VINF_SUCCESS;
980}
981
982
983RTDECL(bool) RTTimerCanDoHighResolution(void)
984{
985#ifdef RTR0TIMER_NT_HIGH_RES
986 return g_pfnrtExAllocateTimer != NULL
987 && g_pfnrtExDeleteTimer != NULL
988 && g_pfnrtExSetTimer != NULL
989 && g_pfnrtExCancelTimer != NULL;
990#else
991 return false;
992#endif
993}
994
注意: 瀏覽 TracBrowser 來幫助您使用儲存庫瀏覽器

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette