VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp@ 80268

Last change on this file since 80268 was 80268, checked in by vboxsync, 6 years ago

VMM: Refactoring VMMAll/* to use VMCC & VMMCPUCC. bugref:9217

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 36.2 KB
 
1/* $Id: TMAllVirtual.cpp 80268 2019-08-14 11:25:13Z vboxsync $ */
2/** @file
3 * TM - Timeout Manager, Virtual Time, All Contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define VBOX_BUGREF_9217_PART_I
23#define LOG_GROUP LOG_GROUP_TM
24#include <VBox/vmm/tm.h>
25#include <VBox/vmm/dbgftrace.h>
26#ifdef IN_RING3
27# ifdef VBOX_WITH_REM
28# include <VBox/vmm/rem.h>
29# endif
30# include <iprt/thread.h>
31#endif
32#include "TMInternal.h"
33#include <VBox/vmm/vmcc.h>
34#include <VBox/vmm/vmm.h>
35#include <VBox/err.h>
36#include <VBox/log.h>
37#include <VBox/sup.h>
38
39#include <iprt/time.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#include <iprt/asm-math.h>
43
44
45
46/**
47 * @interface_method_impl{RTTIMENANOTSDATA,pfnBad}
48 */
49DECLCALLBACK(DECLEXPORT(void)) tmVirtualNanoTSBad(PRTTIMENANOTSDATA pData, uint64_t u64NanoTS, uint64_t u64DeltaPrev,
50 uint64_t u64PrevNanoTS)
51{
52 PVM pVM = RT_FROM_MEMBER(pData, VM, CTX_SUFF(tm.s.VirtualGetRawData));
53 pData->cBadPrev++;
54 if ((int64_t)u64DeltaPrev < 0)
55 LogRel(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 pVM=%p\n",
56 u64DeltaPrev, u64PrevNanoTS, u64NanoTS, pVM));
57 else
58 Log(("TM: u64DeltaPrev=%RI64 u64PrevNanoTS=0x%016RX64 u64NanoTS=0x%016RX64 pVM=%p (debugging?)\n",
59 u64DeltaPrev, u64PrevNanoTS, u64NanoTS, pVM));
60}
61
62
63/**
64 * @interface_method_impl{RTTIMENANOTSDATA,pfnRediscover}
65 *
66 * This is the initial worker, so the first call in each context ends up here.
67 * It is also used should the delta rating of the host CPUs change or if the
68 * fGetGipCpu feature the current worker relies upon becomes unavailable. The
69 * last two events may occur as CPUs are taken online.
70 */
71DECLCALLBACK(DECLEXPORT(uint64_t)) tmVirtualNanoTSRediscover(PRTTIMENANOTSDATA pData)
72{
73 PVM pVM = RT_FROM_MEMBER(pData, VM, CTX_SUFF(tm.s.VirtualGetRawData));
74
75 /*
76 * We require a valid GIP for the selection below. Invalid GIP is fatal.
77 */
78 PSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage;
79 AssertFatalMsg(RT_VALID_PTR(pGip), ("pVM=%p pGip=%p\n", pVM, pGip));
80 AssertFatalMsg(pGip->u32Magic == SUPGLOBALINFOPAGE_MAGIC, ("pVM=%p pGip=%p u32Magic=%#x\n", pVM, pGip, pGip->u32Magic));
81 AssertFatalMsg(pGip->u32Mode > SUPGIPMODE_INVALID && pGip->u32Mode < SUPGIPMODE_END,
82 ("pVM=%p pGip=%p u32Mode=%#x\n", pVM, pGip, pGip->u32Mode));
83
84 /*
85 * Determine the new worker.
86 */
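    /* Summary of the selection below: the GIP mode (sync/invariant vs. async
     * TSC) picks the worker family, SSE2 availability decides between the
     * LFENCE and legacy variants, the TSC-delta rating decides whether
     * per-CPU deltas are applied, and outside ring-0 the fGetGipCpu flags
     * pick how the current CPU is identified (IDTR limit, RDTSCP, APIC ID). */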
87 PFNTIMENANOTSINTERNAL pfnWorker;
88 bool const fLFence = RT_BOOL(ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SSE2);
89 switch (pGip->u32Mode)
90 {
91 case SUPGIPMODE_SYNC_TSC:
92 case SUPGIPMODE_INVARIANT_TSC:
93#ifdef IN_RING0
94 if (pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO)
95 pfnWorker = fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta;
96 else
97 pfnWorker = fLFence ? RTTimeNanoTSLFenceSyncInvarWithDelta : RTTimeNanoTSLegacySyncInvarWithDelta;
98#else
99 if (pGip->fGetGipCpu & SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS)
100 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_PRACTICALLY_ZERO
101 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
102 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseIdtrLim : RTTimeNanoTSLegacySyncInvarWithDeltaUseIdtrLim;
103 else if (pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS)
104 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_PRACTICALLY_ZERO
105 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
106 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseRdtscp : RTTimeNanoTSLegacySyncInvarWithDeltaUseRdtscp;
107 else
108 pfnWorker = pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO
109 ? fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta
110 : fLFence ? RTTimeNanoTSLFenceSyncInvarWithDeltaUseApicId : RTTimeNanoTSLegacySyncInvarWithDeltaUseApicId;
111#endif
112 break;
113
114 case SUPGIPMODE_ASYNC_TSC:
115#ifdef IN_RING0
116 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsync : RTTimeNanoTSLegacyAsync;
117#else
118 if (pGip->fGetGipCpu & SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS)
119 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseIdtrLim : RTTimeNanoTSLegacyAsyncUseIdtrLim;
120 else if (pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS)
121 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseRdtscp : RTTimeNanoTSLegacyAsyncUseRdtscp;
122 else if (pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_GROUP_IN_CH_NUMBER_IN_CL)
123 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseRdtscpGroupChNumCl : RTTimeNanoTSLegacyAsyncUseRdtscpGroupChNumCl;
124 else
125 pfnWorker = fLFence ? RTTimeNanoTSLFenceAsyncUseApicId : RTTimeNanoTSLegacyAsyncUseApicId;
126#endif
127 break;
128
129 default:
130 AssertFatalMsgFailed(("pVM=%p pGip=%p u32Mode=%#x\n", pVM, pGip, pGip->u32Mode));
131 }
132
133 /*
134 * Update the pfnVirtualGetRaw pointer and call the worker we selected.
135 */
136 ASMAtomicWritePtr((void * volatile *)&CTX_SUFF(pVM->tm.s.pfnVirtualGetRaw), (void *)(uintptr_t)pfnWorker);
137 return pfnWorker(pData);
138}
139
140
141/**
142 * @interface_method_impl{RTTIMENANOTSDATA,pfnBadCpuIndex}
143 */
144DECLEXPORT(uint64_t) tmVirtualNanoTSBadCpuIndex(PRTTIMENANOTSDATA pData, uint16_t idApic, uint16_t iCpuSet, uint16_t iGipCpu)
145{
146 PVM pVM = RT_FROM_MEMBER(pData, VM, CTX_SUFF(tm.s.VirtualGetRawData));
147 AssertFatalMsgFailed(("pVM=%p idApic=%#x iCpuSet=%#x iGipCpu=%#x\n", pVM, idApic, iCpuSet, iGipCpu));
148#ifndef _MSC_VER
149 return UINT64_MAX;
150#endif
151}
152
153
154/**
155 * Wrapper around the IPRT GIP time methods.
156 */
157DECLINLINE(uint64_t) tmVirtualGetRawNanoTS(PVM pVM)
158{
159# ifdef IN_RING3
160 uint64_t u64 = CTXALLSUFF(pVM->tm.s.pfnVirtualGetRaw)(&CTXALLSUFF(pVM->tm.s.VirtualGetRawData));
161# else /* !IN_RING3 */
162 uint32_t cPrevSteps = pVM->tm.s.CTX_SUFF(VirtualGetRawData).c1nsSteps;
163 uint64_t u64 = pVM->tm.s.CTX_SUFF(pfnVirtualGetRaw)(&pVM->tm.s.CTX_SUFF(VirtualGetRawData));
164 if (cPrevSteps != pVM->tm.s.CTX_SUFF(VirtualGetRawData).c1nsSteps)
165 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
166# endif /* !IN_RING3 */
167 /*DBGFTRACE_POS_U64(pVM, u64);*/
168 return u64;
169}
170
171
172/**
173 * Get the time when we're not running at 100%
174 *
175 * @returns The timestamp.
176 * @param pVM The cross context VM structure.
177 */
178static uint64_t tmVirtualGetRawNonNormal(PVM pVM)
179{
180 /*
181 * Recalculate the RTTimeNanoTS() value for the period where
182 * warp drive has been enabled.
183 */
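    /* Illustrative values: with u32VirtualWarpDrivePercentage = 200 and
     * 10 000 000 000 ns of raw time elapsed since u64VirtualWarpDriveStart,
     * the scaling below yields 20 000 000 000 ns past the start point, i.e.
     * virtual time runs twice as fast; a percentage of 50 would halve it. */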
184 uint64_t u64 = tmVirtualGetRawNanoTS(pVM);
185 u64 -= pVM->tm.s.u64VirtualWarpDriveStart;
186 u64 *= pVM->tm.s.u32VirtualWarpDrivePercentage;
187 u64 /= 100;
188 u64 += pVM->tm.s.u64VirtualWarpDriveStart;
189
190 /*
191 * Now we apply the virtual time offset.
192 * (Which is the negated tmVirtualGetRawNanoTS() value for when the virtual
193 * machine started if it had been running continuously without any suspends.)
194 */
195 u64 -= pVM->tm.s.u64VirtualOffset;
196 return u64;
197}
198
199
200/**
201 * Get the raw virtual time.
202 *
203 * @returns The current time stamp.
204 * @param pVM The cross context VM structure.
205 */
206DECLINLINE(uint64_t) tmVirtualGetRaw(PVM pVM)
207{
208 if (RT_LIKELY(!pVM->tm.s.fVirtualWarpDrive))
209 return tmVirtualGetRawNanoTS(pVM) - pVM->tm.s.u64VirtualOffset;
210 return tmVirtualGetRawNonNormal(pVM);
211}
212
213
214/**
215 * Inlined version of tmVirtualGetEx.
216 */
217DECLINLINE(uint64_t) tmVirtualGet(PVMCC pVM, bool fCheckTimers)
218{
219 uint64_t u64;
220 if (RT_LIKELY(pVM->tm.s.cVirtualTicking))
221 {
222 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGet);
223 u64 = tmVirtualGetRaw(pVM);
224
225 /*
226 * Use the chance to check for expired timers.
227 */
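        /* The check below sets VMCPU_FF_TIMER on the timer-servicing VCPU when
         * either the TMCLOCK_VIRTUAL head timer has expired, or the virtual
         * sync clock is ticking and its head timer has expired relative to
         * u64 - offVirtualSync, provided the queues aren't already being run. */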
228 if (fCheckTimers)
229 {
230 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
231 if ( !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)
232 && !pVM->tm.s.fRunningQueues
233 && ( pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64
234 || ( pVM->tm.s.fVirtualSyncTicking
235 && pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64 - pVM->tm.s.offVirtualSync
236 )
237 )
238 && !pVM->tm.s.fRunningQueues
239 )
240 {
241 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualGetSetFF);
242 Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
243 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
244#ifdef IN_RING3
245# ifdef VBOX_WITH_REM
246 REMR3NotifyTimerPending(pVM, pVCpuDst);
247# endif
248 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
249#endif
250 }
251 }
252 }
253 else
254 u64 = pVM->tm.s.u64Virtual;
255 return u64;
256}
257
258
259/**
260 * Gets the current TMCLOCK_VIRTUAL time
261 *
262 * @returns The timestamp.
263 * @param pVM The cross context VM structure.
264 *
265 * @remark While the flow of time will never go backwards, the speed of the
266 * progress varies due to inaccurate RTTimeNanoTS and TSC. The latter can be
267 * influenced by power saving (SpeedStep, PowerNow!), while the former
268 * makes use of TSC and kernel timers.
269 */
270VMM_INT_DECL(uint64_t) TMVirtualGet(PVMCC pVM)
271{
272 return tmVirtualGet(pVM, true /*fCheckTimers*/);
273}
274
275
276/**
277 * Gets the current TMCLOCK_VIRTUAL time without checking
278 * timers or anything.
279 *
280 * Meaning, this has no side effect on FFs like TMVirtualGet may have.
281 *
282 * @returns The timestamp.
283 * @param pVM The cross context VM structure.
284 *
285 * @remarks See TMVirtualGet.
286 */
287VMM_INT_DECL(uint64_t) TMVirtualGetNoCheck(PVMCC pVM)
288{
289 return tmVirtualGet(pVM, false /*fCheckTimers*/);
290}
291
292
293/**
294 * Converts the deadline interval from TMCLOCK_VIRTUAL to host nano seconds.
295 *
296 * @returns Host nano second count.
297 * @param pVM The cross context VM structure.
298 * @param cVirtTicksToDeadline The TMCLOCK_VIRTUAL interval.
299 */
300DECLINLINE(uint64_t) tmVirtualVirtToNsDeadline(PVM pVM, uint64_t cVirtTicksToDeadline)
301{
302 if (RT_UNLIKELY(pVM->tm.s.fVirtualWarpDrive))
303 return ASMMultU64ByU32DivByU32(cVirtTicksToDeadline, 100, pVM->tm.s.u32VirtualWarpDrivePercentage);
304 return cVirtTicksToDeadline;
305}
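/* Example (illustrative values): with u32VirtualWarpDrivePercentage = 200, a
 * deadline of 1 000 000 virtual ticks maps to 1 000 000 * 100 / 200 = 500 000
 * host nanoseconds, since virtual time advances twice as fast as host time. */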
306
307
308/**
309 * tmVirtualSyncGetLocked worker for handling catch-up when owning the lock.
310 *
311 * @returns The timestamp.
312 * @param pVM The cross context VM structure.
313 * @param u64 raw virtual time.
314 * @param off offVirtualSync.
315 * @param pcNsToDeadline Where to return the number of nano seconds to
316 * the next virtual sync timer deadline. Can be
317 * NULL.
318 */
319DECLINLINE(uint64_t) tmVirtualSyncGetHandleCatchUpLocked(PVMCC pVM, uint64_t u64, uint64_t off, uint64_t *pcNsToDeadline)
320{
321 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
322
323 /*
324 * Don't make updates until we've checked the timer queue.
325 */
326 bool fUpdatePrev = true;
327 bool fUpdateOff = true;
328 bool fStop = false;
329 const uint64_t u64Prev = pVM->tm.s.u64VirtualSyncCatchUpPrev;
330 uint64_t u64Delta = u64 - u64Prev;
331 if (RT_LIKELY(!(u64Delta >> 32)))
332 {
333 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100);
334 if (off > u64Sub + pVM->tm.s.offVirtualSyncGivenUp)
335 {
336 off -= u64Sub;
337 Log4(("TM: %'RU64/-%'8RU64: sub %RU32 [vsghcul]\n", u64 - off, off - pVM->tm.s.offVirtualSyncGivenUp, u64Sub));
338 }
339 else
340 {
341 /* we've completely caught up. */
342 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
343 off = pVM->tm.s.offVirtualSyncGivenUp;
344 fStop = true;
345 Log4(("TM: %'RU64/0: caught up [vsghcul]\n", u64));
346 }
347 }
348 else
349 {
350 /* More than 4 seconds since last time (or negative), ignore it. */
351 fUpdateOff = false;
352 fUpdatePrev = !(u64Delta & RT_BIT_64(63));
353 Log(("TMVirtualGetSync: u64Delta=%RX64\n", u64Delta));
354 }
355
356 /*
357 * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
358 * approach is to never pass the head timer. So when we reach it, we stop the
359 * clock and set the timer pending flag.
360 */
361 u64 -= off;
362
363 uint64_t u64Last = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
364 if (u64Last > u64)
365 {
366 u64 = u64Last + 1;
367 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetAdjLast);
368 }
369
370 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
371 if (u64 < u64Expire)
372 {
373 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
374 if (fUpdateOff)
375 ASMAtomicWriteU64(&pVM->tm.s.offVirtualSync, off);
376 if (fStop)
377 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
378 if (fUpdatePrev)
379 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev, u64);
380 if (pcNsToDeadline)
381 {
382 uint64_t cNsToDeadline = u64Expire - u64;
383 if (pVM->tm.s.fVirtualSyncCatchUp)
384 cNsToDeadline = ASMMultU64ByU32DivByU32(cNsToDeadline, 100,
385 pVM->tm.s.u32VirtualSyncCatchUpPercentage + 100);
386 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, cNsToDeadline);
387 }
388 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
389 }
390 else
391 {
392 u64 = u64Expire;
393 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
394 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
395
396 VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
397 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
398 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
399 Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
400 Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [vsghcul]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
401 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
402
403 if (pcNsToDeadline)
404 *pcNsToDeadline = 0;
405#ifdef IN_RING3
406# ifdef VBOX_WITH_REM
407 REMR3NotifyTimerPending(pVM, pVCpuDst);
408# endif
409 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
410#endif
411 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
412 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
413 }
414 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
415
416 Log6(("tmVirtualSyncGetHandleCatchUpLocked -> %'RU64\n", u64));
417 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetHandleCatchUpLocked");
418 return u64;
419}
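/* Worked example for the catch-up math above (illustrative values): with
 * u32VirtualSyncCatchUpPercentage = 25 and u64Delta = 4 000 000 ns since the
 * previous call, u64Sub = 4 000 000 * 25 / 100 = 1 000 000 ns is shaved off
 * the virtual-sync offset. The returned deadline is scaled by 100 / (100 + 25)
 * because the virtual-sync clock runs 25% faster while catching up. */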
420
421
422/**
423 * tmVirtualSyncGetEx worker for when we get the lock.
424 *
425 * @returns The timestamp.
426 * @param pVM The cross context VM structure.
427 * @param u64 The virtual clock timestamp.
428 * @param pcNsToDeadline Where to return the number of nano seconds to
429 * the next virtual sync timer deadline. Can be
430 * NULL.
431 */
432DECLINLINE(uint64_t) tmVirtualSyncGetLocked(PVMCC pVM, uint64_t u64, uint64_t *pcNsToDeadline)
433{
434 /*
435 * Not ticking?
436 */
437 if (!pVM->tm.s.fVirtualSyncTicking)
438 {
439 u64 = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
440 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
441 if (pcNsToDeadline)
442 *pcNsToDeadline = 0;
443 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
444 Log6(("tmVirtualSyncGetLocked -> %'RU64 [stopped]\n", u64));
445 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetLocked-stopped");
446 return u64;
447 }
448
449 /*
450 * Handle catch up in a separate function.
451 */
452 uint64_t off = ASMAtomicUoReadU64(&pVM->tm.s.offVirtualSync);
453 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
454 return tmVirtualSyncGetHandleCatchUpLocked(pVM, u64, off, pcNsToDeadline);
455
456 /*
457 * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
458 * approach is to never pass the head timer. So when we reach it, we stop the
459 * clock and set the timer pending flag.
460 */
461 u64 -= off;
462
463 uint64_t u64Last = ASMAtomicUoReadU64(&pVM->tm.s.u64VirtualSync);
464 if (u64Last > u64)
465 {
466 u64 = u64Last + 1;
467 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetAdjLast);
468 }
469
470 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
471 if (u64 < u64Expire)
472 {
473 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
474 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
475 if (pcNsToDeadline)
476 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, u64Expire - u64);
477 }
478 else
479 {
480 u64 = u64Expire;
481 ASMAtomicWriteU64(&pVM->tm.s.u64VirtualSync, u64);
482 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
483
484 VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC);
485 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
486 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
487 Log5(("TMAllVirtual(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
488 Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [vsgl]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
489 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
490
491#ifdef IN_RING3
492# ifdef VBOX_WITH_REM
493 REMR3NotifyTimerPending(pVM, pVCpuDst);
494# endif
495 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
496#endif
497 if (pcNsToDeadline)
498 *pcNsToDeadline = 0;
499 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
500 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
501 }
502 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLocked);
503 Log6(("tmVirtualSyncGetLocked -> %'RU64\n", u64));
504 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetLocked");
505 return u64;
506}
507
508
509/**
510 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
511 *
512 * @returns The timestamp.
513 * @param pVM The cross context VM structure.
514 * @param fCheckTimers Check timers or not
515 * @param pcNsToDeadline Where to return the number of nano seconds to
516 * the next virtual sync timer deadline. Can be
517 * NULL.
518 * @thread EMT.
519 */
520DECLINLINE(uint64_t) tmVirtualSyncGetEx(PVMCC pVM, bool fCheckTimers, uint64_t *pcNsToDeadline)
521{
522 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGet);
523
524 uint64_t u64;
525 if (!pVM->tm.s.fVirtualSyncTicking)
526 {
527 if (pcNsToDeadline)
528 *pcNsToDeadline = 0;
529 u64 = pVM->tm.s.u64VirtualSync;
530 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetEx-stopped1");
531 return u64;
532 }
533
534 /*
535 * Query the virtual clock and do the usual expired timer check.
536 */
537 Assert(pVM->tm.s.cVirtualTicking);
538 u64 = tmVirtualGetRaw(pVM);
539 if (fCheckTimers)
540 {
541 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
542 if ( !VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)
543 && pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64)
544 {
545 Log5(("TMAllVirtual(%u): FF: 0 -> 1\n", __LINE__));
546 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
547#ifdef IN_RING3
548# ifdef VBOX_WITH_REM
549 REMR3NotifyTimerPending(pVM, pVCpuDst);
550# endif
551 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM /** @todo |VMNOTIFYFF_FLAGS_POKE*/);
552#endif
553 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
554 }
555 }
556
557 /*
558 * If we can get the lock, get it. The result is much more reliable.
559 *
560 * Note! This is where all clock source devices branch off because they
561 * will be owning the lock already. The 'else' is taken by code
562 * which is less picky or hasn't been adjusted yet
563 */
564 if (PDMCritSectTryEnter(&pVM->tm.s.VirtualSyncLock) == VINF_SUCCESS)
565 return tmVirtualSyncGetLocked(pVM, u64, pcNsToDeadline);
566
567 /*
568 * When the clock is ticking, not doing catch ups and not running into an
569 * expired time, we can get away without locking. Try this first.
570 */
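    /* The lockless path relies on a read / re-check pattern: the offset is
     * read, then the ticking and catch-up flags and the offset are read again,
     * and the unlocked result is only used if nothing changed in between;
     * otherwise we fall through to the retry loop further down. */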
571 uint64_t off;
572 if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
573 {
574 if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
575 {
576 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
577 if (RT_LIKELY( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
578 && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
579 && off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)))
580 {
581 off = u64 - off;
582 uint64_t const u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
583 if (off < u64Expire)
584 {
585 if (pcNsToDeadline)
586 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, u64Expire - off);
587 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
588 Log6(("tmVirtualSyncGetEx -> %'RU64 [lockless]\n", off));
589 DBGFTRACE_U64_TAG(pVM, off, "tmVirtualSyncGetEx-lockless");
590 return off;
591 }
592 }
593 }
594 }
595 else
596 {
597 off = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
598 if (RT_LIKELY(!ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking)))
599 {
600 if (pcNsToDeadline)
601 *pcNsToDeadline = 0;
602 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetLockless);
603 Log6(("tmVirtualSyncGetEx -> %'RU64 [lockless/stopped]\n", off));
604 DBGFTRACE_U64_TAG(pVM, off, "tmVirtualSyncGetEx-stopped2");
605 return off;
606 }
607 }
608
609 /*
610 * Read the offset and adjust if we're playing catch-up.
611 *
612 * The catch-up adjustment works by decrementing the offset by a percentage of
613 * the time elapsed since the previous TMVirtualGetSync call.
614 *
615 * It's possible to get a very long or even negative interval between two reads
616 * for the following reasons:
617 * - Someone might have suspended the process execution, frequently the case when
618 * debugging the process.
619 * - We might be on a different CPU whose TSC isn't quite in sync with the
620 * other CPUs in the system.
621 * - Another thread is racing us and we might have been preempted while inside
622 * this function.
623 *
624 * Assuming nanosecond virtual time, we can simply ignore any interval which has
625 * any of the upper 32 bits set.
626 */
627 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
628 int cOuterTries = 42;
629 for (;; cOuterTries--)
630 {
631 /* Try grab the lock, things get simpler when owning the lock. */
632 int rcLock = PDMCritSectTryEnter(&pVM->tm.s.VirtualSyncLock);
633 if (RT_SUCCESS_NP(rcLock))
634 return tmVirtualSyncGetLocked(pVM, u64, pcNsToDeadline);
635
636 /* Re-check the ticking flag. */
637 if (!ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
638 {
639 off = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSync);
640 if ( ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking)
641 && cOuterTries > 0)
642 continue;
643 if (pcNsToDeadline)
644 *pcNsToDeadline = 0;
645 Log6(("tmVirtualSyncGetEx -> %'RU64 [stopped]\n", off));
646 DBGFTRACE_U64_TAG(pVM, off, "tmVirtualSyncGetEx-stopped3");
647 return off;
648 }
649
650 off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
651 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
652 {
653 /* No changes allowed, try get a consistent set of parameters. */
654 uint64_t const u64Prev = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
655 uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
656 uint32_t const u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
657 if ( ( u64Prev == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
658 && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
659 && u32Pct == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
660 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
661 || cOuterTries <= 0)
662 {
663 uint64_t u64Delta = u64 - u64Prev;
664 if (RT_LIKELY(!(u64Delta >> 32)))
665 {
666 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
667 if (off > u64Sub + offGivenUp)
668 {
669 off -= u64Sub;
670 Log4(("TM: %'RU64/-%'8RU64: sub %RU32 [NoLock]\n", u64 - off, pVM->tm.s.offVirtualSync - offGivenUp, u64Sub));
671 }
672 else
673 {
674 /* we've completely caught up. */
675 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
676 off = offGivenUp;
677 Log4(("TM: %'RU64/0: caught up [NoLock]\n", u64));
678 }
679 }
680 else
681 /* More than 4 seconds since last time (or negative), ignore it. */
682 Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));
683
684 /* Check that we're still running and in catch up. */
685 if ( ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
686 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
687 break;
688 if (cOuterTries <= 0)
689 break; /* enough */
690 }
691 }
692 else if ( off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
693 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
694 break; /* Got a consistent offset */
695 else if (cOuterTries <= 0)
696 break; /* enough */
697 }
698 if (cOuterTries <= 0)
699 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetELoop);
700
701 /*
702 * Complete the calculation of the current TMCLOCK_VIRTUAL_SYNC time. The current
703 * approach is to never pass the head timer. So when we reach it, we stop the
704 * clock and set the timer pending flag.
705 */
706 u64 -= off;
707/** @todo u64VirtualSyncLast */
708 uint64_t u64Expire = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
709 if (u64 >= u64Expire)
710 {
711 PVMCPUCC pVCpuDst = VMCC_GET_CPU(pVM, pVM->tm.s.idTimerCpu);
712 if (!VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER))
713 {
714 Log5(("TMAllVirtual(%u): FF: %d -> 1 (NoLock)\n", __LINE__, VMCPU_FF_IS_SET(pVCpuDst, VMCPU_FF_TIMER)));
715 VM_FF_SET(pVM, VM_FF_TM_VIRTUAL_SYNC); /* Hmm? */
716 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
717#ifdef IN_RING3
718# ifdef VBOX_WITH_REM
719 REMR3NotifyTimerPending(pVM, pVCpuDst);
720# endif
721 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
722#endif
723 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetSetFF);
724 Log4(("TM: %'RU64/-%'8RU64: exp tmr=>ff [NoLock]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
725 }
726 else
727 Log4(("TM: %'RU64/-%'8RU64: exp tmr [NoLock]\n", u64, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp));
728 if (pcNsToDeadline)
729 *pcNsToDeadline = 0;
730 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGetExpired);
731 }
732 else if (pcNsToDeadline)
733 {
734 uint64_t cNsToDeadline = u64Expire - u64;
735 if (ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
736 cNsToDeadline = ASMMultU64ByU32DivByU32(cNsToDeadline, 100,
737 ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage) + 100);
738 *pcNsToDeadline = tmVirtualVirtToNsDeadline(pVM, cNsToDeadline);
739 }
740
741 Log6(("tmVirtualSyncGetEx -> %'RU64\n", u64));
742 DBGFTRACE_U64_TAG(pVM, u64, "tmVirtualSyncGetEx-nolock");
743 return u64;
744}
745
746
747/**
748 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
749 *
750 * @returns The timestamp.
751 * @param pVM The cross context VM structure.
752 * @thread EMT.
753 * @remarks May set the timer and virtual sync FFs.
754 */
755VMM_INT_DECL(uint64_t) TMVirtualSyncGet(PVMCC pVM)
756{
757 return tmVirtualSyncGetEx(pVM, true /*fCheckTimers*/, NULL /*pcNsToDeadline*/);
758}
759
760
761/**
762 * Gets the current TMCLOCK_VIRTUAL_SYNC time without checking timers running on
763 * TMCLOCK_VIRTUAL.
764 *
765 * @returns The timestamp.
766 * @param pVM The cross context VM structure.
767 * @thread EMT.
768 * @remarks May set the timer and virtual sync FFs.
769 */
770VMM_INT_DECL(uint64_t) TMVirtualSyncGetNoCheck(PVMCC pVM)
771{
772 return tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, NULL /*pcNsToDeadline*/);
773}
774
775
776/**
777 * Gets the current TMCLOCK_VIRTUAL_SYNC time.
778 *
779 * @returns The timestamp.
780 * @param pVM The cross context VM structure.
781 * @param fCheckTimers Check timers on the virtual clock or not.
782 * @thread EMT.
783 * @remarks May set the timer and virtual sync FFs.
784 */
785VMM_INT_DECL(uint64_t) TMVirtualSyncGetEx(PVMCC pVM, bool fCheckTimers)
786{
787 return tmVirtualSyncGetEx(pVM, fCheckTimers, NULL /*pcNsToDeadline*/);
788}
789
790
791/**
792 * Gets the current TMCLOCK_VIRTUAL_SYNC time and ticks to the next deadline
793 * without checking timers running on TMCLOCK_VIRTUAL.
794 *
795 * @returns The timestamp.
796 * @param pVM The cross context VM structure.
797 * @param pcNsToDeadline Where to return the number of nano seconds to
798 * the next virtual sync timer deadline.
799 * @thread EMT.
800 * @remarks May set the timer and virtual sync FFs.
801 */
802VMM_INT_DECL(uint64_t) TMVirtualSyncGetWithDeadlineNoCheck(PVMCC pVM, uint64_t *pcNsToDeadline)
803{
804 uint64_t cNsToDeadlineTmp; /* try convince the compiler to skip the if tests. */
805 uint64_t u64Now = tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, &cNsToDeadlineTmp);
806 *pcNsToDeadline = cNsToDeadlineTmp;
807 return u64Now;
808}
809
810
811/**
812 * Gets the number of nano seconds to the next virtual sync deadline.
813 *
814 * @returns The number of TMCLOCK_VIRTUAL ticks.
815 * @param pVM The cross context VM structure.
816 * @thread EMT.
817 * @remarks May set the timer and virtual sync FFs.
818 */
819VMMDECL(uint64_t) TMVirtualSyncGetNsToDeadline(PVMCC pVM)
820{
821 uint64_t cNsToDeadline;
822 tmVirtualSyncGetEx(pVM, false /*fCheckTimers*/, &cNsToDeadline);
823 return cNsToDeadline;
824}
825
826
827/**
828 * Gets the current lag of the synchronous virtual clock (relative to the virtual clock).
829 *
830 * @return The current lag.
831 * @param pVM The cross context VM structure.
832 */
833VMM_INT_DECL(uint64_t) TMVirtualSyncGetLag(PVMCC pVM)
834{
835 return pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp;
836}
837
838
839/**
840 * Get the current catch-up percent.
841 *
842 * @return The current catch-up percent. 0 means running at the same speed as the virtual clock.
843 * @param pVM The cross context VM structure.
844 */
845VMM_INT_DECL(uint32_t) TMVirtualSyncGetCatchUpPct(PVMCC pVM)
846{
847 if (pVM->tm.s.fVirtualSyncCatchUp)
848 return pVM->tm.s.u32VirtualSyncCatchUpPercentage;
849 return 0;
850}
851
852
853/**
854 * Gets the current TMCLOCK_VIRTUAL frequency.
855 *
856 * @returns The frequency.
857 * @param pVM The cross context VM structure.
858 */
859VMM_INT_DECL(uint64_t) TMVirtualGetFreq(PVM pVM)
860{
861 NOREF(pVM);
862 return TMCLOCK_FREQ_VIRTUAL;
863}
864
865
866/**
867 * Worker for TMR3PauseClocks.
868 *
869 * @returns VINF_SUCCESS or VERR_TM_VIRTUAL_TICKING_IPE (asserted).
870 * @param pVM The cross context VM structure.
871 */
872int tmVirtualPauseLocked(PVM pVM)
873{
874 uint32_t c = ASMAtomicDecU32(&pVM->tm.s.cVirtualTicking);
875 AssertMsgReturn(c < pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
876 if (c == 0)
877 {
878 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualPause);
879 pVM->tm.s.u64Virtual = tmVirtualGetRaw(pVM);
880 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, false);
881 }
882 return VINF_SUCCESS;
883}
884
885
886/**
887 * Worker for TMR3ResumeClocks.
888 *
889 * @returns VINF_SUCCESS or VERR_TM_VIRTUAL_TICKING_IPE (asserted).
890 * @param pVM The cross context VM structure.
891 */
892int tmVirtualResumeLocked(PVM pVM)
893{
894 uint32_t c = ASMAtomicIncU32(&pVM->tm.s.cVirtualTicking);
895 AssertMsgReturn(c <= pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
896 if (c == 1)
897 {
898 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualResume);
899 pVM->tm.s.u64VirtualRawPrev = 0;
900 pVM->tm.s.u64VirtualWarpDriveStart = tmVirtualGetRawNanoTS(pVM);
901 pVM->tm.s.u64VirtualOffset = pVM->tm.s.u64VirtualWarpDriveStart - pVM->tm.s.u64Virtual;
902 ASMAtomicWriteBool(&pVM->tm.s.fVirtualSyncTicking, true);
903 }
904 return VINF_SUCCESS;
905}
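/* Example (illustrative values): if the raw nanotimestamp is 50 000 000 000 ns
 * at resume time and u64Virtual was frozen at 12 000 000 000 ns, then
 * u64VirtualOffset becomes 38 000 000 000 ns, so tmVirtualGetRaw() (raw time
 * minus the offset, warp drive inactive) resumes ticking from 12 000 000 000. */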
906
907
908/**
909 * Converts from virtual ticks to nanoseconds.
910 *
911 * @returns nanoseconds.
912 * @param pVM The cross context VM structure.
913 * @param u64VirtualTicks The virtual ticks to convert.
914 * @remark There could be rounding errors here. We just do a simple integer divide
915 * without any adjustments.
916 */
917VMM_INT_DECL(uint64_t) TMVirtualToNano(PVM pVM, uint64_t u64VirtualTicks)
918{
919 NOREF(pVM);
920 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
921 return u64VirtualTicks;
922}
923
924
925/**
926 * Converts from virtual ticks to microseconds.
927 *
928 * @returns microseconds.
929 * @param pVM The cross context VM structure.
930 * @param u64VirtualTicks The virtual ticks to convert.
931 * @remark There could be rounding errors here. We just do a simple integer divide
932 * without any adjustments.
933 */
934VMM_INT_DECL(uint64_t) TMVirtualToMicro(PVM pVM, uint64_t u64VirtualTicks)
935{
936 NOREF(pVM);
937 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
938 return u64VirtualTicks / 1000;
939}
940
941
942/**
943 * Converts from virtual ticks to milliseconds.
944 *
945 * @returns milliseconds.
946 * @param pVM The cross context VM structure.
947 * @param u64VirtualTicks The virtual ticks to convert.
948 * @remark There could be rounding errors here. We just do a simple integer divide
949 * without any adjustments.
950 */
951VMM_INT_DECL(uint64_t) TMVirtualToMilli(PVM pVM, uint64_t u64VirtualTicks)
952{
953 NOREF(pVM);
954 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
955 return u64VirtualTicks / 1000000;
956}
957
958
959/**
960 * Converts from nanoseconds to virtual ticks.
961 *
962 * @returns virtual ticks.
963 * @param pVM The cross context VM structure.
964 * @param u64NanoTS The nanosecond value ticks to convert.
965 * @remark There could be rounding and overflow errors here.
966 */
967VMM_INT_DECL(uint64_t) TMVirtualFromNano(PVM pVM, uint64_t u64NanoTS)
968{
969 NOREF(pVM);
970 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
971 return u64NanoTS;
972}
973
974
975/**
976 * Converts from microseconds to virtual ticks.
977 *
978 * @returns virtual ticks.
979 * @param pVM The cross context VM structure.
980 * @param u64MicroTS The microsecond value ticks to convert.
981 * @remark There could be rounding and overflow errors here.
982 */
983VMM_INT_DECL(uint64_t) TMVirtualFromMicro(PVM pVM, uint64_t u64MicroTS)
984{
985 NOREF(pVM);
986 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
987 return u64MicroTS * 1000;
988}
989
990
991/**
992 * Converts from milliseconds to virtual ticks.
993 *
994 * @returns virtual ticks.
995 * @param pVM The cross context VM structure.
996 * @param u64MilliTS The millisecond value ticks to convert.
997 * @remark There could be rounding and overflow errors here.
998 */
999VMM_INT_DECL(uint64_t) TMVirtualFromMilli(PVM pVM, uint64_t u64MilliTS)
1000{
1001 NOREF(pVM);
1002 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1003 return u64MilliTS * 1000000;
1004}
1005