VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/linux/mp-r0drv-linux.c@ 85196

最後變更 在這個檔案從85196是 83326,由 vboxsync 提交於 5 年 前

IPRT/r0drv/mp-r0drv-linux.c: Use version checks for linux/cpumask.h stuff rather than assuming that everything in it will remain macros forever (num_online_cpus isn't anymore). Fixed possible bug in RTMpGetCount where it wouldn't handle CPU hotplugging right.

  • 屬性 svn:eol-style 設為 native
  • 屬性 svn:keywords 設為 Author Date Id Revision
檔案大小: 17.5 KB
 
1/* $Id: mp-r0drv-linux.c 83326 2020-03-19 12:16:12Z vboxsync $ */
2/** @file
3 * IPRT - Multiprocessor, Ring-0 Driver, Linux.
4 */
5
6/*
7 * Copyright (C) 2008-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#include "the-linux-kernel.h"
32#include "internal/iprt.h"
33
34#include <iprt/mp.h>
35#include <iprt/cpuset.h>
36#include <iprt/err.h>
37#include <iprt/asm.h>
38#include <iprt/thread.h>
39#include "r0drv/mp-r0drv.h"
40
41
42/*********************************************************************************************************************************
43* Defined Constants And Macros *
44*********************************************************************************************************************************/
45#if defined(nr_cpumask_bits) || LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
46# define VBOX_NR_CPUMASK_BITS (nr_cpumask_bits) /* same as nr_cpu_ids */
47#else
48# define VBOX_NR_CPUMASK_BITS (NR_CPUS)
49#endif
50
51
52RTDECL(RTCPUID) RTMpCpuId(void)
53{
54 return smp_processor_id();
55}
56RT_EXPORT_SYMBOL(RTMpCpuId);
57
58
59RTDECL(int) RTMpCurSetIndex(void)
60{
61 return smp_processor_id();
62}
63RT_EXPORT_SYMBOL(RTMpCurSetIndex);
64
65
66RTDECL(int) RTMpCurSetIndexAndId(PRTCPUID pidCpu)
67{
68 return *pidCpu = smp_processor_id();
69}
70RT_EXPORT_SYMBOL(RTMpCurSetIndexAndId);
71
72
73RTDECL(int) RTMpCpuIdToSetIndex(RTCPUID idCpu)
74{
75 return idCpu < RTCPUSET_MAX_CPUS && idCpu < VBOX_NR_CPUMASK_BITS ? (int)idCpu : -1;
76}
77RT_EXPORT_SYMBOL(RTMpCpuIdToSetIndex);
78
79
80RTDECL(RTCPUID) RTMpCpuIdFromSetIndex(int iCpu)
81{
82 return (unsigned)iCpu < VBOX_NR_CPUMASK_BITS ? (RTCPUID)iCpu : NIL_RTCPUID;
83}
84RT_EXPORT_SYMBOL(RTMpCpuIdFromSetIndex);
85
86
87RTDECL(RTCPUID) RTMpGetMaxCpuId(void)
88{
89 return VBOX_NR_CPUMASK_BITS - 1;
90}
91RT_EXPORT_SYMBOL(RTMpGetMaxCpuId);
92
93
/**
 * Checks whether the given CPU exists in the system (it may be offline).
 *
 * @returns true if the CPU is possible, false otherwise.
 * @param   idCpu       The CPU id to check.
 */
RTDECL(bool) RTMpIsCpuPossible(RTCPUID idCpu)
{
#if defined(CONFIG_SMP)
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 2) || defined(cpu_possible)
    /* Range check first; cpu_possible() indexes a bitmap of VBOX_NR_CPUMASK_BITS bits. */
    return idCpu < VBOX_NR_CPUMASK_BITS && cpu_possible(idCpu);
# else /* < 2.5.29 */
    /* Ancient kernels: CPU ids 0..smp_num_cpus-1 were assumed contiguous. */
    return idCpu < (RTCPUID)(smp_num_cpus);
# endif
#else
    /* Uniprocessor build: only the (sole) current CPU exists. */
    return idCpu == RTMpCpuId();
#endif
}
RT_EXPORT_SYMBOL(RTMpIsCpuPossible);
107
108
109RTDECL(PRTCPUSET) RTMpGetSet(PRTCPUSET pSet)
110{
111 RTCPUID idCpu;
112
113 RTCpuSetEmpty(pSet);
114 idCpu = RTMpGetMaxCpuId();
115 do
116 {
117 if (RTMpIsCpuPossible(idCpu))
118 RTCpuSetAdd(pSet, idCpu);
119 } while (idCpu-- > 0);
120 return pSet;
121}
122RT_EXPORT_SYMBOL(RTMpGetSet);
123
124
/**
 * Gets the number of CPUs in the system, including offline but possible ones.
 *
 * @returns CPU count.
 */
RTDECL(RTCPUID) RTMpGetCount(void)
{
#ifdef CONFIG_SMP
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 4) || defined(num_possible_cpus)
    /* Counts possible CPUs, so hot-pluggable but currently offline ones are included. */
    return num_possible_cpus();
# elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
    /* 2.4: only the boot-time CPU count is available. */
    return smp_num_cpus;
# else
    /* Fallback: count the members of the possible-CPU set we build ourselves. */
    RTCPUSET Set;
    RTMpGetSet(&Set);
    return RTCpuSetCount(&Set);
# endif
#else
    /* Uniprocessor kernel. */
    return 1;
#endif
}
RT_EXPORT_SYMBOL(RTMpGetCount);
142
143
/**
 * Checks whether the specified CPU is currently online.
 *
 * @returns true if online, false otherwise.
 * @param   idCpu       The CPU id to check.
 */
RTDECL(bool) RTMpIsCpuOnline(RTCPUID idCpu)
{
#ifdef CONFIG_SMP
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) || defined(cpu_online)
    /* Range check before indexing the online bitmap. */
    return idCpu < VBOX_NR_CPUMASK_BITS && cpu_online(idCpu);
# else /* 2.4: */
    /* 2.4 exposed the online CPUs as a plain bit mask (cpu_online_map). */
    return idCpu < VBOX_NR_CPUMASK_BITS && cpu_online_map & RT_BIT_64(idCpu);
# endif
#else
    /* Uniprocessor build: only the (sole) current CPU is online. */
    return idCpu == RTMpCpuId();
#endif
}
RT_EXPORT_SYMBOL(RTMpIsCpuOnline);
157
158
/**
 * Builds the set of CPUs that are currently online.
 *
 * @returns pSet.
 * @param   pSet        Where to store the online-CPU set.
 *
 * @note    Not atomic wrt hot-plugging; the set is a snapshot.
 */
RTDECL(PRTCPUSET) RTMpGetOnlineSet(PRTCPUSET pSet)
{
#ifdef CONFIG_SMP
    RTCPUID idCpu;

    RTCpuSetEmpty(pSet);
    idCpu = RTMpGetMaxCpuId();
    /* Walk all CPU ids from the top down, adding each one that is online. */
    do
    {
        if (RTMpIsCpuOnline(idCpu))
            RTCpuSetAdd(pSet, idCpu);
    } while (idCpu-- > 0);
#else
    /* Uniprocessor build: the set contains just the current CPU. */
    RTCpuSetEmpty(pSet);
    RTCpuSetAdd(pSet, RTMpCpuId());
#endif
    return pSet;
}
RT_EXPORT_SYMBOL(RTMpGetOnlineSet);
178
179
/**
 * Gets the number of CPUs that are currently online.
 *
 * @returns Online CPU count.
 */
RTDECL(RTCPUID) RTMpGetOnlineCount(void)
{
#ifdef CONFIG_SMP
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) || defined(num_online_cpus)
    return num_online_cpus();
# else
    /* Old kernels: count the members of the online set we build ourselves. */
    RTCPUSET Set;
    RTMpGetOnlineSet(&Set);
    return RTCpuSetCount(&Set);
# endif
#else
    /* Uniprocessor kernel. */
    return 1;
#endif
}
RT_EXPORT_SYMBOL(RTMpGetOnlineCount);
195
196
197RTDECL(bool) RTMpIsCpuWorkPending(void)
198{
199 /** @todo (not used on non-Windows platforms yet). */
200 return false;
201}
202RT_EXPORT_SYMBOL(RTMpIsCpuWorkPending);
203
204
205/**
206 * Wrapper between the native linux per-cpu callbacks and PFNRTWORKER.
207 *
208 * @param pvInfo Pointer to the RTMPARGS package.
209 */
210static void rtmpLinuxWrapper(void *pvInfo)
211{
212 PRTMPARGS pArgs = (PRTMPARGS)pvInfo;
213 ASMAtomicIncU32(&pArgs->cHits);
214 pArgs->pfnWorker(RTMpCpuId(), pArgs->pvUser1, pArgs->pvUser2);
215}
216
217#ifdef CONFIG_SMP
218
219# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
220/**
221 * Wrapper between the native linux per-cpu callbacks and PFNRTWORKER, does hit
222 * increment after calling the worker.
223 *
224 * @param pvInfo Pointer to the RTMPARGS package.
225 */
226static void rtmpLinuxWrapperPostInc(void *pvInfo)
227{
228 PRTMPARGS pArgs = (PRTMPARGS)pvInfo;
229 pArgs->pfnWorker(RTMpCpuId(), pArgs->pvUser1, pArgs->pvUser2);
230 ASMAtomicIncU32(&pArgs->cHits);
231}
232# endif
233
234
235/**
236 * Wrapper between the native linux all-cpu callbacks and PFNRTWORKER.
237 *
238 * @param pvInfo Pointer to the RTMPARGS package.
239 */
240static void rtmpLinuxAllWrapper(void *pvInfo)
241{
242 PRTMPARGS pArgs = (PRTMPARGS)pvInfo;
243 PRTCPUSET pWorkerSet = pArgs->pWorkerSet;
244 RTCPUID idCpu = RTMpCpuId();
245 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
246
247 if (RTCpuSetIsMember(pWorkerSet, idCpu))
248 {
249 pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
250 RTCpuSetDel(pWorkerSet, idCpu);
251 }
252}
253
254#endif /* CONFIG_SMP */
255
/**
 * Executes pfnWorker on every online CPU, including the calling one, and
 * waits for all of them to finish before returning.
 *
 * @returns VINF_SUCCESS.
 * @param   pfnWorker   The worker function to run on each CPU.
 * @param   pvUser1     1st user argument for the worker.
 * @param   pvUser2     2nd user argument for the worker.
 *
 * @note    Preemption is disabled for the whole operation; the other CPUs are
 *          fired asynchronously and completion is tracked via the worker set
 *          (rtmpLinuxAllWrapper removes each CPU as it finishes).
 */
RTDECL(int) RTMpOnAll(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    IPRT_LINUX_SAVE_EFL_AC();
    RTMPARGS Args;
    RTCPUSET OnlineSet;
    RTCPUID idCpu;
#ifdef CONFIG_SMP
    uint32_t cLoops;
#endif

    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;

    Args.pfnWorker = pfnWorker;
    Args.pvUser1 = pvUser1;
    Args.pvUser2 = pvUser2;
    Args.idCpu = NIL_RTCPUID;
    Args.cHits = 0;

    /* Snapshot the online set with preemption disabled so we stay put. */
    RTThreadPreemptDisable(&PreemptState);
    RTMpGetOnlineSet(&OnlineSet);
    Args.pWorkerSet = &OnlineSet;
    idCpu = RTMpCpuId();

#ifdef CONFIG_SMP
    if (RTCpuSetCount(&OnlineSet) > 1)
    {
        /* Fire the function on all other CPUs without waiting for completion. */
# if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0)
        smp_call_function(rtmpLinuxAllWrapper, &Args, 0 /* wait */);
# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
        int rc = smp_call_function(rtmpLinuxAllWrapper, &Args, 0 /* wait */);
        Assert(!rc); NOREF(rc);
# else
        /* Pre-2.6.27 smp_call_function took an extra 'retry' parameter. */
        int rc = smp_call_function(rtmpLinuxAllWrapper, &Args, 0 /* retry */, 0 /* wait */);
        Assert(!rc); NOREF(rc);
# endif
    }
#endif

    /* Fire the function on this CPU. */
    Args.pfnWorker(idCpu, Args.pvUser1, Args.pvUser2);
    RTCpuSetDel(Args.pWorkerSet, idCpu);

#ifdef CONFIG_SMP
    /* Wait for all of them finish. */
    cLoops = 64000;
    while (!RTCpuSetIsEmpty(Args.pWorkerSet))
    {
        /* Periodically check if any CPU in the wait set has gone offline, if so update the wait set. */
        if (!cLoops--)
        {
            RTCPUSET OnlineSetNow;
            RTMpGetOnlineSet(&OnlineSetNow);
            RTCpuSetAnd(Args.pWorkerSet, &OnlineSetNow);

            cLoops = 64000;
        }

        ASMNopPause();
    }
#endif

    RTThreadPreemptRestore(&PreemptState);
    IPRT_LINUX_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTMpOnAll);
323
324
/**
 * Executes pfnWorker on every online CPU except the calling one, waiting for
 * all of them to complete before returning.
 *
 * @returns VINF_SUCCESS.
 * @param   pfnWorker   The worker function to run on each other CPU.
 * @param   pvUser1     1st user argument for the worker.
 * @param   pvUser2     2nd user argument for the worker.
 */
RTDECL(int) RTMpOnOthers(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
#ifdef CONFIG_SMP
    IPRT_LINUX_SAVE_EFL_AC();
    RTMPARGS Args;

    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
    Args.pfnWorker = pfnWorker;
    Args.pvUser1 = pvUser1;
    Args.pvUser2 = pvUser2;
    Args.idCpu = NIL_RTCPUID;
    Args.cHits = 0;

    /* Keep us on this CPU while the cross calls are in flight. */
    RTThreadPreemptDisable(&PreemptState);
# if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0)
    /* No status is checked on 5.3+ (the version check implies no usable return). */
    smp_call_function(rtmpLinuxWrapper, &Args, 1 /* wait */);
# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
    int rc = smp_call_function(rtmpLinuxWrapper, &Args, 1 /* wait */);
    Assert(rc == 0); NOREF(rc);
# else /* older kernels */
    /* Pre-2.6.27 smp_call_function took an extra 'retry' parameter. */
    int rc = smp_call_function(rtmpLinuxWrapper, &Args, 0 /* retry */, 1 /* wait */);
    Assert(rc == 0); NOREF(rc);
# endif /* older kernels */
    RTThreadPreemptRestore(&PreemptState);

    IPRT_LINUX_RESTORE_EFL_AC();
#else
    RT_NOREF(pfnWorker, pvUser1, pvUser2);
#endif
    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTMpOnOthers);
357
358
359#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27) && defined(CONFIG_SMP)
360/**
361 * Wrapper between the native linux per-cpu callbacks and PFNRTWORKER
362 * employed by RTMpOnPair on older kernels that lacks smp_call_function_many.
363 *
364 * @param pvInfo Pointer to the RTMPARGS package.
365 */
366static void rtMpLinuxOnPairWrapper(void *pvInfo)
367{
368 PRTMPARGS pArgs = (PRTMPARGS)pvInfo;
369 RTCPUID idCpu = RTMpCpuId();
370
371 if ( idCpu == pArgs->idCpu
372 || idCpu == pArgs->idCpu2)
373 {
374 pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
375 ASMAtomicIncU32(&pArgs->cHits);
376 }
377}
378#endif
379
380
381RTDECL(int) RTMpOnPair(RTCPUID idCpu1, RTCPUID idCpu2, uint32_t fFlags, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
382{
383#ifdef CONFIG_SMP
384 IPRT_LINUX_SAVE_EFL_AC();
385 int rc;
386 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
387
388 AssertReturn(idCpu1 != idCpu2, VERR_INVALID_PARAMETER);
389 AssertReturn(!(fFlags & RTMPON_F_VALID_MASK), VERR_INVALID_FLAGS);
390
391 /*
392 * Check that both CPUs are online before doing the broadcast call.
393 */
394 RTThreadPreemptDisable(&PreemptState);
395 if ( RTMpIsCpuOnline(idCpu1)
396 && RTMpIsCpuOnline(idCpu2))
397 {
398 /*
399 * Use the smp_call_function variant taking a cpu mask where available,
400 * falling back on broadcast with filter. Slight snag if one of the
401 * CPUs is the one we're running on, we must do the call and the post
402 * call wait ourselves.
403 */
404# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
405 /* 2.6.28 introduces CONFIG_CPUMASK_OFFSTACK */
406 cpumask_var_t DstCpuMask;
407# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
408 cpumask_t DstCpuMask;
409# endif
410 RTCPUID idCpuSelf = RTMpCpuId();
411 bool const fCallSelf = idCpuSelf == idCpu1 || idCpuSelf == idCpu2;
412 RTMPARGS Args;
413 Args.pfnWorker = pfnWorker;
414 Args.pvUser1 = pvUser1;
415 Args.pvUser2 = pvUser2;
416 Args.idCpu = idCpu1;
417 Args.idCpu2 = idCpu2;
418 Args.cHits = 0;
419
420# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
421 if (!zalloc_cpumask_var(&DstCpuMask, GFP_KERNEL))
422 return VERR_NO_MEMORY;
423 cpumask_set_cpu(idCpu1, DstCpuMask);
424 cpumask_set_cpu(idCpu2, DstCpuMask);
425# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
426 if (!alloc_cpumask_var(&DstCpuMask, GFP_KERNEL))
427 return VERR_NO_MEMORY;
428 cpumask_clear(DstCpuMask);
429 cpumask_set_cpu(idCpu1, DstCpuMask);
430 cpumask_set_cpu(idCpu2, DstCpuMask);
431# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
432 cpus_clear(DstCpuMask);
433 cpu_set(idCpu1, DstCpuMask);
434 cpu_set(idCpu2, DstCpuMask);
435# endif
436
437# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
438 smp_call_function_many(DstCpuMask, rtmpLinuxWrapperPostInc, &Args, !fCallSelf /* wait */);
439 rc = 0;
440# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
441 rc = smp_call_function_mask(DstCpuMask, rtmpLinuxWrapperPostInc, &Args, !fCallSelf /* wait */);
442# else /* older kernels */
443 rc = smp_call_function(rtMpLinuxOnPairWrapper, &Args, 0 /* retry */, !fCallSelf /* wait */);
444# endif /* older kernels */
445 Assert(rc == 0);
446
447 /* Call ourselves if necessary and wait for the other party to be done. */
448 if (fCallSelf)
449 {
450 uint32_t cLoops = 0;
451 rtmpLinuxWrapper(&Args);
452 while (ASMAtomicReadU32(&Args.cHits) < 2)
453 {
454 if ((cLoops & 0x1ff) == 0 && !RTMpIsCpuOnline(idCpuSelf == idCpu1 ? idCpu2 : idCpu1))
455 break;
456 cLoops++;
457 ASMNopPause();
458 }
459 }
460
461 Assert(Args.cHits <= 2);
462 if (Args.cHits == 2)
463 rc = VINF_SUCCESS;
464 else if (Args.cHits == 1)
465 rc = VERR_NOT_ALL_CPUS_SHOWED;
466 else if (Args.cHits == 0)
467 rc = VERR_CPU_OFFLINE;
468 else
469 rc = VERR_CPU_IPE_1;
470
471# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
472 free_cpumask_var(DstCpuMask);
473# endif
474 }
475 /*
476 * A CPU must be present to be considered just offline.
477 */
478 else if ( RTMpIsCpuPresent(idCpu1)
479 && RTMpIsCpuPresent(idCpu2))
480 rc = VERR_CPU_OFFLINE;
481 else
482 rc = VERR_CPU_NOT_FOUND;
483 RTThreadPreemptRestore(&PreemptState);;
484 IPRT_LINUX_RESTORE_EFL_AC();
485 return rc;
486
487#else /* !CONFIG_SMP */
488 RT_NOREF(idCpu1, idCpu2, fFlags, pfnWorker, pvUser1, pvUser2);
489 return VERR_CPU_NOT_FOUND;
490#endif /* !CONFIG_SMP */
491}
492RT_EXPORT_SYMBOL(RTMpOnPair);
493
494
495RTDECL(bool) RTMpOnPairIsConcurrentExecSupported(void)
496{
497 return true;
498}
499RT_EXPORT_SYMBOL(RTMpOnPairIsConcurrentExecSupported);
500
501
502#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19) && defined(CONFIG_SMP)
503/**
504 * Wrapper between the native linux per-cpu callbacks and PFNRTWORKER
505 * employed by RTMpOnSpecific on older kernels that lacks smp_call_function_single.
506 *
507 * @param pvInfo Pointer to the RTMPARGS package.
508 */
509static void rtmpOnSpecificLinuxWrapper(void *pvInfo)
510{
511 PRTMPARGS pArgs = (PRTMPARGS)pvInfo;
512 RTCPUID idCpu = RTMpCpuId();
513
514 if (idCpu == pArgs->idCpu)
515 {
516 pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
517 ASMAtomicIncU32(&pArgs->cHits);
518 }
519}
520#endif
521
522
523RTDECL(int) RTMpOnSpecific(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
524{
525 IPRT_LINUX_SAVE_EFL_AC();
526 int rc;
527 RTMPARGS Args;
528
529 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
530 Args.pfnWorker = pfnWorker;
531 Args.pvUser1 = pvUser1;
532 Args.pvUser2 = pvUser2;
533 Args.idCpu = idCpu;
534 Args.cHits = 0;
535
536 if (!RTMpIsCpuPossible(idCpu))
537 return VERR_CPU_NOT_FOUND;
538
539 RTThreadPreemptDisable(&PreemptState);
540 if (idCpu != RTMpCpuId())
541 {
542#ifdef CONFIG_SMP
543 if (RTMpIsCpuOnline(idCpu))
544 {
545# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
546 rc = smp_call_function_single(idCpu, rtmpLinuxWrapper, &Args, 1 /* wait */);
547# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
548 rc = smp_call_function_single(idCpu, rtmpLinuxWrapper, &Args, 0 /* retry */, 1 /* wait */);
549# else /* older kernels */
550 rc = smp_call_function(rtmpOnSpecificLinuxWrapper, &Args, 0 /* retry */, 1 /* wait */);
551# endif /* older kernels */
552 Assert(rc == 0);
553 rc = Args.cHits ? VINF_SUCCESS : VERR_CPU_OFFLINE;
554 }
555 else
556#endif /* CONFIG_SMP */
557 rc = VERR_CPU_OFFLINE;
558 }
559 else
560 {
561 rtmpLinuxWrapper(&Args);
562 rc = VINF_SUCCESS;
563 }
564 RTThreadPreemptRestore(&PreemptState);;
565
566 NOREF(rc);
567 IPRT_LINUX_RESTORE_EFL_AC();
568 return rc;
569}
570RT_EXPORT_SYMBOL(RTMpOnSpecific);
571
572
573#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19) && defined(CONFIG_SMP)
/**
 * Dummy callback used by RTMpPokeCpu.
 *
 * Its only purpose is to trigger the IPI delivered by the cross call.
 *
 * @param   pvInfo      Ignored.
 */
static void rtmpLinuxPokeCpuCallback(void *pvInfo)
{
    RT_NOREF(pvInfo);
}
583#endif
584
585
/**
 * Pokes the specified CPU by sending it an IPI via a no-op cross call.
 *
 * @returns VINF_SUCCESS on success, VERR_CPU_OFFLINE / VERR_CPU_NOT_FOUND if
 *          the CPU cannot be poked, VERR_NOT_SUPPORTED on pre-2.6.19 kernels.
 * @param   idCpu       The CPU to poke.
 */
RTDECL(int) RTMpPokeCpu(RTCPUID idCpu)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
    IPRT_LINUX_SAVE_EFL_AC();
    int rc;
    if (RTMpIsCpuPossible(idCpu))
    {
        if (RTMpIsCpuOnline(idCpu))
        {
# ifdef CONFIG_SMP
#  if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
            /* Fire and forget: wait = 0, the IPI itself is the point. */
            rc = smp_call_function_single(idCpu, rtmpLinuxPokeCpuCallback, NULL, 0 /* wait */);
#  elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
            rc = smp_call_function_single(idCpu, rtmpLinuxPokeCpuCallback, NULL, 0 /* retry */, 0 /* wait */);
#  else /* older kernels */
#   error oops
#  endif /* older kernels */
            Assert(rc == 0);
# endif /* CONFIG_SMP */
            rc = VINF_SUCCESS;
        }
        else
            rc = VERR_CPU_OFFLINE;
    }
    else
        rc = VERR_CPU_NOT_FOUND;
    IPRT_LINUX_RESTORE_EFL_AC();
    return rc;

#else /* older kernels */
    /* no unicast here? */
    return VERR_NOT_SUPPORTED;
#endif /* older kernels */
}
RT_EXPORT_SYMBOL(RTMpPokeCpu);
621
622
623RTDECL(bool) RTMpOnAllIsConcurrentSafe(void)
624{
625 return true;
626}
627RT_EXPORT_SYMBOL(RTMpOnAllIsConcurrentSafe);
628
注意: 瀏覽 TracBrowser 來幫助您使用儲存庫瀏覽器

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette