VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/solaris/vbi/i86pc/os/vbi.c@ 25720

最後變更 在這個檔案從25720是 25183,由 vboxsync 提交於 15 年 前

r0drv/Solaris: fix warnings.

  • 屬性 svn:eol-style 設為 native
檔案大小: 24.4 KB
 
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25
26/*
27 * Private interfaces for VirtualBox access to Solaris kernel internal
28 * facilities.
29 *
30 * See sys/vbi.h for what each function does.
31 */
32
33#include <sys/kmem.h>
34#include <sys/types.h>
35#include <sys/mman.h>
36#include <sys/thread.h>
37#include <sys/mutex.h>
38#include <sys/condvar.h>
39#include <sys/sdt.h>
40#include <sys/schedctl.h>
41#include <sys/time.h>
42#include <sys/sysmacros.h>
43#include <sys/cmn_err.h>
44#include <sys/vmsystm.h>
45#include <sys/cyclic.h>
46#include <sys/class.h>
47#include <sys/cpuvar.h>
48#include <sys/kobj.h>
49#include <sys/x_call.h>
50#include <sys/x86_archext.h>
51#include <vm/hat.h>
52#include <vm/seg_vn.h>
53#include <vm/seg_kmem.h>
54#include <sys/ddi.h>
55#include <sys/sunddi.h>
56#include <sys/modctl.h>
57#include <sys/machparam.h>
58#include <sys/utsname.h>
59
60#include "vbi.h"
61
62#define VBIPROC() ((proc_t *)vbi_proc())
63
64/*
65 * We have to use dl_lookup to find contig_free().
66 */
67extern void *contig_alloc(size_t, ddi_dma_attr_t *, uintptr_t, int);
68extern void contig_free(void *, size_t);
69#pragma weak contig_free
70static void (*p_contig_free)(void *, size_t) = contig_free;
71
72/*
73 * Workarounds for running on old versions of solaris with different cross call
 * interfaces. If we find xc_init_cpu() in the kernel, then just use the defined
75 * interfaces for xc_call() from the include file where the xc_call()
76 * interfaces just takes a pointer to a ulong_t array. The array must be long
77 * enough to hold "ncpus" bits at runtime.
78
79 * The reason for the hacks is that using the type "cpuset_t" is pretty much
80 * impossible from code built outside the Solaris source repository that wants
81 * to run on multiple releases of Solaris.
82 *
83 * For old style xc_call()s, 32 bit solaris and older 64 bit versions use
84 * "ulong_t" as cpuset_t.
85 *
86 * Later versions of 64 bit Solaris used: struct {ulong_t words[x];}
87 * where "x" depends on NCPU.
88 *
89 * We detect the difference in 64 bit support by checking the kernel value of
90 * max_cpuid, which always holds the compiled value of NCPU - 1.
91 *
92 * If Solaris increases NCPU to more than 256, this module will continue
93 * to work on all versions of Solaris as long as the number of installed
94 * CPUs in the machine is <= VBI_NCPU. If VBI_NCPU is increased, this code
95 * has to be re-written some to provide compatibility with older Solaris which
96 * expects cpuset_t to be based on NCPU==256 -- or we discontinue support
97 * of old Nevada/S10.
98 */
99static int use_old = 0;
100static int use_old_with_ulong = 0;
101static void (*p_xc_call)() = (void (*)())xc_call;
102
103#define VBI_NCPU 256
104#define VBI_SET_WORDS (VBI_NCPU / (sizeof (ulong_t) * 8))
105typedef struct vbi_cpuset {
106 ulong_t words[VBI_SET_WORDS];
107} vbi_cpuset_t;
108#define X_CALL_HIPRI (2) /* for old Solaris interface */
109
110/*
111 * module linkage stuff
112 */
113#if 0
114static struct modlmisc vbi_modlmisc = {
115 &mod_miscops, "VirtualBox Interfaces V6"
116};
117
118static struct modlinkage vbi_modlinkage = {
119 MODREV_1, { (void *)&vbi_modlmisc, NULL }
120};
121#endif
122
123extern uintptr_t kernelbase;
124#define IS_KERNEL(v) ((uintptr_t)(v) >= kernelbase)
125
static int vbi_verbose = 0;

/*
 * Emit a warning when verbose diagnostics are enabled.  Wrapped in
 * do { } while (0) so the macro behaves as a single statement inside
 * if/else without a dangling-else hazard.
 */
#define VBI_VERBOSE(msg) do { if (vbi_verbose) cmn_err(CE_WARN, msg); } while (0)
129
130/* Introduced in v6 */
131static int vbi_is_nevada = 0;
132
133#ifdef _LP64
134/* 64-bit Solaris 10 offsets */
135/* CPU */
136static int off_s10_cpu_runrun = 232;
137static int off_s10_cpu_kprunrun = 233;
138/* kthread_t */
139static int off_s10_t_preempt = 42;
140
141/* 64-bit Solaris 11 (Nevada/OpenSolaris) offsets */
142/* CPU */
143static int off_s11_cpu_runrun = 216;
144static int off_s11_cpu_kprunrun = 217;
145/* kthread_t */
146static int off_s11_t_preempt = 42;
147#else
148/* 32-bit Solaris 10 offsets */
149/* CPU */
150static int off_s10_cpu_runrun = 124;
151static int off_s10_cpu_kprunrun = 125;
152/* kthread_t */
153static int off_s10_t_preempt = 26;
154
155/* 32-bit Solaris 11 (Nevada/OpenSolaris) offsets */
156/* CPU */
157static int off_s11_cpu_runrun = 112;
158static int off_s11_cpu_kprunrun = 113;
159/* kthread_t */
160static int off_s11_t_preempt = 26;
161#endif
162
163
/* Which offsets will be used (selected at runtime by vbi_init()) */
static int off_cpu_runrun = -1;
static int off_cpu_kprunrun = -1;
static int off_t_preempt = -1;

/* Field accessors built on the runtime-selected structure offsets. */
#define VBI_T_PREEMPT (*((char *)curthread + off_t_preempt))
#define VBI_CPU_KPRUNRUN (*((char *)CPU + off_cpu_kprunrun))
#define VBI_CPU_RUNRUN (*((char *)CPU + off_cpu_runrun))

#undef kpreempt_disable
#undef kpreempt_enable

/*
 * Open-coded kpreempt_disable()/kpreempt_enable() equivalents using the
 * offset-based accessors above.  do { } while (0) makes each macro a
 * single statement, safe in unbraced if/else bodies.
 */
#define VBI_PREEMPT_DISABLE() \
	do { \
		VBI_T_PREEMPT++; \
		ASSERT(VBI_T_PREEMPT >= 1); \
	} while (0)
#define VBI_PREEMPT_ENABLE() \
	do { \
		ASSERT(VBI_T_PREEMPT >= 1); \
		if (--VBI_T_PREEMPT == 0 && \
		    VBI_CPU_RUNRUN) \
			kpreempt(KPREEMPT_SYNC); \
	} while (0)
188
189/* End of v6 intro */
190
191#if 0
192int
193_init(void)
194{
195 int err = vbi_init();
196 if (!err)
197 err = mod_install(&vbi_modlinkage);
198 return (err);
199}
200#endif
201
/*
 * One-time initialization: probe the running kernel and configure this
 * module to match it.  Selects the cross-call flavor (new xc_call() vs.
 * the old pass-cpuset-by-value ABI), resolves contig_free(), and picks
 * the hard-coded CPU/thread structure offsets for S10 vs. Nevada.
 * Returns 0 on success, EINVAL when the kernel looks incompatible.
 */
int
vbi_init(void)
{
	/*
	 * Check to see if this version of virtualbox interface module will work
	 * with the kernel.
	 */
	if (kobj_getsymvalue("xc_init_cpu", 1) != NULL) {
		/*
		 * Our bit vector storage needs to be large enough for the
		 * actual number of CPUs running in the system.
		 */
		if (ncpus > VBI_NCPU)
			return (EINVAL);
	} else {
		use_old = 1;
		if (max_cpuid + 1 == sizeof(ulong_t) * 8)
			use_old_with_ulong = 1;
		else if (max_cpuid + 1 != VBI_NCPU)
			return (EINVAL);	/* cpuset_t size mismatch */
	}

	/*
	 * In older versions of Solaris contig_free() is a static routine.
	 */
	if (p_contig_free == NULL) {
		p_contig_free = (void (*)(void *, size_t))
		    kobj_getsymvalue("contig_free", 1);
		if (p_contig_free == NULL) {
			cmn_err(CE_NOTE, " contig_free() not found in kernel");
			return (EINVAL);
		}
	}

	/*
	 * Check if this is S10 or Nevada
	 */
	if (!strncmp(utsname.release, "5.11", sizeof("5.11") - 1))
	{
		/* Nevada detected... */
		vbi_is_nevada = 1;

		off_cpu_runrun = off_s11_cpu_runrun;
		off_cpu_kprunrun = off_s11_cpu_kprunrun;
		off_t_preempt = off_s11_t_preempt;
	}
	else
	{
		/* Solaris 10 detected... */
		vbi_is_nevada = 0;

		off_cpu_runrun = off_s10_cpu_runrun;
		off_cpu_kprunrun = off_s10_cpu_kprunrun;
		off_t_preempt = off_s10_t_preempt;
	}

	/*
	 * Sanity checking: the fields read through the offsets chosen above
	 * should only hold 0/1 flags or a small preempt count; anything else
	 * means the offsets do not match this kernel build.
	 */
	/* CPU */
	char crr = VBI_CPU_RUNRUN;
	char krr = VBI_CPU_KPRUNRUN;
	if (   (crr < 0 || crr > 1)
	    || (krr < 0 || krr > 1))
	{
		cmn_err(CE_NOTE, ":CPU structure sanity check failed! OS version mismatch.\n");
		return EINVAL;
	}

	/* Thread */
	char t_preempt = VBI_T_PREEMPT;
	if (t_preempt < 0 || t_preempt > 32)
	{
		cmn_err(CE_NOTE, ":Thread structure sanity check failed! OS version mismatch.\n");
		return EINVAL;
	}
	return (0);
}
280
281#if 0
282int
283_fini(void)
284{
285 int err = mod_remove(&vbi_modlinkage);
286 if (err != 0)
287 return (err);
288
289 return (0);
290}
291
292int
293_info(struct modinfo *modinfop)
294{
295 return (mod_info(&vbi_modlinkage, modinfop));
296}
297#endif
298
299
/*
 * Template DMA attributes for contig_alloc(); vbi_internal_alloc()
 * copies this and adjusts dma_attr_addr_hi / dma_attr_sgllen per call.
 */
static ddi_dma_attr_t base_attr = {
	DMA_ATTR_V0,		/* Version Number */
	(uint64_t)0,		/* lower limit */
	(uint64_t)0,		/* high limit */
	(uint64_t)0xffffffff,	/* counter limit */
	(uint64_t)PAGESIZE,	/* pagesize alignment */
	(uint64_t)PAGESIZE,	/* pagesize burst size */
	(uint64_t)PAGESIZE,	/* pagesize effective DMA size */
	(uint64_t)0xffffffff,	/* max DMA xfer size */
	(uint64_t)0xffffffff,	/* segment boundary */
	1,			/* list length (1 for contiguous) */
	1,			/* device granularity */
	0			/* bus-specific flags */
};
314
/*
 * Allocate page-aligned wired memory whose physical addresses are at or
 * below *phys.  On success *phys is overwritten with the physical
 * address of the first page and the kernel virtual address is returned.
 * Returns NULL when size is zero or not a multiple of PAGESIZE, or when
 * the allocation fails.  When contig is non-zero the pages must be
 * physically contiguous (scatter/gather list length 1).
 */
static void *
vbi_internal_alloc(uint64_t *phys, size_t size, int contig)
{
	ddi_dma_attr_t attr;
	pfn_t pfn;
	void *ptr;
	uint_t npages;

	if ((size & PAGEOFFSET) != 0)
		return (NULL);
	npages = size >> PAGESHIFT;
	if (npages == 0)
		return (NULL);

	attr = base_attr;
	attr.dma_attr_addr_hi = *phys;	/* caller-supplied upper bound */
	if (!contig)
		attr.dma_attr_sgllen = npages;
	ptr = contig_alloc(size, &attr, PAGESIZE, 1);

	if (ptr == NULL) {
		VBI_VERBOSE("vbi_internal_alloc() failure");
		return (NULL);
	}

	pfn = hat_getpfnum(kas.a_hat, (caddr_t)ptr);
	if (pfn == PFN_INVALID)
		panic("vbi_contig_alloc(): hat_getpfnum() failed\n");
	*phys = (uint64_t)pfn << PAGESHIFT;
	return (ptr);
}
346
/*
 * Allocate physically contiguous wired memory below *phys; see
 * vbi_internal_alloc() for the in/out semantics of phys.
 */
void *
vbi_contig_alloc(uint64_t *phys, size_t size)
{
	return (vbi_internal_alloc(phys, size, 1));
}
352
/* Free memory from vbi_contig_alloc() via the resolved contig_free(). */
void
vbi_contig_free(void *va, size_t size)
{
	p_contig_free(va, size);
}
358
/*
 * Map the physical range [pa, pa + size) into kernel address space with
 * the given protections.  Both pa and size must be page aligned.
 * Returns the kernel virtual address, or NULL on bad arguments.
 */
void *
vbi_kernel_map(uint64_t pa, size_t size, uint_t prot)
{
	caddr_t va;

	if ((pa & PAGEOFFSET) || (size & PAGEOFFSET)) {
		VBI_VERBOSE("vbi_kernel_map() bad pa or size");
		return (NULL);
	}

	va = vmem_alloc(heap_arena, size, VM_SLEEP);

	/* load locked mappings so the range never faults */
	hat_devload(kas.a_hat, va, size, (pfn_t)(pa >> PAGESHIFT),
	    prot, HAT_LOAD | HAT_LOAD_LOCK | HAT_UNORDERED_OK);

	return (va);
}
376
/*
 * Undo a mapping: kernel VAs (from vbi_kernel_map()) are unloaded and
 * their arena space released; user VAs are removed from the current
 * process's address space under the range lock.
 */
void
vbi_unmap(void *va, size_t size)
{
	if (IS_KERNEL(va)) {
		hat_unload(kas.a_hat, va, size, HAT_UNLOAD | HAT_UNLOAD_UNLOCK);
		vmem_free(heap_arena, va, size);
	} else {
		struct as *as = VBIPROC()->p_as;

		as_rangelock(as);
		(void) as_unmap(as, va, size);
		as_rangeunlock(as);
	}
}
391
/* Opaque pointer to the current kernel thread. */
void *
vbi_curthread(void)
{
	return (curthread);
}
397
/*
 * Returns 1 if the caller should yield the CPU: preemption was enabled
 * on entry (our disable brought the count to exactly 1) and a
 * kernel-priority preemption request is pending on this CPU.
 */
int
vbi_yield(void)
{
	int rv = 0;

	vbi_preempt_disable();

	char tpr = VBI_T_PREEMPT;
	char kpr = VBI_CPU_KPRUNRUN;
	if (tpr == 1 && kpr)
		rv = 1;

	vbi_preempt_enable();
	return (rv);
}
413
/* Timer granularity in nanoseconds (one system clock tick). */
uint64_t
vbi_timer_granularity(void)
{
	return (nsec_per_tick);
}
419
/* State for a vbi timer: cyclic bookkeeping plus the client callback. */
typedef struct vbi_timer {
	cyc_handler_t vbi_handler;
	cyclic_id_t vbi_cyclic;
	uint64_t vbi_interval;	/* 0 means one-shot */
	void (*vbi_func)();
	void *vbi_arg1;
	void *vbi_arg2;
} vbi_timer_t;
428
/*
 * Cyclic handler: a zero interval marks a one-shot timer, so stop it
 * before delivering the single callback.
 */
static void
vbi_timer_callback(void *arg)
{
	vbi_timer_t *t = arg;

	if (t->vbi_interval == 0)
		vbi_timer_stop(arg);
	t->vbi_func(t->vbi_arg1, t->vbi_arg2);
}
438
439void *
440vbi_timer_create(void *callback, void *arg1, void *arg2, uint64_t interval)
441{
442 vbi_timer_t *t = kmem_zalloc(sizeof (*t), KM_SLEEP);
443
444 t->vbi_func = (void (*)())callback;
445 t->vbi_arg1 = arg1;
446 t->vbi_arg2 = arg2;
447 t->vbi_handler.cyh_func = vbi_timer_callback;
448 t->vbi_handler.cyh_arg = (void *)t;
449 t->vbi_handler.cyh_level = CY_LOCK_LEVEL;
450 t->vbi_cyclic = CYCLIC_NONE;
451 t->vbi_interval = interval;
452 return (t);
453}
454
455void
456vbi_timer_destroy(void *timer)
457{
458 vbi_timer_t *t = timer;
459 if (t != NULL) {
460 vbi_timer_stop(timer);
461 kmem_free(t, sizeof (*t));
462 }
463}
464
/*
 * Arm the timer to fire "when" nanoseconds from now.  For a one-shot
 * timer (interval 0) the cyclic still needs some interval value; the
 * callback stops it after the first fire.
 */
void
vbi_timer_start(void *timer, uint64_t when)
{
	vbi_timer_t *t = timer;
	cyc_time_t fire_time;
	uint64_t interval = t->vbi_interval;

	mutex_enter(&cpu_lock);	/* cyclic_add() is called under cpu_lock */
	when += gethrtime();
	fire_time.cyt_when = when;
	if (interval == 0)
		fire_time.cyt_interval = when;	/* placeholder; stopped on first fire */
	else
		fire_time.cyt_interval = interval;
	t->vbi_cyclic = cyclic_add(&t->vbi_handler, &fire_time);
	mutex_exit(&cpu_lock);
}
482
/*
 * Disarm the timer.  The unlocked pre-check avoids taking cpu_lock for
 * timers that are already stopped; the state is re-checked under the
 * lock before the cyclic is removed.
 */
void
vbi_timer_stop(void *timer)
{
	vbi_timer_t *t = timer;

	if (t->vbi_cyclic == CYCLIC_NONE)
		return;
	mutex_enter(&cpu_lock);
	if (t->vbi_cyclic != CYCLIC_NONE) {
		cyclic_remove(t->vbi_cyclic);
		t->vbi_cyclic = CYCLIC_NONE;
	}
	mutex_exit(&cpu_lock);
}
497
/* Current time-of-day in nanoseconds since the epoch. */
uint64_t
vbi_tod(void)
{
	timestruc_t ts;

	mutex_enter(&tod_lock);
	ts = tod_get();
	mutex_exit(&tod_lock);
	return ((uint64_t)ts.tv_sec * 1000000000 + ts.tv_nsec);
}
508
509
/*
 * Current process pointer.  NOTE(review): the drv_getparm() return
 * value is not checked; p is assumed to always be filled in.
 */
void *
vbi_proc(void)
{
	proc_t *p;
	drv_getparm(UPROCP, &p);
	return (p);
}
517
/* Change a kernel thread's dispatch priority, under the thread lock. */
void
vbi_set_priority(void *thread, int priority)
{
	kthread_t *t = thread;

	thread_lock(t);
	(void) thread_change_pri(t, priority, 0);
	thread_unlock(t);
}
527
/*
 * Create and start a kernel thread running func(arg) at the given
 * priority in the current process; len is passed through as the
 * thread_create() argument-length parameter.
 */
void *
vbi_thread_create(void *func, void *arg, size_t len, int priority)
{
	kthread_t *t;

	t = thread_create(NULL, NULL, (void (*)())func, arg, len,
	    VBIPROC(), TS_RUN, priority);
	return (t);
}
537
/* Terminate the calling kernel thread; does not return. */
void
vbi_thread_exit(void)
{
	thread_exit();
}
543
/* Allocate wired memory from the kernel heaptext arena. */
void *
vbi_text_alloc(size_t size)
{
	return (segkmem_alloc(heaptext_arena, size, KM_SLEEP));
}

/* Free memory obtained from vbi_text_alloc(). */
void
vbi_text_free(void *va, size_t size)
{
	segkmem_free(heaptext_arena, va, size);
}
555
/* Id of the CPU the caller is currently running on. */
int
vbi_cpu_id(void)
{
	return (CPU->cpu_id);
}

/* Highest possible CPU id (kernel's compiled NCPU - 1). */
int
vbi_max_cpu_id(void)
{
	return (max_cpuid);
}

/* Maximum number of CPUs the kernel was built for. */
int
vbi_cpu_maxcount(void)
{
	return (max_cpuid + 1);
}

/* Number of CPUs currently configured. */
int
vbi_cpu_count(void)
{
	return (ncpus);
}
579
/*
 * Report whether CPU c is online.  NOTE(review): c is not range-checked
 * here; callers must pass a valid cpu id.
 */
int
vbi_cpu_online(int c)
{
	int x;

	mutex_enter(&cpu_lock);
	x = cpu_is_online(cpu[c]);
	mutex_exit(&cpu_lock);
	return (x);
}
590
/* Disable kernel preemption for the current thread (counted). */
void
vbi_preempt_disable(void)
{
	VBI_PREEMPT_DISABLE();
}

/* Re-enable preemption; preempts immediately if a request is pending. */
void
vbi_preempt_enable(void)
{
	VBI_PREEMPT_ENABLE();
}
602
/*
 * Run func(arg) on every CPU via a high-priority cross call.  The
 * all-ones CPU set is passed differently depending on the kernel
 * detected by vbi_init(): as a single ulong_t, as the whole struct by
 * value (old cpuset_t ABI), or as a pointer to the bit array (new
 * xc_call() interface).
 */
void
vbi_execute_on_all(void *func, void *arg)
{
	vbi_cpuset_t set;
	int i;

	for (i = 0; i < VBI_SET_WORDS; ++i)
		set.words[i] = (ulong_t)-1L;
	if (use_old) {
		if (use_old_with_ulong) {
			p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI,
			    set.words[0], (xc_func_t)func);
		} else {
			p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI,
			    set, (xc_func_t)func);
		}
	} else {
		xc_call((xc_arg_t)arg, 0, 0, &set.words[0], (xc_func_t)func);
	}
}
623
/*
 * Run func(arg) on every CPU except the caller's.  Same set-passing
 * conventions as vbi_execute_on_all(); the calling CPU's bit is
 * cleared from the all-ones set first.
 */
void
vbi_execute_on_others(void *func, void *arg)
{
	vbi_cpuset_t set;
	int i;

	for (i = 0; i < VBI_SET_WORDS; ++i)
		set.words[i] = (ulong_t)-1L;
	BT_CLEAR(set.words, vbi_cpu_id());
	if (use_old) {
		if (use_old_with_ulong) {
			p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI,
			    set.words[0], (xc_func_t)func);
		} else {
			p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI,
			    set, (xc_func_t)func);
		}
	} else {
		xc_call((xc_arg_t)arg, 0, 0, &set.words[0], (xc_func_t)func);
	}
}
645
/*
 * Run func(arg) on CPU c only.  Same set-passing conventions as
 * vbi_execute_on_all(); the set contains just CPU c's bit.
 */
void
vbi_execute_on_one(void *func, void *arg, int c)
{
	vbi_cpuset_t set;
	int i;

	for (i = 0; i < VBI_SET_WORDS; ++i)
		set.words[i] = 0;
	BT_SET(set.words, c);
	if (use_old) {
		if (use_old_with_ulong) {
			p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI,
			    set.words[0], (xc_func_t)func);
		} else {
			p_xc_call((xc_arg_t)arg, 0, 0, X_CALL_HIPRI,
			    set, (xc_func_t)func);
		}
	} else {
		xc_call((xc_arg_t)arg, 0, 0, &set.words[0], (xc_func_t)func);
	}
}
667
/*
 * Lock a virtual address range in memory.  Returns 0 on success, -1 on
 * failure; *handle is always set to NULL (nothing to hand back).
 */
int
vbi_lock_va(void *addr, size_t len, int access, void **handle)
{
	faultcode_t err;

	/*
	 * kernel mappings on x86 are always locked, so only handle user.
	 */
	*handle = NULL;
	if (!IS_KERNEL(addr)) {
		err = as_fault(VBIPROC()->p_as->a_hat, VBIPROC()->p_as,
		    (caddr_t)addr, len, F_SOFTLOCK, access);
		if (err != 0) {
			VBI_VERBOSE("vbi_lock_va() failed to lock");
			return (-1);
		}
	}
	return (0);
}
687
/*ARGSUSED*/
/* Undo vbi_lock_va(); kernel addresses were never soft-locked. */
void
vbi_unlock_va(void *addr, size_t len, int access, void *handle)
{
	if (!IS_KERNEL(addr))
		as_fault(VBIPROC()->p_as->a_hat, VBIPROC()->p_as,
		    (caddr_t)addr, len, F_SOFTUNLOCK, access);
}
696
/*
 * Translate a virtual address (kernel or current process) into a
 * physical address.  Returns (uint64_t)-1 when no mapping exists.
 */
uint64_t
vbi_va_to_pa(void *addr)
{
	struct hat *hat;
	pfn_t pfn;
	uintptr_t v = (uintptr_t)addr;

	if (IS_KERNEL(v))
		hat = kas.a_hat;
	else
		hat = VBIPROC()->p_as->a_hat;
	pfn = hat_getpfnum(hat, (caddr_t)(v & PAGEMASK));
	if (pfn == PFN_INVALID)
		return (-(uint64_t)1);
	/* recombine the page frame with the in-page offset */
	return (((uint64_t)pfn << PAGESHIFT) | (v & PAGEOFFSET));
}
713
714
/* Arguments handed from vbi_user_map() to segvbi_create(). */
struct segvbi_crargs {
	uint64_t *palist;	/* physical addresses, one per page */
	uint_t prot;
};

/* Per-segment private data: just the fixed protections. */
struct segvbi_data {
	uint_t prot;
};

static struct seg_ops segvbi_ops;
725
/*
 * as_map() callback: initialize a segvbi segment and immediately load
 * locked mappings for every physical page in the caller's palist.
 */
static int
segvbi_create(struct seg *seg, void *args)
{
	struct segvbi_crargs *a = args;
	struct segvbi_data *data;
	struct as *as = seg->s_as;
	int error = 0;
	caddr_t va;
	ulong_t pgcnt;
	ulong_t p;

	hat_map(as->a_hat, seg->s_base, seg->s_size, HAT_MAP);
	data = kmem_zalloc(sizeof (*data), KM_SLEEP);
	data->prot = a->prot | PROT_USER;

	seg->s_ops = &segvbi_ops;
	seg->s_data = data;

	/*
	 * now load locked mappings to the pages
	 */
	va = seg->s_base;
	pgcnt = seg->s_size >> PAGESHIFT;
	for (p = 0; p < pgcnt; ++p, va += PAGESIZE) {
		hat_devload(as->a_hat, va,
		    PAGESIZE, a->palist[p] >> PAGESHIFT,
		    data->prot | HAT_UNORDERED_OK, HAT_LOAD | HAT_LOAD_LOCK);
	}

	return (error);
}
757
/*
 * Duplicate a seg and return new segment in newseg.
 * NOTE(review): only the protections are copied; this routine loads no
 * mappings into the new address space.
 */
static int
segvbi_dup(struct seg *seg, struct seg *newseg)
{
	struct segvbi_data *data = seg->s_data;
	struct segvbi_data *ndata;

	ndata = kmem_zalloc(sizeof (*data), KM_SLEEP);
	ndata->prot = data->prot;
	newseg->s_ops = &segvbi_ops;
	newseg->s_data = ndata;

	return (0);
}
774
/*
 * Unmap a segvbi segment.  Only whole-segment unmaps are supported
 * (partial unmaps return ENOTSUP); misaligned requests panic.
 */
static int
segvbi_unmap(struct seg *seg, caddr_t addr, size_t len)
{
	if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size ||
	    (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET))
		panic("segvbi_unmap");

	if (addr != seg->s_base || len != seg->s_size)
		return (ENOTSUP);

	hat_unload(seg->s_as->a_hat, addr, len,
	    HAT_UNLOAD_UNMAP | HAT_UNLOAD_UNLOCK);

	seg_free(seg);
	return (0);
}
791
/* Release the per-segment private data. */
static void
segvbi_free(struct seg *seg)
{
	struct segvbi_data *data = seg->s_data;
	kmem_free(data, sizeof (*data));
}
798
/*
 * We never demand-fault for seg_vbi: all mappings are created locked in
 * segvbi_create(), so any fault here is a bad access.
 */
static int
segvbi_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
    enum fault_type type, enum seg_rw rw)
{
	return (FC_MAKE_ERR(EFAULT));
}

/* Fault-ahead is a no-op; everything is already resident. */
static int
segvbi_faulta(struct seg *seg, caddr_t addr)
{
	return (0);
}

/* Protections are fixed at segment creation time. */
static int
segvbi_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
{
	return (EACCES);
}

static int
segvbi_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
{
	return (EINVAL);
}

/* No page clustering support. */
static int
segvbi_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
	return (-1);
}

/* Nothing to sync; always succeeds. */
static int
segvbi_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags)
{
	return (0);
}
838
/*
 * Residency report: every page of a segvbi segment is always in core,
 * so mark the whole (page-rounded) range and return its length.
 */
static size_t
segvbi_incore(struct seg *seg, caddr_t addr, size_t len, char *vec)
{
	size_t v;

	for (v = 0, len = (len + PAGEOFFSET) & PAGEMASK; len;
	    len -= PAGESIZE, v += PAGESIZE)
		*vec++ = 1;
	return (v);
}
849
/* Lock operations are no-ops; mappings are created locked. */
static int
segvbi_lockop(struct seg *seg, caddr_t addr,
    size_t len, int attr, int op, ulong_t *lockmap, size_t pos)
{
	return (0);
}

/*
 * NOTE(review): seg_ops getprot is normally expected to fill protv[]
 * and return 0; this implementation returns the protections as the
 * return value and never writes *protv -- verify callers before
 * changing.
 */
static int
segvbi_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
{
	struct segvbi_data *data = seg->s_data;
	return (data->prot);
}

/* Offset of addr from the start of the segment. */
static u_offset_t
segvbi_getoffset(struct seg *seg, caddr_t addr)
{
	return ((uintptr_t)addr - (uintptr_t)seg->s_base);
}

static int
segvbi_gettype(struct seg *seg, caddr_t addr)
{
	return (MAP_SHARED);
}

/* Dummy vnode handed out by segvbi_getvp(). */
static vnode_t vbivp;

static int
segvbi_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
{
	*vpp = &vbivp;
	return (0);
}

/* Advice is ignored. */
static int
segvbi_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
{
	return (0);
}

/* Nothing to contribute to a crash dump. */
static void
segvbi_dump(struct seg *seg)
{}
894
/* Remaining seg_ops entries: unsupported or default answers. */
static int
segvbi_pagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
	return (ENOTSUP);
}

static int
segvbi_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
{
	return (ENOTSUP);
}

static int
segvbi_getmemid(struct seg *seg, caddr_t addr, memid_t *memid)
{
	return (ENODEV);
}

static lgrp_mem_policy_info_t *
segvbi_getpolicy(struct seg *seg, caddr_t addr)
{
	return (NULL);
}

static int
segvbi_capable(struct seg *seg, segcapability_t capability)
{
	return (0);
}
925
/* seg_ops vector for segvbi segments; swapout is not implemented. */
static struct seg_ops segvbi_ops = {
	segvbi_dup,
	segvbi_unmap,
	segvbi_free,
	segvbi_fault,
	segvbi_faulta,
	segvbi_setprot,
	segvbi_checkprot,
	(int (*)())segvbi_kluster,
	(size_t (*)(struct seg *))NULL, /* swapout */
	segvbi_sync,
	segvbi_incore,
	segvbi_lockop,
	segvbi_getprot,
	segvbi_getoffset,
	segvbi_gettype,
	segvbi_getvp,
	segvbi_advise,
	segvbi_dump,
	segvbi_pagelock,
	segvbi_setpagesize,
	segvbi_getmemid,
	segvbi_getpolicy,
	segvbi_capable
};
951
952
953
/*
 * Interfaces to inject physical pages into user address space
 * and later remove them.
 *
 * Maps the pages in palist into the current process at an address
 * chosen by map_addr() (returned through *va).  Returns 0 on success
 * or an errno value.
 */
int
vbi_user_map(caddr_t *va, uint_t prot, uint64_t *palist, size_t len)
{
	struct as *as = VBIPROC()->p_as;
	struct segvbi_crargs args;
	int error = 0;

	args.palist = palist;
	args.prot = prot;
	as_rangelock(as);
	map_addr(va, len, 0, 0, MAP_SHARED);
	if (*va != NULL)
		error = as_map(as, *va, len, segvbi_create, &args);
	else
		error = ENOMEM;
	if (error)
		VBI_VERBOSE("vbi_user_map() failed");
	as_rangeunlock(as);
	return (error);
}
978
979
980/*
981 * This is revision 2 of the interface.
982 */
983
/* Registration record for a CPU online/offline watcher. */
struct vbi_cpu_watch {
	void (*vbi_cpu_func)();	/* client callback(arg, cpu, online) */
	void *vbi_cpu_arg;
};

/*
 * cpu_setup_func_t bridge: translate CPU_ON/CPU_OFF events into the
 * client's (arg, cpu, online) callback; all other events are ignored.
 */
static int
vbi_watcher(cpu_setup_t state, int icpu, void *arg)
{
	vbi_cpu_watch_t *w = arg;
	int online;

	if (state == CPU_ON)
		online = 1;
	else if (state == CPU_OFF)
		online = 0;
	else
		return (0);
	w->vbi_cpu_func(w->vbi_cpu_arg, icpu, online);
	return (0);
}
1004
/*
 * Register func(arg, cpu, online) for CPU online/offline transitions.
 * When current_too is set, the callback is also invoked immediately
 * (under cpu_lock) for every CPU that is already online.
 */
vbi_cpu_watch_t *
vbi_watch_cpus(void (*func)(), void *arg, int current_too)
{
	int c;
	vbi_cpu_watch_t *w;

	w = kmem_alloc(sizeof (*w), KM_SLEEP);
	w->vbi_cpu_func = func;
	w->vbi_cpu_arg = arg;
	mutex_enter(&cpu_lock);
	register_cpu_setup_func(vbi_watcher, w);
	if (current_too) {
		for (c = 0; c < ncpus; ++c) {
			if (cpu_is_online(cpu[c]))
				func(arg, c, 1);
		}
	}
	mutex_exit(&cpu_lock);
	return (w);
}
1025
/* Unregister a watcher installed by vbi_watch_cpus() and free it. */
void
vbi_ignore_cpus(vbi_cpu_watch_t *w)
{
	mutex_enter(&cpu_lock);
	unregister_cpu_setup_func(vbi_watcher, w);
	mutex_exit(&cpu_lock);
	kmem_free(w, sizeof (*w));
}
1034
/*
 * Simple timers are pretty much a pass through to the cyclic subsystem.
 */
struct vbi_stimer {
	cyc_handler_t s_handler;
	cyc_time_t s_fire_time;
	cyclic_id_t s_cyclic;
	uint64_t s_tick;	/* monotonically increasing fire count */
	void (*s_func)(void *, uint64_t);
	void *s_arg;
};

/* Cyclic handler: deliver the callback with the incremented tick. */
static void
vbi_stimer_func(void *arg)
{
	vbi_stimer_t *t = arg;
	t->s_func(t->s_arg, ++t->s_tick);
}
1053
1054extern vbi_stimer_t *
1055vbi_stimer_begin(
1056 void (*func)(void *, uint64_t),
1057 void *arg,
1058 uint64_t when,
1059 uint64_t interval,
1060 int on_cpu)
1061{
1062 vbi_stimer_t *t = kmem_zalloc(sizeof (*t), KM_SLEEP);
1063
1064 t->s_handler.cyh_func = vbi_stimer_func;
1065 t->s_handler.cyh_arg = t;
1066 t->s_handler.cyh_level = CY_LOCK_LEVEL;
1067 t->s_tick = 0;
1068 t->s_func = func;
1069 t->s_arg = arg;
1070
1071 mutex_enter(&cpu_lock);
1072 if (on_cpu != VBI_ANY_CPU && !cpu_is_online(cpu[on_cpu])) {
1073 t = NULL;
1074 goto done;
1075 }
1076
1077 when += gethrtime();
1078 t->s_fire_time.cyt_when = when;
1079 if (interval == 0)
1080 t->s_fire_time.cyt_interval = INT64_MAX - when;
1081 else
1082 t->s_fire_time.cyt_interval = interval;
1083 t->s_cyclic = cyclic_add(&t->s_handler, &t->s_fire_time);
1084 if (on_cpu != VBI_ANY_CPU)
1085 cyclic_bind(t->s_cyclic, cpu[on_cpu], NULL);
1086done:
1087 mutex_exit(&cpu_lock);
1088 return (t);
1089}
1090
/* Stop a simple timer and free its state. */
extern void
vbi_stimer_end(vbi_stimer_t *t)
{
	mutex_enter(&cpu_lock);
	cyclic_remove(t->s_cyclic);
	mutex_exit(&cpu_lock);
	kmem_free(t, sizeof (*t));
}
1099
/*
 * Global timers are more complicated. They include a counter on the callback,
 * that indicates the first call on a given cpu.
 */
struct vbi_gtimer {
	uint64_t *g_counters;	/* per-cpu tick counts, ncpus entries */
	void (*g_func)(void *, uint64_t);
	void *g_arg;
	uint64_t g_when;
	uint64_t g_interval;
	cyclic_id_t g_cyclic;
};

/* Omni-cyclic handler: bump this CPU's counter and call the client. */
static void
vbi_gtimer_func(void *arg)
{
	vbi_gtimer_t *t = arg;
	t->g_func(t->g_arg, ++t->g_counters[vbi_cpu_id()]);
}
1119
/*
 * Whenever a cpu is onlined, need to reset the g_counters[] for it to zero.
 * Also fills in the per-cpu cyclic handler and firing schedule.
 */
static void
vbi_gtimer_online(void *arg, cpu_t *pcpu, cyc_handler_t *h, cyc_time_t *ct)
{
	vbi_gtimer_t *t = arg;
	hrtime_t now;

	t->g_counters[pcpu->cpu_id] = 0;
	h->cyh_func = vbi_gtimer_func;
	h->cyh_arg = t;
	h->cyh_level = CY_LOCK_LEVEL;
	now = gethrtime();
	/*
	 * If the original start time is already in the past, resume half an
	 * interval from now instead of firing immediately.
	 */
	if (t->g_when < now)
		ct->cyt_when = now + t->g_interval / 2;
	else
		ct->cyt_when = t->g_when;
	ct->cyt_interval = t->g_interval;
}
1140
1141
/*
 * Start a periodic timer that fires on every online CPU, keeping a
 * per-cpu tick counter.  Implemented with an omni-cyclic so CPUs that
 * come online later are picked up via vbi_gtimer_online().  Returns
 * NULL for interval == 0 (one-shot global timers are unsupported).
 */
vbi_gtimer_t *
vbi_gtimer_begin(
	void (*func)(void *, uint64_t),
	void *arg,
	uint64_t when,
	uint64_t interval)
{
	vbi_gtimer_t *t;
	cyc_omni_handler_t omni;

	/*
	 * one shot global timer is not supported yet.
	 */
	if (interval == 0)
		return (NULL);

	t = kmem_zalloc(sizeof (*t), KM_SLEEP);
	t->g_counters = kmem_zalloc(ncpus * sizeof (uint64_t), KM_SLEEP);
	t->g_when = when + gethrtime();
	t->g_interval = interval;
	t->g_arg = arg;
	t->g_func = func;
	t->g_cyclic = CYCLIC_NONE;

	omni.cyo_online = (void (*)())vbi_gtimer_online;
	omni.cyo_offline = NULL;
	omni.cyo_arg = t;

	mutex_enter(&cpu_lock);
	t->g_cyclic = cyclic_add_omni(&omni);
	mutex_exit(&cpu_lock);
	return (t);
}
1175
/* Stop a global timer and free its per-cpu counters and state. */
extern void
vbi_gtimer_end(vbi_gtimer_t *t)
{
	mutex_enter(&cpu_lock);
	cyclic_remove(t->g_cyclic);
	mutex_exit(&cpu_lock);
	kmem_free(t->g_counters, ncpus * sizeof (uint64_t));
	kmem_free(t, sizeof (*t));
}
1185
1186int
1187vbi_is_preempt_enabled(void)
1188{
1189 char tpr = VBI_T_PREEMPT;
1190 return (tpr == 0);
1191}
1192
1193void
1194vbi_poke_cpu(int c)
1195{
1196 if (c < ncpus)
1197 poke_cpu(c);
1198}
1199
/*
 * This is revision 5 of the interface. As more functions are added,
 * they should go after this point in the file and the revision level
 * increased. Also change vbi_modlmisc at the top of the file.
 */
/* Interface revision implemented by this module (currently 6). */
uint_t vbi_revision_level = 6;
1206
/*
 * Allocate wired pages (not necessarily contiguous) with physical
 * addresses at or below phys.  Note phys is passed by value, so the
 * actual physical address is not reported back to the caller.
 */
void *
vbi_lowmem_alloc(uint64_t phys, size_t size)
{
	return (vbi_internal_alloc(&phys, size, 0));
}
1212
/* Free memory obtained from vbi_lowmem_alloc(). */
void
vbi_lowmem_free(void *va, size_t size)
{
	p_contig_free(va, size);
}
1218
1219/*
1220 * This is revision 6 of the interface.
1221 */
1222
1223int
1224vbi_is_preempt_pending(void)
1225{
1226 char crr = VBI_CPU_RUNRUN;
1227 char krr = VBI_CPU_KPRUNRUN;
1228 return crr != 0 || krr != 0;
1229}
1230
注意: 瀏覽 TracBrowser 來幫助您使用儲存庫瀏覽器

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette