VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/netbsd/memobj-r0drv-netbsd.c @ 63376

Last change on this file since 63376 was 63191, checked in by vboxsync, 9 years ago

r0drv/netbsd: initial r0drv support for NetBSD.
From Haomai Wang GSoC project with additional changes by Arto Huusko.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 17.1 KB
 
/* $Id: memobj-r0drv-netbsd.c 63191 2016-08-09 03:01:52Z vboxsync $ */
/** @file
 * IPRT - Ring-0 Memory Objects, NetBSD.
 */

/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#include "the-netbsd-kernel.h"

#include <iprt/memobj.h>
#include <iprt/mem.h>
#include <iprt/err.h>
#include <iprt/assert.h>
#include <iprt/log.h>
#include <iprt/param.h>
#include <iprt/process.h>
#include "internal/memobj.h"


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
/**
 * The NetBSD version of the memory object structure.
 */
typedef struct RTR0MEMOBJNETBSD
{
    /** The core structure. */
    RTR0MEMOBJINTERNAL  Core;
    size_t              size;
    struct pglist       pglist;
} RTR0MEMOBJNETBSD, *PRTR0MEMOBJNETBSD;


typedef struct vm_map *vm_map_t;

/**
 * Gets the virtual memory map the specified object is mapped into.
 *
 * @returns VM map handle on success, NULL if no map.
 * @param   pMem    The memory object.
 */
static vm_map_t rtR0MemObjNetBSDGetMap(PRTR0MEMOBJINTERNAL pMem)
{
    switch (pMem->enmType)
    {
        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_CONT:
            return kernel_map;

        case RTR0MEMOBJTYPE_PHYS:
        case RTR0MEMOBJTYPE_PHYS_NC:
            return NULL; /* pretend these have no mapping atm. */

        case RTR0MEMOBJTYPE_LOCK:
            return pMem->u.Lock.R0Process == NIL_RTR0PROCESS
                 ? kernel_map
                 : &((struct proc *)pMem->u.Lock.R0Process)->p_vmspace->vm_map;

        case RTR0MEMOBJTYPE_RES_VIRT:
            return pMem->u.ResVirt.R0Process == NIL_RTR0PROCESS
                 ? kernel_map
                 : &((struct proc *)pMem->u.ResVirt.R0Process)->p_vmspace->vm_map;

        case RTR0MEMOBJTYPE_MAPPING:
            return pMem->u.Mapping.R0Process == NIL_RTR0PROCESS
                 ? kernel_map
                 : &((struct proc *)pMem->u.Mapping.R0Process)->p_vmspace->vm_map;

        default:
            return NULL;
    }
}


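/*
 * rtR0MemObjNativeFree below releases the native resources per object type:
 * kmem_free() for PAGE objects; pmap_kremove() + uvm_km_free() + uvm_pglistfree()
 * for LOW/CONT; uvm_pglistfree() for PHYS/PHYS_NC; uvm_map_pageable() to unwire
 * user-space locks; and uvm_km_free() for reserved or mapped kernel ranges.
 */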
DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)pMem;
    int rc;

    switch (pMemNetBSD->Core.enmType)
    {
        case RTR0MEMOBJTYPE_PAGE:
        {
            kmem_free(pMemNetBSD->Core.pv, pMemNetBSD->Core.cb);
            break;
        }
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_CONT:
        {
            /* Unmap */
            pmap_kremove((vaddr_t)pMemNetBSD->Core.pv, pMemNetBSD->Core.cb);
            /* Free the virtual space */
            uvm_km_free(kernel_map, (vaddr_t)pMemNetBSD->Core.pv, pMemNetBSD->Core.cb, UVM_KMF_VAONLY);
            /* Free the physical pages */
            uvm_pglistfree(&pMemNetBSD->pglist);
            break;
        }
        case RTR0MEMOBJTYPE_PHYS:
        case RTR0MEMOBJTYPE_PHYS_NC:
        {
            /* Free the physical pages */
            uvm_pglistfree(&pMemNetBSD->pglist);
            break;
        }
        case RTR0MEMOBJTYPE_LOCK:
            if (pMemNetBSD->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
            {
                uvm_map_pageable(
                    &((struct proc *)pMemNetBSD->Core.u.Lock.R0Process)->p_vmspace->vm_map,
                    (vaddr_t)pMemNetBSD->Core.pv,
                    ((vaddr_t)pMemNetBSD->Core.pv) + pMemNetBSD->Core.cb,
                    1, 0);
            }
            break;
        case RTR0MEMOBJTYPE_RES_VIRT:
            if (pMemNetBSD->Core.u.Lock.R0Process == NIL_RTR0PROCESS)
            {
                uvm_km_free(kernel_map, (vaddr_t)pMemNetBSD->Core.pv, pMemNetBSD->Core.cb, UVM_KMF_VAONLY);
            }
            break;
        case RTR0MEMOBJTYPE_MAPPING:
            if (pMemNetBSD->Core.u.Lock.R0Process == NIL_RTR0PROCESS)
            {
                pmap_kremove((vaddr_t)pMemNetBSD->Core.pv, pMemNetBSD->Core.cb);
                uvm_km_free(kernel_map, (vaddr_t)pMemNetBSD->Core.pv, pMemNetBSD->Core.cb, UVM_KMF_VAONLY);
            }
            break;

        default:
            AssertMsgFailed(("enmType=%d\n", pMemNetBSD->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    return VINF_SUCCESS;
}

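/**
 * Common allocation worker for the LOW and CONT allocators: it reserves kernel
 * virtual address space (UVM_KMF_VAONLY), allocates physical pages below
 * VmPhysAddrHigh with uvm_pglistalloc() (a single segment when fContiguous),
 * and then enters a wired mapping for each page with pmap_kenter_pa().
 */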
static int rtR0MemObjNetBSDAllocHelper(PRTR0MEMOBJNETBSD pMemNetBSD, size_t cb, bool fExecutable,
                                       paddr_t VmPhysAddrHigh, bool fContiguous)
{
    /* Virtual space first */
    vaddr_t virt = uvm_km_alloc(kernel_map, cb, 0,
                                UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_CANFAIL);
    if (virt == 0)
        return VERR_NO_MEMORY;

    struct pglist *rlist = &pMemNetBSD->pglist;

    int nsegs = fContiguous ? 1 : INT_MAX;

    /* Physical pages */
    if (uvm_pglistalloc(cb, 0, VmPhysAddrHigh,
                        PAGE_SIZE, 0, rlist, nsegs, 1) != 0)
    {
        uvm_km_free(kernel_map, virt, cb, UVM_KMF_VAONLY);
        return VERR_NO_MEMORY;
    }

    /* Map */
    struct vm_page *page;
    vm_prot_t prot = VM_PROT_READ | VM_PROT_WRITE;
    if (fExecutable)
        prot |= VM_PROT_EXECUTE;
    vaddr_t virt2 = virt;
    TAILQ_FOREACH(page, rlist, pageq.queue)
    {
        pmap_kenter_pa(virt2, VM_PAGE_TO_PHYS(page), prot, 0);
        virt2 += PAGE_SIZE;
    }

    pMemNetBSD->Core.pv = (void *)virt;
    if (fContiguous)
    {
        page = TAILQ_FIRST(rlist);
        pMemNetBSD->Core.u.Cont.Phys = VM_PAGE_TO_PHYS(page);
    }
    return VINF_SUCCESS;
}

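/*
 * rtR0MemObjNativeAllocPage below takes the simpler kmem_alloc(..., KM_SLEEP)
 * route for wired kernel memory and only touches the pmap when the caller asks
 * for an executable mapping; AllocLow and AllocCont reuse
 * rtR0MemObjNetBSDAllocHelper with a below-4GB physical-address ceiling.
 */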
DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD),
                                                                    RTR0MEMOBJTYPE_PAGE, NULL, cb);
    if (!pMemNetBSD)
        return VERR_NO_MEMORY;

    void *pvMem = kmem_alloc(cb, KM_SLEEP);
    if (RT_UNLIKELY(!pvMem))
    {
        rtR0MemObjDelete(&pMemNetBSD->Core);
        return VERR_NO_PAGE_MEMORY;
    }
    if (fExecutable)
    {
        pmap_protect(pmap_kernel(), (vaddr_t)pvMem, ((vaddr_t)pvMem) + cb,
                     VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE);
    }

    pMemNetBSD->Core.pv = pvMem;
    *ppMem = &pMemNetBSD->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD),
                                                                    RTR0MEMOBJTYPE_LOW, NULL, cb);
    if (!pMemNetBSD)
        return VERR_NO_MEMORY;

    int rc = rtR0MemObjNetBSDAllocHelper(pMemNetBSD, cb, fExecutable, _4G - 1, false);
    if (rc)
    {
        rtR0MemObjDelete(&pMemNetBSD->Core);
        return rc;
    }

    *ppMem = &pMemNetBSD->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD),
                                                                    RTR0MEMOBJTYPE_CONT, NULL, cb);
    if (!pMemNetBSD)
        return VERR_NO_MEMORY;

    int rc = rtR0MemObjNetBSDAllocHelper(pMemNetBSD, cb, fExecutable, _4G - 1, true);
    if (rc)
    {
        rtR0MemObjDelete(&pMemNetBSD->Core);
        return rc;
    }

    *ppMem = &pMemNetBSD->Core;
    return VINF_SUCCESS;
}


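/*
 * Shared worker for the physical-page allocators: AllocPhys requests a single
 * contiguous segment (nsegs = 1) and records the base address in u.Phys, while
 * AllocPhysNC allows up to INT_MAX segments.  The pages are only placed on the
 * object's pglist here; mapping them is left to the MapKernel/MapUser paths.
 */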
static int rtR0MemObjNetBSDAllocPhysPages(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJTYPE enmType,
                                          size_t cb,
                                          RTHCPHYS PhysHighest, size_t uAlignment,
                                          bool fContiguous)
{
    paddr_t VmPhysAddrHigh;

    /* create the object. */
    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD),
                                                                    enmType, NULL, cb);
    if (!pMemNetBSD)
        return VERR_NO_MEMORY;

    if (PhysHighest != NIL_RTHCPHYS)
        VmPhysAddrHigh = PhysHighest;
    else
        VmPhysAddrHigh = ~(paddr_t)0;

    int nsegs = fContiguous ? 1 : INT_MAX;

    int error = uvm_pglistalloc(cb, 0, VmPhysAddrHigh, uAlignment, 0, &pMemNetBSD->pglist, nsegs, 1);
    if (error)
    {
        rtR0MemObjDelete(&pMemNetBSD->Core);
        return VERR_NO_MEMORY;
    }

    if (fContiguous)
    {
        Assert(enmType == RTR0MEMOBJTYPE_PHYS);
        const struct vm_page * const pg = TAILQ_FIRST(&pMemNetBSD->pglist);
        pMemNetBSD->Core.u.Phys.PhysBase = VM_PAGE_TO_PHYS(pg);
        pMemNetBSD->Core.u.Phys.fAllocated = true;
    }
    *ppMem = &pMemNetBSD->Core;

    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
{
    return rtR0MemObjNetBSDAllocPhysPages(ppMem, RTR0MEMOBJTYPE_PHYS, cb, PhysHighest, uAlignment, true);
}


DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
    return rtR0MemObjNetBSDAllocPhysPages(ppMem, RTR0MEMOBJTYPE_PHYS_NC, cb, PhysHighest, PAGE_SIZE, false);
}


DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
{
    AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_SUPPORTED);

    /* create the object. */
    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_PHYS, NULL, cb);
    if (!pMemNetBSD)
        return VERR_NO_MEMORY;

    /* there is no allocation here, it needs to be mapped somewhere first. */
    pMemNetBSD->Core.u.Phys.fAllocated = false;
    pMemNetBSD->Core.u.Phys.PhysBase = Phys;
    pMemNetBSD->Core.u.Phys.uCachePolicy = uCachePolicy;
    TAILQ_INIT(&pMemNetBSD->pglist);
    *ppMem = &pMemNetBSD->Core;
    return VINF_SUCCESS;
}


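/*
 * Locking user memory wires the range in the target process's vm_map with
 * uvm_map_pageable() and new_pageable = 0; the matching unwire (new_pageable = 1)
 * happens in rtR0MemObjNativeFree for RTR0MEMOBJTYPE_LOCK objects.  Kernel
 * locking is a no-op beyond bookkeeping, on the assumption the memory is wired.
 */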
DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
{
    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
    if (!pMemNetBSD)
        return VERR_NO_MEMORY;

    int rc = uvm_map_pageable(
        &((struct proc *)R0Process)->p_vmspace->vm_map,
        R3Ptr,
        R3Ptr + cb,
        0, 0);
    if (rc)
    {
        rtR0MemObjDelete(&pMemNetBSD->Core);
        return VERR_NO_MEMORY;
    }

    pMemNetBSD->Core.u.Lock.R0Process = R0Process;
    *ppMem = &pMemNetBSD->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
{
    /* Kernel memory (always?) wired; all memory allocated by vbox code is? */
    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_LOCK, pv, cb);
    if (!pMemNetBSD)
        return VERR_NO_MEMORY;

    pMemNetBSD->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
    pMemNetBSD->Core.pv = pv;
    *ppMem = &pMemNetBSD->Core;
    return VINF_SUCCESS;
}

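/*
 * Kernel virtual-address reservation: only "any address" (pvFixed == (void *)-1)
 * is handled, by taking a VA-only range from kernel_map with uvm_km_alloc().
 * Reserving a caller-specified kernel address, and user-space reservation in
 * general, return VERR_NOT_SUPPORTED.
 */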
DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
{
    if (pvFixed != (void *)-1)
    {
        /* can we support this? or can we assume the virtual space is already reserved? */
        printf("reserve specified kernel virtual address not supported\n");
        return VERR_NOT_SUPPORTED;
    }

    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_RES_VIRT, NULL, cb);
    if (!pMemNetBSD)
        return VERR_NO_MEMORY;

    vaddr_t virt = uvm_km_alloc(kernel_map, cb, uAlignment,
                                UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_CANFAIL);
    if (virt == 0)
    {
        rtR0MemObjDelete(&pMemNetBSD->Core);
        return VERR_NO_MEMORY;
    }

    pMemNetBSD->Core.u.ResVirt.R0Process = NIL_RTR0PROCESS;
    pMemNetBSD->Core.pv = (void *)virt;
    *ppMem = &pMemNetBSD->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
{
    printf("NativeReserveUser\n");
    return VERR_NOT_SUPPORTED;
}


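/*
 * Kernel mapping of an existing object: only PHYS and PHYS_NC objects can be
 * mapped, and only at an address chosen by UVM (pvFixed must be (void *)-1).
 * The code reserves a VA-only range of cbSub bytes (or the whole object) and
 * walks the source object's pglist, entering the pages that fall inside
 * [offSub, offSub + cbSub) with pmap_kenter_pa().  User mapping is unimplemented.
 */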
DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                                          unsigned fProt, size_t offSub, size_t cbSub)
{
    if (pvFixed != (void *)-1)
    {
        /* can we support this? or can we assume the virtual space is already reserved? */
        printf("map to specified kernel virtual address not supported\n");
        return VERR_NOT_SUPPORTED;
    }

    PRTR0MEMOBJNETBSD pMemNetBSD0 = (PRTR0MEMOBJNETBSD)pMemToMap;
    if (   (pMemNetBSD0->Core.enmType != RTR0MEMOBJTYPE_PHYS)
        && (pMemNetBSD0->Core.enmType != RTR0MEMOBJTYPE_PHYS_NC))
    {
        printf("memory to map is not physical\n");
        return VERR_NOT_SUPPORTED;
    }
    size_t sz = cbSub > 0 ? cbSub : pMemNetBSD0->Core.cb;

    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)rtR0MemObjNew(sizeof(*pMemNetBSD), RTR0MEMOBJTYPE_MAPPING, NULL, sz);

    vaddr_t virt = uvm_km_alloc(kernel_map, sz, uAlignment,
                                UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_CANFAIL);
    if (virt == 0)
    {
        rtR0MemObjDelete(&pMemNetBSD->Core);
        return VERR_NO_MEMORY;
    }

    vm_prot_t prot = 0;

    if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ)
        prot |= VM_PROT_READ;
    if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE)
        prot |= VM_PROT_WRITE;
    if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC)
        prot |= VM_PROT_EXECUTE;

    struct vm_page *page;
    vaddr_t virt2 = virt;
    size_t map_pos = 0;
    TAILQ_FOREACH(page, &pMemNetBSD0->pglist, pageq.queue)
    {
        if (map_pos >= offSub)
        {
            if (cbSub > 0 && (map_pos >= offSub + cbSub))
                break;

            pmap_kenter_pa(virt2, VM_PAGE_TO_PHYS(page), prot, 0);
            virt2 += PAGE_SIZE;
        }
        map_pos += PAGE_SIZE;
    }

    pMemNetBSD->Core.pv = (void *)virt;
    pMemNetBSD->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
    *ppMem = &pMemNetBSD->Core;

    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment,
                                        unsigned fProt, RTR0PROCESS R0Process)
{
    printf("NativeMapUser\n");
    return VERR_NOT_SUPPORTED;
}


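/*
 * Changing protection reuses rtR0MemObjNetBSDGetMap() to find the owning vm_map
 * (kernel_map, a process map, or NULL for unmapped physical objects) and applies
 * the translated UVM_PROT_* flags with uvm_map_protect().
 */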
DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
{
    vm_prot_t ProtectionFlags = 0;
    vaddr_t   AddrStart = (vaddr_t)pMem->pv + offSub;
    vm_map_t  pVmMap = rtR0MemObjNetBSDGetMap(pMem);

    if (!pVmMap)
        return VERR_NOT_SUPPORTED;

    if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ)
        ProtectionFlags |= UVM_PROT_R;
    if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE)
        ProtectionFlags |= UVM_PROT_W;
    if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC)
        ProtectionFlags |= UVM_PROT_X;

    int error = uvm_map_protect(pVmMap, AddrStart, AddrStart + cbSub,
                                ProtectionFlags, 0);
    if (!error)
        return VINF_SUCCESS;

    return VERR_NOT_SUPPORTED;
}


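/*
 * Page-address lookup: PAGE and LOW objects are resolved through the kernel
 * pmap with pmap_extract(); CONT and PHYS objects by arithmetic on the recorded
 * base address; PHYS_NC objects by walking the pglist to the iPage'th entry; and
 * LOCK/MAPPING objects through the owning pmap (kernel or process).
 */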
DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
{
    PRTR0MEMOBJNETBSD pMemNetBSD = (PRTR0MEMOBJNETBSD)pMem;

    switch (pMemNetBSD->Core.enmType)
    {
        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        {
            vaddr_t va = (vaddr_t)pMemNetBSD->Core.pv + ptoa(iPage);
            paddr_t pa = 0;
            pmap_extract(pmap_kernel(), va, &pa);
            return pa;
        }
        case RTR0MEMOBJTYPE_CONT:
            return pMemNetBSD->Core.u.Cont.Phys + ptoa(iPage);
        case RTR0MEMOBJTYPE_PHYS:
            return pMemNetBSD->Core.u.Phys.PhysBase + ptoa(iPage);
        case RTR0MEMOBJTYPE_PHYS_NC:
        {
            struct vm_page *page;
            size_t i = 0;
            TAILQ_FOREACH(page, &pMemNetBSD->pglist, pageq.queue)
            {
                if (i == iPage)
                    break;
                i++;
            }
            return VM_PAGE_TO_PHYS(page);
        }
        case RTR0MEMOBJTYPE_LOCK:
        case RTR0MEMOBJTYPE_MAPPING:
        {
            pmap_t pmap;
            if (pMem->u.Lock.R0Process == NIL_RTR0PROCESS)
                pmap = pmap_kernel();
            else
                pmap = ((struct proc *)pMem->u.Lock.R0Process)->p_vmspace->vm_map.pmap;
            vaddr_t va = (vaddr_t)pMemNetBSD->Core.pv + ptoa(iPage);
            paddr_t pa = 0;
            pmap_extract(pmap, va, &pa);
            return pa;
        }
        case RTR0MEMOBJTYPE_RES_VIRT:
            return NIL_RTHCPHYS;
        default:
            return NIL_RTHCPHYS;
    }
}