VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/solaris/memobj-r0drv-solaris.c@41169

Last change on this file since 41169 was 41146, checked in by vboxsync, 13 years ago

memobj-r0drv-solaris: Implemented rtR0MemObjNativeMapKernel.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 37.8 KB

/* $Id: memobj-r0drv-solaris.c 41146 2012-05-03 20:14:02Z vboxsync $ */
/** @file
 * IPRT - Ring-0 Memory Objects, Solaris.
 */

/*
 * Copyright (C) 2006-2007 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#include "the-solaris-kernel.h"
#include "internal/iprt.h"
#include <iprt/memobj.h>

#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/err.h>
#include <iprt/log.h>
#include <iprt/mem.h>
#include <iprt/param.h>
#include <iprt/process.h>
#include "internal/memobj.h"
#include "memobj-r0drv-solaris.h"

/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
#define SOL_IS_KRNL_ADDR(vx)    ((uintptr_t)(vx) >= kernelbase)


/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/**
 * The Solaris version of the memory object structure.
 */
typedef struct RTR0MEMOBJSOL
{
    /** The core structure. */
    RTR0MEMOBJINTERNAL  Core;
    /** Pointer to kernel memory cookie. */
    ddi_umem_cookie_t   Cookie;
    /** Shadow locked pages. */
    void               *pvHandle;
    /** Access during locking. */
    int                 fAccess;
    /** Set if large pages are involved in an RTR0MEMOBJTYPE_PHYS
     *  allocation. */
    bool                fLargePage;
} RTR0MEMOBJSOL, *PRTR0MEMOBJSOL;


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
static vnode_t          g_PageVnode;


/**
 * Returns the physical address for a virtual address.
 *
 * @param   pv      The virtual address.
 *
 * @returns The physical address corresponding to @a pv.
 */
static uint64_t rtR0MemObjSolVirtToPhys(void *pv)
{
    struct hat *pHat         = NULL;
    pfn_t       PageFrameNum = 0;
    uintptr_t   uVirtAddr    = (uintptr_t)pv;

    if (SOL_IS_KRNL_ADDR(pv))
        pHat = kas.a_hat;
    else
    {
        proc_t *pProcess = (proc_t *)RTR0ProcHandleSelf();
        AssertRelease(pProcess);
        pHat = pProcess->p_as->a_hat;
    }

    PageFrameNum = hat_getpfnum(pHat, (caddr_t)(uVirtAddr & PAGEMASK));
    AssertReleaseMsg(PageFrameNum != PFN_INVALID, ("rtR0MemObjSolVirtToPhys failed. pv=%p\n", pv));
    return (((uint64_t)PageFrameNum << PAGESHIFT) | (uVirtAddr & PAGEOFFSET));
}

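/*
 * A minimal worked example of the PFN/offset composition used above,
 * assuming the usual 4K page size (PAGESHIFT == 12).  The function and the
 * values are hypothetical and kept under #if 0.
 */
#if 0
static uint64_t rtR0MemObjSolPhysCompositionExample(void)
{
    pfn_t     PageFrameNum = 0x1234;                /* as returned by hat_getpfnum()  */
    uintptr_t uVirtAddr    = 0xffffff00f8002567;    /* some mapped virtual address    */

    /* Page base 0x1234000 | byte offset 0x567 => physical address 0x1234567. */
    return ((uint64_t)PageFrameNum << PAGESHIFT) | (uVirtAddr & PAGEOFFSET);
}
#endif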

/**
 * Returns the physical address for a page.
 *
 * @param   pPage   Pointer to the page.
 *
 * @returns The physical address for a page.
 */
static inline uint64_t rtR0MemObjSolPagePhys(page_t *pPage)
{
    AssertPtr(pPage);
    pfn_t PageFrameNum = page_pptonum(pPage);
    AssertReleaseMsg(PageFrameNum != PFN_INVALID, ("rtR0MemObjSolPagePhys failed pPage=%p\n", pPage));
    return (uint64_t)PageFrameNum << PAGESHIFT;
}


/**
 * Retrieves a free page from the kernel freelist.
 *
 * @param   virtAddr    The virtual address to which this page may be mapped in
 *                      the future.
 * @param   cbPage      The size of the page.
 *
 * @returns Pointer to the allocated page, NULL on failure.
 */
static page_t *rtR0MemObjSolPageFromFreelist(caddr_t virtAddr, size_t cbPage)
{
    seg_t KernelSeg;
    KernelSeg.s_as = &kas;
    page_t *pPage = page_get_freelist(&g_PageVnode, 0 /* offset */, &KernelSeg, virtAddr,
                                      cbPage, 0 /* flags */, NULL /* NUMA group */);
    if (   !pPage
        && g_frtSolUseKflt)
    {
        pPage = page_get_freelist(&g_PageVnode, 0 /* offset */, &KernelSeg, virtAddr,
                                  cbPage, PG_KFLT, NULL /* NUMA group */);
    }
    return pPage;
}


/**
 * Retrieves a free page from the kernel cachelist.
 *
 * @param   virtAddr    The virtual address to which this page may be mapped in
 *                      the future.
 * @param   cbPage      The size of the page.
 *
 * @returns Pointer to the allocated page, NULL on failure.
 */
static page_t *rtR0MemObjSolPageFromCachelist(caddr_t virtAddr, size_t cbPage)
{
    seg_t KernelSeg;
    KernelSeg.s_as = &kas;
    page_t *pPage = page_get_cachelist(&g_PageVnode, 0 /* offset */, &KernelSeg, virtAddr,
                                       0 /* flags */, NULL /* NUMA group */);
    if (   !pPage
        && g_frtSolUseKflt)
    {
        pPage = page_get_cachelist(&g_PageVnode, 0 /* offset */, &KernelSeg, virtAddr,
                                   PG_KFLT, NULL /* NUMA group */);
    }

    /*
     * Remove association with the vnode for pages from the cachelist.
     * (Guard against a NULL page; both lookups above can fail.)
     */
    if (pPage && !PP_ISAGED(pPage))
        page_hashout(pPage, NULL /* mutex */);

    return pPage;
}


/**
 * Allocates physical non-contiguous memory.
 *
 * @param   uPhysHi The upper physical address limit (inclusive).
 * @param   puPhys  Where to store the physical address of the first page.
 *                  Optional, can be NULL.
 * @param   cb      The size of the allocation.
 *
 * @returns Array of allocated pages, NULL on failure.
 */
static page_t **rtR0MemObjSolPagesAlloc(uint64_t uPhysHi, uint64_t *puPhys, size_t cb)
{
    /*
     * The page freelist and cachelist both hold pages that are not mapped into any address space.
     * The pages on the cachelist are not really free, but when memory is exhausted they are moved
     * to the freelist; it is the total of the free and cache lists that shows up in the 'free'
     * column of vmstat.
     *
     * Reserve available memory for pages and create the pages.
     */
    pgcnt_t cPages = (cb + PAGESIZE - 1) >> PAGESHIFT;
    int rc = page_resv(cPages, KM_NOSLEEP);
    if (rc)
    {
        rc = page_create_wait(cPages, 0 /* flags */);
        if (rc)
        {
            size_t cbPages = cPages * sizeof(page_t *);
            page_t **ppPages = kmem_zalloc(cbPages, KM_SLEEP);
            if (RT_LIKELY(ppPages))
            {
                /*
                 * Get pages from kseg; the 'virtAddr' here is only for colouring, but unfortunately
                 * we don't yet have the 'virtAddr' to which this memory may be mapped.
                 */
                caddr_t virtAddr = NULL;
                for (size_t i = 0; i < cPages; i++, virtAddr += PAGESIZE)
                {
                    uint32_t cTries = 3;
                    page_t *pPage = NULL;
                    while (cTries > 0)
                    {
                        /*
                         * Get a page from the freelist or cachelist & verify if it's within our
                         * requested range.
                         */
                        pPage = rtR0MemObjSolPageFromFreelist(virtAddr, PAGESIZE);
                        if (!pPage)
                        {
                            pPage = rtR0MemObjSolPageFromCachelist(virtAddr, PAGESIZE);
                            if (RT_UNLIKELY(!pPage))
                                break;
                        }
                        if (uPhysHi != NIL_RTHCPHYS)
                        {
                            uint64_t uPhys = rtR0MemObjSolPagePhys(pPage);
                            if (uPhys > uPhysHi)
                            {
                                page_free(pPage, 0 /* don't need page, move to tail of pagelist */);
                                pPage = NULL;
                                --cTries;
                                continue;
                            }
                        }

                        PP_CLRFREE(pPage);      /* Page is no longer free */
                        PP_CLRAGED(pPage);      /* Page is not hashed in */
                        ppPages[i] = pPage;
                        break;
                    }

                    if (RT_UNLIKELY(!pPage))
                    {
                        /*
                         * No pages found or found pages didn't meet requirements, release what was grabbed so far.
                         * (i is unsigned, so count down with i-- rather than --i >= 0.)
                         */
                        page_create_putback(cPages - i);
                        while (i-- > 0)
                            page_free(ppPages[i], 0 /* don't need page, move to tail of pagelist */);
                        kmem_free(ppPages, cbPages);
                        page_unresv(cPages);
                        return NULL;
                    }
                }

                /*
                 * We now have the pages locked exclusively; before they are mapped in,
                 * the lock must be downgraded (see rtR0MemObjSolPagesPreMap).
                 */
                if (puPhys)
                    *puPhys = rtR0MemObjSolPagePhys(ppPages[0]);
                return ppPages;
            }

            page_create_putback(cPages);
        }

        page_unresv(cPages);
    }

    return NULL;
}

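/*
 * A minimal sketch of how the page-list helpers defined above and below are
 * meant to be driven (allocate, downgrade locks for mapping, free), assuming
 * a caller that wants physically non-contiguous pages below 4G.  The function
 * is hypothetical and kept under #if 0; error paths are abbreviated.
 */
#if 0
static int rtR0MemObjSolPagesLifecycleExample(size_t cb)
{
    uint64_t  uPhysFirst;
    page_t  **ppPages = rtR0MemObjSolPagesAlloc(_4G - 1, &uPhysFirst, cb);  /* pages come back SE_EXCL locked */
    if (RT_UNLIKELY(!ppPages))
        return VERR_NO_MEMORY;

    size_t    cPages  = (cb + PAGESIZE - 1) >> PAGESHIFT;
    uint64_t *pauPhys = kmem_zalloc(cPages * sizeof(uint64_t), KM_SLEEP);
    int rc = rtR0MemObjSolPagesPreMap(ppPages, cb, pauPhys, cPages);        /* downgrade to shared locks      */
    if (RT_SUCCESS(rc))
    {
        /* ... hat_devload()/as_map() the addresses in pauPhys here ... */
    }
    kmem_free(pauPhys, cPages * sizeof(uint64_t));
    rtR0MemObjSolPagesFree(ppPages, cb);                                    /* re-acquire SE_EXCL and free    */
    return rc;
}
#endif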

/**
 * Prepares pages allocated by rtR0MemObjSolPagesAlloc for mapping.
 *
 * @param   ppPages     Pointer to the page list.
 * @param   cb          Size of the allocation.
 * @param   auPhys      Where to store the physical addresses of the premapped
 *                      pages.
 * @param   cPages      The number of pages (entries) in @a auPhys.
 *
 * @returns IPRT status code.
 */
static int rtR0MemObjSolPagesPreMap(page_t **ppPages, size_t cb, uint64_t auPhys[], size_t cPages)
{
    AssertPtrReturn(ppPages, VERR_INVALID_PARAMETER);
    AssertPtrReturn(auPhys, VERR_INVALID_PARAMETER);

    for (size_t iPage = 0; iPage < cPages; iPage++)
    {
        /*
         * Prepare pages for mapping into kernel/user-space. Downgrade the
         * exclusive page lock to a shared lock to prevent page relocation.
         */
        if (page_tryupgrade(ppPages[iPage]) == 1)
            page_downgrade(ppPages[iPage]);

        auPhys[iPage] = rtR0MemObjSolPagePhys(ppPages[iPage]);
    }

    return VINF_SUCCESS;
}


/**
 * Frees pages allocated by rtR0MemObjSolPagesAlloc.
 *
 * @param   ppPages     Pointer to the page list.
 * @param   cb          Size of the allocation.
 */
static void rtR0MemObjSolPagesFree(page_t **ppPages, size_t cb)
{
    size_t cPages  = (cb + PAGESIZE - 1) >> PAGESHIFT;
    size_t cbPages = cPages * sizeof(page_t *);
    for (size_t iPage = 0; iPage < cPages; iPage++)
    {
        /*
         * We need to exclusively lock the pages before freeing them.
         */
        int rc = page_tryupgrade(ppPages[iPage]);
        if (!rc)
        {
            page_unlock(ppPages[iPage]);
            while (!page_lock(ppPages[iPage], SE_EXCL, NULL /* mutex */, P_RECLAIM))
            {
                /* nothing */;
            }
        }
        page_free(ppPages[iPage], 0 /* don't need page, move to tail of pagelist */);
    }
    kmem_free(ppPages, cbPages);
    page_unresv(cPages);
}


/**
 * Allocates a large page to cover the required allocation size.
 *
 * @param   puPhys  Where to store the physical address of the allocated
 *                  page. Optional, can be NULL.
 * @param   cb      Size of the allocation.
 *
 * @returns Pointer to the allocated large page, NULL on failure.
 */
static page_t *rtR0MemObjSolLargePageAlloc(uint64_t *puPhys, size_t cb)
{
    /*
     * Reserve available memory and create the sub-pages.
     */
    const pgcnt_t cPages = cb >> PAGESHIFT;
    int rc = page_resv(cPages, KM_NOSLEEP);
    if (rc)
    {
        rc = page_create_wait(cPages, 0 /* flags */);
        if (rc)
        {
            /*
             * Get a page off the free list. We set virtAddr to 0 since we don't know where
             * the memory is going to be mapped.
             */
            seg_t KernelSeg;
            caddr_t virtAddr = NULL;
            KernelSeg.s_as = &kas;
            page_t *pRootPage = rtR0MemObjSolPageFromFreelist(virtAddr, cb);
            if (pRootPage)
            {
                AssertMsg(!(page_pptonum(pRootPage) & (cPages - 1)), ("%p:%lx cPages=%lx\n", pRootPage, page_pptonum(pRootPage), cPages));

                /*
                 * Mark all the sub-pages as non-free and not-hashed-in.
                 * The page list must be dismantled (page_sub) before the pages can be used.
                 */
                page_t *pPageList = pRootPage;
                for (size_t iPage = 0; iPage < cPages; iPage++)
                {
                    page_t *pPage = pPageList;
                    AssertPtr(pPage);
                    AssertMsg(page_pptonum(pPage) == iPage + page_pptonum(pRootPage),
                              ("%p:%lx %lx+%lx\n", pPage, page_pptonum(pPage), iPage, page_pptonum(pRootPage)));
                    page_sub(&pPageList, pPage);

                    /*
                     * The page must still be marked free, and its size-code must match
                     * that of the root page.
                     */
                    AssertMsg(PP_ISFREE(pPage), ("%p\n", pPage));
                    AssertMsg(pPage->p_szc == pRootPage->p_szc, ("%p - %d expected %d\n", pPage, pPage->p_szc, pRootPage->p_szc));

                    PP_CLRFREE(pPage);      /* Page no longer free */
                    PP_CLRAGED(pPage);      /* Page no longer hashed-in */
                }

                uint64_t uPhys = rtR0MemObjSolPagePhys(pRootPage);
                AssertMsg(!(uPhys & (cb - 1)), ("%llx %zx\n", uPhys, cb));
                if (puPhys)
                    *puPhys = uPhys;

                return pRootPage;
            }

            page_create_putback(cPages);
        }

        page_unresv(cPages);
    }

    return NULL;
}


/**
 * Prepares the large page allocated by rtR0MemObjSolLargePageAlloc to be mapped.
 *
 * @param   pRootPage   Pointer to the root page.
 * @param   cb          Size of the allocation.
 *
 * @returns IPRT status code.
 */
static int rtR0MemObjSolLargePagePreMap(page_t *pRootPage, size_t cb)
{
    const pgcnt_t cPages = cb >> PAGESHIFT;

    Assert(page_get_pagecnt(pRootPage->p_szc) == cPages);
    AssertMsg(!(page_pptonum(pRootPage) & (cPages - 1)), ("%p:%lx npages=%lx\n", pRootPage, page_pptonum(pRootPage), cPages));

    /*
     * We need to downgrade the sub-pages from exclusive to shared locking
     * to prevent page relocation.
     */
    for (pgcnt_t iPage = 0; iPage < cPages; iPage++)
    {
        page_t *pPage = page_nextn(pRootPage, iPage);
        AssertMsg(page_pptonum(pPage) == iPage + page_pptonum(pRootPage),
                  ("%p:%lx %lx+%lx\n", pPage, page_pptonum(pPage), iPage, page_pptonum(pRootPage)));
        AssertMsg(!PP_ISFREE(pPage), ("%p\n", pPage));

        if (page_tryupgrade(pPage) == 1)
            page_downgrade(pPage);
        AssertMsg(!PP_ISFREE(pPage), ("%p\n", pPage));
    }

    return VINF_SUCCESS;
}


/**
 * Frees the page allocated by rtR0MemObjSolLargePageAlloc.
 *
 * @param   pRootPage   Pointer to the root page.
 * @param   cb          Allocated size.
 */
static void rtR0MemObjSolLargePageFree(page_t *pRootPage, size_t cb)
{
    pgcnt_t cPages = cb >> PAGESHIFT;

    Assert(page_get_pagecnt(pRootPage->p_szc) == cPages);
    AssertMsg(!(page_pptonum(pRootPage) & (cPages - 1)), ("%p:%lx cPages=%lx\n", pRootPage, page_pptonum(pRootPage), cPages));

    /*
     * We need to exclusively lock the sub-pages before freeing the large one.
     */
    for (pgcnt_t iPage = 0; iPage < cPages; iPage++)
    {
        page_t *pPage = page_nextn(pRootPage, iPage);
        AssertMsg(page_pptonum(pPage) == iPage + page_pptonum(pRootPage),
                  ("%p:%lx %lx+%lx\n", pPage, page_pptonum(pPage), iPage, page_pptonum(pRootPage)));
        AssertMsg(!PP_ISFREE(pPage), ("%p\n", pPage));

        int rc = page_tryupgrade(pPage);
        if (!rc)
        {
            page_unlock(pPage);
            while (!page_lock(pPage, SE_EXCL, NULL /* mutex */, P_RECLAIM))
            {
                /* nothing */;
            }
        }
    }

    /*
     * Free the large page and unreserve the memory.
     */
    page_free_pages(pRootPage);
    page_unresv(cPages);
}

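/*
 * A minimal sketch of the large-page trio above (alloc, pre-map, free),
 * assuming the system exposes a second page size via page_get_pagesize(1),
 * e.g. 2M on x86.  Hypothetical function, kept under #if 0.
 */
#if 0
static int rtR0MemObjSolLargePageLifecycleExample(void)
{
    size_t   cbLarge = page_get_pagesize(1);
    uint64_t uPhys;
    page_t  *pRootPage = rtR0MemObjSolLargePageAlloc(&uPhys, cbLarge);
    if (RT_UNLIKELY(!pRootPage))
        return VERR_NO_MEMORY;

    int rc = rtR0MemObjSolLargePagePreMap(pRootPage, cbLarge);  /* shared-lock the sub-pages */
    if (RT_SUCCESS(rc))
    {
        /* ... map the physically contiguous range uPhys..uPhys+cbLarge-1 ... */
    }
    rtR0MemObjSolLargePageFree(pRootPage, cbLarge);
    return rc;
}
#endif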

/**
 * Unmaps kernel/user-space mapped memory.
 *
 * @param   pv      Pointer to the mapped memory block.
 * @param   cb      Size of the memory block.
 */
static void rtR0MemObjSolUnmap(void *pv, size_t cb)
{
    if (SOL_IS_KRNL_ADDR(pv))
    {
        hat_unload(kas.a_hat, pv, cb, HAT_UNLOAD | HAT_UNLOAD_UNLOCK);
        vmem_free(heap_arena, pv, cb);
    }
    else
    {
        struct as *pAddrSpace = ((proc_t *)RTR0ProcHandleSelf())->p_as;
        AssertPtr(pAddrSpace);
        as_rangelock(pAddrSpace);
        as_unmap(pAddrSpace, pv, cb);
        as_rangeunlock(pAddrSpace);
    }
}


/**
 * Locks down memory mappings for a virtual address.
 *
 * @param   pv          Pointer to the memory to lock down.
 * @param   cb          Size of the memory block.
 * @param   fPageAccess Page access rights (S_READ, S_WRITE, S_EXEC).
 *
 * @returns IPRT status code.
 */
static int rtR0MemObjSolLock(void *pv, size_t cb, int fPageAccess)
{
    /*
     * Kernel memory mappings on x86/amd64 are always locked, only handle user-space memory.
     */
    if (!SOL_IS_KRNL_ADDR(pv))
    {
        proc_t *pProc = (proc_t *)RTR0ProcHandleSelf();
        AssertPtr(pProc);
        faultcode_t rc = as_fault(pProc->p_as->a_hat, pProc->p_as, (caddr_t)pv, cb, F_SOFTLOCK, fPageAccess);
        if (rc)
        {
            LogRel(("rtR0MemObjSolLock failed for pv=%p cb=%lx fPageAccess=%d rc=%d\n", pv, cb, fPageAccess, rc));
            return VERR_LOCK_FAILED;
        }
    }
    return VINF_SUCCESS;
}


/**
 * Unlocks memory mappings for a virtual address.
 *
 * @param   pv          Pointer to the locked memory.
 * @param   cb          Size of the memory block.
 * @param   fPageAccess Page access rights (S_READ, S_WRITE, S_EXEC).
 */
static void rtR0MemObjSolUnlock(void *pv, size_t cb, int fPageAccess)
{
    if (!SOL_IS_KRNL_ADDR(pv))
    {
        proc_t *pProcess = (proc_t *)RTR0ProcHandleSelf();
        AssertPtr(pProcess);
        as_fault(pProcess->p_as->a_hat, pProcess->p_as, (caddr_t)pv, cb, F_SOFTUNLOCK, fPageAccess);
    }
}

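/*
 * A minimal sketch of pairing rtR0MemObjSolLock with rtR0MemObjSolUnlock:
 * the unlock should cover the same range, and the access argument should
 * mirror the one used when locking.  Hypothetical function, kept under #if 0.
 */
#if 0
static int rtR0MemObjSolLockPairExample(void *pvUser, size_t cb)
{
    int rc = rtR0MemObjSolLock(pvUser, cb, S_WRITE);    /* faults the pages in and pins them */
    if (RT_SUCCESS(rc))
    {
        /* ... access the user pages from kernel context ... */
        rtR0MemObjSolUnlock(pvUser, cb, S_WRITE);
    }
    return rc;
}
#endif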

/**
 * Maps a list of physical pages into user address space.
 *
 * @param   pVirtAddr       Where to store the virtual address of the mapping.
 * @param   fPageAccess     Page access rights (PROT_READ, PROT_WRITE,
 *                          PROT_EXEC).
 * @param   paPhysAddrs     Array of physical addresses to pages.
 * @param   cb              Size of memory being mapped.
 *
 * @returns IPRT status code.
 */
static int rtR0MemObjSolUserMap(caddr_t *pVirtAddr, unsigned fPageAccess, uint64_t *paPhysAddrs, size_t cb)
{
    struct as *pAddrSpace = ((proc_t *)RTR0ProcHandleSelf())->p_as;
    int rc = VERR_INTERNAL_ERROR;
    SEGVBOX_CRARGS Args;

    Args.paPhysAddrs = paPhysAddrs;
    Args.fPageAccess = fPageAccess;

    as_rangelock(pAddrSpace);
    map_addr(pVirtAddr, cb, 0 /* offset */, 0 /* vacalign */, MAP_SHARED);
    if (*pVirtAddr != NULL)
        rc = as_map(pAddrSpace, *pVirtAddr, cb, rtR0SegVBoxSolCreate, &Args);
    else
        rc = ENOMEM;
    as_rangeunlock(pAddrSpace);

    return RTErrConvertFromErrno(rc);
}


DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)pMem;

    switch (pMemSolaris->Core.enmType)
    {
        case RTR0MEMOBJTYPE_LOW:
            rtR0SolMemFree(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            break;

        case RTR0MEMOBJTYPE_PHYS:
            if (pMemSolaris->Core.u.Phys.fAllocated)
            {
                if (pMemSolaris->fLargePage)
                    rtR0MemObjSolLargePageFree(pMemSolaris->pvHandle, pMemSolaris->Core.cb);
                else
                    rtR0SolMemFree(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            }
            break;

        case RTR0MEMOBJTYPE_PHYS_NC:
            rtR0MemObjSolPagesFree(pMemSolaris->pvHandle, pMemSolaris->Core.cb);
            break;

        case RTR0MEMOBJTYPE_PAGE:
            ddi_umem_free(pMemSolaris->Cookie);
            break;

        case RTR0MEMOBJTYPE_LOCK:
            rtR0MemObjSolUnlock(pMemSolaris->Core.pv, pMemSolaris->Core.cb, pMemSolaris->fAccess);
            break;

        case RTR0MEMOBJTYPE_MAPPING:
            rtR0MemObjSolUnmap(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            break;

        case RTR0MEMOBJTYPE_RES_VIRT:
        {
            if (pMemSolaris->Core.u.ResVirt.R0Process == NIL_RTR0PROCESS)
                vmem_xfree(heap_arena, pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            else
                AssertFailed();
            break;
        }

        case RTR0MEMOBJTYPE_CONT: /* we don't use this type here. */
        default:
            AssertMsgFailed(("enmType=%d\n", pMemSolaris->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    /* Create the object. */
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PAGE, NULL, cb);
    if (RT_UNLIKELY(!pMemSolaris))
        return VERR_NO_MEMORY;

    void *pvMem = ddi_umem_alloc(cb, DDI_UMEM_SLEEP, &pMemSolaris->Cookie);
    if (RT_UNLIKELY(!pvMem))
    {
        rtR0MemObjDelete(&pMemSolaris->Core);
        return VERR_NO_PAGE_MEMORY;
    }

    pMemSolaris->Core.pv  = pvMem;
    pMemSolaris->pvHandle = NULL;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    NOREF(fExecutable);

    /* Create the object. */
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOW, NULL, cb);
    if (!pMemSolaris)
        return VERR_NO_MEMORY;

    /* Allocate physically low page-aligned memory. */
    uint64_t uPhysHi = _4G - 1;
    void *pvMem = rtR0SolMemAlloc(uPhysHi, NULL /* puPhys */, cb, PAGESIZE, false /* fContig */);
    if (RT_UNLIKELY(!pvMem))
    {
        rtR0MemObjDelete(&pMemSolaris->Core);
        return VERR_NO_LOW_MEMORY;
    }
    pMemSolaris->Core.pv  = pvMem;
    pMemSolaris->pvHandle = NULL;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    NOREF(fExecutable);
    return rtR0MemObjNativeAllocPhys(ppMem, cb, _4G - 1, PAGE_SIZE /* alignment */);
}


DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
#if HC_ARCH_BITS == 64
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb);
    if (RT_UNLIKELY(!pMemSolaris))
        return VERR_NO_MEMORY;

    uint64_t PhysAddr = UINT64_MAX;
    void *pvPages = rtR0MemObjSolPagesAlloc((uint64_t)PhysHighest, &PhysAddr, cb);
    if (!pvPages)
    {
        LogRel(("rtR0MemObjNativeAllocPhysNC: rtR0MemObjSolPagesAlloc failed for cb=%u.\n", cb));
        rtR0MemObjDelete(&pMemSolaris->Core);
        return VERR_NO_MEMORY;
    }
    pMemSolaris->Core.pv  = NULL;
    pMemSolaris->pvHandle = pvPages;

    Assert(PhysAddr != UINT64_MAX);
    Assert(!(PhysAddr & PAGE_OFFSET_MASK));
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;

#else /* 32 bit: */
    return VERR_NOT_SUPPORTED; /* see the RTR0MemObjAllocPhysNC specs */
#endif
}


DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
{
    AssertMsgReturn(PhysHighest >= 16 * _1M, ("PhysHighest=%RHp\n", PhysHighest), VERR_NOT_SUPPORTED);

    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS, NULL, cb);
    if (RT_UNLIKELY(!pMemSolaris))
        return VERR_NO_MEMORY;

    /*
     * Allocating one large page gets special treatment.
     */
    static uint32_t s_cbLargePage = UINT32_MAX;
    if (s_cbLargePage == UINT32_MAX)
    {
#if 0 /* currently not entirely stable, so disabled. */
        if (page_num_pagesizes() > 1)
            ASMAtomicWriteU32(&s_cbLargePage, page_get_pagesize(1));
        else
#endif
            ASMAtomicWriteU32(&s_cbLargePage, 0);
    }
    uint64_t PhysAddr;
    if (   cb == s_cbLargePage
        && cb == uAlignment
        && PhysHighest == NIL_RTHCPHYS)
    {
        /*
         * Allocate one large page.
         */
        void *pvPages = rtR0MemObjSolLargePageAlloc(&PhysAddr, cb);
        if (RT_LIKELY(pvPages))
        {
            AssertMsg(!(PhysAddr & (cb - 1)), ("%RHp\n", PhysAddr));
            pMemSolaris->Core.pv                = NULL;
            pMemSolaris->Core.u.Phys.PhysBase   = PhysAddr;
            pMemSolaris->Core.u.Phys.fAllocated = true;
            pMemSolaris->pvHandle               = pvPages;
            pMemSolaris->fLargePage             = true;

            *ppMem = &pMemSolaris->Core;
            return VINF_SUCCESS;
        }
    }
    else
    {
        /*
         * Allocate physically contiguous memory aligned as specified.
         */
        AssertCompile(NIL_RTHCPHYS == UINT64_MAX);
        PhysAddr = PhysHighest;
        void *pvMem = rtR0SolMemAlloc(PhysHighest, &PhysAddr, cb, uAlignment, true /* fContig */);
        if (RT_LIKELY(pvMem))
        {
            Assert(!(PhysAddr & PAGE_OFFSET_MASK));
            Assert(PhysAddr < PhysHighest);
            Assert(PhysAddr + cb <= PhysHighest);

            pMemSolaris->Core.pv                = pvMem;
            pMemSolaris->Core.u.Phys.PhysBase   = PhysAddr;
            pMemSolaris->Core.u.Phys.fAllocated = true;
            pMemSolaris->pvHandle               = NULL;
            pMemSolaris->fLargePage             = false;

            *ppMem = &pMemSolaris->Core;
            return VINF_SUCCESS;
        }
    }
    rtR0MemObjDelete(&pMemSolaris->Core);
    return VERR_NO_CONT_MEMORY;
}


DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
{
    AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_SUPPORTED);

    /* Create the object. */
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS, NULL, cb);
    if (!pMemSolaris)
        return VERR_NO_MEMORY;

    /* There is no allocation here, it needs to be mapped somewhere first. */
    pMemSolaris->Core.u.Phys.fAllocated   = false;
    pMemSolaris->Core.u.Phys.PhysBase     = Phys;
    pMemSolaris->Core.u.Phys.uCachePolicy = uCachePolicy;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess,
                                         RTR0PROCESS R0Process)
{
    AssertReturn(R0Process == RTR0ProcHandleSelf(), VERR_INVALID_PARAMETER);
    NOREF(fAccess);

    /* Create the locking object. */
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
    if (!pMemSolaris)
        return VERR_NO_MEMORY;

    /* Lock down user pages. */
    int fPageAccess = S_READ;
    if (fAccess & RTMEM_PROT_WRITE)
        fPageAccess = S_WRITE;
    if (fAccess & RTMEM_PROT_EXEC)
        fPageAccess = S_EXEC;
    int rc = rtR0MemObjSolLock((void *)R3Ptr, cb, fPageAccess);
    if (RT_FAILURE(rc))
    {
        LogRel(("rtR0MemObjNativeLockUser: rtR0MemObjSolLock failed rc=%d\n", rc));
        rtR0MemObjDelete(&pMemSolaris->Core);
        return rc;
    }

    /* Fill in the object attributes and return successfully. */
    pMemSolaris->Core.u.Lock.R0Process = R0Process;
    pMemSolaris->pvHandle              = NULL;
    pMemSolaris->fAccess               = fPageAccess;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
{
    NOREF(fAccess);

    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOCK, pv, cb);
    if (!pMemSolaris)
        return VERR_NO_MEMORY;

    /* Lock down kernel pages. */
    int fPageAccess = S_READ;
    if (fAccess & RTMEM_PROT_WRITE)
        fPageAccess = S_WRITE;
    if (fAccess & RTMEM_PROT_EXEC)
        fPageAccess = S_EXEC;
    int rc = rtR0MemObjSolLock(pv, cb, fPageAccess);
    if (RT_FAILURE(rc))
    {
        LogRel(("rtR0MemObjNativeLockKernel: rtR0MemObjSolLock failed rc=%d\n", rc));
        rtR0MemObjDelete(&pMemSolaris->Core);
        return rc;
    }

    /* Fill in the object attributes and return successfully. */
    pMemSolaris->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
    pMemSolaris->pvHandle              = NULL;
    pMemSolaris->fAccess               = fPageAccess;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}

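/*
 * The RTMEM_PROT_* to S_* translation above appears in both lock functions;
 * a hypothetical helper folding it into one place could look like this
 * (kept under #if 0).  Note the S_* codes are not flags: exec wins over
 * write, which wins over the S_READ default, mirroring the if-chains above.
 */
#if 0
static int rtR0MemObjSolAccessFromProt(uint32_t fAccess)
{
    if (fAccess & RTMEM_PROT_EXEC)
        return S_EXEC;
    if (fAccess & RTMEM_PROT_WRITE)
        return S_WRITE;
    return S_READ;
}
#endif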

DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
{
    PRTR0MEMOBJSOL pMemSolaris;

    /*
     * Use xalloc.
     */
    void *pv = vmem_xalloc(heap_arena, cb, uAlignment, 0 /* phase */, 0 /* nocross */,
                           NULL /* minaddr */, NULL /* maxaddr */, VM_SLEEP);
    if (RT_UNLIKELY(!pv))
        return VERR_NO_MEMORY;

    /* Create the object. */
    pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_RES_VIRT, pv, cb);
    if (!pMemSolaris)
    {
        LogRel(("rtR0MemObjNativeReserveKernel failed to alloc memory object.\n"));
        vmem_xfree(heap_arena, pv, cb);
        return VERR_NO_MEMORY;
    }

    pMemSolaris->Core.u.ResVirt.R0Process = NIL_RTR0PROCESS;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
{
    return VERR_NOT_SUPPORTED;
}


DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                                          unsigned fProt, size_t offSub, size_t cbSub)
{
    /* Fail if requested to do something we can't. */
    AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * Use xalloc to get address space.
     */
    if (!cbSub)
        cbSub = pMemToMap->cb;
    void *pv = vmem_xalloc(heap_arena, cbSub, uAlignment, 0 /* phase */, 0 /* nocross */,
                           NULL /* minaddr */, NULL /* maxaddr */, VM_SLEEP);
    if (RT_UNLIKELY(!pv))
        return VERR_MAP_FAILED;

    /*
     * Load the pages from the other object into it.
     */
    uint32_t fAttr = HAT_UNORDERED_OK | HAT_MERGING_OK | HAT_LOADCACHING_OK | HAT_STORECACHING_OK;
    if (fProt & RTMEM_PROT_READ)
        fAttr |= PROT_READ;
    if (fProt & RTMEM_PROT_EXEC)
        fAttr |= PROT_EXEC;
    if (fProt & RTMEM_PROT_WRITE)
        fAttr |= PROT_WRITE;
    fAttr |= HAT_NOSYNC;

    int rc = VINF_SUCCESS;
    size_t off = 0;
    while (off < cbSub)
    {
        RTHCPHYS HCPhys = rtR0MemObjNativeGetPagePhysAddr(pMemToMap, (offSub + off) >> PAGE_SHIFT);
        AssertBreakStmt(HCPhys != NIL_RTHCPHYS, rc = VERR_INTERNAL_ERROR_2);
        pfn_t pfn = HCPhys >> PAGESHIFT;
        AssertBreakStmt(((RTHCPHYS)pfn << PAGESHIFT) == HCPhys, rc = VERR_INTERNAL_ERROR_3);

        hat_devload(kas.a_hat, (uint8_t *)pv + off, PAGE_SIZE, pfn, fAttr, HAT_LOAD_LOCK);

        /* Advance. */
        off += PAGE_SIZE;
    }
    if (RT_SUCCESS(rc))
    {
        /*
         * Create a memory object for the mapping.
         */
        PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_MAPPING, pv, cbSub);
        if (pMemSolaris)
        {
            pMemSolaris->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
            *ppMem = &pMemSolaris->Core;
            return VINF_SUCCESS;
        }

        LogRel(("rtR0MemObjNativeMapKernel failed to alloc memory object.\n"));
        rc = VERR_NO_MEMORY;
    }

    if (off)
        hat_unload(kas.a_hat, pv, off, HAT_UNLOAD | HAT_UNLOAD_UNLOCK);
    vmem_xfree(heap_arena, pv, cbSub);
    return rc;
}


DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, PRTR0MEMOBJINTERNAL pMemToMap, RTR3PTR R3PtrFixed,
                                        size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
{
    /*
     * Fend off things we cannot do.
     */
    AssertMsgReturn(R3PtrFixed == (RTR3PTR)-1, ("%p\n", R3PtrFixed), VERR_NOT_SUPPORTED);
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
    if (uAlignment != PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * Get parameters from the source object.
     */
    PRTR0MEMOBJSOL pMemToMapSolaris = (PRTR0MEMOBJSOL)pMemToMap;
    void          *pv               = pMemToMapSolaris->Core.pv;
    size_t         cb               = pMemToMapSolaris->Core.cb;
    size_t         cPages           = cb >> PAGE_SHIFT;

    /*
     * Create the mapping object.
     */
    PRTR0MEMOBJSOL pMemSolaris;
    pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_MAPPING, pv, cb);
    if (RT_UNLIKELY(!pMemSolaris))
        return VERR_NO_MEMORY;

    int rc = VINF_SUCCESS;
    uint64_t *paPhysAddrs = kmem_zalloc(sizeof(uint64_t) * cPages, KM_SLEEP);
    if (RT_LIKELY(paPhysAddrs))
    {
        /*
         * Prepare the pages according to type.
         */
        if (pMemToMapSolaris->Core.enmType == RTR0MEMOBJTYPE_PHYS_NC)
            rc = rtR0MemObjSolPagesPreMap(pMemToMapSolaris->pvHandle, cb, paPhysAddrs, cPages);
        else if (   pMemToMapSolaris->Core.enmType == RTR0MEMOBJTYPE_PHYS
                 && pMemToMapSolaris->fLargePage)
        {
            RTHCPHYS Phys = pMemToMapSolaris->Core.u.Phys.PhysBase;
            for (pgcnt_t iPage = 0; iPage < cPages; iPage++, Phys += PAGE_SIZE)
                paPhysAddrs[iPage] = Phys;
            rc = rtR0MemObjSolLargePagePreMap(pMemToMapSolaris->pvHandle, cb);
        }
        else
        {
            /*
             * Have kernel mapping, just translate virtual to physical.
             */
            AssertPtr(pv);
            rc = VINF_SUCCESS;
            for (size_t iPage = 0; iPage < cPages; iPage++)
            {
                paPhysAddrs[iPage] = rtR0MemObjSolVirtToPhys(pv);
                if (RT_UNLIKELY(paPhysAddrs[iPage] == -(uint64_t)1))
                {
                    LogRel(("rtR0MemObjNativeMapUser: no page to map.\n"));
                    rc = VERR_MAP_FAILED;
                    break;
                }
                pv = (void *)((uintptr_t)pv + PAGE_SIZE);
            }
        }
        if (RT_SUCCESS(rc))
        {
            unsigned fPageAccess = PROT_READ;
            if (fProt & RTMEM_PROT_WRITE)
                fPageAccess |= PROT_WRITE;
            if (fProt & RTMEM_PROT_EXEC)
                fPageAccess |= PROT_EXEC;

            /*
             * Perform the actual mapping.
             */
            caddr_t UserAddr = NULL;
            rc = rtR0MemObjSolUserMap(&UserAddr, fPageAccess, paPhysAddrs, cb);
            if (RT_SUCCESS(rc))
            {
                pMemSolaris->Core.u.Mapping.R0Process = R0Process;
                pMemSolaris->Core.pv                  = UserAddr;

                *ppMem = &pMemSolaris->Core;
                kmem_free(paPhysAddrs, sizeof(uint64_t) * cPages);
                return VINF_SUCCESS;
            }

            LogRel(("rtR0MemObjNativeMapUser: rtR0MemObjSolUserMap failed rc=%d.\n", rc));
        }

        rc = VERR_MAP_FAILED;
        kmem_free(paPhysAddrs, sizeof(uint64_t) * cPages);
    }
    else
        rc = VERR_NO_MEMORY;
    rtR0MemObjDelete(&pMemSolaris->Core);
    return rc;
}


DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
{
    NOREF(pMem);
    NOREF(offSub);
    NOREF(cbSub);
    NOREF(fProt);
    return VERR_NOT_SUPPORTED;
}


DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
{
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)pMem;

    switch (pMemSolaris->Core.enmType)
    {
        case RTR0MEMOBJTYPE_PHYS_NC:
            if (pMemSolaris->Core.u.Phys.fAllocated)
            {
                uint8_t *pb = (uint8_t *)pMemSolaris->Core.pv + ((size_t)iPage << PAGE_SHIFT);
                return rtR0MemObjSolVirtToPhys(pb);
            }
            page_t **ppPages = pMemSolaris->pvHandle;
            return rtR0MemObjSolPagePhys(ppPages[iPage]);

        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_LOCK:
        {
            uint8_t *pb = (uint8_t *)pMemSolaris->Core.pv + ((size_t)iPage << PAGE_SHIFT);
            return rtR0MemObjSolVirtToPhys(pb);
        }

        /*
         * Although mapping can be handled by rtR0MemObjSolVirtToPhys(offset) like the above case,
         * request it from the parent so that we have a clear distinction between CONT/PHYS_NC.
         */
        case RTR0MEMOBJTYPE_MAPPING:
            return rtR0MemObjNativeGetPagePhysAddr(pMemSolaris->Core.uRel.Child.pParent, iPage);

        case RTR0MEMOBJTYPE_CONT:
        case RTR0MEMOBJTYPE_PHYS:
            AssertFailed(); /* handled by the caller */
        case RTR0MEMOBJTYPE_RES_VIRT:
        default:
            return NIL_RTHCPHYS;
    }
}
