VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/solaris/memobj-r0drv-solaris.c@104848

Last change on this file since 104848 was 104848, checked in by vboxsync, 10 months ago

VMM/PGM,SUPDrv,IPRT: Added a RTR0MemObjZeroInitialize function to IPRT/SUPDrv for helping zero initializing MMIO2 backing memory. bugref:10687

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 44.0 KB
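
For orientation, a minimal caller-side sketch of the new helper follows. The single-argument form is an assumption made here for illustration, not taken from this commit; check iprt/memobj.h for the real declaration:

    /* Hypothetical usage sketch -- the signature below is assumed. */
    static int zeroMmio2BackingHypothetical(RTR0MEMOBJ hMemObj) /* hMemObj backs an MMIO2 region */
    {
        int rc = RTR0MemObjZeroInitialize(hMemObj);
        if (RT_FAILURE(rc))
            LogRel(("Zero-initializing MMIO2 backing memory failed: %Rrc\n", rc));
        return rc;
    }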
 
/* $Id: memobj-r0drv-solaris.c 104848 2024-06-05 09:38:20Z vboxsync $ */
/** @file
 * IPRT - Ring-0 Memory Objects, Solaris.
 */

/*
 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
 * in the VirtualBox distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 *
 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#include "the-solaris-kernel.h"
#include "internal/iprt.h"
#include <iprt/memobj.h>

#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/err.h>
#include <iprt/log.h>
#include <iprt/mem.h>
#include <iprt/param.h>
#include <iprt/process.h>
#include <iprt/string.h>
#include "internal/memobj.h"
#include "memobj-r0drv-solaris.h"


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
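/** Checks whether @a vx is a kernel virtual address (at or above kernelbase). */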
#define SOL_IS_KRNL_ADDR(vx)    ((uintptr_t)(vx) >= kernelbase)


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
/**
 * The Solaris version of the memory object structure.
 */
typedef struct RTR0MEMOBJSOL
{
    /** The core structure. */
    RTR0MEMOBJINTERNAL  Core;
    /** Pointer to kernel memory cookie. */
    ddi_umem_cookie_t   Cookie;
    /** Shadow locked pages. */
    void               *pvHandle;
    /** Access during locking. */
    int                 fAccess;
    /** Set if large pages are involved in an RTR0MEMOBJTYPE_PHYS allocation. */
    bool                fLargePage;
    /** Whether we have individual pages or a kernel-mapped virtual memory
     * block in an RTR0MEMOBJTYPE_PHYS_NC allocation. */
    bool                fIndivPages;
    /** Set if executable allocation - only RTR0MEMOBJTYPE_PHYS. */
    bool                fExecutable;
} RTR0MEMOBJSOL, *PRTR0MEMOBJSOL;


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
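/* Note: pages allocated by the helpers below are named (hashed in) on one of these dummy
 * vnodes using a unique, monotonically increasing offset; the mutexes serialize updates
 * to the running offsets (see rtR0MemObjSolPageAlloc and rtR0MemObjSolLargePageAlloc). */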
static vnode_t          g_PageVnode;
static kmutex_t         g_OffsetMtx;
static u_offset_t       g_offPage;

static vnode_t          g_LargePageVnode;
static kmutex_t         g_LargePageOffsetMtx;
static u_offset_t       g_offLargePage;
static bool             g_fLargePageNoReloc;


/**
 * Returns the physical address for a virtual address.
 *
 * @param pv        The virtual address.
 *
 * @returns The physical address corresponding to @a pv.
 */
static uint64_t rtR0MemObjSolVirtToPhys(void *pv)
{
    struct hat *pHat         = NULL;
    pfn_t       PageFrameNum = 0;
    uintptr_t   uVirtAddr    = (uintptr_t)pv;

    if (SOL_IS_KRNL_ADDR(pv))
        pHat = kas.a_hat;
    else
    {
        proc_t *pProcess = (proc_t *)RTR0ProcHandleSelf();
        AssertRelease(pProcess);
        pHat = pProcess->p_as->a_hat;
    }

    PageFrameNum = hat_getpfnum(pHat, (caddr_t)(uVirtAddr & PAGEMASK));
    AssertReleaseMsg(PageFrameNum != PFN_INVALID, ("rtR0MemObjSolVirtToPhys failed. pv=%p\n", pv));
    return (((uint64_t)PageFrameNum << PAGE_SHIFT) | (uVirtAddr & PAGE_OFFSET_MASK));
}


/**
 * Returns the physical address for a page.
 *
 * @param pPage     Pointer to the page.
 *
 * @returns The physical address for a page.
 */
static inline uint64_t rtR0MemObjSolPagePhys(page_t *pPage)
{
    AssertPtr(pPage);
    pfn_t PageFrameNum = page_pptonum(pPage);
    AssertReleaseMsg(PageFrameNum != PFN_INVALID, ("rtR0MemObjSolPagePhys failed pPage=%p\n", pPage));
    return (uint64_t)PageFrameNum << PAGE_SHIFT;
}


/**
 * Allocates one page.
 *
 * @param virtAddr  The virtual address to which this page may be mapped in
 *                  the future.
 *
 * @returns Pointer to the allocated page, NULL on failure.
 */
static page_t *rtR0MemObjSolPageAlloc(caddr_t virtAddr)
{
    u_offset_t offPage;
    seg_t      KernelSeg;

    /*
     * 16777215 terabytes of total memory for all VMs or
     * restart 8000 1GB VMs 2147483 times until wraparound!
     */
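    /* (u_offset_t is 64-bit, so there are 2^64 bytes = 16777216 TB of offset space per
     * vnode; likewise 8000 VMs x 1 GB x 2147483 restarts ~= 2^64 bytes before g_offPage
     * wraps around, hence the figures above.) */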
    mutex_enter(&g_OffsetMtx);
    AssertCompileSize(u_offset_t, sizeof(uint64_t)); NOREF(RTASSERTVAR);
    g_offPage = RT_ALIGN_64(g_offPage, PAGE_SIZE) + PAGE_SIZE;
    offPage   = g_offPage;
    mutex_exit(&g_OffsetMtx);

    KernelSeg.s_as = &kas;
    page_t *pPage = page_create_va(&g_PageVnode, offPage, PAGE_SIZE, PG_WAIT | PG_NORELOC, &KernelSeg, virtAddr);
    if (RT_LIKELY(pPage))
    {
        /*
         * Lock this page into memory "long term" to prevent this page from being paged out
         * when we drop the page lock temporarily (during free). Downgrade to a shared lock
         * to prevent page relocation.
         */
        page_pp_lock(pPage, 0 /* COW */, 1 /* Kernel */);
        page_io_unlock(pPage);
        page_downgrade(pPage);
        Assert(PAGE_LOCKED_SE(pPage, SE_SHARED));
    }

    return pPage;
}


/**
 * Destroys an allocated page.
 *
 * @param pPage     Pointer to the page to be destroyed.
 * @remarks This function expects page in @c pPage to be shared locked.
 */
static void rtR0MemObjSolPageDestroy(page_t *pPage)
{
    /*
     * We need to exclusive lock the pages before freeing them, if upgrading the shared lock to exclusive fails,
     * drop the page lock and look it up from the hash. Record the page offset before we drop the page lock as
     * we cannot touch any page_t members once the lock is dropped.
     */
    AssertPtr(pPage);
    Assert(PAGE_LOCKED_SE(pPage, SE_SHARED));

    u_offset_t offPage = pPage->p_offset;
    int rc = page_tryupgrade(pPage);
    if (!rc)
    {
        page_unlock(pPage);
        page_t *pFoundPage = page_lookup(&g_PageVnode, offPage, SE_EXCL);

        /*
         * Since we allocated the pages as PG_NORELOC we should only get back the exact page always.
         */
        AssertReleaseMsg(pFoundPage == pPage, ("Page lookup failed %p:%llx returned %p, expected %p\n",
                                               &g_PageVnode, offPage, pFoundPage, pPage));
    }
    Assert(PAGE_LOCKED_SE(pPage, SE_EXCL));
    page_pp_unlock(pPage, 0 /* COW */, 1 /* Kernel */);
    page_destroy(pPage, 0 /* move it to the free list */);
}


/* Currently not used on 32-bits, define it to shut up gcc. */
#if HC_ARCH_BITS == 64
/**
 * Allocates physical, non-contiguous memory of pages.
 *
 * @param puPhys    Where to store the physical address of first page. Optional,
 *                  can be NULL.
 * @param cb        The size of the allocation.
 *
 * @return Array of allocated pages, NULL on failure.
 */
static page_t **rtR0MemObjSolPagesAlloc(uint64_t *puPhys, size_t cb)
{
    /*
     * VM1:
     * The page freelist and cachelist both hold pages that are not mapped into any address space.
     * The cachelist is not really free pages but when memory is exhausted they'll be moved to the
     * free lists, it's the total of the free+cache list that we see on the 'free' column in vmstat.
     *
     * VM2:
     * @todo Document what happens behind the scenes in VM2 regarding the free and cachelist.
     */

    /*
     * Non-pageable memory reservation request for _4K pages, don't sleep.
     */
    size_t cPages = (cb + PAGE_SIZE - 1) >> PAGE_SHIFT;
    int rc = page_resv(cPages, KM_NOSLEEP);
    if (rc)
    {
        size_t   cbPages = cPages * sizeof(page_t *);
        page_t **ppPages = kmem_zalloc(cbPages, KM_SLEEP);
        if (RT_LIKELY(ppPages))
        {
            /*
             * Get pages from kseg, the 'virtAddr' here is only for colouring but unfortunately
             * we don't yet have the 'virtAddr' to which this memory may be mapped.
             */
            caddr_t virtAddr = 0;
            for (size_t i = 0; i < cPages; i++, virtAddr += PAGE_SIZE)
            {
                /*
                 * Get a page from the free list locked exclusively. The page will be named (hashed in)
                 * and we rely on it during free. The page we get will be shared locked to prevent the page
                 * from being relocated.
                 */
                page_t *pPage = rtR0MemObjSolPageAlloc(virtAddr);
                if (RT_UNLIKELY(!pPage))
                {
                    /*
                     * No page found, release whatever pages we grabbed so far.
                     */
                    for (size_t k = 0; k < i; k++)
                        rtR0MemObjSolPageDestroy(ppPages[k]);
                    kmem_free(ppPages, cbPages);
                    page_unresv(cPages);
                    return NULL;
                }

                ppPages[i] = pPage;
            }

            if (puPhys)
                *puPhys = rtR0MemObjSolPagePhys(ppPages[0]);
            return ppPages;
        }

        page_unresv(cPages);
    }

    return NULL;
}
#endif /* HC_ARCH_BITS == 64 */


/**
 * Frees the allocated pages.
 *
 * @param ppPages   Pointer to the page list.
 * @param cb        Size of the allocation.
 */
static void rtR0MemObjSolPagesFree(page_t **ppPages, size_t cb)
{
    size_t cPages  = (cb + PAGE_SIZE - 1) >> PAGE_SHIFT;
    size_t cbPages = cPages * sizeof(page_t *);
    for (size_t iPage = 0; iPage < cPages; iPage++)
        rtR0MemObjSolPageDestroy(ppPages[iPage]);

    kmem_free(ppPages, cbPages);
    page_unresv(cPages);
}


/**
 * Allocates one large page.
 *
 * @param puPhys        Where to store the physical address of the allocated
 *                      page. Optional, can be NULL.
 * @param cbLargePage   Size of the large page.
 *
 * @returns Pointer to a list of pages that cover the large page, NULL on
 *          failure.
 */
static page_t **rtR0MemObjSolLargePageAlloc(uint64_t *puPhys, size_t cbLargePage)
{
    /*
     * Check PG_NORELOC support for large pages. Using this helps prevent _1G page
     * fragmentation on systems that support it.
     */
    static bool fPageNoRelocChecked = false;
    if (fPageNoRelocChecked == false)
    {
        fPageNoRelocChecked = true;
        g_fLargePageNoReloc = false;
        if (   g_pfnrtR0Sol_page_noreloc_supported
            && g_pfnrtR0Sol_page_noreloc_supported(cbLargePage))
        {
            g_fLargePageNoReloc = true;
        }
    }

    /*
     * Non-pageable memory reservation request for _4K pages, don't sleep.
     */
    size_t cPages  = (cbLargePage + PAGE_SIZE - 1) >> PAGE_SHIFT;
    size_t cbPages = cPages * sizeof(page_t *);
    u_offset_t offPage = 0;
    int rc = page_resv(cPages, KM_NOSLEEP);
    if (rc)
    {
        page_t **ppPages = kmem_zalloc(cbPages, KM_SLEEP);
        if (RT_LIKELY(ppPages))
        {
            mutex_enter(&g_LargePageOffsetMtx);
            AssertCompileSize(u_offset_t, sizeof(uint64_t)); NOREF(RTASSERTVAR);
            g_offLargePage = RT_ALIGN_64(g_offLargePage, cbLargePage) + cbLargePage;
            offPage        = g_offLargePage;
            mutex_exit(&g_LargePageOffsetMtx);

            seg_t KernelSeg;
            KernelSeg.s_as = &kas;
            page_t *pRootPage = page_create_va_large(&g_LargePageVnode, offPage, cbLargePage,
                                                     PG_EXCL | (g_fLargePageNoReloc ? PG_NORELOC : 0), &KernelSeg,
                                                     0 /* vaddr */, NULL /* locality group */);
            if (pRootPage)
            {
                /*
                 * Split it into sub-pages, downgrade each page to a shared lock to prevent page relocation.
                 */
                page_t *pPageList = pRootPage;
                for (size_t iPage = 0; iPage < cPages; iPage++)
                {
                    page_t *pPage = pPageList;
                    AssertPtr(pPage);
                    AssertMsg(page_pptonum(pPage) == iPage + page_pptonum(pRootPage),
                              ("%p:%lx %lx+%lx\n", pPage, page_pptonum(pPage), iPage, page_pptonum(pRootPage)));
                    AssertMsg(pPage->p_szc == pRootPage->p_szc, ("Size code mismatch %p %d %d\n", pPage,
                                                                 (int)pPage->p_szc, (int)pRootPage->p_szc));

                    /*
                     * Lock the page into memory "long term". This prevents callers of page_try_demote_pages() (such as the
                     * pageout scanner) from demoting the large page into smaller pages while we temporarily release the
                     * exclusive lock (during free). We pass "0, 1" since we've already accounted for availrmem during
                     * page_resv().
                     */
                    page_pp_lock(pPage, 0 /* COW */, 1 /* Kernel */);

                    page_sub(&pPageList, pPage);
                    page_io_unlock(pPage);
                    page_downgrade(pPage);
                    Assert(PAGE_LOCKED_SE(pPage, SE_SHARED));

                    ppPages[iPage] = pPage;
                }
                Assert(pPageList == NULL);
                Assert(ppPages[0] == pRootPage);

                uint64_t uPhys = rtR0MemObjSolPagePhys(pRootPage);
                AssertMsg(!(uPhys & (cbLargePage - 1)), ("%llx %zx\n", uPhys, cbLargePage));
                if (puPhys)
                    *puPhys = uPhys;
                return ppPages;
            }

            /*
             * Don't restore the previous offset in case of failure (race condition); we have plenty of offset space.
             * The offset must be unique (for the same vnode) or we'll encounter panics on page_create_va_large().
             */
            kmem_free(ppPages, cbPages);
        }

        page_unresv(cPages);
    }
    return NULL;
}


/**
 * Frees the large page.
 *
 * @param ppPages       Pointer to the list of small pages that cover the
 *                      large page.
 * @param cbLargePage   Size of the allocation (i.e. size of the large
 *                      page).
 */
static void rtR0MemObjSolLargePageFree(page_t **ppPages, size_t cbLargePage)
{
    Assert(ppPages);
    Assert(cbLargePage > PAGE_SIZE);

    bool    fDemoted  = false;
    size_t  cPages    = (cbLargePage + PAGE_SIZE - 1) >> PAGE_SHIFT;
    size_t  cbPages   = cPages * sizeof(page_t *);
    page_t *pPageList = ppPages[0];

    for (size_t iPage = 0; iPage < cPages; iPage++)
    {
        /*
         * We need the pages exclusively locked, try upgrading the shared lock.
         * If it fails, drop the shared page lock (cannot access any page_t members once this is done)
         * and lookup the page from the page hash locking it exclusively.
         */
        page_t    *pPage   = ppPages[iPage];
        u_offset_t offPage = pPage->p_offset;
        int rc = page_tryupgrade(pPage);
        if (!rc)
        {
            page_unlock(pPage);
            page_t *pFoundPage = page_lookup(&g_LargePageVnode, offPage, SE_EXCL);
            AssertRelease(pFoundPage);

            if (g_fLargePageNoReloc)
            {
                /*
                 * This can only be guaranteed if PG_NORELOC is used while allocating the pages.
                 */
                AssertReleaseMsg(pFoundPage == pPage,
                                 ("lookup failed %p:%llu returned %p, expected %p\n", &g_LargePageVnode, offPage,
                                  pFoundPage, pPage));
            }

            /*
             * Check for page demotion (regardless of relocation). Some places in Solaris (e.g. VM1 page_retire())
             * could possibly demote the large page to _4K pages between our call to page_unlock() and page_lookup().
             */
            if (page_get_pagecnt(pFoundPage->p_szc) == 1)   /* Base size of only _4K associated with this page. */
                fDemoted = true;
            pPage = pFoundPage;
            ppPages[iPage] = pFoundPage;
        }
        Assert(PAGE_LOCKED_SE(pPage, SE_EXCL));
        page_pp_unlock(pPage, 0 /* COW */, 1 /* Kernel */);
    }

    if (fDemoted)
    {
        for (size_t iPage = 0; iPage < cPages; iPage++)
        {
            Assert(page_get_pagecnt(ppPages[iPage]->p_szc) == 1);
            page_destroy(ppPages[iPage], 0 /* move it to the free list */);
        }
    }
    else
    {
        /*
         * Although we shredded the linked list of adjacent pages when splitting up the large page,
         * page_destroy_pages() works on adjacent pages via array increments, so this does indeed
         * free all the pages.
         */
        AssertPtr(pPageList);
        page_destroy_pages(pPageList);
    }
    kmem_free(ppPages, cbPages);
    page_unresv(cPages);
}


/**
 * Unmaps kernel/user-space mapped memory.
 *
 * @param pv        Pointer to the mapped memory block.
 * @param cb        Size of the memory block.
 */
static void rtR0MemObjSolUnmap(void *pv, size_t cb)
{
    if (SOL_IS_KRNL_ADDR(pv))
    {
        hat_unload(kas.a_hat, pv, cb, HAT_UNLOAD | HAT_UNLOAD_UNLOCK);
        vmem_free(heap_arena, pv, cb);
    }
    else
    {
        struct as *pAddrSpace = ((proc_t *)RTR0ProcHandleSelf())->p_as;
        AssertPtr(pAddrSpace);
        as_rangelock(pAddrSpace);
        as_unmap(pAddrSpace, pv, cb);
        as_rangeunlock(pAddrSpace);
    }
}


/**
 * Lock down memory mappings for a virtual address.
 *
 * @param pv            Pointer to the memory to lock down.
 * @param cb            Size of the memory block.
 * @param fPageAccess   Page access rights (S_READ, S_WRITE, S_EXEC).
 *
 * @returns IPRT status code.
 */
static int rtR0MemObjSolLock(void *pv, size_t cb, int fPageAccess)
{
    /*
     * Kernel memory mappings on x86/amd64 are always locked, only handle user-space memory.
     */
    if (!SOL_IS_KRNL_ADDR(pv))
    {
        proc_t *pProc = (proc_t *)RTR0ProcHandleSelf();
        AssertPtr(pProc);
        faultcode_t rc = as_fault(pProc->p_as->a_hat, pProc->p_as, (caddr_t)pv, cb, F_SOFTLOCK, fPageAccess);
        if (rc)
        {
            LogRel(("rtR0MemObjSolLock failed for pv=%p cb=%lx fPageAccess=%d rc=%d\n", pv, cb, fPageAccess, rc));
            return VERR_LOCK_FAILED;
        }
    }
    return VINF_SUCCESS;
}


/**
 * Unlock memory mappings for a virtual address.
 *
 * @param pv            Pointer to the locked memory.
 * @param cb            Size of the memory block.
 * @param fPageAccess   Page access rights (S_READ, S_WRITE, S_EXEC).
 */
static void rtR0MemObjSolUnlock(void *pv, size_t cb, int fPageAccess)
{
    if (!SOL_IS_KRNL_ADDR(pv))
    {
        proc_t *pProcess = (proc_t *)RTR0ProcHandleSelf();
        AssertPtr(pProcess);
        as_fault(pProcess->p_as->a_hat, pProcess->p_as, (caddr_t)pv, cb, F_SOFTUNLOCK, fPageAccess);
    }
}


/**
 * Maps a list of physical pages into user address space.
 *
 * @param pVirtAddr     Where to store the virtual address of the mapping.
 * @param fPageAccess   Page access rights (PROT_READ, PROT_WRITE,
 *                      PROT_EXEC)
 * @param paPhysAddrs   Array of physical addresses to pages.
 * @param cb            Size of memory being mapped.
 * @param cbPageSize    The page size of the mapping.
 *
 * @returns IPRT status code.
 */
static int rtR0MemObjSolUserMap(caddr_t *pVirtAddr, unsigned fPageAccess, uint64_t *paPhysAddrs, size_t cb, size_t cbPageSize)
{
    struct as *pAddrSpace = ((proc_t *)RTR0ProcHandleSelf())->p_as;
    int rc;
    SEGVBOX_CRARGS Args;

    Args.paPhysAddrs = paPhysAddrs;
    Args.fPageAccess = fPageAccess;
    Args.cbPageSize  = cbPageSize;
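    /* Note: the as_map() below uses VirtualBox's own segment driver, rtR0SegVBoxSolCreate
     * (see memobj-r0drv-solaris.h), which loads the physical page list above into the
     * address range picked by map_addr. */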

    as_rangelock(pAddrSpace);
    if (g_frtSolOldMapAddr)
        g_rtSolMapAddr.u.pfnSol_map_addr_old(pVirtAddr, cb, 0 /* offset */, 0 /* vacalign */, MAP_SHARED);
    else
        g_rtSolMapAddr.u.pfnSol_map_addr(pVirtAddr, cb, 0 /* offset */, MAP_SHARED);
    if (*pVirtAddr != NULL)
        rc = as_map(pAddrSpace, *pVirtAddr, cb, rtR0SegVBoxSolCreate, &Args);
    else
        rc = ENOMEM;
    as_rangeunlock(pAddrSpace);

    return RTErrConvertFromErrno(rc);
}


DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)pMem;

    switch (pMemSolaris->Core.enmType)
    {
        case RTR0MEMOBJTYPE_LOW:
            rtR0SolMemFree(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            break;

        case RTR0MEMOBJTYPE_PHYS:
            if (pMemSolaris->Core.u.Phys.fAllocated)
            {
                if (pMemSolaris->fLargePage)
                    rtR0MemObjSolLargePageFree(pMemSolaris->pvHandle, pMemSolaris->Core.cb);
                else
                    rtR0SolMemFree(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            }
            break;

        case RTR0MEMOBJTYPE_PHYS_NC:
            if (pMemSolaris->fIndivPages)
                rtR0MemObjSolPagesFree(pMemSolaris->pvHandle, pMemSolaris->Core.cb);
            else
                rtR0SolMemFree(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            break;

        case RTR0MEMOBJTYPE_PAGE:
            if (!pMemSolaris->fExecutable)
                ddi_umem_free(pMemSolaris->Cookie);
            else
                segkmem_free(heaptext_arena, pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            break;

        case RTR0MEMOBJTYPE_LOCK:
            rtR0MemObjSolUnlock(pMemSolaris->Core.pv, pMemSolaris->Core.cb, pMemSolaris->fAccess);
            break;

        case RTR0MEMOBJTYPE_MAPPING:
            rtR0MemObjSolUnmap(pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            break;

        case RTR0MEMOBJTYPE_RES_VIRT:
            if (pMemSolaris->Core.u.ResVirt.R0Process == NIL_RTR0PROCESS)
                vmem_xfree(heap_arena, pMemSolaris->Core.pv, pMemSolaris->Core.cb);
            else
                AssertFailed();
            break;

        case RTR0MEMOBJTYPE_CONT: /* we don't use this type here. */
        default:
            AssertMsgFailed(("enmType=%d\n", pMemSolaris->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, const char *pszTag)
{
    /* Create the object. */
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PAGE, NULL, cb, pszTag);
    if (pMemSolaris)
    {
        void *pvMem;
        if (!fExecutable)
        {
            pMemSolaris->Core.fFlags |= RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC;
            pvMem = ddi_umem_alloc(cb, DDI_UMEM_SLEEP, &pMemSolaris->Cookie);
        }
        else
        {
            pMemSolaris->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC; /** @todo does segkmem_alloc zero the memory? */
            pvMem = segkmem_alloc(heaptext_arena, cb, KM_SLEEP);
        }
        if (pvMem)
        {
            pMemSolaris->Core.pv     = pvMem;
            pMemSolaris->pvHandle    = NULL;
            pMemSolaris->fExecutable = fExecutable;
            *ppMem = &pMemSolaris->Core;
            return VINF_SUCCESS;
        }
        rtR0MemObjDelete(&pMemSolaris->Core);
        return VERR_NO_PAGE_MEMORY;
    }
    return VERR_NO_MEMORY;
}


DECLHIDDEN(int) rtR0MemObjNativeAllocLarge(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, size_t cbLargePage, uint32_t fFlags,
                                           const char *pszTag)
{
    return rtR0MemObjFallbackAllocLarge(ppMem, cb, cbLargePage, fFlags, pszTag);
}


DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, const char *pszTag)
{
    AssertReturn(!fExecutable, VERR_NOT_SUPPORTED);

    /* Create the object */
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOW, NULL, cb, pszTag);
    if (pMemSolaris)
    {
        /* Allocate physically low page-aligned memory. */
        uint64_t uPhysHi = _4G - 1;
        void *pvMem = rtR0SolMemAlloc(uPhysHi, NULL /* puPhys */, cb, PAGE_SIZE, false /* fContig */);
        if (pvMem)
        {
            pMemSolaris->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC;
            pMemSolaris->Core.pv  = pvMem;
            pMemSolaris->pvHandle = NULL;
            *ppMem = &pMemSolaris->Core;
            return VINF_SUCCESS;
        }
        rtR0MemObjDelete(&pMemSolaris->Core);
        return VERR_NO_LOW_MEMORY;
    }
    return VERR_NO_MEMORY;
}


DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest,
                                          bool fExecutable, const char *pszTag)
{
    AssertReturn(!fExecutable, VERR_NOT_SUPPORTED);
    return rtR0MemObjNativeAllocPhys(ppMem, cb, PhysHighest, PAGE_SIZE /* alignment */, pszTag);
}


DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, const char *pszTag)
{
#if HC_ARCH_BITS == 64
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb, pszTag);
    if (pMemSolaris)
    {
        if (PhysHighest == NIL_RTHCPHYS)
        {
            uint64_t PhysAddr = UINT64_MAX;
            void *pvPages = rtR0MemObjSolPagesAlloc(&PhysAddr, cb);
            if (!pvPages)
            {
                LogRel(("rtR0MemObjNativeAllocPhysNC: rtR0MemObjSolPagesAlloc failed for cb=%u.\n", cb));
                rtR0MemObjDelete(&pMemSolaris->Core);
                return VERR_NO_MEMORY;
            }
            Assert(PhysAddr != UINT64_MAX);
            Assert(!(PhysAddr & PAGE_OFFSET_MASK));

            pMemSolaris->Core.pv     = NULL;
            pMemSolaris->pvHandle    = pvPages;
            pMemSolaris->fIndivPages = true;
        }
        else
        {
            /*
             * If we must satisfy an upper limit constraint, it isn't feasible to grab individual pages.
             * We fall back to using contig_alloc().
             */
            uint64_t PhysAddr = UINT64_MAX;
            void *pvMem = rtR0SolMemAlloc(PhysHighest, &PhysAddr, cb, PAGE_SIZE, false /* fContig */);
            if (!pvMem)
            {
                LogRel(("rtR0MemObjNativeAllocPhysNC: rtR0SolMemAlloc failed for cb=%u PhysHighest=%RHp.\n", cb, PhysHighest));
                rtR0MemObjDelete(&pMemSolaris->Core);
                return VERR_NO_MEMORY;
            }
            Assert(PhysAddr != UINT64_MAX);
            Assert(!(PhysAddr & PAGE_OFFSET_MASK));

            pMemSolaris->Core.pv     = pvMem;
            pMemSolaris->pvHandle    = NULL;
            pMemSolaris->fIndivPages = false;
        }
        pMemSolaris->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC;
        *ppMem = &pMemSolaris->Core;
        return VINF_SUCCESS;
    }
    return VERR_NO_MEMORY;

#else /* 32 bit: */
    return VERR_NOT_SUPPORTED; /* see the RTR0MemObjAllocPhysNC specs */
#endif
}


DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment,
                                          const char *pszTag)
{
    AssertMsgReturn(PhysHighest >= 16 * _1M, ("PhysHighest=%RHp\n", PhysHighest), VERR_NOT_SUPPORTED);

    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS, NULL, cb, pszTag);
    if (RT_UNLIKELY(!pMemSolaris))
        return VERR_NO_MEMORY;

    /*
     * Allocating one large page gets special treatment.
     */
    static uint32_t s_cbLargePage = UINT32_MAX;
    if (s_cbLargePage == UINT32_MAX)
    {
        if (page_num_pagesizes() > 1)
            ASMAtomicWriteU32(&s_cbLargePage, page_get_pagesize(1)); /* Page-size code 1 maps to _2M on Solaris x86/amd64. */
        else
            ASMAtomicWriteU32(&s_cbLargePage, 0);
    }

    uint64_t PhysAddr;
    if (   cb == s_cbLargePage
        && cb == uAlignment
        && PhysHighest == NIL_RTHCPHYS)
    {
        /*
         * Allocate one large page (backed by physically contiguous memory).
         */
        void *pvPages = rtR0MemObjSolLargePageAlloc(&PhysAddr, cb);
        if (RT_LIKELY(pvPages))
        {
            AssertMsg(!(PhysAddr & (cb - 1)), ("%RHp\n", PhysAddr));
            pMemSolaris->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC; /*?*/
            pMemSolaris->Core.pv                = NULL;
            pMemSolaris->Core.u.Phys.PhysBase   = PhysAddr;
            pMemSolaris->Core.u.Phys.fAllocated = true;
            pMemSolaris->pvHandle               = pvPages;
            pMemSolaris->fLargePage             = true;

            *ppMem = &pMemSolaris->Core;
            return VINF_SUCCESS;
        }
    }
    else
    {
        /*
         * Allocate physically contiguous memory aligned as specified.
         */
        AssertCompile(NIL_RTHCPHYS == UINT64_MAX); NOREF(RTASSERTVAR);
        PhysAddr = PhysHighest;
        void *pvMem = rtR0SolMemAlloc(PhysHighest, &PhysAddr, cb, uAlignment, true /* fContig */);
        if (RT_LIKELY(pvMem))
        {
            Assert(!(PhysAddr & PAGE_OFFSET_MASK));
            Assert(PhysAddr < PhysHighest);
            Assert(PhysAddr + cb <= PhysHighest);

            pMemSolaris->Core.fFlags |= RTR0MEMOBJ_FLAGS_UNINITIALIZED_AT_ALLOC;
            pMemSolaris->Core.pv                = pvMem;
            pMemSolaris->Core.u.Phys.PhysBase   = PhysAddr;
            pMemSolaris->Core.u.Phys.fAllocated = true;
            pMemSolaris->pvHandle               = NULL;
            pMemSolaris->fLargePage             = false;

            *ppMem = &pMemSolaris->Core;
            return VINF_SUCCESS;
        }
    }
    rtR0MemObjDelete(&pMemSolaris->Core);
    return VERR_NO_CONT_MEMORY;
}


DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy,
                                          const char *pszTag)
{
    AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_SUPPORTED);

    /* Create the object. */
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_PHYS, NULL, cb, pszTag);
    if (!pMemSolaris)
        return VERR_NO_MEMORY;

    /* There is no allocation here; the memory needs to be mapped somewhere first. */
    pMemSolaris->Core.u.Phys.fAllocated   = false;
    pMemSolaris->Core.u.Phys.PhysBase     = Phys;
    pMemSolaris->Core.u.Phys.uCachePolicy = uCachePolicy;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess,
                                         RTR0PROCESS R0Process, const char *pszTag)
{
    AssertReturn(R0Process == RTR0ProcHandleSelf(), VERR_INVALID_PARAMETER);
    NOREF(fAccess);

    /* Create the locking object */
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOCK,
                                                               (void *)R3Ptr, cb, pszTag);
    if (!pMemSolaris)
        return VERR_NO_MEMORY;

    /* Lock down user pages. */
    int fPageAccess = S_READ;
    if (fAccess & RTMEM_PROT_WRITE)
        fPageAccess = S_WRITE;
    if (fAccess & RTMEM_PROT_EXEC)
        fPageAccess = S_EXEC;
    int rc = rtR0MemObjSolLock((void *)R3Ptr, cb, fPageAccess);
    if (RT_FAILURE(rc))
    {
        LogRel(("rtR0MemObjNativeLockUser: rtR0MemObjSolLock failed rc=%d\n", rc));
        rtR0MemObjDelete(&pMemSolaris->Core);
        return rc;
    }

    /* Fill in the object attributes and return successfully. */
    pMemSolaris->Core.u.Lock.R0Process = R0Process;
    pMemSolaris->pvHandle = NULL;
    pMemSolaris->fAccess  = fPageAccess;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess, const char *pszTag)
{
    NOREF(fAccess);

    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOCK, pv, cb, pszTag);
    if (!pMemSolaris)
        return VERR_NO_MEMORY;

    /* Lock down kernel pages. */
    int fPageAccess = S_READ;
    if (fAccess & RTMEM_PROT_WRITE)
        fPageAccess = S_WRITE;
    if (fAccess & RTMEM_PROT_EXEC)
        fPageAccess = S_EXEC;
    int rc = rtR0MemObjSolLock(pv, cb, fPageAccess);
    if (RT_FAILURE(rc))
    {
        LogRel(("rtR0MemObjNativeLockKernel: rtR0MemObjSolLock failed rc=%d\n", rc));
        rtR0MemObjDelete(&pMemSolaris->Core);
        return rc;
    }

    /* Fill in the object attributes and return successfully. */
    pMemSolaris->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
    pMemSolaris->pvHandle = NULL;
    pMemSolaris->fAccess  = fPageAccess;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment,
                                              const char *pszTag)
{
    PRTR0MEMOBJSOL pMemSolaris;

    /*
     * Use xalloc.
     */
    void *pv = vmem_xalloc(heap_arena, cb, uAlignment, 0 /* phase */, 0 /* nocross */,
                           NULL /* minaddr */, NULL /* maxaddr */, VM_SLEEP);
    if (RT_UNLIKELY(!pv))
        return VERR_NO_MEMORY;

    /* Create the object. */
    pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_RES_VIRT, pv, cb, pszTag);
    if (!pMemSolaris)
    {
        LogRel(("rtR0MemObjNativeReserveKernel failed to alloc memory object.\n"));
        vmem_xfree(heap_arena, pv, cb);
        return VERR_NO_MEMORY;
    }

    pMemSolaris->Core.u.ResVirt.R0Process = NIL_RTR0PROCESS;
    *ppMem = &pMemSolaris->Core;
    return VINF_SUCCESS;
}


DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment,
                                            RTR0PROCESS R0Process, const char *pszTag)
{
    RT_NOREF(ppMem, R3PtrFixed, cb, uAlignment, R0Process, pszTag);
    return VERR_NOT_SUPPORTED;
}


DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                                          unsigned fProt, size_t offSub, size_t cbSub, const char *pszTag)
{
    /* Fail if requested to do something we can't. */
    AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * Use xalloc to get address space.
     */
    if (!cbSub)
        cbSub = pMemToMap->cb;
    void *pv = vmem_xalloc(heap_arena, cbSub, uAlignment, 0 /* phase */, 0 /* nocross */,
                           NULL /* minaddr */, NULL /* maxaddr */, VM_SLEEP);
    if (RT_UNLIKELY(!pv))
        return VERR_MAP_FAILED;

    /*
     * Load the pages from the other object into it.
     */
    uint32_t fAttr = HAT_UNORDERED_OK | HAT_MERGING_OK | HAT_LOADCACHING_OK | HAT_STORECACHING_OK;
    if (fProt & RTMEM_PROT_READ)
        fAttr |= PROT_READ;
    if (fProt & RTMEM_PROT_EXEC)
        fAttr |= PROT_EXEC;
    if (fProt & RTMEM_PROT_WRITE)
        fAttr |= PROT_WRITE;
    fAttr |= HAT_NOSYNC;

    int rc  = VINF_SUCCESS;
    size_t off = 0;
    while (off < cbSub)
    {
        RTHCPHYS HCPhys = RTR0MemObjGetPagePhysAddr(pMemToMap, (offSub + off) >> PAGE_SHIFT);
        AssertBreakStmt(HCPhys != NIL_RTHCPHYS, rc = VERR_INTERNAL_ERROR_2);
        pfn_t pfn = HCPhys >> PAGE_SHIFT;
        AssertBreakStmt(((RTHCPHYS)pfn << PAGE_SHIFT) == HCPhys, rc = VERR_INTERNAL_ERROR_3);

        hat_devload(kas.a_hat, (uint8_t *)pv + off, PAGE_SIZE, pfn, fAttr, HAT_LOAD_LOCK);

        /* Advance. */
        off += PAGE_SIZE;
    }
    if (RT_SUCCESS(rc))
    {
        /*
         * Create a memory object for the mapping.
         */
        PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_MAPPING,
                                                                   pv, cbSub, pszTag);
        if (pMemSolaris)
        {
            pMemSolaris->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
            *ppMem = &pMemSolaris->Core;
            return VINF_SUCCESS;
        }

        LogRel(("rtR0MemObjNativeMapKernel failed to alloc memory object.\n"));
        rc = VERR_NO_MEMORY;
    }

    if (off)
        hat_unload(kas.a_hat, pv, off, HAT_UNLOAD | HAT_UNLOAD_UNLOCK);
    vmem_xfree(heap_arena, pv, cbSub);
    return rc;
}


DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, PRTR0MEMOBJINTERNAL pMemToMap, RTR3PTR R3PtrFixed,
                                        size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process, size_t offSub, size_t cbSub,
                                        const char *pszTag)
{
    /*
     * Fend off things we cannot do.
     */
    AssertMsgReturn(R3PtrFixed == (RTR3PTR)-1, ("%p\n", R3PtrFixed), VERR_NOT_SUPPORTED);
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
    if (uAlignment != PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * Get parameters from the source object and offSub/cbSub.
     */
    PRTR0MEMOBJSOL pMemToMapSolaris = (PRTR0MEMOBJSOL)pMemToMap;
    uint8_t       *pb               = pMemToMapSolaris->Core.pv ? (uint8_t *)pMemToMapSolaris->Core.pv + offSub : NULL;
    size_t const   cb               = cbSub ? cbSub : pMemToMapSolaris->Core.cb;
    size_t const   cPages           = cb >> PAGE_SHIFT;
    Assert(!offSub || cbSub);
    Assert(!(cb & PAGE_OFFSET_MASK));

    /*
     * Create the mapping object
     */
    PRTR0MEMOBJSOL pMemSolaris;
    pMemSolaris = (PRTR0MEMOBJSOL)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_MAPPING, pb, cb, pszTag);
    if (RT_UNLIKELY(!pMemSolaris))
        return VERR_NO_MEMORY;

    /*
     * Gather the physical page address of the pages to be mapped.
     */
    int rc = VINF_SUCCESS;
    uint64_t *paPhysAddrs = kmem_zalloc(sizeof(uint64_t) * cPages, KM_SLEEP);
    if (RT_LIKELY(paPhysAddrs))
    {
        if (   pMemToMapSolaris->Core.enmType == RTR0MEMOBJTYPE_PHYS_NC
            && pMemToMapSolaris->fIndivPages)
        {
            /* Translate individual page_t to physical addresses. */
            page_t **papPages = pMemToMapSolaris->pvHandle;
            AssertPtr(papPages);
            papPages += offSub >> PAGE_SHIFT;
            for (size_t iPage = 0; iPage < cPages; iPage++)
                paPhysAddrs[iPage] = rtR0MemObjSolPagePhys(papPages[iPage]);
        }
        else if (   pMemToMapSolaris->Core.enmType == RTR0MEMOBJTYPE_PHYS
                 && pMemToMapSolaris->fLargePage)
        {
            /* Split up the large page into page-sized chunks. */
            RTHCPHYS Phys = pMemToMapSolaris->Core.u.Phys.PhysBase;
            Phys += offSub;
            for (size_t iPage = 0; iPage < cPages; iPage++, Phys += PAGE_SIZE)
                paPhysAddrs[iPage] = Phys;
        }
        else
        {
            /* Have kernel mapping, just translate virtual to physical. */
            AssertPtr(pb);
            for (size_t iPage = 0; iPage < cPages; iPage++)
            {
                paPhysAddrs[iPage] = rtR0MemObjSolVirtToPhys(pb);
                if (RT_UNLIKELY(paPhysAddrs[iPage] == -(uint64_t)1))
                {
                    LogRel(("rtR0MemObjNativeMapUser: no page to map.\n"));
                    rc = VERR_MAP_FAILED;
                    break;
                }
                pb += PAGE_SIZE;
            }
        }
        if (RT_SUCCESS(rc))
        {
            /*
             * Perform the actual mapping.
             */
            unsigned fPageAccess = PROT_READ;
            if (fProt & RTMEM_PROT_WRITE)
                fPageAccess |= PROT_WRITE;
            if (fProt & RTMEM_PROT_EXEC)
                fPageAccess |= PROT_EXEC;

            caddr_t UserAddr = NULL;
            rc = rtR0MemObjSolUserMap(&UserAddr, fPageAccess, paPhysAddrs, cb, PAGE_SIZE);
            if (RT_SUCCESS(rc))
            {
                pMemSolaris->Core.u.Mapping.R0Process = R0Process;
                pMemSolaris->Core.pv                  = UserAddr;

                *ppMem = &pMemSolaris->Core;
                kmem_free(paPhysAddrs, sizeof(uint64_t) * cPages);
                return VINF_SUCCESS;
            }

            LogRel(("rtR0MemObjNativeMapUser: rtR0MemObjSolUserMap failed rc=%d.\n", rc));
        }

        rc = VERR_MAP_FAILED;
        kmem_free(paPhysAddrs, sizeof(uint64_t) * cPages);
    }
    else
        rc = VERR_NO_MEMORY;
    rtR0MemObjDelete(&pMemSolaris->Core);
    return rc;
}


DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
{
    NOREF(pMem);
    NOREF(offSub);
    NOREF(cbSub);
    NOREF(fProt);
    return VERR_NOT_SUPPORTED;
}


DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
{
    PRTR0MEMOBJSOL pMemSolaris = (PRTR0MEMOBJSOL)pMem;

    switch (pMemSolaris->Core.enmType)
    {
        case RTR0MEMOBJTYPE_PHYS_NC:
            if (   pMemSolaris->Core.u.Phys.fAllocated
                || !pMemSolaris->fIndivPages)
            {
                uint8_t *pb = (uint8_t *)pMemSolaris->Core.pv + ((size_t)iPage << PAGE_SHIFT);
                return rtR0MemObjSolVirtToPhys(pb);
            }
            page_t **ppPages = pMemSolaris->pvHandle;
            return rtR0MemObjSolPagePhys(ppPages[iPage]);

        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_LOCK:
        {
            uint8_t *pb = (uint8_t *)pMemSolaris->Core.pv + ((size_t)iPage << PAGE_SHIFT);
            return rtR0MemObjSolVirtToPhys(pb);
        }

        /*
         * Although mapping can be handled by rtR0MemObjSolVirtToPhys(offset) like the above case,
         * request it from the parent so that we have a clear distinction between CONT/PHYS_NC.
         */
        case RTR0MEMOBJTYPE_MAPPING:
            return rtR0MemObjNativeGetPagePhysAddr(pMemSolaris->Core.uRel.Child.pParent, iPage);

        case RTR0MEMOBJTYPE_CONT:
        case RTR0MEMOBJTYPE_PHYS:
            AssertFailed(); /* handled by the caller */
        case RTR0MEMOBJTYPE_RES_VIRT:
        default:
            return NIL_RTHCPHYS;
    }
}


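/*
 * Note: this is the Solaris backend for the RTR0MemObjZeroInitialize addition mentioned in
 * the commit message above. It zeroes each physical page of the object through the kernel
 * physical mapping segment (hat_kpm_pfn2va), so no pre-existing virtual mapping is needed.
 */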
DECLHIDDEN(int) rtR0MemObjNativeZeroInitWithoutMapping(PRTR0MEMOBJINTERNAL pMem)
{
    PRTR0MEMOBJSOL const pMemSolaris = (PRTR0MEMOBJSOL)pMem;
    size_t const         cPages      = pMemSolaris->Core.cb >> PAGE_SHIFT;
    size_t               iPage;
    for (iPage = 0; iPage < cPages; iPage++)
    {
        void *pvPage;

        /* Get the physical address of the page. */
        RTHCPHYS const HCPhys = rtR0MemObjNativeGetPagePhysAddr(&pMemSolaris->Core, iPage);
        AssertReturn(HCPhys != NIL_RTHCPHYS, VERR_INTERNAL_ERROR_3);
        Assert(!(HCPhys & PAGE_OFFSET_MASK));

        /* Map it. */
        pfn_t const PageFrameNum = (pfn_t)(HCPhys >> PAGE_SHIFT);
        AssertReturn(PageFrameNum <= physmax, VERR_INTERNAL_ERROR_3);
        pvPage = hat_kpm_pfn2va(PageFrameNum);
        AssertPtrReturn(pvPage, VERR_INTERNAL_ERROR_3);

        /* Zero it. */
        RT_BZERO(pvPage, PAGE_SIZE);
    }
    return VINF_SUCCESS;
}