VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/nt/memobj-r0drv-nt.cpp@27737

Last change on this file since 27737 was 27352, checked in by vboxsync, 15 years ago

memobj-r0drv-nt.cpp: NT4 (vanilla, didn't check sp6) doesn't have MmAllocateContiguousMemorySpecifyCache. Hope this works...

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 28.4 KB
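
The fallback mentioned in the commit message is visible in rtR0MemObjNativeAllocContEx() further down: when building for vanilla NT4, the newer MmAllocateContiguousMemorySpecifyCache API is avoided in favour of plain MmAllocateContiguousMemory. As a rough, self-contained sketch of that pattern (the helper name rtNtAllocContiguousSketch is invented for illustration, and TARGET_NT4 stands in for the build-time switch used in the file):

#include <ntddk.h>

/* Illustrative helper only: choose the contiguous-allocation API at compile
   time, since vanilla NT4 does not export the SpecifyCache variant. */
static void *rtNtAllocContiguousSketch(SIZE_T cb, ULONGLONG uPhysHighest)
{
    PHYSICAL_ADDRESS PhysHighest;
    PhysHighest.QuadPart = uPhysHighest;
#ifndef TARGET_NT4
    /* W2K and later: can also constrain the lowest address, the boundary
       multiple and the cache type. */
    PHYSICAL_ADDRESS PhysLowest, PhysBoundary;
    PhysLowest.QuadPart   = 0;
    PhysBoundary.QuadPart = 0;
    return MmAllocateContiguousMemorySpecifyCache(cb, PhysLowest, PhysHighest, PhysBoundary, MmCached);
#else
    /* Vanilla NT4: only the simpler API (highest acceptable address) exists. */
    return MmAllocateContiguousMemory(cb, PhysHighest);
#endif
}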
 
/* $Id: memobj-r0drv-nt.cpp 27352 2010-03-14 23:27:45Z vboxsync $ */
/** @file
 * IPRT - Ring-0 Memory Objects, NT.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#include "the-nt-kernel.h"

#include <iprt/memobj.h>
#include <iprt/alloc.h>
#include <iprt/assert.h>
#include <iprt/log.h>
#include <iprt/param.h>
#include <iprt/string.h>
#include <iprt/process.h>
#include "internal/memobj.h"


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** Maximum number of bytes we try to lock down in one go.
 * This is supposed to have a limit right below 256MB, but this appears
 * to actually be much lower. The values here have been determined experimentally.
 */
#ifdef RT_ARCH_X86
# define MAX_LOCK_MEM_SIZE   (32*1024*1024) /* 32MB */
#endif
#ifdef RT_ARCH_AMD64
# define MAX_LOCK_MEM_SIZE   (24*1024*1024) /* 24MB */
#endif


/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/**
 * The NT version of the memory object structure.
 */
typedef struct RTR0MEMOBJNT
{
    /** The core structure. */
    RTR0MEMOBJINTERNAL Core;
#ifndef IPRT_TARGET_NT4
    /** Used MmAllocatePagesForMdl(). */
    bool fAllocatedPagesForMdl;
#endif
    /** Pointer returned by MmSecureVirtualMemory */
    PVOID pvSecureMem;
    /** The number of PMDLs (memory descriptor lists) in the array. */
    uint32_t cMdls;
    /** Array of MDL pointers. (variable size) */
    PMDL apMdls[1];
} RTR0MEMOBJNT, *PRTR0MEMOBJNT;


int rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;

    /*
     * Deal with it on a per type basis (just as a variation).
     */
    switch (pMemNt->Core.enmType)
    {
        case RTR0MEMOBJTYPE_LOW:
#ifndef IPRT_TARGET_NT4
            if (pMemNt->fAllocatedPagesForMdl)
            {
                Assert(pMemNt->Core.pv && pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
                MmUnmapLockedPages(pMemNt->Core.pv, pMemNt->apMdls[0]);
                pMemNt->Core.pv = NULL;
                if (pMemNt->pvSecureMem)
                {
                    MmUnsecureVirtualMemory(pMemNt->pvSecureMem);
                    pMemNt->pvSecureMem = NULL;
                }

                MmFreePagesFromMdl(pMemNt->apMdls[0]);
                ExFreePool(pMemNt->apMdls[0]);
                pMemNt->apMdls[0] = NULL;
                pMemNt->cMdls = 0;
                break;
            }
#endif
            AssertFailed();
            break;

        case RTR0MEMOBJTYPE_PAGE:
            Assert(pMemNt->Core.pv);
            ExFreePool(pMemNt->Core.pv);
            pMemNt->Core.pv = NULL;

            Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
            IoFreeMdl(pMemNt->apMdls[0]);
            pMemNt->apMdls[0] = NULL;
            pMemNt->cMdls = 0;
            break;

        case RTR0MEMOBJTYPE_CONT:
            Assert(pMemNt->Core.pv);
            MmFreeContiguousMemory(pMemNt->Core.pv);
            pMemNt->Core.pv = NULL;

            Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
            IoFreeMdl(pMemNt->apMdls[0]);
            pMemNt->apMdls[0] = NULL;
            pMemNt->cMdls = 0;
            break;

        case RTR0MEMOBJTYPE_PHYS:
        case RTR0MEMOBJTYPE_PHYS_NC:
#ifndef IPRT_TARGET_NT4
            if (pMemNt->fAllocatedPagesForMdl)
            {
                MmFreePagesFromMdl(pMemNt->apMdls[0]);
                ExFreePool(pMemNt->apMdls[0]);
                pMemNt->apMdls[0] = NULL;
                pMemNt->cMdls = 0;
                break;
            }
#endif
            AssertFailed();
            break;

        case RTR0MEMOBJTYPE_LOCK:
            if (pMemNt->pvSecureMem)
            {
                MmUnsecureVirtualMemory(pMemNt->pvSecureMem);
                pMemNt->pvSecureMem = NULL;
            }
            for (uint32_t i = 0; i < pMemNt->cMdls; i++)
            {
                MmUnlockPages(pMemNt->apMdls[i]);
                IoFreeMdl(pMemNt->apMdls[i]);
                pMemNt->apMdls[i] = NULL;
            }
            break;

        case RTR0MEMOBJTYPE_RES_VIRT:
/*            if (pMemNt->Core.u.ResVirt.R0Process == NIL_RTR0PROCESS)
            {
            }
            else
            {
            }*/
            AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
            return VERR_INTERNAL_ERROR;
            break;

        case RTR0MEMOBJTYPE_MAPPING:
        {
            Assert(pMemNt->cMdls == 0 && pMemNt->Core.pv);
            PRTR0MEMOBJNT pMemNtParent = (PRTR0MEMOBJNT)pMemNt->Core.uRel.Child.pParent;
            Assert(pMemNtParent);
            if (pMemNtParent->cMdls)
            {
                Assert(pMemNtParent->cMdls == 1 && pMemNtParent->apMdls[0]);
                Assert(   pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS
                       || pMemNt->Core.u.Mapping.R0Process == RTR0ProcHandleSelf());
                MmUnmapLockedPages(pMemNt->Core.pv, pMemNtParent->apMdls[0]);
            }
            else
            {
                Assert(   pMemNtParent->Core.enmType == RTR0MEMOBJTYPE_PHYS
                       && !pMemNtParent->Core.u.Phys.fAllocated);
                Assert(pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS);
                MmUnmapIoSpace(pMemNt->Core.pv, pMemNt->Core.cb);
            }
            pMemNt->Core.pv = NULL;
            break;
        }

        default:
            AssertMsgFailed(("enmType=%d\n", pMemNt->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    return VINF_SUCCESS;
}


int rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */

    /*
     * Try allocate the memory and create an MDL for them so
     * we can query the physical addresses and do mappings later
     * without running into out-of-memory conditions and similar problems.
     */
    int rc = VERR_NO_PAGE_MEMORY;
    void *pv = ExAllocatePoolWithTag(NonPagedPool, cb, IPRT_NT_POOL_TAG);
    if (pv)
    {
        PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
        if (pMdl)
        {
            MmBuildMdlForNonPagedPool(pMdl);
#ifdef RT_ARCH_AMD64
            MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
#endif

            /*
             * Create the IPRT memory object.
             */
            PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PAGE, pv, cb);
            if (pMemNt)
            {
                pMemNt->cMdls = 1;
                pMemNt->apMdls[0] = pMdl;
                *ppMem = &pMemNt->Core;
                return VINF_SUCCESS;
            }

            rc = VERR_NO_MEMORY;
            IoFreeMdl(pMdl);
        }
        ExFreePool(pv);
    }
    return rc;
}


int rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */

    /*
     * Try see if we get lucky first...
     * (We could probably just assume we're lucky on NT4.)
     */
    int rc = rtR0MemObjNativeAllocPage(ppMem, cb, fExecutable);
    if (RT_SUCCESS(rc))
    {
        size_t iPage = cb >> PAGE_SHIFT;
        while (iPage-- > 0)
            if (rtR0MemObjNativeGetPagePhysAddr(*ppMem, iPage) >= _4G)
            {
                rc = VERR_NO_MEMORY;
                break;
            }
        if (RT_SUCCESS(rc))
            return rc;

        /* The following ASSUMES that rtR0MemObjNativeAllocPage returns a completed object. */
        RTR0MemObjFree(*ppMem, false);
        *ppMem = NULL;
    }

#ifndef IPRT_TARGET_NT4
    /*
     * Use MmAllocatePagesForMdl to specify the range of physical addresses we wish to use.
     */
    PHYSICAL_ADDRESS Zero;
    Zero.QuadPart = 0;
    PHYSICAL_ADDRESS HighAddr;
    HighAddr.QuadPart = _4G - 1;
    PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
    if (pMdl)
    {
        if (MmGetMdlByteCount(pMdl) >= cb)
        {
            __try
            {
                void *pv = MmMapLockedPagesSpecifyCache(pMdl, KernelMode, MmCached, NULL /* no base address */,
                                                        FALSE /* no bug check on failure */, NormalPagePriority);
                if (pv)
                {
                    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_LOW, pv, cb);
                    if (pMemNt)
                    {
                        pMemNt->fAllocatedPagesForMdl = true;
                        pMemNt->cMdls = 1;
                        pMemNt->apMdls[0] = pMdl;
                        *ppMem = &pMemNt->Core;
                        return VINF_SUCCESS;
                    }
                    MmUnmapLockedPages(pv, pMdl);
                }
            }
            __except(EXCEPTION_EXECUTE_HANDLER)
            {
                NTSTATUS rcNt = GetExceptionCode();
                Log(("rtR0MemObjNativeAllocLow: Exception Code %#x\n", rcNt));
                /* nothing */
            }
        }
        MmFreePagesFromMdl(pMdl);
        ExFreePool(pMdl);
    }
#endif /* !IPRT_TARGET_NT4 */

    /*
     * Fall back on contiguous memory...
     */
    return rtR0MemObjNativeAllocCont(ppMem, cb, fExecutable);
}


/**
 * Internal worker for rtR0MemObjNativeAllocCont(), rtR0MemObjNativeAllocPhys()
 * and rtR0MemObjNativeAllocPhysNC() that takes a max physical address in addition
 * to what rtR0MemObjNativeAllocCont() does.
 *
 * @returns IPRT status code.
 * @param   ppMem           Where to store the pointer to the ring-0 memory object.
 * @param   cb              The size.
 * @param   fExecutable     Whether the mapping should be executable or not.
 * @param   PhysHighest     The highest physical address for the pages in allocation.
 * @param   uAlignment      The alignment of the physical memory to allocate.
 *                          Supported values are PAGE_SIZE, _2M, _4M and _1G.
 */
static int rtR0MemObjNativeAllocContEx(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, RTHCPHYS PhysHighest,
                                       size_t uAlignment)
{
    AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
#ifdef TARGET_NT4
    if (uAlignment != PAGE_SIZE)
        return VERR_NOT_SUPPORTED;
#endif

    /*
     * Allocate the memory and create an MDL for it.
     */
    PHYSICAL_ADDRESS PhysAddrHighest;
    PhysAddrHighest.QuadPart = PhysHighest;
#ifndef TARGET_NT4
    PHYSICAL_ADDRESS PhysAddrLowest, PhysAddrBoundary;
    PhysAddrLowest.QuadPart = 0;
    PhysAddrBoundary.QuadPart = (uAlignment == PAGE_SIZE) ? 0 : uAlignment;
    void *pv = MmAllocateContiguousMemorySpecifyCache(cb, PhysAddrLowest, PhysAddrHighest, PhysAddrBoundary, MmCached);
#else
    void *pv = MmAllocateContiguousMemory(cb, PhysAddrHighest);
#endif
    if (!pv)
        return VERR_NO_MEMORY;

    PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
    if (pMdl)
    {
        MmBuildMdlForNonPagedPool(pMdl);
#ifdef RT_ARCH_AMD64
        MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
#endif

        PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_CONT, pv, cb);
        if (pMemNt)
        {
            pMemNt->Core.u.Cont.Phys = (RTHCPHYS)*MmGetMdlPfnArray(pMdl) << PAGE_SHIFT;
            pMemNt->cMdls = 1;
            pMemNt->apMdls[0] = pMdl;
            *ppMem = &pMemNt->Core;
            return VINF_SUCCESS;
        }

        IoFreeMdl(pMdl);
    }
    MmFreeContiguousMemory(pv);
    return VERR_NO_MEMORY;
}


int rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    return rtR0MemObjNativeAllocContEx(ppMem, cb, fExecutable, _4G-1, PAGE_SIZE /* alignment */);
}


int rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
{
#ifndef IPRT_TARGET_NT4
    /*
     * Try and see if we're lucky and get a contiguous chunk from MmAllocatePagesForMdl.
     *
     * This is preferable to using MmAllocateContiguousMemory because there are
     * a few situations where the memory shouldn't be mapped, like for instance
     * VT-x control memory. Since these are rather small allocations (one or
     * two pages) MmAllocatePagesForMdl will probably be able to satisfy the
     * request.
     *
     * If the allocation is big, the chances are *probably* not very good. The
     * current limit is kind of random...
     */
    if (    cb < _128K
        &&  uAlignment == PAGE_SIZE)

    {
        PHYSICAL_ADDRESS Zero;
        Zero.QuadPart = 0;
        PHYSICAL_ADDRESS HighAddr;
        HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
        PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
        if (pMdl)
        {
            if (MmGetMdlByteCount(pMdl) >= cb)
            {
                PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMdl);
                PFN_NUMBER Pfn = paPfns[0] + 1;
                const size_t cPages = cb >> PAGE_SHIFT;
                size_t iPage;
                for (iPage = 1; iPage < cPages; iPage++, Pfn++)
                    if (paPfns[iPage] != Pfn)
                        break;
                if (iPage >= cPages)
                {
                    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb);
                    if (pMemNt)
                    {
                        pMemNt->Core.u.Phys.fAllocated = true;
                        pMemNt->Core.u.Phys.PhysBase = (RTHCPHYS)paPfns[0] << PAGE_SHIFT;
                        pMemNt->fAllocatedPagesForMdl = true;
                        pMemNt->cMdls = 1;
                        pMemNt->apMdls[0] = pMdl;
                        *ppMem = &pMemNt->Core;
                        return VINF_SUCCESS;
                    }
                }
            }
            MmFreePagesFromMdl(pMdl);
            ExFreePool(pMdl);
        }
    }
#endif /* !IPRT_TARGET_NT4 */

    return rtR0MemObjNativeAllocContEx(ppMem, cb, false, PhysHighest, uAlignment);
}


int rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
#ifndef IPRT_TARGET_NT4
    PHYSICAL_ADDRESS Zero;
    Zero.QuadPart = 0;
    PHYSICAL_ADDRESS HighAddr;
    HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
    PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
    if (pMdl)
    {
        if (MmGetMdlByteCount(pMdl) >= cb)
        {
            PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb);
            if (pMemNt)
            {
                pMemNt->fAllocatedPagesForMdl = true;
                pMemNt->cMdls = 1;
                pMemNt->apMdls[0] = pMdl;
                *ppMem = &pMemNt->Core;
                return VINF_SUCCESS;
            }
        }
        MmFreePagesFromMdl(pMdl);
        ExFreePool(pMdl);
    }
    return VERR_NO_MEMORY;
#else  /* IPRT_TARGET_NT4 */
    return VERR_NOT_SUPPORTED;
#endif /* IPRT_TARGET_NT4 */
}


int rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb)
{
    /*
     * Validate the address range and create a descriptor for it.
     */
    PFN_NUMBER Pfn = (PFN_NUMBER)(Phys >> PAGE_SHIFT);
    if (((RTHCPHYS)Pfn << PAGE_SHIFT) != Phys)
        return VERR_ADDRESS_TOO_BIG;

    /*
     * Create the IPRT memory object.
     */
    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb);
    if (pMemNt)
    {
        pMemNt->Core.u.Phys.PhysBase = Phys;
        pMemNt->Core.u.Phys.fAllocated = false;
        *ppMem = &pMemNt->Core;
        return VINF_SUCCESS;
    }
    return VERR_NO_MEMORY;
}


/**
 * Internal worker for locking down pages.
 *
 * @return IPRT status code.
 *
 * @param   ppMem           Where to store the memory object pointer.
 * @param   pv              First page.
 * @param   cb              Number of bytes.
 * @param   fAccess         The desired access, a combination of RTMEM_PROT_READ
 *                          and RTMEM_PROT_WRITE.
 * @param   R0Process       The process \a pv and \a cb refers to.
 */
static int rtR0MemObjNtLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
{
    /*
     * Calc the number of MDLs we need and allocate the memory object structure.
     */
    size_t cMdls = cb / MAX_LOCK_MEM_SIZE;
    if (cb % MAX_LOCK_MEM_SIZE)
        cMdls++;
    if (cMdls >= UINT32_MAX)
        return VERR_OUT_OF_RANGE;
    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJNT, apMdls[cMdls]),
                                                        RTR0MEMOBJTYPE_LOCK, pv, cb);
    if (!pMemNt)
        return VERR_NO_MEMORY;

    /*
     * Loop locking down the sub parts of the memory.
     */
    int rc = VINF_SUCCESS;
    size_t cbTotal = 0;
    uint8_t *pb = (uint8_t *)pv;
    uint32_t iMdl;
    for (iMdl = 0; iMdl < cMdls; iMdl++)
    {
        /*
         * Calc the Mdl size and allocate it.
         */
        size_t cbCur = cb - cbTotal;
        if (cbCur > MAX_LOCK_MEM_SIZE)
            cbCur = MAX_LOCK_MEM_SIZE;
        AssertMsg(cbCur, ("cbCur: 0!\n"));
        PMDL pMdl = IoAllocateMdl(pb, (ULONG)cbCur, FALSE, FALSE, NULL);
        if (!pMdl)
        {
            rc = VERR_NO_MEMORY;
            break;
        }

        /*
         * Lock the pages.
         */
        __try
        {
            MmProbeAndLockPages(pMdl,
                                R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode,
                                fAccess == RTMEM_PROT_READ
                                ? IoReadAccess
                                : fAccess == RTMEM_PROT_WRITE
                                ? IoWriteAccess
                                : IoModifyAccess);

            pMemNt->apMdls[iMdl] = pMdl;
            pMemNt->cMdls++;
        }
        __except(EXCEPTION_EXECUTE_HANDLER)
        {
            IoFreeMdl(pMdl);
            rc = VERR_LOCK_FAILED;
            break;
        }

        if (R0Process != NIL_RTR0PROCESS)
        {
            /* Make sure the user process can't change the allocation. */
            pMemNt->pvSecureMem = MmSecureVirtualMemory(pv, cb,
                                                        fAccess & RTMEM_PROT_WRITE
                                                        ? PAGE_READWRITE
                                                        : PAGE_READONLY);
            if (!pMemNt->pvSecureMem)
            {
                rc = VERR_NO_MEMORY;
                break;
            }
        }

        /* next */
        cbTotal += cbCur;
        pb += cbCur;
    }
    if (RT_SUCCESS(rc))
    {
        Assert(pMemNt->cMdls == cMdls);
        pMemNt->Core.u.Lock.R0Process = R0Process;
        *ppMem = &pMemNt->Core;
        return rc;
    }

    /*
     * We failed, perform cleanups.
     */
    while (iMdl-- > 0)
    {
        MmUnlockPages(pMemNt->apMdls[iMdl]);
        IoFreeMdl(pMemNt->apMdls[iMdl]);
        pMemNt->apMdls[iMdl] = NULL;
    }
    if (pMemNt->pvSecureMem)
    {
        MmUnsecureVirtualMemory(pMemNt->pvSecureMem);
        pMemNt->pvSecureMem = NULL;
    }

    rtR0MemObjDelete(&pMemNt->Core);
    return rc;
}


int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
{
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
    /* (Can use MmProbeAndLockProcessPages if we need to mess with other processes later.) */
    return rtR0MemObjNtLock(ppMem, (void *)R3Ptr, cb, fAccess, R0Process);
}


int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
{
    return rtR0MemObjNtLock(ppMem, pv, cb, fAccess, NIL_RTR0PROCESS);
}


int rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
{
    /*
     * MmCreateSection(SEC_RESERVE) + MmMapViewInSystemSpace perhaps?
     */
    return VERR_NOT_IMPLEMENTED;
}


int rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
{
    /*
     * ZwCreateSection(SEC_RESERVE) + ZwMapViewOfSection perhaps?
     */
    return VERR_NOT_IMPLEMENTED;
}


/**
 * Internal worker for rtR0MemObjNativeMapKernel and rtR0MemObjNativeMapUser.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to store the memory object for the mapping.
 * @param   pMemToMap   The memory object to map.
 * @param   pvFixed     Where to map it. (void *)-1 if anywhere is fine.
 * @param   uAlignment  The alignment requirement for the mapping.
 * @param   fProt       The desired page protection for the mapping.
 * @param   R0Process   If NIL_RTR0PROCESS map into system (kernel) memory.
 *                      If not nil, it's the current process.
 */
static int rtR0MemObjNtMap(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                           unsigned fProt, RTR0PROCESS R0Process)
{
    int rc = VERR_MAP_FAILED;

    /*
     * Check that the specified alignment is supported.
     */
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * There are two basic cases here, either we've got an MDL and can
     * map it using MmMapLockedPages, or we've got a contiguous physical
     * range (MMIO most likely) and can use MmMapIoSpace.
     */
    PRTR0MEMOBJNT pMemNtToMap = (PRTR0MEMOBJNT)pMemToMap;
    if (pMemNtToMap->cMdls)
    {
        /* don't attempt map locked regions with more than one mdl. */
        if (pMemNtToMap->cMdls != 1)
            return VERR_NOT_SUPPORTED;

#ifdef IPRT_TARGET_NT4
        /* NT SP0 can't map to a specific address. */
        if (pvFixed != (void *)-1)
            return VERR_NOT_SUPPORTED;
#endif

        /* we can't map anything to the first page, sorry. */
        if (pvFixed == 0)
            return VERR_NOT_SUPPORTED;

        /* only one system mapping for now - no time to figure out MDL restrictions right now. */
        if (    pMemNtToMap->Core.uRel.Parent.cMappings
            &&  R0Process == NIL_RTR0PROCESS)
            return VERR_NOT_SUPPORTED;

        __try
        {
            /** @todo uAlignment */
            /** @todo How to set the protection on the pages? */
#ifdef IPRT_TARGET_NT4
            void *pv = MmMapLockedPages(pMemNtToMap->apMdls[0],
                                        R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode);
#else
            void *pv = MmMapLockedPagesSpecifyCache(pMemNtToMap->apMdls[0],
                                                    R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode,
                                                    MmCached,
                                                    pvFixed != (void *)-1 ? pvFixed : NULL,
                                                    FALSE /* no bug check on failure */,
                                                    NormalPagePriority);
#endif
            if (pv)
            {
                NOREF(fProt);

                PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING, pv,
                                                                    pMemNtToMap->Core.cb);
                if (pMemNt)
                {
                    pMemNt->Core.u.Mapping.R0Process = R0Process;
                    *ppMem = &pMemNt->Core;
                    return VINF_SUCCESS;
                }

                rc = VERR_NO_MEMORY;
                MmUnmapLockedPages(pv, pMemNtToMap->apMdls[0]);
            }
        }
        __except(EXCEPTION_EXECUTE_HANDLER)
        {
            NTSTATUS rcNt = GetExceptionCode();
            Log(("rtR0MemObjNtMap: Exception Code %#x\n", rcNt));

            /* nothing */
            rc = VERR_MAP_FAILED;
        }

    }
    else
    {
        AssertReturn(   pMemNtToMap->Core.enmType == RTR0MEMOBJTYPE_PHYS
                     && !pMemNtToMap->Core.u.Phys.fAllocated, VERR_INTERNAL_ERROR);

        /* cannot map phys mem to user space (yet). */
        if (R0Process != NIL_RTR0PROCESS)
            return VERR_NOT_SUPPORTED;

        /** @todo uAlignment */
        /** @todo How to set the protection on the pages? */
        PHYSICAL_ADDRESS Phys;
        Phys.QuadPart = pMemNtToMap->Core.u.Phys.PhysBase;
        void *pv = MmMapIoSpace(Phys, pMemNtToMap->Core.cb, MmCached); /** @todo add cache type to fProt. */
        if (pv)
        {
            PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING, pv,
                                                                pMemNtToMap->Core.cb);
            if (pMemNt)
            {
                pMemNt->Core.u.Mapping.R0Process = R0Process;
                *ppMem = &pMemNt->Core;
                return VINF_SUCCESS;
            }

            rc = VERR_NO_MEMORY;
            MmUnmapIoSpace(pv, pMemNtToMap->Core.cb);
        }
    }

    NOREF(uAlignment); NOREF(fProt);
    return rc;
}


int rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                              unsigned fProt, size_t offSub, size_t cbSub)
{
    AssertMsgReturn(!offSub && !cbSub, ("%#x %#x\n", offSub, cbSub), VERR_NOT_SUPPORTED);
    return rtR0MemObjNtMap(ppMem, pMemToMap, pvFixed, uAlignment, fProt, NIL_RTR0PROCESS);
}


int rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
{
    AssertReturn(R0Process == RTR0ProcHandleSelf(), VERR_NOT_SUPPORTED);
    return rtR0MemObjNtMap(ppMem, pMemToMap, (void *)R3PtrFixed, uAlignment, fProt, R0Process);
}


int rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
{
    NOREF(pMem);
    NOREF(offSub);
    NOREF(cbSub);
    NOREF(fProt);
    return VERR_NOT_SUPPORTED;
}


RTHCPHYS rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
{
    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;

    if (pMemNt->cMdls)
    {
        if (pMemNt->cMdls == 1)
        {
            PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[0]);
            return (RTHCPHYS)paPfns[iPage] << PAGE_SHIFT;
        }

        size_t iMdl = iPage / (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
        size_t iMdlPfn = iPage % (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
        PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[iMdl]);
        return (RTHCPHYS)paPfns[iMdlPfn] << PAGE_SHIFT;
    }

    switch (pMemNt->Core.enmType)
    {
        case RTR0MEMOBJTYPE_MAPPING:
            return rtR0MemObjNativeGetPagePhysAddr(pMemNt->Core.uRel.Child.pParent, iPage);

        case RTR0MEMOBJTYPE_PHYS:
            return pMemNt->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);

        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_PHYS_NC:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_CONT:
        case RTR0MEMOBJTYPE_LOCK:
        default:
            AssertMsgFailed(("%d\n", pMemNt->Core.enmType));
        case RTR0MEMOBJTYPE_RES_VIRT:
            return NIL_RTHCPHYS;
    }
}
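
For context on how this backend is consumed, here is a hedged caller-side sketch using the public IPRT wrappers declared in include/iprt/memobj.h (RTR0MemObjAllocPage, RTR0MemObjAddress, RTR0MemObjGetPagePhysAddr, RTR0MemObjFree); the function name exampleUseMemObj and the chosen sizes are invented for illustration and are not part of this file:

#include <iprt/memobj.h>
#include <iprt/param.h>
#include <iprt/err.h>
#include <iprt/log.h>

/* Illustrative only: allocate two page-aligned pages, look up the physical
   address of the first page, then free the object again. */
static int exampleUseMemObj(void)
{
    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjAllocPage(&hMemObj, 2 * PAGE_SIZE, false /* fExecutable */);
    if (RT_SUCCESS(rc))
    {
        void     *pv    = RTR0MemObjAddress(hMemObj);
        RTHCPHYS  Phys0 = RTR0MemObjGetPagePhysAddr(hMemObj, 0 /* iPage */);
        Log(("exampleUseMemObj: pv=%p Phys0=%RHp\n", pv, Phys0));

        rc = RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
    }
    return rc;
}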