VirtualBox

source: vbox/trunk/src/VBox/HostDrivers/Support/SUPDrv.c@ 15212

最後變更 在這個檔案從15212是 15212,由 vboxsync 提交於 16 年 前

SUPDrv: SUPR0AbsKernelCS/SS/DS/ES shouldn't be the LDT version on darwin!

  • 屬性 svn:eol-style 設為 native
  • 屬性 svn:keywords 設為 Author Date Id Revision
檔案大小: 179.3 KB
 
1/* $Revision: 15212 $ */
2/** @file
3 * VBoxDrv - The VirtualBox Support Driver - Common code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 *
26 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
27 * Clara, CA 95054 USA or visit http://www.sun.com if you need
28 * additional information or have any questions.
29 */
30
31/*******************************************************************************
32* Header Files *
33*******************************************************************************/
34#define LOG_GROUP LOG_GROUP_SUP_DRV
35#include "SUPDrvInternal.h"
36#ifndef PAGE_SHIFT
37# include <iprt/param.h>
38#endif
39#include <iprt/alloc.h>
40#include <iprt/semaphore.h>
41#include <iprt/spinlock.h>
42#include <iprt/thread.h>
43#include <iprt/process.h>
44#include <iprt/mp.h>
45#include <iprt/power.h>
46#include <iprt/cpuset.h>
47#include <iprt/uuid.h>
48#include <VBox/param.h>
49#include <VBox/log.h>
50#include <VBox/err.h>
51#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS)
52# include <iprt/crc32.h>
53# include <iprt/net.h>
54#endif
55/* VBox/x86.h not compatible with the Linux kernel sources */
56#ifdef RT_OS_LINUX
57# define X86_CPUID_VENDOR_AMD_EBX 0x68747541
58# define X86_CPUID_VENDOR_AMD_ECX 0x444d4163
59# define X86_CPUID_VENDOR_AMD_EDX 0x69746e65
60#else
61# include <VBox/x86.h>
62#endif
63
64/*
65 * Logging assignments:
66 * Log - useful stuff, like failures.
67 * LogFlow - program flow, except the really noisy bits.
68 * Log2 - Cleanup.
69 * Log3 - Loader flow noise.
70 * Log4 - Call VMMR0 flow noise.
71 * Log5 - Native yet-to-be-defined noise.
72 * Log6 - Native ioctl flow noise.
73 *
74 * Logging requires BUILD_TYPE=debug and possibly changes to the logger
75 * instanciation in log-vbox.c(pp).
76 */
77
78
79/*******************************************************************************
80* Defined Constants And Macros *
81*******************************************************************************/
82/* from x86.h - clashes with linux thus this duplication */
83#undef X86_CR0_PG
84#define X86_CR0_PG RT_BIT(31)
85#undef X86_CR0_PE
86#define X86_CR0_PE RT_BIT(0)
87#undef X86_CPUID_AMD_FEATURE_EDX_NX
88#define X86_CPUID_AMD_FEATURE_EDX_NX RT_BIT(20)
89#undef MSR_K6_EFER
90#define MSR_K6_EFER 0xc0000080
91#undef MSR_K6_EFER_NXE
92#define MSR_K6_EFER_NXE RT_BIT(11)
93#undef MSR_K6_EFER_LMA
94#define MSR_K6_EFER_LMA RT_BIT(10)
95#undef X86_CR4_PGE
96#define X86_CR4_PGE RT_BIT(7)
97#undef X86_CR4_PAE
98#define X86_CR4_PAE RT_BIT(5)
99#undef X86_CPUID_AMD_FEATURE_EDX_LONG_MODE
100#define X86_CPUID_AMD_FEATURE_EDX_LONG_MODE RT_BIT(29)
101
102
103/** The frequency by which we recalculate the u32UpdateHz and
104 * u32UpdateIntervalNS GIP members. The value must be a power of 2. */
105#define GIP_UPDATEHZ_RECALC_FREQ 0x800
106
107/**
108 * Validates a session pointer.
109 *
110 * @returns true/false accordingly.
111 * @param pSession The session.
112 */
113#define SUP_IS_SESSION_VALID(pSession) \
114 ( VALID_PTR(pSession) \
115 && pSession->u32Cookie == BIRD_INV)
116
117/** @def VBOX_SVN_REV
118 * The makefile should define this if it can. */
119#ifndef VBOX_SVN_REV
120# define VBOX_SVN_REV 0
121#endif
122
123/*******************************************************************************
124* Internal Functions *
125*******************************************************************************/
126static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession);
127static int supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType);
128static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN pReq);
129static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD pReq);
130static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE pReq);
131static int supdrvIOCtl_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq);
132static int supdrvIDC_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQGETSYM pReq);
133static int supdrvLdrSetVMMR0EPs(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0EntryInt, void *pvVMMR0EntryFast, void *pvVMMR0EntryEx);
134static void supdrvLdrUnsetVMMR0EPs(PSUPDRVDEVEXT pDevExt);
135static int supdrvLdrAddUsage(PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage);
136static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage);
137static int supdrvIOCtl_CallServiceModule(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPCALLSERVICE pReq);
138static SUPGIPMODE supdrvGipDeterminTscMode(PSUPDRVDEVEXT pDevExt);
139#ifdef RT_OS_WINDOWS
140static int supdrvPageGetPhys(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages);
141static bool supdrvPageWasLockedByPageAlloc(PSUPDRVSESSION pSession, RTR3PTR pvR3);
142#endif /* RT_OS_WINDOWS */
143static int supdrvGipCreate(PSUPDRVDEVEXT pDevExt);
144static void supdrvGipDestroy(PSUPDRVDEVEXT pDevExt);
145static DECLCALLBACK(void) supdrvGipSyncTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick);
146static DECLCALLBACK(void) supdrvGipAsyncTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick);
147static DECLCALLBACK(void) supdrvGipMpEvent(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvUser);
148
149#ifdef RT_WITH_W64_UNWIND_HACK
150DECLASM(int) supdrvNtWrapVMMR0EntryEx(PFNRT pfnVMMR0EntryEx, PVM pVM, unsigned uOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession);
151DECLASM(int) supdrvNtWrapVMMR0EntryFast(PFNRT pfnVMMR0EntryFast, PVM pVM, unsigned idCpu, unsigned uOperation);
152DECLASM(void) supdrvNtWrapObjDestructor(PFNRT pfnDestruction, void *pvObj, void *pvUser1, void *pvUser2);
153DECLASM(void *) supdrvNtWrapQueryFactoryInterface(PFNRT pfnQueryFactoryInterface, struct SUPDRVFACTORY const *pSupDrvFactory, PSUPDRVSESSION pSession, const char *pszInterfaceUuid);
154DECLASM(int) supdrvNtWrapModuleInit(PFNRT pfnModuleInit);
155DECLASM(void) supdrvNtWrapModuleTerm(PFNRT pfnModuleTerm);
156DECLASM(int) supdrvNtWrapServiceReqHandler(PFNRT pfnServiceReqHandler, PSUPDRVSESSION pSession, uint32_t uOperation, uint64_t u64Arg, PSUPR0SERVICEREQHDR pReqHdr);
157
158DECLASM(int) UNWIND_WRAP(SUPR0ComponentRegisterFactory)(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory);
159DECLASM(int) UNWIND_WRAP(SUPR0ComponentDeregisterFactory)(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory);
160DECLASM(int) UNWIND_WRAP(SUPR0ComponentQueryFactory)(PSUPDRVSESSION pSession, const char *pszName, const char *pszInterfaceUuid, void **ppvFactoryIf);
161DECLASM(void *) UNWIND_WRAP(SUPR0ObjRegister)(PSUPDRVSESSION pSession, SUPDRVOBJTYPE enmType, PFNSUPDRVDESTRUCTOR pfnDestructor, void *pvUser1, void *pvUser2);
162DECLASM(int) UNWIND_WRAP(SUPR0ObjAddRef)(void *pvObj, PSUPDRVSESSION pSession);
163DECLASM(int) UNWIND_WRAP(SUPR0ObjRelease)(void *pvObj, PSUPDRVSESSION pSession);
164DECLASM(int) UNWIND_WRAP(SUPR0ObjVerifyAccess)(void *pvObj, PSUPDRVSESSION pSession, const char *pszObjName);
165DECLASM(int) UNWIND_WRAP(SUPR0LockMem)(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages);
166DECLASM(int) UNWIND_WRAP(SUPR0UnlockMem)(PSUPDRVSESSION pSession, RTR3PTR pvR3);
167DECLASM(int) UNWIND_WRAP(SUPR0ContAlloc)(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS pHCPhys);
168DECLASM(int) UNWIND_WRAP(SUPR0ContFree)(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr);
169DECLASM(int) UNWIND_WRAP(SUPR0LowAlloc)(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS paPages);
170DECLASM(int) UNWIND_WRAP(SUPR0LowFree)(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr);
171DECLASM(int) UNWIND_WRAP(SUPR0MemAlloc)(PSUPDRVSESSION pSession, uint32_t cb, PRTR0PTR ppvR0, PRTR3PTR ppvR3);
172DECLASM(int) UNWIND_WRAP(SUPR0MemGetPhys)(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, PSUPPAGE paPages);
173DECLASM(int) UNWIND_WRAP(SUPR0MemFree)(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr);
174DECLASM(int) UNWIND_WRAP(SUPR0PageAlloc)(PSUPDRVSESSION pSession, uint32_t cPages, PRTR3PTR ppvR3, PRTHCPHYS paPages);
175DECLASM(int) UNWIND_WRAP(SUPR0PageFree)(PSUPDRVSESSION pSession, RTR3PTR pvR3);
176//DECLASM(int) UNWIND_WRAP(SUPR0Printf)(const char *pszFormat, ...);
177DECLASM(SUPPAGINGMODE) UNWIND_WRAP(SUPR0GetPagingMode)(void);
178DECLASM(void *) UNWIND_WRAP(RTMemAlloc)(size_t cb) RT_NO_THROW;
179DECLASM(void *) UNWIND_WRAP(RTMemAllocZ)(size_t cb) RT_NO_THROW;
180DECLASM(void) UNWIND_WRAP(RTMemFree)(void *pv) RT_NO_THROW;
181DECLASM(void *) UNWIND_WRAP(RTMemDup)(const void *pvSrc, size_t cb) RT_NO_THROW;
182DECLASM(void *) UNWIND_WRAP(RTMemDupEx)(const void *pvSrc, size_t cbSrc, size_t cbExtra) RT_NO_THROW;
183DECLASM(void *) UNWIND_WRAP(RTMemRealloc)(void *pvOld, size_t cbNew) RT_NO_THROW;
184DECLASM(int) UNWIND_WRAP(RTR0MemObjAllocLow)(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable);
185DECLASM(int) UNWIND_WRAP(RTR0MemObjAllocPage)(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable);
186DECLASM(int) UNWIND_WRAP(RTR0MemObjAllocPhys)(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest);
187DECLASM(int) UNWIND_WRAP(RTR0MemObjAllocPhysNC)(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest);
188DECLASM(int) UNWIND_WRAP(RTR0MemObjAllocCont)(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable);
189DECLASM(int) UNWIND_WRAP(RTR0MemObjEnterPhys)(PRTR0MEMOBJ pMemObj, RTHCPHYS Phys, size_t cb);
190DECLASM(int) UNWIND_WRAP(RTR0MemObjLockUser)(PRTR0MEMOBJ pMemObj, RTR3PTR R3Ptr, size_t cb, RTR0PROCESS R0Process);
191DECLASM(int) UNWIND_WRAP(RTR0MemObjMapKernel)(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, void *pvFixed, size_t uAlignment, unsigned fProt);
192DECLASM(int) UNWIND_WRAP(RTR0MemObjMapKernelEx)(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, void *pvFixed, size_t uAlignment, unsigned fProt, size_t offSub, size_t cbSub);
193DECLASM(int) UNWIND_WRAP(RTR0MemObjMapUser)(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process);
194/*DECLASM(void *) UNWIND_WRAP(RTR0MemObjAddress)(RTR0MEMOBJ MemObj); - not necessary */
195/*DECLASM(RTR3PTR) UNWIND_WRAP(RTR0MemObjAddressR3)(RTR0MEMOBJ MemObj); - not necessary */
196/*DECLASM(size_t) UNWIND_WRAP(RTR0MemObjSize)(RTR0MEMOBJ MemObj); - not necessary */
197/*DECLASM(bool) UNWIND_WRAP(RTR0MemObjIsMapping)(RTR0MEMOBJ MemObj); - not necessary */
198/*DECLASM(RTHCPHYS) UNWIND_WRAP(RTR0MemObjGetPagePhysAddr)(RTR0MEMOBJ MemObj, size_t iPage); - not necessary */
199DECLASM(int) UNWIND_WRAP(RTR0MemObjFree)(RTR0MEMOBJ MemObj, bool fFreeMappings);
200/* RTProcSelf - not necessary */
201/* RTR0ProcHandleSelf - not necessary */
202DECLASM(int) UNWIND_WRAP(RTSemFastMutexCreate)(PRTSEMFASTMUTEX pMutexSem);
203DECLASM(int) UNWIND_WRAP(RTSemFastMutexDestroy)(RTSEMFASTMUTEX MutexSem);
204DECLASM(int) UNWIND_WRAP(RTSemFastMutexRequest)(RTSEMFASTMUTEX MutexSem);
205DECLASM(int) UNWIND_WRAP(RTSemFastMutexRelease)(RTSEMFASTMUTEX MutexSem);
206DECLASM(int) UNWIND_WRAP(RTSemEventCreate)(PRTSEMEVENT pEventSem);
207DECLASM(int) UNWIND_WRAP(RTSemEventSignal)(RTSEMEVENT EventSem);
208DECLASM(int) UNWIND_WRAP(RTSemEventWait)(RTSEMEVENT EventSem, unsigned cMillies);
209DECLASM(int) UNWIND_WRAP(RTSemEventWaitNoResume)(RTSEMEVENT EventSem, unsigned cMillies);
210DECLASM(int) UNWIND_WRAP(RTSemEventDestroy)(RTSEMEVENT EventSem);
211DECLASM(int) UNWIND_WRAP(RTSemEventMultiCreate)(PRTSEMEVENTMULTI pEventMultiSem);
212DECLASM(int) UNWIND_WRAP(RTSemEventMultiSignal)(RTSEMEVENTMULTI EventMultiSem);
213DECLASM(int) UNWIND_WRAP(RTSemEventMultiReset)(RTSEMEVENTMULTI EventMultiSem);
214DECLASM(int) UNWIND_WRAP(RTSemEventMultiWait)(RTSEMEVENTMULTI EventMultiSem, unsigned cMillies);
215DECLASM(int) UNWIND_WRAP(RTSemEventMultiWaitNoResume)(RTSEMEVENTMULTI EventMultiSem, unsigned cMillies);
216DECLASM(int) UNWIND_WRAP(RTSemEventMultiDestroy)(RTSEMEVENTMULTI EventMultiSem);
217DECLASM(int) UNWIND_WRAP(RTSpinlockCreate)(PRTSPINLOCK pSpinlock);
218DECLASM(int) UNWIND_WRAP(RTSpinlockDestroy)(RTSPINLOCK Spinlock);
219DECLASM(void) UNWIND_WRAP(RTSpinlockAcquire)(RTSPINLOCK Spinlock, PRTSPINLOCKTMP pTmp);
220DECLASM(void) UNWIND_WRAP(RTSpinlockRelease)(RTSPINLOCK Spinlock, PRTSPINLOCKTMP pTmp);
221DECLASM(void) UNWIND_WRAP(RTSpinlockAcquireNoInts)(RTSPINLOCK Spinlock, PRTSPINLOCKTMP pTmp);
222DECLASM(void) UNWIND_WRAP(RTSpinlockReleaseNoInts)(RTSPINLOCK Spinlock, PRTSPINLOCKTMP pTmp);
223/* RTTimeNanoTS - not necessary */
224/* RTTimeMilliTS - not necessary */
225/* RTTimeSystemNanoTS - not necessary */
226/* RTTimeSystemMilliTS - not necessary */
227/* RTThreadNativeSelf - not necessary */
228DECLASM(int) UNWIND_WRAP(RTThreadSleep)(unsigned cMillies);
229DECLASM(bool) UNWIND_WRAP(RTThreadYield)(void);
230#if 0
231/* RTThreadSelf - not necessary */
232DECLASM(int) UNWIND_WRAP(RTThreadCreate)(PRTTHREAD pThread, PFNRTTHREAD pfnThread, void *pvUser, size_t cbStack,
233 RTTHREADTYPE enmType, unsigned fFlags, const char *pszName);
234DECLASM(RTNATIVETHREAD) UNWIND_WRAP(RTThreadGetNative)(RTTHREAD Thread);
235DECLASM(int) UNWIND_WRAP(RTThreadWait)(RTTHREAD Thread, unsigned cMillies, int *prc);
236DECLASM(int) UNWIND_WRAP(RTThreadWaitNoResume)(RTTHREAD Thread, unsigned cMillies, int *prc);
237DECLASM(const char *) UNWIND_WRAP(RTThreadGetName)(RTTHREAD Thread);
238DECLASM(const char *) UNWIND_WRAP(RTThreadSelfName)(void);
239DECLASM(RTTHREADTYPE) UNWIND_WRAP(RTThreadGetType)(RTTHREAD Thread);
240DECLASM(int) UNWIND_WRAP(RTThreadUserSignal)(RTTHREAD Thread);
241DECLASM(int) UNWIND_WRAP(RTThreadUserReset)(RTTHREAD Thread);
242DECLASM(int) UNWIND_WRAP(RTThreadUserWait)(RTTHREAD Thread, unsigned cMillies);
243DECLASM(int) UNWIND_WRAP(RTThreadUserWaitNoResume)(RTTHREAD Thread, unsigned cMillies);
244#endif
245/* RTLogDefaultInstance - a bit of a gamble, but we do not want the overhead! */
246/* RTMpCpuId - not necessary */
247/* RTMpCpuIdFromSetIndex - not necessary */
248/* RTMpCpuIdToSetIndex - not necessary */
249/* RTMpIsCpuPossible - not necessary */
250/* RTMpGetCount - not necessary */
251/* RTMpGetMaxCpuId - not necessary */
252/* RTMpGetOnlineCount - not necessary */
253/* RTMpGetOnlineSet - not necessary */
254/* RTMpGetSet - not necessary */
255/* RTMpIsCpuOnline - not necessary */
256DECLASM(int) UNWIND_WRAP(RTMpOnAll)(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2);
257DECLASM(int) UNWIND_WRAP(RTMpOnOthers)(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2);
258DECLASM(int) UNWIND_WRAP(RTMpOnSpecific)(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2);
259/* RTLogRelDefaultInstance - not necessary. */
260DECLASM(int) UNWIND_WRAP(RTLogSetDefaultInstanceThread)(PRTLOGGER pLogger, uintptr_t uKey);
261/* RTLogLogger - can't wrap this buster. */
262/* RTLogLoggerEx - can't wrap this buster. */
263DECLASM(void) UNWIND_WRAP(RTLogLoggerExV)(PRTLOGGER pLogger, unsigned fFlags, unsigned iGroup, const char *pszFormat, va_list args);
264/* RTLogPrintf - can't wrap this buster. */ /** @todo provide va_list log wrappers in RuntimeR0. */
265DECLASM(void) UNWIND_WRAP(RTLogPrintfV)(const char *pszFormat, va_list args);
266DECLASM(void) UNWIND_WRAP(AssertMsg1)(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction);
267/* AssertMsg2 - can't wrap this buster. */
268#endif /* RT_WITH_W64_UNWIND_HACK */
269
270
271/*******************************************************************************
272* Global Variables *
273*******************************************************************************/
274/**
275 * Array of the R0 SUP API.
276 */
277static SUPFUNC g_aFunctions[] =
278{
279 /* name function */
280 /* Entries with absolute addresses determined at runtime, fixup
281 code makes ugly ASSUMPTIONS about the order here: */
282 { "SUPR0AbsIs64bit", (void *)0 },
283 { "SUPR0Abs64bitKernelCS", (void *)0 },
284 { "SUPR0Abs64bitKernelSS", (void *)0 },
285 { "SUPR0Abs64bitKernelDS", (void *)0 },
286 { "SUPR0AbsKernelCS", (void *)0 },
287 { "SUPR0AbsKernelSS", (void *)0 },
288 { "SUPR0AbsKernelDS", (void *)0 },
289 { "SUPR0AbsKernelES", (void *)0 },
290 { "SUPR0AbsKernelFS", (void *)0 },
291 { "SUPR0AbsKernelGS", (void *)0 },
292 /* Normal function pointers: */
293 { "SUPR0ComponentRegisterFactory", (void *)UNWIND_WRAP(SUPR0ComponentRegisterFactory) },
294 { "SUPR0ComponentDeregisterFactory", (void *)UNWIND_WRAP(SUPR0ComponentDeregisterFactory) },
295 { "SUPR0ComponentQueryFactory", (void *)UNWIND_WRAP(SUPR0ComponentQueryFactory) },
296 { "SUPR0ObjRegister", (void *)UNWIND_WRAP(SUPR0ObjRegister) },
297 { "SUPR0ObjAddRef", (void *)UNWIND_WRAP(SUPR0ObjAddRef) },
298 { "SUPR0ObjRelease", (void *)UNWIND_WRAP(SUPR0ObjRelease) },
299 { "SUPR0ObjVerifyAccess", (void *)UNWIND_WRAP(SUPR0ObjVerifyAccess) },
300 { "SUPR0LockMem", (void *)UNWIND_WRAP(SUPR0LockMem) },
301 { "SUPR0UnlockMem", (void *)UNWIND_WRAP(SUPR0UnlockMem) },
302 { "SUPR0ContAlloc", (void *)UNWIND_WRAP(SUPR0ContAlloc) },
303 { "SUPR0ContFree", (void *)UNWIND_WRAP(SUPR0ContFree) },
304 { "SUPR0LowAlloc", (void *)UNWIND_WRAP(SUPR0LowAlloc) },
305 { "SUPR0LowFree", (void *)UNWIND_WRAP(SUPR0LowFree) },
306 { "SUPR0MemAlloc", (void *)UNWIND_WRAP(SUPR0MemAlloc) },
307 { "SUPR0MemGetPhys", (void *)UNWIND_WRAP(SUPR0MemGetPhys) },
308 { "SUPR0MemFree", (void *)UNWIND_WRAP(SUPR0MemFree) },
309 { "SUPR0PageAlloc", (void *)UNWIND_WRAP(SUPR0PageAlloc) },
310 { "SUPR0PageFree", (void *)UNWIND_WRAP(SUPR0PageFree) },
311 { "SUPR0Printf", (void *)SUPR0Printf }, /** @todo needs wrapping? */
312 { "SUPR0GetPagingMode", (void *)UNWIND_WRAP(SUPR0GetPagingMode) },
313 { "SUPR0EnableVTx", (void *)SUPR0EnableVTx },
314 { "RTMemAlloc", (void *)UNWIND_WRAP(RTMemAlloc) },
315 { "RTMemAllocZ", (void *)UNWIND_WRAP(RTMemAllocZ) },
316 { "RTMemFree", (void *)UNWIND_WRAP(RTMemFree) },
317 /*{ "RTMemDup", (void *)UNWIND_WRAP(RTMemDup) },
318 { "RTMemDupEx", (void *)UNWIND_WRAP(RTMemDupEx) },*/
319 { "RTMemRealloc", (void *)UNWIND_WRAP(RTMemRealloc) },
320 { "RTR0MemObjAllocLow", (void *)UNWIND_WRAP(RTR0MemObjAllocLow) },
321 { "RTR0MemObjAllocPage", (void *)UNWIND_WRAP(RTR0MemObjAllocPage) },
322 { "RTR0MemObjAllocPhys", (void *)UNWIND_WRAP(RTR0MemObjAllocPhys) },
323 { "RTR0MemObjAllocPhysNC", (void *)UNWIND_WRAP(RTR0MemObjAllocPhysNC) },
324 { "RTR0MemObjAllocCont", (void *)UNWIND_WRAP(RTR0MemObjAllocCont) },
325 { "RTR0MemObjEnterPhys", (void *)UNWIND_WRAP(RTR0MemObjEnterPhys) },
326 { "RTR0MemObjLockUser", (void *)UNWIND_WRAP(RTR0MemObjLockUser) },
327 { "RTR0MemObjMapKernel", (void *)UNWIND_WRAP(RTR0MemObjMapKernel) },
328 { "RTR0MemObjMapKernelEx", (void *)UNWIND_WRAP(RTR0MemObjMapKernelEx) },
329 { "RTR0MemObjMapUser", (void *)UNWIND_WRAP(RTR0MemObjMapUser) },
330 { "RTR0MemObjAddress", (void *)RTR0MemObjAddress },
331 { "RTR0MemObjAddressR3", (void *)RTR0MemObjAddressR3 },
332 { "RTR0MemObjSize", (void *)RTR0MemObjSize },
333 { "RTR0MemObjIsMapping", (void *)RTR0MemObjIsMapping },
334 { "RTR0MemObjGetPagePhysAddr", (void *)RTR0MemObjGetPagePhysAddr },
335 { "RTR0MemObjFree", (void *)UNWIND_WRAP(RTR0MemObjFree) },
336/* These don't work yet on linux - use fast mutexes!
337 { "RTSemMutexCreate", (void *)RTSemMutexCreate },
338 { "RTSemMutexRequest", (void *)RTSemMutexRequest },
339 { "RTSemMutexRelease", (void *)RTSemMutexRelease },
340 { "RTSemMutexDestroy", (void *)RTSemMutexDestroy },
341*/
342 { "RTProcSelf", (void *)RTProcSelf },
343 { "RTR0ProcHandleSelf", (void *)RTR0ProcHandleSelf },
344 { "RTSemFastMutexCreate", (void *)UNWIND_WRAP(RTSemFastMutexCreate) },
345 { "RTSemFastMutexDestroy", (void *)UNWIND_WRAP(RTSemFastMutexDestroy) },
346 { "RTSemFastMutexRequest", (void *)UNWIND_WRAP(RTSemFastMutexRequest) },
347 { "RTSemFastMutexRelease", (void *)UNWIND_WRAP(RTSemFastMutexRelease) },
348 { "RTSemEventCreate", (void *)UNWIND_WRAP(RTSemEventCreate) },
349 { "RTSemEventSignal", (void *)UNWIND_WRAP(RTSemEventSignal) },
350 { "RTSemEventWait", (void *)UNWIND_WRAP(RTSemEventWait) },
351 { "RTSemEventWaitNoResume", (void *)UNWIND_WRAP(RTSemEventWaitNoResume) },
352 { "RTSemEventDestroy", (void *)UNWIND_WRAP(RTSemEventDestroy) },
353 { "RTSemEventMultiCreate", (void *)UNWIND_WRAP(RTSemEventMultiCreate) },
354 { "RTSemEventMultiSignal", (void *)UNWIND_WRAP(RTSemEventMultiSignal) },
355 { "RTSemEventMultiReset", (void *)UNWIND_WRAP(RTSemEventMultiReset) },
356 { "RTSemEventMultiWait", (void *)UNWIND_WRAP(RTSemEventMultiWait) },
357 { "RTSemEventMultiWaitNoResume", (void *)UNWIND_WRAP(RTSemEventMultiWaitNoResume) },
358 { "RTSemEventMultiDestroy", (void *)UNWIND_WRAP(RTSemEventMultiDestroy) },
359 { "RTSpinlockCreate", (void *)UNWIND_WRAP(RTSpinlockCreate) },
360 { "RTSpinlockDestroy", (void *)UNWIND_WRAP(RTSpinlockDestroy) },
361 { "RTSpinlockAcquire", (void *)UNWIND_WRAP(RTSpinlockAcquire) },
362 { "RTSpinlockRelease", (void *)UNWIND_WRAP(RTSpinlockRelease) },
363 { "RTSpinlockAcquireNoInts", (void *)UNWIND_WRAP(RTSpinlockAcquireNoInts) },
364 { "RTSpinlockReleaseNoInts", (void *)UNWIND_WRAP(RTSpinlockReleaseNoInts) },
365 { "RTTimeNanoTS", (void *)RTTimeNanoTS },
366 { "RTTimeMillieTS", (void *)RTTimeMilliTS },
367 { "RTTimeSystemNanoTS", (void *)RTTimeSystemNanoTS },
368 { "RTTimeSystemMillieTS", (void *)RTTimeSystemMilliTS },
369 { "RTThreadNativeSelf", (void *)RTThreadNativeSelf },
370 { "RTThreadSleep", (void *)UNWIND_WRAP(RTThreadSleep) },
371 { "RTThreadYield", (void *)UNWIND_WRAP(RTThreadYield) },
372#if 0 /* Thread APIs, Part 2. */
373 { "RTThreadSelf", (void *)UNWIND_WRAP(RTThreadSelf) },
374 { "RTThreadCreate", (void *)UNWIND_WRAP(RTThreadCreate) }, /** @todo need to wrap the callback */
375 { "RTThreadGetNative", (void *)UNWIND_WRAP(RTThreadGetNative) },
376 { "RTThreadWait", (void *)UNWIND_WRAP(RTThreadWait) },
377 { "RTThreadWaitNoResume", (void *)UNWIND_WRAP(RTThreadWaitNoResume) },
378 { "RTThreadGetName", (void *)UNWIND_WRAP(RTThreadGetName) },
379 { "RTThreadSelfName", (void *)UNWIND_WRAP(RTThreadSelfName) },
380 { "RTThreadGetType", (void *)UNWIND_WRAP(RTThreadGetType) },
381 { "RTThreadUserSignal", (void *)UNWIND_WRAP(RTThreadUserSignal) },
382 { "RTThreadUserReset", (void *)UNWIND_WRAP(RTThreadUserReset) },
383 { "RTThreadUserWait", (void *)UNWIND_WRAP(RTThreadUserWait) },
384 { "RTThreadUserWaitNoResume", (void *)UNWIND_WRAP(RTThreadUserWaitNoResume) },
385#endif
386 { "RTLogDefaultInstance", (void *)RTLogDefaultInstance },
387 { "RTMpCpuId", (void *)RTMpCpuId },
388 { "RTMpCpuIdFromSetIndex", (void *)RTMpCpuIdFromSetIndex },
389 { "RTMpCpuIdToSetIndex", (void *)RTMpCpuIdToSetIndex },
390 { "RTMpIsCpuPossible", (void *)RTMpIsCpuPossible },
391 { "RTMpGetCount", (void *)RTMpGetCount },
392 { "RTMpGetMaxCpuId", (void *)RTMpGetMaxCpuId },
393 { "RTMpGetOnlineCount", (void *)RTMpGetOnlineCount },
394 { "RTMpGetOnlineSet", (void *)RTMpGetOnlineSet },
395 { "RTMpGetSet", (void *)RTMpGetSet },
396 { "RTMpIsCpuOnline", (void *)RTMpIsCpuOnline },
397 { "RTMpOnAll", (void *)UNWIND_WRAP(RTMpOnAll) },
398 { "RTMpOnOthers", (void *)UNWIND_WRAP(RTMpOnOthers) },
399 { "RTMpOnSpecific", (void *)UNWIND_WRAP(RTMpOnSpecific) },
400 { "RTPowerNotificationRegister", (void *)RTPowerNotificationRegister },
401 { "RTPowerNotificationDeregister", (void *)RTPowerNotificationDeregister },
402 { "RTLogRelDefaultInstance", (void *)RTLogRelDefaultInstance },
403 { "RTLogSetDefaultInstanceThread", (void *)UNWIND_WRAP(RTLogSetDefaultInstanceThread) },
404 { "RTLogLogger", (void *)RTLogLogger }, /** @todo remove this */
405 { "RTLogLoggerEx", (void *)RTLogLoggerEx }, /** @todo remove this */
406 { "RTLogLoggerExV", (void *)UNWIND_WRAP(RTLogLoggerExV) },
407 { "RTLogPrintf", (void *)RTLogPrintf }, /** @todo remove this */
408 { "RTLogPrintfV", (void *)UNWIND_WRAP(RTLogPrintfV) },
409 { "AssertMsg1", (void *)UNWIND_WRAP(AssertMsg1) },
410 { "AssertMsg2", (void *)AssertMsg2 }, /** @todo replace this by RTAssertMsg2V */
411#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS)
412 { "RTR0AssertPanicSystem", (void *)RTR0AssertPanicSystem },
413#endif
414#if defined(RT_OS_DARWIN)
415 { "RTAssertMsg1", (void *)RTAssertMsg1 },
416 { "RTAssertMsg2", (void *)RTAssertMsg2 },
417 { "RTAssertMsg2V", (void *)RTAssertMsg2V },
418#endif
419};
420
421#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS)
422/**
423 * Drag in the rest of IRPT since we share it with the
424 * rest of the kernel modules on darwin.
425 */
426PFNRT g_apfnVBoxDrvIPRTDeps[] =
427{
428 (PFNRT)RTCrc32,
429 (PFNRT)RTErrConvertFromErrno,
430 (PFNRT)RTNetIPv4IsHdrValid,
431 (PFNRT)RTNetIPv4TCPChecksum,
432 (PFNRT)RTNetIPv4UDPChecksum,
433 (PFNRT)RTUuidCompare,
434 (PFNRT)RTUuidCompareStr,
435 (PFNRT)RTUuidFromStr,
436 NULL
437};
438#endif /* RT_OS_DARWIN || RT_OS_SOLARIS */
439
440
441/**
442 * Initializes the device extentsion structure.
443 *
444 * @returns IPRT status code.
445 * @param pDevExt The device extension to initialize.
446 */
447int VBOXCALL supdrvInitDevExt(PSUPDRVDEVEXT pDevExt)
448{
449 int rc;
450
451#ifdef SUPDRV_WITH_RELEASE_LOGGER
452 /*
453 * Create the release log.
454 */
455 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
456 PRTLOGGER pRelLogger;
457 rc = RTLogCreate(&pRelLogger, 0 /* fFlags */, "all",
458 "VBOX_RELEASE_LOG", RT_ELEMENTS(s_apszGroups), s_apszGroups,
459 RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER, NULL);
460 if (RT_SUCCESS(rc))
461 RTLogRelSetDefaultInstance(pRelLogger);
462#endif
463
464 /*
465 * Initialize it.
466 */
467 memset(pDevExt, 0, sizeof(*pDevExt));
468 rc = RTSpinlockCreate(&pDevExt->Spinlock);
469 if (!rc)
470 {
471 rc = RTSemFastMutexCreate(&pDevExt->mtxLdr);
472 if (!rc)
473 {
474 rc = RTSemFastMutexCreate(&pDevExt->mtxComponentFactory);
475 if (!rc)
476 {
477 rc = RTSemFastMutexCreate(&pDevExt->mtxGip);
478 if (!rc)
479 {
480 rc = supdrvGipCreate(pDevExt);
481 if (RT_SUCCESS(rc))
482 {
483 pDevExt->u32Cookie = BIRD; /** @todo make this random? */
484
485 /*
486 * Fixup the absolute symbols.
487 *
488 * Because of the table indexing assumptions we'll do #ifdef orgy here rather
489 * than distributing this to OS specific files. At least for now.
490 */
491#ifdef RT_OS_DARWIN
492 if (SUPR0GetPagingMode() >= SUPPAGINGMODE_AMD64)
493 {
494 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
495 g_aFunctions[1].pfn = (void *)0x80; /* SUPR0Abs64bitKernelCS - KERNEL64_CS, seg.h */
496 g_aFunctions[2].pfn = (void *)0x88; /* SUPR0Abs64bitKernelSS - KERNEL64_SS, seg.h */
497 g_aFunctions[3].pfn = (void *)0x88; /* SUPR0Abs64bitKernelDS - KERNEL64_SS, seg.h */
498 }
499 else
500 g_aFunctions[0].pfn = g_aFunctions[1].pfn = g_aFunctions[2].pfn = g_aFunctions[4].pfn = (void *)0;
501 g_aFunctions[4].pfn = (void *)0x08; /* SUPR0AbsKernelCS - KERNEL_CS, seg.h */
502 g_aFunctions[5].pfn = (void *)0x10; /* SUPR0AbsKernelSS - KERNEL_DS, seg.h */
503 g_aFunctions[6].pfn = (void *)0x10; /* SUPR0AbsKernelDS - KERNEL_DS, seg.h */
504 g_aFunctions[7].pfn = (void *)0x10; /* SUPR0AbsKernelES - KERNEL_DS, seg.h */
505#else
506# if ARCH_BITS == 64
507 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
508 g_aFunctions[1].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0Abs64bitKernelCS */
509 g_aFunctions[2].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0Abs64bitKernelSS */
510 g_aFunctions[3].pfn = (void *)(uintptr_t)ASMGetDS(); /* SUPR0Abs64bitKernelDS */
511# elif ARCH_BITS == 32
512 g_aFunctions[0].pfn = g_aFunctions[1].pfn = g_aFunctions[2].pfn = g_aFunctions[4].pfn = (void *)0;
513# endif
514 g_aFunctions[4].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0AbsKernelCS */
515 g_aFunctions[5].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0AbsKernelSS */
516 g_aFunctions[6].pfn = (void *)(uintptr_t)ASMGetDS(); /* SUPR0AbsKernelDS */
517 g_aFunctions[7].pfn = (void *)(uintptr_t)ASMGetES(); /* SUPR0AbsKernelES */
518#endif
519 g_aFunctions[8].pfn = (void *)(uintptr_t)ASMGetFS(); /* SUPR0AbsKernelFS */
520 g_aFunctions[9].pfn = (void *)(uintptr_t)ASMGetGS(); /* SUPR0AbsKernelGS */
521 return VINF_SUCCESS;
522 }
523
524 RTSemFastMutexDestroy(pDevExt->mtxGip);
525 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
526 }
527 RTSemFastMutexDestroy(pDevExt->mtxComponentFactory);
528 pDevExt->mtxComponentFactory = NIL_RTSEMFASTMUTEX;
529 }
530 RTSemFastMutexDestroy(pDevExt->mtxLdr);
531 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
532 }
533 RTSpinlockDestroy(pDevExt->Spinlock);
534 pDevExt->Spinlock = NIL_RTSPINLOCK;
535 }
536#ifdef SUPDRV_WITH_RELEASE_LOGGER
537 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
538 RTLogDestroy(RTLogSetDefaultInstance(NULL));
539#endif
540
541 return rc;
542}
543
544
545/**
546 * Delete the device extension (e.g. cleanup members).
547 *
548 * @param pDevExt The device extension to delete.
549 */
550void VBOXCALL supdrvDeleteDevExt(PSUPDRVDEVEXT pDevExt)
551{
552 PSUPDRVOBJ pObj;
553 PSUPDRVUSAGE pUsage;
554
555 /*
556 * Kill mutexes and spinlocks.
557 */
558 RTSemFastMutexDestroy(pDevExt->mtxGip);
559 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
560 RTSemFastMutexDestroy(pDevExt->mtxLdr);
561 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
562 RTSpinlockDestroy(pDevExt->Spinlock);
563 pDevExt->Spinlock = NIL_RTSPINLOCK;
564 RTSemFastMutexDestroy(pDevExt->mtxComponentFactory);
565 pDevExt->mtxComponentFactory = NIL_RTSEMFASTMUTEX;
566
567 /*
568 * Free lists.
569 */
570 /* objects. */
571 pObj = pDevExt->pObjs;
572#if !defined(DEBUG_bird) || !defined(RT_OS_LINUX) /* breaks unloading, temporary, remove me! */
573 Assert(!pObj); /* (can trigger on forced unloads) */
574#endif
575 pDevExt->pObjs = NULL;
576 while (pObj)
577 {
578 void *pvFree = pObj;
579 pObj = pObj->pNext;
580 RTMemFree(pvFree);
581 }
582
583 /* usage records. */
584 pUsage = pDevExt->pUsageFree;
585 pDevExt->pUsageFree = NULL;
586 while (pUsage)
587 {
588 void *pvFree = pUsage;
589 pUsage = pUsage->pNext;
590 RTMemFree(pvFree);
591 }
592
593 /* kill the GIP. */
594 supdrvGipDestroy(pDevExt);
595
596#ifdef SUPDRV_WITH_RELEASE_LOGGER
597 /* destroy the loggers. */
598 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
599 RTLogDestroy(RTLogSetDefaultInstance(NULL));
600#endif
601}
602
603
604/**
605 * Create session.
606 *
607 * @returns IPRT status code.
608 * @param pDevExt Device extension.
609 * @param fUser Flag indicating whether this is a user or kernel session.
610 * @param ppSession Where to store the pointer to the session data.
611 */
612int VBOXCALL supdrvCreateSession(PSUPDRVDEVEXT pDevExt, bool fUser, PSUPDRVSESSION *ppSession)
613{
614 /*
615 * Allocate memory for the session data.
616 */
617 int rc = VERR_NO_MEMORY;
618 PSUPDRVSESSION pSession = *ppSession = (PSUPDRVSESSION)RTMemAllocZ(sizeof(*pSession));
619 if (pSession)
620 {
621 /* Initialize session data. */
622 rc = RTSpinlockCreate(&pSession->Spinlock);
623 if (!rc)
624 {
625 Assert(pSession->Spinlock != NIL_RTSPINLOCK);
626 pSession->pDevExt = pDevExt;
627 pSession->u32Cookie = BIRD_INV;
628 /*pSession->pLdrUsage = NULL;
629 pSession->pVM = NULL;
630 pSession->pUsage = NULL;
631 pSession->pGip = NULL;
632 pSession->fGipReferenced = false;
633 pSession->Bundle.cUsed = 0; */
634 pSession->Uid = NIL_RTUID;
635 pSession->Gid = NIL_RTGID;
636 if (fUser)
637 {
638 pSession->Process = RTProcSelf();
639 pSession->R0Process = RTR0ProcHandleSelf();
640 }
641 else
642 {
643 pSession->Process = NIL_RTPROCESS;
644 pSession->R0Process = NIL_RTR0PROCESS;
645 }
646
647 LogFlow(("Created session %p initial cookie=%#x\n", pSession, pSession->u32Cookie));
648 return VINF_SUCCESS;
649 }
650
651 RTMemFree(pSession);
652 *ppSession = NULL;
653 Log(("Failed to create spinlock, rc=%d!\n", rc));
654 }
655
656 return rc;
657}
658
659
660/**
661 * Shared code for cleaning up a session.
662 *
663 * @param pDevExt Device extension.
664 * @param pSession Session data.
665 * This data will be freed by this routine.
666 */
667void VBOXCALL supdrvCloseSession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
668{
669 /*
670 * Cleanup the session first.
671 */
672 supdrvCleanupSession(pDevExt, pSession);
673
674 /*
675 * Free the rest of the session stuff.
676 */
677 RTSpinlockDestroy(pSession->Spinlock);
678 pSession->Spinlock = NIL_RTSPINLOCK;
679 pSession->pDevExt = NULL;
680 RTMemFree(pSession);
681 LogFlow(("supdrvCloseSession: returns\n"));
682}
683
684
/**
 * Shared code for cleaning up a session (but not quite freeing it).
 *
 * This is primarily intended for MAC OS X where we have to clean up the memory
 * stuff before the file handle is closed.
 *
 * Releases, in order: per-session loggers, object references (running the
 * object destructors when the last reference goes), locked/allocated memory
 * bundles, component factory registrations, loaded image references, and
 * finally the ring-3 GIP mapping.
 *
 * @param   pDevExt     Device extension.
 * @param   pSession    Session data.
 *                      This data will be freed by this routine.
 */
void VBOXCALL supdrvCleanupSession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
{
    PSUPDRVBUNDLE pBundle;
    LogFlow(("supdrvCleanupSession: pSession=%p\n", pSession));

    /*
     * Remove logger instances related to this session.
     */
    RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pSession);

    /*
     * Release object references made in this session.
     * In theory there should be noone racing us in this session.
     */
    Log2(("release objects - start\n"));
    if (pSession->pUsage)
    {
        RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
        PSUPDRVUSAGE pUsage;
        RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);

        /* Pop one usage record per iteration; the spinlock is dropped while
           destroying an object (destructors may reenter the driver) and
           reacquired at the bottom of the loop. */
        while ((pUsage = pSession->pUsage) != NULL)
        {
            PSUPDRVOBJ pObj = pUsage->pObj;
            pSession->pUsage = pUsage->pNext;

            AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
            if (pUsage->cUsage < pObj->cUsage)
            {
                /* Other sessions still reference the object; just drop ours. */
                pObj->cUsage -= pUsage->cUsage;
                RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
            }
            else
            {
                /* Destroy the object and free the record. */
                /* Unlink pObj from the device-wide object list while still
                   holding the spinlock. */
                if (pDevExt->pObjs == pObj)
                    pDevExt->pObjs = pObj->pNext;
                else
                {
                    PSUPDRVOBJ pObjPrev;
                    for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
                        if (pObjPrev->pNext == pObj)
                        {
                            pObjPrev->pNext = pObj->pNext;
                            break;
                        }
                    Assert(pObjPrev);
                }
                RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);

                /* The destructor is invoked without holding the spinlock. */
                Log(("supdrvCleanupSession: destroying %p/%d (%p/%p) cpid=%RTproc pid=%RTproc dtor=%p\n",
                     pObj, pObj->enmType, pObj->pvUser1, pObj->pvUser2, pObj->CreatorProcess, RTProcSelf(), pObj->pfnDestructor));
                if (pObj->pfnDestructor)
#ifdef RT_WITH_W64_UNWIND_HACK
                    supdrvNtWrapObjDestructor((PFNRT)pObj->pfnDestructor, pObj, pObj->pvUser1, pObj->pvUser2);
#else
                    pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
#endif
                RTMemFree(pObj);
            }

            /* free it and continue. */
            RTMemFree(pUsage);

            RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
        }

        RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
        AssertMsg(!pSession->pUsage, ("Some buster reregistered an object during desturction!\n"));
    }
    Log2(("release objects - done\n"));

    /*
     * Release memory allocated in the session.
     *
     * We do not serialize this as we assume that the application will
     * not allocated memory while closing the file handle object.
     */
    Log2(("freeing memory:\n"));
    pBundle = &pSession->Bundle;
    while (pBundle)
    {
        PSUPDRVBUNDLE pToFree;
        unsigned i;

        /*
         * Check and unlock all entries in the bundle.
         */
        for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
        {
            if (pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ)
            {
                int rc;
                Log2(("eType=%d pvR0=%p pvR3=%p cb=%ld\n", pBundle->aMem[i].eType, RTR0MemObjAddress(pBundle->aMem[i].MemObj),
                      (void *)RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3), (long)RTR0MemObjSize(pBundle->aMem[i].MemObj)));
                /* Free the ring-3 mapping (if any) before the backing object. */
                if (pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ)
                {
                    rc = RTR0MemObjFree(pBundle->aMem[i].MapObjR3, false);
                    AssertRC(rc); /** @todo figure out how to handle this. */
                    pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
                }
                rc = RTR0MemObjFree(pBundle->aMem[i].MemObj, true /* fFreeMappings */);
                AssertRC(rc); /** @todo figure out how to handle this. */
                pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
                pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
            }
        }

        /*
         * Advance and free previous bundle.
         */
        pToFree = pBundle;
        pBundle = pBundle->pNext;

        pToFree->pNext = NULL;
        pToFree->cUsed = 0;
        /* The head bundle is embedded in the session structure, so only
           chained bundles are heap-freed. */
        if (pToFree != &pSession->Bundle)
            RTMemFree(pToFree);
    }
    Log2(("freeing memory - done\n"));

    /*
     * Deregister component factories.
     */
    RTSemFastMutexRequest(pDevExt->mtxComponentFactory);
    Log2(("deregistering component factories:\n"));
    if (pDevExt->pComponentFactoryHead)
    {
        /* Walk the singly linked list, unlinking every registration that was
           made through this session. */
        PSUPDRVFACTORYREG pPrev = NULL;
        PSUPDRVFACTORYREG pCur = pDevExt->pComponentFactoryHead;
        while (pCur)
        {
            if (pCur->pSession == pSession)
            {
                /* unlink it */
                PSUPDRVFACTORYREG pNext = pCur->pNext;
                if (pPrev)
                    pPrev->pNext = pNext;
                else
                    pDevExt->pComponentFactoryHead = pNext;

                /* free it */
                pCur->pNext = NULL;
                pCur->pSession = NULL;
                pCur->pFactory = NULL;
                RTMemFree(pCur);

                /* next */
                pCur = pNext;
            }
            else
            {
                /* next */
                pPrev = pCur;
                pCur = pCur->pNext;
            }
        }
    }
    RTSemFastMutexRelease(pDevExt->mtxComponentFactory);
    Log2(("deregistering component factories - done\n"));

    /*
     * Loaded images needs to be dereferenced and possibly freed up.
     */
    RTSemFastMutexRequest(pDevExt->mtxLdr);
    Log2(("freeing images:\n"));
    if (pSession->pLdrUsage)
    {
        PSUPDRVLDRUSAGE pUsage = pSession->pLdrUsage;
        pSession->pLdrUsage = NULL;
        while (pUsage)
        {
            void *pvFree = pUsage;
            PSUPDRVLDRIMAGE pImage = pUsage->pImage;
            /* Drop this session's references; free the image entirely when
               ours were the last ones. */
            if (pImage->cUsage > pUsage->cUsage)
                pImage->cUsage -= pUsage->cUsage;
            else
                supdrvLdrFree(pDevExt, pImage);
            pUsage->pImage = NULL;
            pUsage = pUsage->pNext;
            RTMemFree(pvFree);
        }
    }
    RTSemFastMutexRelease(pDevExt->mtxLdr);
    Log2(("freeing images - done\n"));

    /*
     * Unmap the GIP.
     */
    Log2(("umapping GIP:\n"));
    if (pSession->GipMapObjR3 != NIL_RTR0MEMOBJ)
    {
        SUPR0GipUnmap(pSession);
        pSession->fGipReferenced = 0;
    }
    Log2(("umapping GIP - done\n"));
}
892
893
894/**
895 * Fast path I/O Control worker.
896 *
897 * @returns VBox status code that should be passed down to ring-3 unchanged.
898 * @param uIOCtl Function number.
899 * @param idCpu VMCPU id.
900 * @param pDevExt Device extention.
901 * @param pSession Session data.
902 */
903int VBOXCALL supdrvIOCtlFast(uintptr_t uIOCtl, unsigned idCpu, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
904{
905 /*
906 * We check the two prereqs after doing this only to allow the compiler to optimize things better.
907 */
908 if (RT_LIKELY(pSession->pVM && pDevExt->pfnVMMR0EntryFast))
909 {
910 switch (uIOCtl)
911 {
912 case SUP_IOCTL_FAST_DO_RAW_RUN:
913#ifdef RT_WITH_W64_UNWIND_HACK
914 supdrvNtWrapVMMR0EntryFast((PFNRT)pDevExt->pfnVMMR0EntryFast, pSession->pVM, idCpu, SUP_VMMR0_DO_RAW_RUN);
915#else
916 pDevExt->pfnVMMR0EntryFast(pSession->pVM, idCpu, SUP_VMMR0_DO_RAW_RUN);
917#endif
918 break;
919 case SUP_IOCTL_FAST_DO_HWACC_RUN:
920#ifdef RT_WITH_W64_UNWIND_HACK
921 supdrvNtWrapVMMR0EntryFast((PFNRT)pDevExt->pfnVMMR0EntryFast, pSession->pVM, idCpu, SUP_VMMR0_DO_HWACC_RUN);
922#else
923 pDevExt->pfnVMMR0EntryFast(pSession->pVM, idCpu, SUP_VMMR0_DO_HWACC_RUN);
924#endif
925 break;
926 case SUP_IOCTL_FAST_DO_NOP:
927#ifdef RT_WITH_W64_UNWIND_HACK
928 supdrvNtWrapVMMR0EntryFast((PFNRT)pDevExt->pfnVMMR0EntryFast, pSession->pVM, idCpu, SUP_VMMR0_DO_NOP);
929#else
930 pDevExt->pfnVMMR0EntryFast(pSession->pVM, idCpu, SUP_VMMR0_DO_NOP);
931#endif
932 break;
933 default:
934 return VERR_INTERNAL_ERROR;
935 }
936 return VINF_SUCCESS;
937 }
938 return VERR_INTERNAL_ERROR;
939}
940
941
/**
 * Helper for supdrvIOCtl. Check if pszStr contains any character of pszChars.
 * We would use strpbrk here if this function would be contained in the RedHat kABI white
 * list, see http://www.kerneldrivers.org/RHEL5.
 *
 * @return 1 if pszStr does contain any character of pszChars, 0 otherwise.
 * @param    pszStr     String to check
 * @param    pszChars   Character set
 */
static int supdrvCheckInvalidChar(const char *pszStr, const char *pszChars)
{
    const char *pszCur;
    for (pszCur = pszStr; *pszCur != '\0'; pszCur++)
    {
        /* Scan the forbidden set for the current character. */
        const char *pszSet;
        for (pszSet = pszChars; *pszSet != '\0'; pszSet++)
            if (*pszSet == *pszCur)
                return 1;
    }
    return 0;
}
965
966
/**
 * I/O Control worker.
 *
 * Validates the request header/cookies and dispatches to the per-ioctl
 * handlers.  Return convention is two-level: 0 means the request was
 * recognized and processed (the per-request status is stored in
 * pReqHdr->rc), while a non-zero return indicates a malformed or unknown
 * request.
 *
 * @returns 0 on success.
 * @returns VERR_INVALID_PARAMETER if the request is invalid.
 *
 * @param   uIOCtl      Function number.
 * @param   pDevExt     Device extention.
 * @param   pSession    Session data.
 * @param   pReqHdr     The request header.
 */
int VBOXCALL supdrvIOCtl(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr)
{
    /*
     * Validate the request.
     */
    /* this first check could probably be omitted as its also done by the OS specific code... */
    if (RT_UNLIKELY(    (pReqHdr->fFlags & SUPREQHDR_FLAGS_MAGIC_MASK) != SUPREQHDR_FLAGS_MAGIC
                    ||  pReqHdr->cbIn < sizeof(*pReqHdr)
                    ||  pReqHdr->cbOut < sizeof(*pReqHdr)))
    {
        OSDBGPRINT(("vboxdrv: Bad ioctl request header; cbIn=%#lx cbOut=%#lx fFlags=%#lx\n",
                    (long)pReqHdr->cbIn, (long)pReqHdr->cbOut, (long)pReqHdr->fFlags));
        return VERR_INVALID_PARAMETER;
    }
    /* Only the cookie-exchange request may carry the well-known initial
       cookie; everything else must present the cookies negotiated via
       SUP_IOCTL_COOKIE. */
    if (RT_UNLIKELY(uIOCtl == SUP_IOCTL_COOKIE))
    {
        if (pReqHdr->u32Cookie != SUPCOOKIE_INITIAL_COOKIE)
        {
            OSDBGPRINT(("SUP_IOCTL_COOKIE: bad cookie %#lx\n", (long)pReqHdr->u32Cookie));
            return VERR_INVALID_PARAMETER;
        }
    }
    else if (RT_UNLIKELY(    pReqHdr->u32Cookie != pDevExt->u32Cookie
                         ||  pReqHdr->u32SessionCookie != pSession->u32Cookie))
    {
        OSDBGPRINT(("vboxdrv: bad cookie %#lx / %#lx.\n", (long)pReqHdr->u32Cookie, (long)pReqHdr->u32SessionCookie));
        return VERR_INVALID_PARAMETER;
    }

/*
 * Validation macros
 *
 * NOTE: all of these expect a local variable named 'pReq' to be in scope
 * (each case block below declares one) and they return straight out of
 * supdrvIOCtl, storing the verdict in pReq->Hdr.rc as well.
 */
#define REQ_CHECK_SIZES_EX(Name, cbInExpect, cbOutExpect) \
    do { \
        if (RT_UNLIKELY(pReqHdr->cbIn != (cbInExpect) || pReqHdr->cbOut != (cbOutExpect))) \
        { \
            OSDBGPRINT(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n", \
                        (long)pReq->Hdr.cbIn, (long)(cbInExpect), (long)pReq->Hdr.cbOut, (long)(cbOutExpect))); \
            return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
        } \
    } while (0)

#define REQ_CHECK_SIZES(Name) REQ_CHECK_SIZES_EX(Name, Name ## _SIZE_IN, Name ## _SIZE_OUT)

#define REQ_CHECK_SIZE_IN(Name, cbInExpect) \
    do { \
        if (RT_UNLIKELY(pReqHdr->cbIn != (cbInExpect))) \
        { \
            OSDBGPRINT(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld.\n", \
                        (long)pReq->Hdr.cbIn, (long)(cbInExpect))); \
            return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
        } \
    } while (0)

#define REQ_CHECK_SIZE_OUT(Name, cbOutExpect) \
    do { \
        if (RT_UNLIKELY(pReqHdr->cbOut != (cbOutExpect))) \
        { \
            OSDBGPRINT(( #Name ": Invalid input/output sizes. cbOut=%ld expected %ld.\n", \
                        (long)pReq->Hdr.cbOut, (long)(cbOutExpect))); \
            return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
        } \
    } while (0)

#define REQ_CHECK_EXPR(Name, expr) \
    do { \
        if (RT_UNLIKELY(!(expr))) \
        { \
            OSDBGPRINT(( #Name ": %s\n", #expr)); \
            return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
        } \
    } while (0)

#define REQ_CHECK_EXPR_FMT(expr, fmt) \
    do { \
        if (RT_UNLIKELY(!(expr))) \
        { \
            OSDBGPRINT( fmt ); \
            return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
        } \
    } while (0)


    /*
     * The switch.
     */
    switch (SUP_CTL_CODE_NO_SIZE(uIOCtl))
    {
        case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_COOKIE):
        {
            PSUPCOOKIE pReq = (PSUPCOOKIE)pReqHdr;
            REQ_CHECK_SIZES(SUP_IOCTL_COOKIE);
            if (strncmp(pReq->u.In.szMagic, SUPCOOKIE_MAGIC, sizeof(pReq->u.In.szMagic)))
            {
                OSDBGPRINT(("SUP_IOCTL_COOKIE: invalid magic %.16s\n", pReq->u.In.szMagic));
                pReq->Hdr.rc = VERR_INVALID_MAGIC;
                return 0;
            }

#if 0
            /*
             * Call out to the OS specific code and let it do permission checks on the
             * client process.
             */
            if (!supdrvOSValidateClientProcess(pDevExt, pSession))
            {
                pReq->u.Out.u32Cookie         = 0xffffffff;
                pReq->u.Out.u32SessionCookie  = 0xffffffff;
                pReq->u.Out.u32SessionVersion = 0xffffffff;
                pReq->u.Out.u32DriverVersion  = SUPDRV_IOC_VERSION;
                pReq->u.Out.pSession          = NULL;
                pReq->u.Out.cFunctions        = 0;
                pReq->Hdr.rc = VERR_PERMISSION_DENIED;
                return 0;
            }
#endif

            /*
             * Match the version.
             * The current logic is very simple, match the major interface version.
             */
            if (    pReq->u.In.u32MinVersion > SUPDRV_IOC_VERSION
                ||  (pReq->u.In.u32MinVersion & 0xffff0000) != (SUPDRV_IOC_VERSION & 0xffff0000))
            {
                OSDBGPRINT(("SUP_IOCTL_COOKIE: Version mismatch. Requested: %#x  Min: %#x  Current: %#x\n",
                            pReq->u.In.u32ReqVersion, pReq->u.In.u32MinVersion, SUPDRV_IOC_VERSION));
                pReq->u.Out.u32Cookie         = 0xffffffff;
                pReq->u.Out.u32SessionCookie  = 0xffffffff;
                pReq->u.Out.u32SessionVersion = 0xffffffff;
                pReq->u.Out.u32DriverVersion  = SUPDRV_IOC_VERSION;
                pReq->u.Out.pSession          = NULL;
                pReq->u.Out.cFunctions        = 0;
                pReq->Hdr.rc = VERR_VERSION_MISMATCH;
                return 0;
            }

            /*
             * Fill in return data and be gone.
             * N.B. The first one to change SUPDRV_IOC_VERSION shall makes sure that
             *      u32SessionVersion <= u32ReqVersion!
             */
            /** @todo Somehow validate the client and negotiate a secure cookie... */
            pReq->u.Out.u32Cookie         = pDevExt->u32Cookie;
            pReq->u.Out.u32SessionCookie  = pSession->u32Cookie;
            pReq->u.Out.u32SessionVersion = SUPDRV_IOC_VERSION;
            pReq->u.Out.u32DriverVersion  = SUPDRV_IOC_VERSION;
            pReq->u.Out.pSession          = pSession;
            pReq->u.Out.cFunctions        = sizeof(g_aFunctions) / sizeof(g_aFunctions[0]);
            pReq->Hdr.rc = VINF_SUCCESS;
            return 0;
        }

        case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_QUERY_FUNCS(0)):
        {
            /* validate */
            PSUPQUERYFUNCS pReq = (PSUPQUERYFUNCS)pReqHdr;
            REQ_CHECK_SIZES_EX(SUP_IOCTL_QUERY_FUNCS, SUP_IOCTL_QUERY_FUNCS_SIZE_IN, SUP_IOCTL_QUERY_FUNCS_SIZE_OUT(RT_ELEMENTS(g_aFunctions)));

            /* execute */
            /* Copy out the whole SUPR0 function table for ring-3 symbol resolution. */
            pReq->u.Out.cFunctions = RT_ELEMENTS(g_aFunctions);
            memcpy(&pReq->u.Out.aFunctions[0], g_aFunctions, sizeof(g_aFunctions));
            pReq->Hdr.rc = VINF_SUCCESS;
            return 0;
        }

        case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_IDT_INSTALL):
        {
            /* validate */
            PSUPIDTINSTALL pReq = (PSUPIDTINSTALL)pReqHdr;
            REQ_CHECK_SIZES(SUP_IOCTL_IDT_INSTALL);

            /* execute */
            /* IDT patching is no longer supported; always report failure. */
            pReq->u.Out.u8Idt = 3;
            pReq->Hdr.rc = VERR_NOT_SUPPORTED;
            return 0;
        }

        case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_IDT_REMOVE):
        {
            /* validate */
            PSUPIDTREMOVE pReq = (PSUPIDTREMOVE)pReqHdr;
            REQ_CHECK_SIZES(SUP_IOCTL_IDT_REMOVE);

            /* execute */
            /* See SUP_IOCTL_IDT_INSTALL: obsolete functionality. */
            pReq->Hdr.rc = VERR_NOT_SUPPORTED;
            return 0;
        }

        case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_LOCK):
        {
            /* validate */
            PSUPPAGELOCK pReq = (PSUPPAGELOCK)pReqHdr;
            REQ_CHECK_SIZE_IN(SUP_IOCTL_PAGE_LOCK, SUP_IOCTL_PAGE_LOCK_SIZE_IN);
            REQ_CHECK_SIZE_OUT(SUP_IOCTL_PAGE_LOCK, SUP_IOCTL_PAGE_LOCK_SIZE_OUT(pReq->u.In.cPages));
            REQ_CHECK_EXPR(SUP_IOCTL_PAGE_LOCK, pReq->u.In.cPages > 0);
            /* Rejecting addresses below PAGE_SIZE also rejects NULL. */
            REQ_CHECK_EXPR(SUP_IOCTL_PAGE_LOCK, pReq->u.In.pvR3 >= PAGE_SIZE);

            /* execute */
            pReq->Hdr.rc = SUPR0LockMem(pSession, pReq->u.In.pvR3, pReq->u.In.cPages, &pReq->u.Out.aPages[0]);
            if (RT_FAILURE(pReq->Hdr.rc))
                pReq->Hdr.cbOut = sizeof(pReq->Hdr);
            return 0;
        }

        case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_UNLOCK):
        {
            /* validate */
            PSUPPAGEUNLOCK pReq = (PSUPPAGEUNLOCK)pReqHdr;
            REQ_CHECK_SIZES(SUP_IOCTL_PAGE_UNLOCK);

            /* execute */
            pReq->Hdr.rc = SUPR0UnlockMem(pSession, pReq->u.In.pvR3);
            return 0;
        }

        case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CONT_ALLOC):
        {
            /* validate */
            PSUPCONTALLOC pReq = (PSUPCONTALLOC)pReqHdr;
            REQ_CHECK_SIZES(SUP_IOCTL_CONT_ALLOC);

            /* execute */
            pReq->Hdr.rc = SUPR0ContAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR0, &pReq->u.Out.pvR3, &pReq->u.Out.HCPhys);
            if (RT_FAILURE(pReq->Hdr.rc))
                pReq->Hdr.cbOut = sizeof(pReq->Hdr);
            return 0;
        }

        case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CONT_FREE):
        {
            /* validate */
            PSUPCONTFREE pReq = (PSUPCONTFREE)pReqHdr;
            REQ_CHECK_SIZES(SUP_IOCTL_CONT_FREE);

            /* execute */
            pReq->Hdr.rc = SUPR0ContFree(pSession, (RTHCUINTPTR)pReq->u.In.pvR3);
            return 0;
        }

        case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_OPEN):
        {
            /* validate */
            PSUPLDROPEN pReq = (PSUPLDROPEN)pReqHdr;
            REQ_CHECK_SIZES(SUP_IOCTL_LDR_OPEN);
            REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImage > 0);
            REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImage < _1M*16);
            REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.szName[0]);
            /* The name must be properly terminated and free of shell/path metacharacters. */
            REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, memchr(pReq->u.In.szName, '\0', sizeof(pReq->u.In.szName)));
            REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, !supdrvCheckInvalidChar(pReq->u.In.szName, ";:()[]{}/\\|&*%#@!~`\"'"));

            /* execute */
            pReq->Hdr.rc = supdrvIOCtl_LdrOpen(pDevExt, pSession, pReq);
            return 0;
        }

        case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_LOAD):
        {
            /* validate */
            PSUPLDRLOAD pReq = (PSUPLDRLOAD)pReqHdr;
            /* NOTE(review): 'Name' below is a literal macro argument, so a size
               failure here logs "Name: ..." instead of the ioctl name. */
            REQ_CHECK_EXPR(Name, pReq->Hdr.cbIn >= sizeof(*pReq));
            REQ_CHECK_SIZES_EX(SUP_IOCTL_LDR_LOAD, SUP_IOCTL_LDR_LOAD_SIZE_IN(pReq->u.In.cbImage), SUP_IOCTL_LDR_LOAD_SIZE_OUT);
            REQ_CHECK_EXPR(SUP_IOCTL_LDR_LOAD, pReq->u.In.cSymbols <= 16384);
            /* The symbol table and string table must lie entirely within the image bits. */
            REQ_CHECK_EXPR_FMT(     !pReq->u.In.cSymbols
                               ||   (   pReq->u.In.offSymbols < pReq->u.In.cbImage
                                     && pReq->u.In.offSymbols + pReq->u.In.cSymbols * sizeof(SUPLDRSYM) <= pReq->u.In.cbImage),
                               ("SUP_IOCTL_LDR_LOAD: offSymbols=%#lx cSymbols=%#lx cbImage=%#lx\n", (long)pReq->u.In.offSymbols,
                                (long)pReq->u.In.cSymbols, (long)pReq->u.In.cbImage));
            REQ_CHECK_EXPR_FMT(     !pReq->u.In.cbStrTab
                               ||   (   pReq->u.In.offStrTab < pReq->u.In.cbImage
                                     && pReq->u.In.offStrTab + pReq->u.In.cbStrTab <= pReq->u.In.cbImage
                                     && pReq->u.In.cbStrTab <= pReq->u.In.cbImage),
                               ("SUP_IOCTL_LDR_LOAD: offStrTab=%#lx cbStrTab=%#lx cbImage=%#lx\n", (long)pReq->u.In.offStrTab,
                                (long)pReq->u.In.cbStrTab, (long)pReq->u.In.cbImage));

            if (pReq->u.In.cSymbols)
            {
                /* Validate each symbol record: in-range offset, name offset
                   within the string table, and a terminated name string.
                   (NOTE(review): the offName failure messages print cbImage
                   as the max, while the checks are against cbStrTab.) */
                uint32_t i;
                PSUPLDRSYM paSyms = (PSUPLDRSYM)&pReq->u.In.achImage[pReq->u.In.offSymbols];
                for (i = 0; i < pReq->u.In.cSymbols; i++)
                {
                    REQ_CHECK_EXPR_FMT(paSyms[i].offSymbol < pReq->u.In.cbImage,
                                       ("SUP_IOCTL_LDR_LOAD: sym #%ld: symb off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offSymbol, (long)pReq->u.In.cbImage));
                    REQ_CHECK_EXPR_FMT(paSyms[i].offName < pReq->u.In.cbStrTab,
                                       ("SUP_IOCTL_LDR_LOAD: sym #%ld: name off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbImage));
                    REQ_CHECK_EXPR_FMT(memchr(&pReq->u.In.achImage[pReq->u.In.offStrTab + paSyms[i].offName], '\0', pReq->u.In.cbStrTab - paSyms[i].offName),
                                       ("SUP_IOCTL_LDR_LOAD: sym #%ld: unterminated name! (%#lx / %#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbImage));
                }
            }

            /* execute */
            pReq->Hdr.rc = supdrvIOCtl_LdrLoad(pDevExt, pSession, pReq);
            return 0;
        }

        case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_FREE):
        {
            /* validate */
            PSUPLDRFREE pReq = (PSUPLDRFREE)pReqHdr;
            REQ_CHECK_SIZES(SUP_IOCTL_LDR_FREE);

            /* execute */
            pReq->Hdr.rc = supdrvIOCtl_LdrFree(pDevExt, pSession, pReq);
            return 0;
        }

        case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_GET_SYMBOL):
        {
            /* validate */
            PSUPLDRGETSYMBOL pReq = (PSUPLDRGETSYMBOL)pReqHdr;
            REQ_CHECK_SIZES(SUP_IOCTL_LDR_GET_SYMBOL);
            REQ_CHECK_EXPR(SUP_IOCTL_LDR_GET_SYMBOL, memchr(pReq->u.In.szSymbol, '\0', sizeof(pReq->u.In.szSymbol)));

            /* execute */
            pReq->Hdr.rc = supdrvIOCtl_LdrGetSymbol(pDevExt, pSession, pReq);
            return 0;
        }

        case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_VMMR0(0)):
        {
            /* validate */
            PSUPCALLVMMR0 pReq = (PSUPCALLVMMR0)pReqHdr;
            Log4(("SUP_IOCTL_CALL_VMMR0: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
                  pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));

            /* Two layouts: a bare call (no request packet) or one with an
               embedded SUPVMMR0REQHDR-prefixed packet. */
            if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_VMMR0_SIZE(0))
            {
                REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0, SUP_IOCTL_CALL_VMMR0_SIZE_IN(0), SUP_IOCTL_CALL_VMMR0_SIZE_OUT(0));

                /* execute */
                if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
#ifdef RT_WITH_W64_UNWIND_HACK
                    pReq->Hdr.rc = supdrvNtWrapVMMR0EntryEx((PFNRT)pDevExt->pfnVMMR0EntryEx, pReq->u.In.pVMR0, pReq->u.In.uOperation, NULL, pReq->u.In.u64Arg, pSession);
#else
                    pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pReq->u.In.pVMR0, pReq->u.In.uOperation, NULL, pReq->u.In.u64Arg, pSession);
#endif
                else
                    pReq->Hdr.rc = VERR_WRONG_ORDER;
            }
            else
            {
                PSUPVMMR0REQHDR pVMMReq = (PSUPVMMR0REQHDR)&pReq->abReqPkt[0];
                REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_VMMR0_SIZE(sizeof(SUPVMMR0REQHDR)),
                                   ("SUP_IOCTL_CALL_VMMR0: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_VMMR0_SIZE(sizeof(SUPVMMR0REQHDR))));
                REQ_CHECK_EXPR(SUP_IOCTL_CALL_VMMR0, pVMMReq->u32Magic == SUPVMMR0REQHDR_MAGIC);
                REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0, SUP_IOCTL_CALL_VMMR0_SIZE_IN(pVMMReq->cbReq), SUP_IOCTL_CALL_VMMR0_SIZE_OUT(pVMMReq->cbReq));

                /* execute */
                if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
#ifdef RT_WITH_W64_UNWIND_HACK
                    pReq->Hdr.rc = supdrvNtWrapVMMR0EntryEx((PFNRT)pDevExt->pfnVMMR0EntryEx, pReq->u.In.pVMR0, pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
#else
                    pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pReq->u.In.pVMR0, pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
#endif
                else
                    pReq->Hdr.rc = VERR_WRONG_ORDER;
            }

            /* Log failures at the default level, everything else at Log4. */
            if (    RT_FAILURE(pReq->Hdr.rc)
                &&  pReq->Hdr.rc != VERR_INTERRUPTED
                &&  pReq->Hdr.rc != VERR_TIMEOUT)
                Log(("SUP_IOCTL_CALL_VMMR0: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
                     pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
            else
                Log4(("SUP_IOCTL_CALL_VMMR0: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
                      pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
            return 0;
        }

        case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GET_PAGING_MODE):
        {
            /* validate */
            PSUPGETPAGINGMODE pReq = (PSUPGETPAGINGMODE)pReqHdr;
            REQ_CHECK_SIZES(SUP_IOCTL_GET_PAGING_MODE);

            /* execute */
            pReq->Hdr.rc = VINF_SUCCESS;
            pReq->u.Out.enmMode = SUPR0GetPagingMode();
            return 0;
        }

        case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOW_ALLOC):
        {
            /* validate */
            PSUPLOWALLOC pReq = (PSUPLOWALLOC)pReqHdr;
            REQ_CHECK_EXPR(SUP_IOCTL_LOW_ALLOC, pReq->Hdr.cbIn <= SUP_IOCTL_LOW_ALLOC_SIZE_IN);
            REQ_CHECK_SIZES_EX(SUP_IOCTL_LOW_ALLOC, SUP_IOCTL_LOW_ALLOC_SIZE_IN, SUP_IOCTL_LOW_ALLOC_SIZE_OUT(pReq->u.In.cPages));

            /* execute */
            pReq->Hdr.rc = SUPR0LowAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR0, &pReq->u.Out.pvR3, &pReq->u.Out.aPages[0]);
            if (RT_FAILURE(pReq->Hdr.rc))
                pReq->Hdr.cbOut = sizeof(pReq->Hdr);
            return 0;
        }

        case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOW_FREE):
        {
            /* validate */
            PSUPLOWFREE pReq = (PSUPLOWFREE)pReqHdr;
            REQ_CHECK_SIZES(SUP_IOCTL_LOW_FREE);

            /* execute */
            pReq->Hdr.rc = SUPR0LowFree(pSession, (RTHCUINTPTR)pReq->u.In.pvR3);
            return 0;
        }

        case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_MAP):
        {
            /* validate */
            PSUPGIPMAP pReq = (PSUPGIPMAP)pReqHdr;
            REQ_CHECK_SIZES(SUP_IOCTL_GIP_MAP);

            /* execute */
            pReq->Hdr.rc = SUPR0GipMap(pSession, &pReq->u.Out.pGipR3, &pReq->u.Out.HCPhysGip);
            if (RT_SUCCESS(pReq->Hdr.rc))
                pReq->u.Out.pGipR0 = pDevExt->pGip;
            return 0;
        }

        case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_UNMAP):
        {
            /* validate */
            PSUPGIPUNMAP pReq = (PSUPGIPUNMAP)pReqHdr;
            REQ_CHECK_SIZES(SUP_IOCTL_GIP_UNMAP);

            /* execute */
            pReq->Hdr.rc = SUPR0GipUnmap(pSession);
            return 0;
        }

        case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SET_VM_FOR_FAST):
        {
            /* validate */
            PSUPSETVMFORFAST pReq = (PSUPSETVMFORFAST)pReqHdr;
            REQ_CHECK_SIZES(SUP_IOCTL_SET_VM_FOR_FAST);
            /* NULL (to clear) or a valid page-aligned ring-0 VM pointer. */
            REQ_CHECK_EXPR_FMT(     !pReq->u.In.pVMR0
                               ||   (   VALID_PTR(pReq->u.In.pVMR0)
                                     && !((uintptr_t)pReq->u.In.pVMR0 & (PAGE_SIZE - 1))),
                               ("SUP_IOCTL_SET_VM_FOR_FAST: pVMR0=%p!\n", pReq->u.In.pVMR0));
            /* execute */
            pSession->pVM = pReq->u.In.pVMR0;
            pReq->Hdr.rc = VINF_SUCCESS;
            return 0;
        }

        case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_ALLOC):
        {
            /* validate */
            PSUPPAGEALLOC pReq = (PSUPPAGEALLOC)pReqHdr;
            REQ_CHECK_EXPR(SUP_IOCTL_PAGE_ALLOC, pReq->Hdr.cbIn <= SUP_IOCTL_PAGE_ALLOC_SIZE_IN);
            REQ_CHECK_SIZES_EX(SUP_IOCTL_PAGE_ALLOC, SUP_IOCTL_PAGE_ALLOC_SIZE_IN, SUP_IOCTL_PAGE_ALLOC_SIZE_OUT(pReq->u.In.cPages));

            /* execute */
            pReq->Hdr.rc = SUPR0PageAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR3, &pReq->u.Out.aPages[0]);
            if (RT_FAILURE(pReq->Hdr.rc))
                pReq->Hdr.cbOut = sizeof(pReq->Hdr);
            return 0;
        }

        case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_ALLOC_EX):
        {
            /* validate */
            PSUPPAGEALLOCEX pReq = (PSUPPAGEALLOCEX)pReqHdr;
            REQ_CHECK_EXPR(SUP_IOCTL_PAGE_ALLOC_EX, pReq->Hdr.cbIn <= SUP_IOCTL_PAGE_ALLOC_EX_SIZE_IN);
            REQ_CHECK_SIZES_EX(SUP_IOCTL_PAGE_ALLOC_EX, SUP_IOCTL_PAGE_ALLOC_EX_SIZE_IN, SUP_IOCTL_PAGE_ALLOC_EX_SIZE_OUT(pReq->u.In.cPages));
            REQ_CHECK_EXPR_FMT(pReq->u.In.fKernelMapping || pReq->u.In.fUserMapping,
                               ("SUP_IOCTL_PAGE_ALLOC_EX: No mapping requested!\n"));
            /* A user mapping is currently mandatory (kernel-only mappings are
               not accepted by this request). */
            REQ_CHECK_EXPR_FMT(pReq->u.In.fUserMapping,
                               ("SUP_IOCTL_PAGE_ALLOC_EX: Must have user mapping!\n"));
            REQ_CHECK_EXPR_FMT(!pReq->u.In.fReserved0 && !pReq->u.In.fReserved1,
                               ("SUP_IOCTL_PAGE_ALLOC_EX: fReserved0=%d fReserved1=%d\n", pReq->u.In.fReserved0, pReq->u.In.fReserved1));

            /* execute */
            pReq->Hdr.rc = SUPR0PageAllocEx(pSession, pReq->u.In.cPages, 0 /* fFlags */,
                                            pReq->u.In.fUserMapping   ? &pReq->u.Out.pvR3 : NULL,
                                            pReq->u.In.fKernelMapping ? &pReq->u.Out.pvR0 : NULL,
                                            &pReq->u.Out.aPages[0]);
            if (RT_FAILURE(pReq->Hdr.rc))
                pReq->Hdr.cbOut = sizeof(pReq->Hdr);
            return 0;
        }

        case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_MAP_KERNEL):
        {
            /* validate */
            PSUPPAGEMAPKERNEL pReq = (PSUPPAGEMAPKERNEL)pReqHdr;
            REQ_CHECK_SIZES(SUP_IOCTL_PAGE_MAP_KERNEL);
            REQ_CHECK_EXPR_FMT(!pReq->u.In.fFlags, ("SUP_IOCTL_PAGE_MAP_KERNEL: fFlags=%#x! MBZ\n", pReq->u.In.fFlags));
            /* Sub-range must be page aligned and non-empty. */
            REQ_CHECK_EXPR_FMT(!(pReq->u.In.offSub & PAGE_OFFSET_MASK), ("SUP_IOCTL_PAGE_MAP_KERNEL: offSub=%#x\n", pReq->u.In.offSub));
            REQ_CHECK_EXPR_FMT(pReq->u.In.cbSub && !(pReq->u.In.cbSub & PAGE_OFFSET_MASK),
                               ("SUP_IOCTL_PAGE_MAP_KERNEL: cbSub=%#x\n", pReq->u.In.cbSub));

            /* execute */
            pReq->Hdr.rc = SUPR0PageMapKernel(pSession, pReq->u.In.pvR3, pReq->u.In.offSub, pReq->u.In.cbSub,
                                              pReq->u.In.fFlags, &pReq->u.Out.pvR0);
            if (RT_FAILURE(pReq->Hdr.rc))
                pReq->Hdr.cbOut = sizeof(pReq->Hdr);
            return 0;
        }

        case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_FREE):
        {
            /* validate */
            PSUPPAGEFREE pReq = (PSUPPAGEFREE)pReqHdr;
            REQ_CHECK_SIZES(SUP_IOCTL_PAGE_FREE);

            /* execute */
            pReq->Hdr.rc = SUPR0PageFree(pSession, pReq->u.In.pvR3);
            return 0;
        }

        case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_SERVICE(0)):
        {
            /* validate */
            PSUPCALLSERVICE pReq = (PSUPCALLSERVICE)pReqHdr;
            Log4(("SUP_IOCTL_CALL_SERVICE: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
                  pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));

            /* Same two layouts as SUP_IOCTL_CALL_VMMR0: bare or with an
               embedded SUPR0SERVICEREQHDR-prefixed packet. */
            if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_SERVICE_SIZE(0))
                REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_SERVICE, SUP_IOCTL_CALL_SERVICE_SIZE_IN(0), SUP_IOCTL_CALL_SERVICE_SIZE_OUT(0));
            else
            {
                PSUPR0SERVICEREQHDR pSrvReq = (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0];
                REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_SERVICE_SIZE(sizeof(SUPR0SERVICEREQHDR)),
                                   ("SUP_IOCTL_CALL_SERVICE: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_SERVICE_SIZE(sizeof(SUPR0SERVICEREQHDR))));
                REQ_CHECK_EXPR(SUP_IOCTL_CALL_SERVICE, pSrvReq->u32Magic == SUPR0SERVICEREQHDR_MAGIC);
                REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_SERVICE, SUP_IOCTL_CALL_SERVICE_SIZE_IN(pSrvReq->cbReq), SUP_IOCTL_CALL_SERVICE_SIZE_OUT(pSrvReq->cbReq));
            }
            REQ_CHECK_EXPR(SUP_IOCTL_CALL_SERVICE, memchr(pReq->u.In.szName, '\0', sizeof(pReq->u.In.szName)));

            /* execute */
            pReq->Hdr.rc = supdrvIOCtl_CallServiceModule(pDevExt, pSession, pReq);
            return 0;
        }

        default:
            Log(("Unknown IOCTL %#lx\n", (long)uIOCtl));
            break;
    }
    /* Legacy error code for unrecognized requests. */
    return SUPDRV_ERR_GENERAL_FAILURE;
}
1518
1519
1520/**
1521 * Inter-Driver Communcation (IDC) worker.
1522 *
1523 * @returns VBox status code.
1524 * @retval VINF_SUCCESS on success.
1525 * @retval VERR_INVALID_PARAMETER if the request is invalid.
1526 * @retval VERR_NOT_SUPPORTED if the request isn't supported.
1527 *
1528 * @param uReq The request (function) code.
1529 * @param pDevExt Device extention.
1530 * @param pSession Session data.
1531 * @param pReqHdr The request header.
1532 */
1533int VBOXCALL supdrvIDC(uintptr_t uReq, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQHDR pReqHdr)
1534{
1535 /*
1536 * The OS specific code has already validated the pSession
1537 * pointer, and the request size being greater or equal to
1538 * size of the header.
1539 *
1540 * So, just check that pSession is a kernel context session.
1541 */
1542 if (RT_UNLIKELY( pSession
1543 && pSession->R0Process != NIL_RTR0PROCESS))
1544 return VERR_INVALID_PARAMETER;
1545
1546/*
1547 * Validation macro.
1548 */
1549#define REQ_CHECK_IDC_SIZE(Name, cbExpect) \
1550 do { \
1551 if (RT_UNLIKELY(pReqHdr->cb != (cbExpect))) \
1552 { \
1553 OSDBGPRINT(( #Name ": Invalid input/output sizes. cb=%ld expected %ld.\n", \
1554 (long)pReqHdr->cb, (long)(cbExpect))); \
1555 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1556 } \
1557 } while (0)
1558
1559 switch (uReq)
1560 {
1561 case SUPDRV_IDC_REQ_CONNECT:
1562 {
1563 PSUPDRVIDCREQCONNECT pReq = (PSUPDRVIDCREQCONNECT)pReqHdr;
1564 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_CONNECT, sizeof(*pReq));
1565
1566 /*
1567 * Validate the cookie and other input.
1568 */
1569 if (pReq->Hdr.pSession != NULL)
1570 {
1571 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: pSession=%p expected NULL!\n", pReq->Hdr.pSession));
1572 return pReqHdr->rc = VERR_INVALID_PARAMETER;
1573 }
1574 if (pReq->u.In.u32MagicCookie != SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE)
1575 {
1576 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: u32MagicCookie=%#x expected %#x!\n",
1577 pReq->u.In.u32MagicCookie, SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE));
1578 return pReqHdr->rc = VERR_INVALID_PARAMETER;
1579 }
1580 if ( pReq->u.In.uMinVersion > pReq->u.In.uReqVersion
1581 || (pReq->u.In.uMinVersion & UINT32_C(0xffff0000)) != (pReq->u.In.uReqVersion & UINT32_C(0xffff0000)))
1582 {
1583 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: uMinVersion=%#x uMaxVersion=%#x doesn't match!\n",
1584 pReq->u.In.uMinVersion, pReq->u.In.uReqVersion));
1585 return pReqHdr->rc = VERR_INVALID_PARAMETER;
1586 }
1587
1588 /*
1589 * Match the version.
1590 * The current logic is very simple, match the major interface version.
1591 */
1592 if ( pReq->u.In.uMinVersion > SUPDRV_IDC_VERSION
1593 || (pReq->u.In.uMinVersion & 0xffff0000) != (SUPDRV_IDC_VERSION & 0xffff0000))
1594 {
1595 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
1596 pReq->u.In.uReqVersion, pReq->u.In.uMinVersion, SUPDRV_IDC_VERSION));
1597 pReq->u.Out.pSession = NULL;
1598 pReq->u.Out.uSessionVersion = 0xffffffff;
1599 pReq->u.Out.uDriverVersion = SUPDRV_IDC_VERSION;
1600 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
1601 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
1602 return VINF_SUCCESS;
1603 }
1604
1605 pReq->u.Out.pSession = NULL;
1606 pReq->u.Out.uSessionVersion = SUPDRV_IDC_VERSION;
1607 pReq->u.Out.uDriverVersion = SUPDRV_IDC_VERSION;
1608 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
1609
1610 /*
1611 * On NT we will already have a session associated with the
1612 * client, just like with the SUP_IOCTL_COOKIE request, while
1613 * the other doesn't.
1614 */
1615#ifdef RT_OS_WINDOWS
1616 pReq->Hdr.rc = VINF_SUCCESS;
1617#else
1618 AssertReturn(!pSession, VERR_INTERNAL_ERROR);
1619 pReq->Hdr.rc = supdrvCreateSession(pDevExt, false /* fUser */, &pSession);
1620 if (RT_FAILURE(pReq->Hdr.rc))
1621 {
1622 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: failed to create session, rc=%d\n", pReq->Hdr.rc));
1623 return VINF_SUCCESS;
1624 }
1625#endif
1626
1627 pReq->u.Out.pSession = pSession;
1628 pReq->Hdr.pSession = pSession;
1629
1630 return VINF_SUCCESS;
1631 }
1632
1633 case SUPDRV_IDC_REQ_DISCONNECT:
1634 {
1635 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_DISCONNECT, sizeof(*pReqHdr));
1636
1637#ifdef RT_OS_WINDOWS
1638 /* Windows will destroy the session when the file object is destroyed. */
1639#else
1640 supdrvCloseSession(pDevExt, pSession);
1641#endif
1642 return pReqHdr->rc = VINF_SUCCESS;
1643 }
1644
1645 case SUPDRV_IDC_REQ_GET_SYMBOL:
1646 {
1647 PSUPDRVIDCREQGETSYM pReq = (PSUPDRVIDCREQGETSYM)pReqHdr;
1648 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_GET_SYMBOL, sizeof(*pReq));
1649
1650 pReq->Hdr.rc = supdrvIDC_LdrGetSymbol(pDevExt, pSession, pReq);
1651 return VINF_SUCCESS;
1652 }
1653
1654 case SUPDRV_IDC_REQ_COMPONENT_REGISTER_FACTORY:
1655 {
1656 PSUPDRVIDCREQCOMPREGFACTORY pReq = (PSUPDRVIDCREQCOMPREGFACTORY)pReqHdr;
1657 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_COMPONENT_REGISTER_FACTORY, sizeof(*pReq));
1658
1659 pReq->Hdr.rc = SUPR0ComponentRegisterFactory(pSession, pReq->u.In.pFactory);
1660 return VINF_SUCCESS;
1661 }
1662
1663 case SUPDRV_IDC_REQ_COMPONENT_DEREGISTER_FACTORY:
1664 {
1665 PSUPDRVIDCREQCOMPDEREGFACTORY pReq = (PSUPDRVIDCREQCOMPDEREGFACTORY)pReqHdr;
1666 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_COMPONENT_DEREGISTER_FACTORY, sizeof(*pReq));
1667
1668 pReq->Hdr.rc = SUPR0ComponentDeregisterFactory(pSession, pReq->u.In.pFactory);
1669 return VINF_SUCCESS;
1670 }
1671
1672 default:
1673 Log(("Unknown IDC %#lx\n", (long)uReq));
1674 break;
1675 }
1676
1677#undef REQ_CHECK_IDC_SIZE
1678 return VERR_NOT_SUPPORTED;
1679}
1680
1681
1682/**
1683 * Register a object for reference counting.
1684 * The object is registered with one reference in the specified session.
1685 *
1686 * @returns Unique identifier on success (pointer).
1687 * All future reference must use this identifier.
1688 * @returns NULL on failure.
1689 * @param pfnDestructor The destructore function which will be called when the reference count reaches 0.
1690 * @param pvUser1 The first user argument.
1691 * @param pvUser2 The second user argument.
1692 */
1693SUPR0DECL(void *) SUPR0ObjRegister(PSUPDRVSESSION pSession, SUPDRVOBJTYPE enmType, PFNSUPDRVDESTRUCTOR pfnDestructor, void *pvUser1, void *pvUser2)
1694{
1695 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
1696 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
1697 PSUPDRVOBJ pObj;
1698 PSUPDRVUSAGE pUsage;
1699
1700 /*
1701 * Validate the input.
1702 */
1703 AssertReturn(SUP_IS_SESSION_VALID(pSession), NULL);
1704 AssertReturn(enmType > SUPDRVOBJTYPE_INVALID && enmType < SUPDRVOBJTYPE_END, NULL);
1705 AssertPtrReturn(pfnDestructor, NULL);
1706
1707 /*
1708 * Allocate and initialize the object.
1709 */
1710 pObj = (PSUPDRVOBJ)RTMemAlloc(sizeof(*pObj));
1711 if (!pObj)
1712 return NULL;
1713 pObj->u32Magic = SUPDRVOBJ_MAGIC;
1714 pObj->enmType = enmType;
1715 pObj->pNext = NULL;
1716 pObj->cUsage = 1;
1717 pObj->pfnDestructor = pfnDestructor;
1718 pObj->pvUser1 = pvUser1;
1719 pObj->pvUser2 = pvUser2;
1720 pObj->CreatorUid = pSession->Uid;
1721 pObj->CreatorGid = pSession->Gid;
1722 pObj->CreatorProcess= pSession->Process;
1723 supdrvOSObjInitCreator(pObj, pSession);
1724
1725 /*
1726 * Allocate the usage record.
1727 * (We keep freed usage records around to simplify SUPR0ObjAddRef().)
1728 */
1729 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1730
1731 pUsage = pDevExt->pUsageFree;
1732 if (pUsage)
1733 pDevExt->pUsageFree = pUsage->pNext;
1734 else
1735 {
1736 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1737 pUsage = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsage));
1738 if (!pUsage)
1739 {
1740 RTMemFree(pObj);
1741 return NULL;
1742 }
1743 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1744 }
1745
1746 /*
1747 * Insert the object and create the session usage record.
1748 */
1749 /* The object. */
1750 pObj->pNext = pDevExt->pObjs;
1751 pDevExt->pObjs = pObj;
1752
1753 /* The session record. */
1754 pUsage->cUsage = 1;
1755 pUsage->pObj = pObj;
1756 pUsage->pNext = pSession->pUsage;
1757 /* Log2(("SUPR0ObjRegister: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext)); */
1758 pSession->pUsage = pUsage;
1759
1760 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1761
1762 Log(("SUPR0ObjRegister: returns %p (pvUser1=%p, pvUser=%p)\n", pObj, pvUser1, pvUser2));
1763 return pObj;
1764}
1765
1766
1767/**
1768 * Increment the reference counter for the object associating the reference
1769 * with the specified session.
1770 *
1771 * @returns IPRT status code.
1772 * @param pvObj The identifier returned by SUPR0ObjRegister().
1773 * @param pSession The session which is referencing the object.
1774 *
1775 * @remarks The caller should not own any spinlocks and must carefully protect
1776 * itself against potential race with the destructor so freed memory
1777 * isn't accessed here.
1778 */
1779SUPR0DECL(int) SUPR0ObjAddRef(void *pvObj, PSUPDRVSESSION pSession)
1780{
1781 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
1782 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
1783 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
1784 PSUPDRVUSAGE pUsagePre;
1785 PSUPDRVUSAGE pUsage;
1786
1787 /*
1788 * Validate the input.
1789 * Be ready for the destruction race (someone might be stuck in the
1790 * destructor waiting a lock we own).
1791 */
1792 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1793 AssertPtrReturn(pObj, VERR_INVALID_POINTER);
1794 AssertMsgReturn(pObj->u32Magic == SUPDRVOBJ_MAGIC || pObj->u32Magic == SUPDRVOBJ_MAGIC + 1,
1795 ("Invalid pvObj=%p magic=%#x (expected %#x or %#x)\n", pvObj, pObj->u32Magic, SUPDRVOBJ_MAGIC, SUPDRVOBJ_MAGIC + 1),
1796 VERR_INVALID_PARAMETER);
1797
1798 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1799
1800 if (RT_UNLIKELY(pObj->u32Magic != SUPDRVOBJ_MAGIC))
1801 {
1802 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1803
1804 AssertMsgFailed(("pvObj=%p magic=%#x\n", pvObj, pObj->u32Magic));
1805 return VERR_WRONG_ORDER;
1806 }
1807
1808 /*
1809 * Preallocate the usage record.
1810 */
1811 pUsagePre = pDevExt->pUsageFree;
1812 if (pUsagePre)
1813 pDevExt->pUsageFree = pUsagePre->pNext;
1814 else
1815 {
1816 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1817 pUsagePre = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsagePre));
1818 if (!pUsagePre)
1819 return VERR_NO_MEMORY;
1820
1821 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1822 }
1823
1824 /*
1825 * Reference the object.
1826 */
1827 pObj->cUsage++;
1828
1829 /*
1830 * Look for the session record.
1831 */
1832 for (pUsage = pSession->pUsage; pUsage; pUsage = pUsage->pNext)
1833 {
1834 /*Log(("SUPR0AddRef: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));*/
1835 if (pUsage->pObj == pObj)
1836 break;
1837 }
1838 if (pUsage)
1839 pUsage->cUsage++;
1840 else
1841 {
1842 /* create a new session record. */
1843 pUsagePre->cUsage = 1;
1844 pUsagePre->pObj = pObj;
1845 pUsagePre->pNext = pSession->pUsage;
1846 pSession->pUsage = pUsagePre;
1847 /*Log(("SUPR0AddRef: pUsagePre=%p:{.pObj=%p, .pNext=%p}\n", pUsagePre, pUsagePre->pObj, pUsagePre->pNext));*/
1848
1849 pUsagePre = NULL;
1850 }
1851
1852 /*
1853 * Put any unused usage record into the free list..
1854 */
1855 if (pUsagePre)
1856 {
1857 pUsagePre->pNext = pDevExt->pUsageFree;
1858 pDevExt->pUsageFree = pUsagePre;
1859 }
1860
1861 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1862
1863 return VINF_SUCCESS;
1864}
1865
1866
1867/**
1868 * Decrement / destroy a reference counter record for an object.
1869 *
1870 * The object is uniquely identified by pfnDestructor+pvUser1+pvUser2.
1871 *
1872 * @returns IPRT status code.
1873 * @param pvObj The identifier returned by SUPR0ObjRegister().
1874 * @param pSession The session which is referencing the object.
1875 */
1876SUPR0DECL(int) SUPR0ObjRelease(void *pvObj, PSUPDRVSESSION pSession)
1877{
1878 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
1879 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
1880 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
1881 bool fDestroy = false;
1882 PSUPDRVUSAGE pUsage;
1883 PSUPDRVUSAGE pUsagePrev;
1884
1885 /*
1886 * Validate the input.
1887 */
1888 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1889 AssertMsgReturn(VALID_PTR(pObj) && pObj->u32Magic == SUPDRVOBJ_MAGIC,
1890 ("Invalid pvObj=%p magic=%#x (exepcted %#x)\n", pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC),
1891 VERR_INVALID_PARAMETER);
1892
1893 /*
1894 * Acquire the spinlock and look for the usage record.
1895 */
1896 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1897
1898 for (pUsagePrev = NULL, pUsage = pSession->pUsage;
1899 pUsage;
1900 pUsagePrev = pUsage, pUsage = pUsage->pNext)
1901 {
1902 /*Log2(("SUPR0ObjRelease: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));*/
1903 if (pUsage->pObj == pObj)
1904 {
1905 AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
1906 if (pUsage->cUsage > 1)
1907 {
1908 pObj->cUsage--;
1909 pUsage->cUsage--;
1910 }
1911 else
1912 {
1913 /*
1914 * Free the session record.
1915 */
1916 if (pUsagePrev)
1917 pUsagePrev->pNext = pUsage->pNext;
1918 else
1919 pSession->pUsage = pUsage->pNext;
1920 pUsage->pNext = pDevExt->pUsageFree;
1921 pDevExt->pUsageFree = pUsage;
1922
1923 /* What about the object? */
1924 if (pObj->cUsage > 1)
1925 pObj->cUsage--;
1926 else
1927 {
1928 /*
1929 * Object is to be destroyed, unlink it.
1930 */
1931 pObj->u32Magic = SUPDRVOBJ_MAGIC + 1;
1932 fDestroy = true;
1933 if (pDevExt->pObjs == pObj)
1934 pDevExt->pObjs = pObj->pNext;
1935 else
1936 {
1937 PSUPDRVOBJ pObjPrev;
1938 for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
1939 if (pObjPrev->pNext == pObj)
1940 {
1941 pObjPrev->pNext = pObj->pNext;
1942 break;
1943 }
1944 Assert(pObjPrev);
1945 }
1946 }
1947 }
1948 break;
1949 }
1950 }
1951
1952 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1953
1954 /*
1955 * Call the destructor and free the object if required.
1956 */
1957 if (fDestroy)
1958 {
1959 Log(("SUPR0ObjRelease: destroying %p/%d (%p/%p) cpid=%RTproc pid=%RTproc dtor=%p\n",
1960 pObj, pObj->enmType, pObj->pvUser1, pObj->pvUser2, pObj->CreatorProcess, RTProcSelf(), pObj->pfnDestructor));
1961 if (pObj->pfnDestructor)
1962#ifdef RT_WITH_W64_UNWIND_HACK
1963 supdrvNtWrapObjDestructor((PFNRT)pObj->pfnDestructor, pObj, pObj->pvUser1, pObj->pvUser2);
1964#else
1965 pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
1966#endif
1967 RTMemFree(pObj);
1968 }
1969
1970 AssertMsg(pUsage, ("pvObj=%p\n", pvObj));
1971 return pUsage ? VINF_SUCCESS : VERR_INVALID_PARAMETER;
1972}
1973
1974/**
1975 * Verifies that the current process can access the specified object.
1976 *
1977 * @returns The following IPRT status code:
1978 * @retval VINF_SUCCESS if access was granted.
1979 * @retval VERR_PERMISSION_DENIED if denied access.
1980 * @retval VERR_INVALID_PARAMETER if invalid parameter.
1981 *
1982 * @param pvObj The identifier returned by SUPR0ObjRegister().
1983 * @param pSession The session which wishes to access the object.
1984 * @param pszObjName Object string name. This is optional and depends on the object type.
1985 *
1986 * @remark The caller is responsible for making sure the object isn't removed while
1987 * we're inside this function. If uncertain about this, just call AddRef before calling us.
1988 */
1989SUPR0DECL(int) SUPR0ObjVerifyAccess(void *pvObj, PSUPDRVSESSION pSession, const char *pszObjName)
1990{
1991 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
1992 int rc;
1993
1994 /*
1995 * Validate the input.
1996 */
1997 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1998 AssertMsgReturn(VALID_PTR(pObj) && pObj->u32Magic == SUPDRVOBJ_MAGIC,
1999 ("Invalid pvObj=%p magic=%#x (exepcted %#x)\n", pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC),
2000 VERR_INVALID_PARAMETER);
2001
2002 /*
2003 * Check access. (returns true if a decision has been made.)
2004 */
2005 rc = VERR_INTERNAL_ERROR;
2006 if (supdrvOSObjCanAccess(pObj, pSession, pszObjName, &rc))
2007 return rc;
2008
2009 /*
2010 * Default policy is to allow the user to access his own
2011 * stuff but nothing else.
2012 */
2013 if (pObj->CreatorUid == pSession->Uid)
2014 return VINF_SUCCESS;
2015 return VERR_PERMISSION_DENIED;
2016}
2017
2018
2019/**
2020 * Lock pages.
2021 *
2022 * @returns IPRT status code.
2023 * @param pSession Session to which the locked memory should be associated.
2024 * @param pvR3 Start of the memory range to lock.
2025 * This must be page aligned.
2026 * @param cb Size of the memory range to lock.
2027 * This must be page aligned.
2028 */
2029SUPR0DECL(int) SUPR0LockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages)
2030{
2031 int rc;
2032 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
2033 const size_t cb = (size_t)cPages << PAGE_SHIFT;
2034 LogFlow(("SUPR0LockMem: pSession=%p pvR3=%p cPages=%d paPages=%p\n", pSession, (void *)pvR3, cPages, paPages));
2035
2036 /*
2037 * Verify input.
2038 */
2039 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2040 AssertPtrReturn(paPages, VERR_INVALID_PARAMETER);
2041 if ( RT_ALIGN_R3PT(pvR3, PAGE_SIZE, RTR3PTR) != pvR3
2042 || !pvR3)
2043 {
2044 Log(("pvR3 (%p) must be page aligned and not NULL!\n", (void *)pvR3));
2045 return VERR_INVALID_PARAMETER;
2046 }
2047
2048#ifdef RT_OS_WINDOWS /* A temporary hack for windows, will be removed once all ring-3 code has been cleaned up. */
2049 /* First check if we allocated it using SUPPageAlloc; if so then we don't need to lock it again */
2050 rc = supdrvPageGetPhys(pSession, pvR3, cPages, paPages);
2051 if (RT_SUCCESS(rc))
2052 return rc;
2053#endif
2054
2055 /*
2056 * Let IPRT do the job.
2057 */
2058 Mem.eType = MEMREF_TYPE_LOCKED;
2059 rc = RTR0MemObjLockUser(&Mem.MemObj, pvR3, cb, RTR0ProcHandleSelf());
2060 if (RT_SUCCESS(rc))
2061 {
2062 uint32_t iPage = cPages;
2063 AssertMsg(RTR0MemObjAddressR3(Mem.MemObj) == pvR3, ("%p == %p\n", RTR0MemObjAddressR3(Mem.MemObj), pvR3));
2064 AssertMsg(RTR0MemObjSize(Mem.MemObj) == cb, ("%x == %x\n", RTR0MemObjSize(Mem.MemObj), cb));
2065
2066 while (iPage-- > 0)
2067 {
2068 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
2069 if (RT_UNLIKELY(paPages[iPage] == NIL_RTCCPHYS))
2070 {
2071 AssertMsgFailed(("iPage=%d\n", iPage));
2072 rc = VERR_INTERNAL_ERROR;
2073 break;
2074 }
2075 }
2076 if (RT_SUCCESS(rc))
2077 rc = supdrvMemAdd(&Mem, pSession);
2078 if (RT_FAILURE(rc))
2079 {
2080 int rc2 = RTR0MemObjFree(Mem.MemObj, false);
2081 AssertRC(rc2);
2082 }
2083 }
2084
2085 return rc;
2086}
2087
2088
2089/**
2090 * Unlocks the memory pointed to by pv.
2091 *
2092 * @returns IPRT status code.
2093 * @param pSession Session to which the memory was locked.
2094 * @param pvR3 Memory to unlock.
2095 */
2096SUPR0DECL(int) SUPR0UnlockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3)
2097{
2098 LogFlow(("SUPR0UnlockMem: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
2099 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2100#ifdef RT_OS_WINDOWS
2101 /*
2102 * Temporary hack for windows - SUPR0PageFree will unlock SUPR0PageAlloc
2103 * allocations; ignore this call.
2104 */
2105 if (supdrvPageWasLockedByPageAlloc(pSession, pvR3))
2106 {
2107 LogFlow(("Page will be unlocked in SUPR0PageFree -> ignore\n"));
2108 return VINF_SUCCESS;
2109 }
2110#endif
2111 return supdrvMemRelease(pSession, (RTHCUINTPTR)pvR3, MEMREF_TYPE_LOCKED);
2112}
2113
2114
2115/**
2116 * Allocates a chunk of page aligned memory with contiguous and fixed physical
2117 * backing.
2118 *
2119 * @returns IPRT status code.
2120 * @param pSession Session data.
2121 * @param cb Number of bytes to allocate.
2122 * @param ppvR0 Where to put the address of Ring-0 mapping the allocated memory.
2123 * @param ppvR3 Where to put the address of Ring-3 mapping the allocated memory.
2124 * @param pHCPhys Where to put the physical address of allocated memory.
2125 */
2126SUPR0DECL(int) SUPR0ContAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS pHCPhys)
2127{
2128 int rc;
2129 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
2130 LogFlow(("SUPR0ContAlloc: pSession=%p cPages=%d ppvR0=%p ppvR3=%p pHCPhys=%p\n", pSession, cPages, ppvR0, ppvR3, pHCPhys));
2131
2132 /*
2133 * Validate input.
2134 */
2135 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2136 if (!ppvR3 || !ppvR0 || !pHCPhys)
2137 {
2138 Log(("Null pointer. All of these should be set: pSession=%p ppvR0=%p ppvR3=%p pHCPhys=%p\n",
2139 pSession, ppvR0, ppvR3, pHCPhys));
2140 return VERR_INVALID_PARAMETER;
2141
2142 }
2143 if (cPages < 1 || cPages >= 256)
2144 {
2145 Log(("Illegal request cPages=%d, must be greater than 0 and smaller than 256.\n", cPages));
2146 return VERR_PAGE_COUNT_OUT_OF_RANGE;
2147 }
2148
2149 /*
2150 * Let IPRT do the job.
2151 */
2152 rc = RTR0MemObjAllocCont(&Mem.MemObj, cPages << PAGE_SHIFT, true /* executable R0 mapping */);
2153 if (RT_SUCCESS(rc))
2154 {
2155 int rc2;
2156 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
2157 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
2158 if (RT_SUCCESS(rc))
2159 {
2160 Mem.eType = MEMREF_TYPE_CONT;
2161 rc = supdrvMemAdd(&Mem, pSession);
2162 if (!rc)
2163 {
2164 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
2165 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
2166 *pHCPhys = RTR0MemObjGetPagePhysAddr(Mem.MemObj, 0);
2167 return 0;
2168 }
2169
2170 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
2171 AssertRC(rc2);
2172 }
2173 rc2 = RTR0MemObjFree(Mem.MemObj, false);
2174 AssertRC(rc2);
2175 }
2176
2177 return rc;
2178}
2179
2180
2181/**
2182 * Frees memory allocated using SUPR0ContAlloc().
2183 *
2184 * @returns IPRT status code.
2185 * @param pSession The session to which the memory was allocated.
2186 * @param uPtr Pointer to the memory (ring-3 or ring-0).
2187 */
2188SUPR0DECL(int) SUPR0ContFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
2189{
2190 LogFlow(("SUPR0ContFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
2191 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2192 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_CONT);
2193}
2194
2195
2196/**
2197 * Allocates a chunk of page aligned memory with fixed physical backing below 4GB.
2198 *
2199 * The memory isn't zeroed.
2200 *
2201 * @returns IPRT status code.
2202 * @param pSession Session data.
2203 * @param cPages Number of pages to allocate.
2204 * @param ppvR0 Where to put the address of Ring-0 mapping of the allocated memory.
2205 * @param ppvR3 Where to put the address of Ring-3 mapping of the allocated memory.
2206 * @param paPages Where to put the physical addresses of allocated memory.
2207 */
2208SUPR0DECL(int) SUPR0LowAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS paPages)
2209{
2210 unsigned iPage;
2211 int rc;
2212 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
2213 LogFlow(("SUPR0LowAlloc: pSession=%p cPages=%d ppvR3=%p ppvR0=%p paPages=%p\n", pSession, cPages, ppvR3, ppvR0, paPages));
2214
2215 /*
2216 * Validate input.
2217 */
2218 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2219 if (!ppvR3 || !ppvR0 || !paPages)
2220 {
2221 Log(("Null pointer. All of these should be set: pSession=%p ppvR3=%p ppvR0=%p paPages=%p\n",
2222 pSession, ppvR3, ppvR0, paPages));
2223 return VERR_INVALID_PARAMETER;
2224
2225 }
2226 if (cPages < 1 || cPages >= 256)
2227 {
2228 Log(("Illegal request cPages=%d, must be greater than 0 and smaller than 256.\n", cPages));
2229 return VERR_PAGE_COUNT_OUT_OF_RANGE;
2230 }
2231
2232 /*
2233 * Let IPRT do the work.
2234 */
2235 rc = RTR0MemObjAllocLow(&Mem.MemObj, cPages << PAGE_SHIFT, true /* executable ring-0 mapping */);
2236 if (RT_SUCCESS(rc))
2237 {
2238 int rc2;
2239 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
2240 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
2241 if (RT_SUCCESS(rc))
2242 {
2243 Mem.eType = MEMREF_TYPE_LOW;
2244 rc = supdrvMemAdd(&Mem, pSession);
2245 if (!rc)
2246 {
2247 for (iPage = 0; iPage < cPages; iPage++)
2248 {
2249 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
2250 AssertMsg(!(paPages[iPage] & (PAGE_SIZE - 1)), ("iPage=%d Phys=%RHp\n", paPages[iPage]));
2251 }
2252 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
2253 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
2254 return 0;
2255 }
2256
2257 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
2258 AssertRC(rc2);
2259 }
2260
2261 rc2 = RTR0MemObjFree(Mem.MemObj, false);
2262 AssertRC(rc2);
2263 }
2264
2265 return rc;
2266}
2267
2268
2269/**
2270 * Frees memory allocated using SUPR0LowAlloc().
2271 *
2272 * @returns IPRT status code.
2273 * @param pSession The session to which the memory was allocated.
2274 * @param uPtr Pointer to the memory (ring-3 or ring-0).
2275 */
2276SUPR0DECL(int) SUPR0LowFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
2277{
2278 LogFlow(("SUPR0LowFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
2279 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2280 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_LOW);
2281}
2282
2283
2284
2285/**
2286 * Allocates a chunk of memory with both R0 and R3 mappings.
2287 * The memory is fixed and it's possible to query the physical addresses using SUPR0MemGetPhys().
2288 *
2289 * @returns IPRT status code.
2290 * @param pSession The session to associated the allocation with.
2291 * @param cb Number of bytes to allocate.
2292 * @param ppvR0 Where to store the address of the Ring-0 mapping.
2293 * @param ppvR3 Where to store the address of the Ring-3 mapping.
2294 */
2295SUPR0DECL(int) SUPR0MemAlloc(PSUPDRVSESSION pSession, uint32_t cb, PRTR0PTR ppvR0, PRTR3PTR ppvR3)
2296{
2297 int rc;
2298 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
2299 LogFlow(("SUPR0MemAlloc: pSession=%p cb=%d ppvR0=%p ppvR3=%p\n", pSession, cb, ppvR0, ppvR3));
2300
2301 /*
2302 * Validate input.
2303 */
2304 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2305 AssertPtrReturn(ppvR0, VERR_INVALID_POINTER);
2306 AssertPtrReturn(ppvR3, VERR_INVALID_POINTER);
2307 if (cb < 1 || cb >= _4M)
2308 {
2309 Log(("Illegal request cb=%u; must be greater than 0 and smaller than 4MB.\n", cb));
2310 return VERR_INVALID_PARAMETER;
2311 }
2312
2313 /*
2314 * Let IPRT do the work.
2315 */
2316 rc = RTR0MemObjAllocPage(&Mem.MemObj, cb, true /* executable ring-0 mapping */);
2317 if (RT_SUCCESS(rc))
2318 {
2319 int rc2;
2320 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
2321 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
2322 if (RT_SUCCESS(rc))
2323 {
2324 Mem.eType = MEMREF_TYPE_MEM;
2325 rc = supdrvMemAdd(&Mem, pSession);
2326 if (!rc)
2327 {
2328 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
2329 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
2330 return VINF_SUCCESS;
2331 }
2332
2333 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
2334 AssertRC(rc2);
2335 }
2336
2337 rc2 = RTR0MemObjFree(Mem.MemObj, false);
2338 AssertRC(rc2);
2339 }
2340
2341 return rc;
2342}
2343
2344
2345/**
2346 * Get the physical addresses of memory allocated using SUPR0MemAlloc().
2347 *
2348 * @returns IPRT status code.
2349 * @param pSession The session to which the memory was allocated.
2350 * @param uPtr The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
2351 * @param paPages Where to store the physical addresses.
2352 */
2353SUPR0DECL(int) SUPR0MemGetPhys(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, PSUPPAGE paPages) /** @todo switch this bugger to RTHCPHYS */
2354{
2355 PSUPDRVBUNDLE pBundle;
2356 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2357 LogFlow(("SUPR0MemGetPhys: pSession=%p uPtr=%p paPages=%p\n", pSession, (void *)uPtr, paPages));
2358
2359 /*
2360 * Validate input.
2361 */
2362 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2363 AssertPtrReturn(paPages, VERR_INVALID_POINTER);
2364 AssertReturn(uPtr, VERR_INVALID_PARAMETER);
2365
2366 /*
2367 * Search for the address.
2368 */
2369 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
2370 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
2371 {
2372 if (pBundle->cUsed > 0)
2373 {
2374 unsigned i;
2375 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
2376 {
2377 if ( pBundle->aMem[i].eType == MEMREF_TYPE_MEM
2378 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
2379 && ( (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
2380 || ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
2381 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == uPtr)
2382 )
2383 )
2384 {
2385 const size_t cPages = RTR0MemObjSize(pBundle->aMem[i].MemObj) >> PAGE_SHIFT;
2386 size_t iPage;
2387 for (iPage = 0; iPage < cPages; iPage++)
2388 {
2389 paPages[iPage].Phys = RTR0MemObjGetPagePhysAddr(pBundle->aMem[i].MemObj, iPage);
2390 paPages[iPage].uReserved = 0;
2391 }
2392 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2393 return VINF_SUCCESS;
2394 }
2395 }
2396 }
2397 }
2398 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2399 Log(("Failed to find %p!!!\n", (void *)uPtr));
2400 return VERR_INVALID_PARAMETER;
2401}
2402
2403
2404/**
2405 * Free memory allocated by SUPR0MemAlloc().
2406 *
2407 * @returns IPRT status code.
2408 * @param pSession The session owning the allocation.
2409 * @param uPtr The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
2410 */
2411SUPR0DECL(int) SUPR0MemFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
2412{
2413 LogFlow(("SUPR0MemFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
2414 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2415 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_MEM);
2416}
2417
2418
2419/**
2420 * Allocates a chunk of memory with only a R3 mappings.
2421 *
2422 * The memory is fixed and it's possible to query the physical addresses using
2423 * SUPR0MemGetPhys().
2424 *
2425 * @returns IPRT status code.
2426 * @param pSession The session to associated the allocation with.
2427 * @param cPages The number of pages to allocate.
2428 * @param ppvR3 Where to store the address of the Ring-3 mapping.
2429 * @param paPages Where to store the addresses of the pages. Optional.
2430 */
2431SUPR0DECL(int) SUPR0PageAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR3PTR ppvR3, PRTHCPHYS paPages)
2432{
2433 AssertPtrReturn(ppvR3, VERR_INVALID_POINTER);
2434 return SUPR0PageAllocEx(pSession, cPages, 0 /*fFlags*/, ppvR3, NULL, paPages);
2435}
2436
2437
2438/**
2439 * Allocates a chunk of memory with a kernel or/and a user mode mapping.
2440 *
2441 * The memory is fixed and it's possible to query the physical addresses using
2442 * SUPR0MemGetPhys().
2443 *
2444 * @returns IPRT status code.
2445 * @param pSession The session to associated the allocation with.
2446 * @param cPages The number of pages to allocate.
2447 * @param fFlags Flags, reserved for the future. Must be zero.
2448 * @param ppvR3 Where to store the address of the Ring-3 mapping.
2449 * NULL if no ring-3 mapping.
2450 * @param ppvR3 Where to store the address of the Ring-0 mapping.
2451 * NULL if no ring-0 mapping.
2452 * @param paPages Where to store the addresses of the pages. Optional.
2453 */
2454SUPR0DECL(int) SUPR0PageAllocEx(PSUPDRVSESSION pSession, uint32_t cPages, uint32_t fFlags, PRTR3PTR ppvR3, PRTR0PTR ppvR0, PRTHCPHYS paPages)
2455{
2456 int rc;
2457 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
2458 LogFlow(("SUPR0PageAlloc: pSession=%p cb=%d ppvR3=%p\n", pSession, cPages, ppvR3));
2459
2460 /*
2461 * Validate input. The allowed allocation size must be at least equal to the maximum guest VRAM size.
2462 */
2463 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2464 AssertPtrNullReturn(ppvR3, VERR_INVALID_POINTER);
2465 AssertPtrNullReturn(ppvR0, VERR_INVALID_POINTER);
2466 AssertReturn(ppvR3 || ppvR0, VERR_INVALID_PARAMETER);
2467 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
2468 if (cPages < 1 || cPages > VBOX_MAX_ALLOC_PAGE_COUNT)
2469 {
2470 Log(("SUPR0PageAlloc: Illegal request cb=%u; must be greater than 0 and smaller than 128MB.\n", cPages));
2471 return VERR_PAGE_COUNT_OUT_OF_RANGE;
2472 }
2473
2474 /*
2475 * Let IPRT do the work.
2476 */
2477 if (ppvR0)
2478 rc = RTR0MemObjAllocPage(&Mem.MemObj, (size_t)cPages * PAGE_SIZE, true /* fExecutable */);
2479 else
2480 rc = RTR0MemObjAllocPhysNC(&Mem.MemObj, (size_t)cPages * PAGE_SIZE, NIL_RTHCPHYS);
2481 if (RT_SUCCESS(rc))
2482 {
2483 int rc2;
2484 if (ppvR3)
2485 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
2486 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
2487 else
2488 Mem.MapObjR3 = NIL_RTR0MEMOBJ;
2489 if (RT_SUCCESS(rc))
2490 {
2491 Mem.eType = MEMREF_TYPE_PAGE;
2492 rc = supdrvMemAdd(&Mem, pSession);
2493 if (!rc)
2494 {
2495 if (ppvR3)
2496 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
2497 if (ppvR0)
2498 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
2499 if (paPages)
2500 {
2501 uint32_t iPage = cPages;
2502 while (iPage-- > 0)
2503 {
2504 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MapObjR3, iPage);
2505 Assert(paPages[iPage] != NIL_RTHCPHYS);
2506 }
2507 }
2508 return VINF_SUCCESS;
2509 }
2510
2511 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
2512 AssertRC(rc2);
2513 }
2514
2515 rc2 = RTR0MemObjFree(Mem.MemObj, false);
2516 AssertRC(rc2);
2517 }
2518 return rc;
2519}
2520
2521
/**
 * Creates a ring-0 (kernel) mapping of memory which was previously allocated
 * by SUPR0PageAllocEx() or locked down with SUPR0LockMem().
 *
 * @returns IPRT status code.
 * @param   pSession    The session the memory belongs to.
 * @param   pvR3        The ring-3 address of the memory as returned by
 *                      SUPR0PageAllocEx() or passed to SUPR0LockMem().
 * @param   offSub      Offset into the memory at which to start the mapping.
 *                      Must be page aligned.
 * @param   cbSub       The number of bytes to map. Must be page aligned and
 *                      non-zero.
 * @param   fFlags      Flags, reserved for the future. Must be zero.
 * @param   ppvR0       Where to store the ring-0 address of the mapping.
 */
SUPR0DECL(int) SUPR0PageMapKernel(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t offSub, uint32_t cbSub,
                                  uint32_t fFlags, PRTR0PTR ppvR0)
{
    int             rc;
    PSUPDRVBUNDLE   pBundle;
    RTSPINLOCKTMP   SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
    RTR0MEMOBJ      hMemObj = NIL_RTR0MEMOBJ;
    LogFlow(("SUPR0PageMapKernel: pSession=%p pvR3=%p offSub=%#x cbSub=%#x\n", pSession, pvR3, offSub, cbSub));

    /*
     * Validate input.  Both the offset and the size must be page aligned,
     * and a zero-sized mapping is not allowed.
     */
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
    AssertPtrNullReturn(ppvR0, VERR_INVALID_POINTER);
    AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
    AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(cbSub, VERR_INVALID_PARAMETER);

    /*
     * Find the memory object backing pvR3.  Page allocations are matched on
     * their ring-3 mapping object; locked-down memory on the object itself
     * (it has no separate ring-3 mapping).
     */
    RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
    for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
    {
        if (pBundle->cUsed > 0)
        {
            unsigned i;
            for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
            {
                if (    (   pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
                         && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
                         && pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
                         && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3)
                    ||  (   pBundle->aMem[i].eType == MEMREF_TYPE_LOCKED
                         && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
                         && pBundle->aMem[i].MapObjR3 == NIL_RTR0MEMOBJ
                         && RTR0MemObjAddressR3(pBundle->aMem[i].MemObj) == pvR3))
                {
                    hMemObj = pBundle->aMem[i].MemObj;
                    break;
                }
            }
        }
    }
    RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);

    rc = VERR_INVALID_PARAMETER;
    if (hMemObj != NIL_RTR0MEMOBJ)
    {
        /*
         * Do some further input validations before calling IPRT.
         * (Cleanup is done indirectly by telling RTR0MemObjFree to include mappings.)
         */
        size_t cbMemObj = RTR0MemObjSize(hMemObj);
        if (    offSub < cbMemObj
            &&  cbSub <= cbMemObj
            &&  offSub + cbSub <= cbMemObj) /* all three checks needed: offSub+cbSub may wrap around */
        {
            RTR0MEMOBJ hMapObj;
            /* (RTR0PTR)-1 = let IPRT pick the kernel address. */
            rc = RTR0MemObjMapKernelEx(&hMapObj, hMemObj, (void *)-1, 0,
                                       RTMEM_PROT_READ | RTMEM_PROT_WRITE, offSub, cbSub);
            if (RT_SUCCESS(rc))
                *ppvR0 = RTR0MemObjAddress(hMapObj);
        }
        else
            SUPR0Printf("SUPR0PageMapKernel: cbMemObj=%#x offSub=%#x cbSub=%#x\n", cbMemObj, offSub, cbSub);

    }
    return rc;
}
2609
2610
2611
2612#ifdef RT_OS_WINDOWS
2613/**
2614 * Check if the pages were locked by SUPR0PageAlloc
2615 *
2616 * This function will be removed along with the lock/unlock hacks when
2617 * we've cleaned up the ring-3 code properly.
2618 *
2619 * @returns boolean
2620 * @param pSession The session to which the memory was allocated.
2621 * @param pvR3 The Ring-3 address returned by SUPR0PageAlloc().
2622 */
2623static bool supdrvPageWasLockedByPageAlloc(PSUPDRVSESSION pSession, RTR3PTR pvR3)
2624{
2625 PSUPDRVBUNDLE pBundle;
2626 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2627 LogFlow(("SUPR0PageIsLockedByPageAlloc: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
2628
2629 /*
2630 * Search for the address.
2631 */
2632 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
2633 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
2634 {
2635 if (pBundle->cUsed > 0)
2636 {
2637 unsigned i;
2638 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
2639 {
2640 if ( pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
2641 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
2642 && pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
2643 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3)
2644 {
2645 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2646 return true;
2647 }
2648 }
2649 }
2650 }
2651 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2652 return false;
2653}
2654
2655
2656/**
2657 * Get the physical addresses of memory allocated using SUPR0PageAllocEx().
2658 *
2659 * This function will be removed along with the lock/unlock hacks when
2660 * we've cleaned up the ring-3 code properly.
2661 *
2662 * @returns IPRT status code.
2663 * @param pSession The session to which the memory was allocated.
2664 * @param pvR3 The Ring-3 address returned by SUPR0PageAlloc().
2665 * @param cPages Number of pages in paPages
2666 * @param paPages Where to store the physical addresses.
2667 */
static int supdrvPageGetPhys(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages)
{
    PSUPDRVBUNDLE   pBundle;
    RTSPINLOCKTMP   SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
    LogFlow(("supdrvPageGetPhys: pSession=%p pvR3=%p cPages=%#lx paPages=%p\n", pSession, (void *)pvR3, (long)cPages, paPages));

    /*
     * Search the session's bundles for a page allocation whose ring-3
     * mapping address matches pvR3.
     */
    RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
    for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
    {
        if (pBundle->cUsed > 0)
        {
            unsigned i;
            for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
            {
                /* Only SUPR0PageAllocEx allocations with a ring-3 mapping qualify. */
                if (    pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
                    &&  pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
                    &&  pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
                    &&  RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3)
                {
                    uint32_t iPage;
                    /* Clamp the request to the actual size of the allocation. */
                    size_t cMaxPages = RTR0MemObjSize(pBundle->aMem[i].MemObj) >> PAGE_SHIFT;
                    cPages = (uint32_t)RT_MIN(cMaxPages, cPages);
                    for (iPage = 0; iPage < cPages; iPage++)
                        paPages[iPage] = RTR0MemObjGetPagePhysAddr(pBundle->aMem[i].MemObj, iPage);
                    RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
                    return VINF_SUCCESS;
                }
            }
        }
    }
    RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
    return VERR_INVALID_PARAMETER;
}
2704#endif /* RT_OS_WINDOWS */
2705
2706
2707/**
2708 * Free memory allocated by SUPR0PageAlloc() and SUPR0PageAllocEx().
2709 *
2710 * @returns IPRT status code.
2711 * @param pSession The session owning the allocation.
2712 * @param pvR3 The Ring-3 address returned by SUPR0PageAlloc() or
2713 * SUPR0PageAllocEx().
2714 */
2715SUPR0DECL(int) SUPR0PageFree(PSUPDRVSESSION pSession, RTR3PTR pvR3)
2716{
2717 LogFlow(("SUPR0PageFree: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
2718 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2719 return supdrvMemRelease(pSession, (RTHCUINTPTR)pvR3, MEMREF_TYPE_PAGE);
2720}
2721
2722
2723/**
2724 * Maps the GIP into userspace and/or get the physical address of the GIP.
2725 *
2726 * @returns IPRT status code.
2727 * @param pSession Session to which the GIP mapping should belong.
2728 * @param ppGipR3 Where to store the address of the ring-3 mapping. (optional)
2729 * @param pHCPhysGip Where to store the physical address. (optional)
2730 *
2731 * @remark There is no reference counting on the mapping, so one call to this function
2732 * count globally as one reference. One call to SUPR0GipUnmap() is will unmap GIP
2733 * and remove the session as a GIP user.
2734 */
2735SUPR0DECL(int) SUPR0GipMap(PSUPDRVSESSION pSession, PRTR3PTR ppGipR3, PRTHCPHYS pHCPhysGip)
2736{
2737 int rc = 0;
2738 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
2739 RTR3PTR pGip = NIL_RTR3PTR;
2740 RTHCPHYS HCPhys = NIL_RTHCPHYS;
2741 LogFlow(("SUPR0GipMap: pSession=%p ppGipR3=%p pHCPhysGip=%p\n", pSession, ppGipR3, pHCPhysGip));
2742
2743 /*
2744 * Validate
2745 */
2746 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2747 AssertPtrNullReturn(ppGipR3, VERR_INVALID_POINTER);
2748 AssertPtrNullReturn(pHCPhysGip, VERR_INVALID_POINTER);
2749
2750 RTSemFastMutexRequest(pDevExt->mtxGip);
2751 if (pDevExt->pGip)
2752 {
2753 /*
2754 * Map it?
2755 */
2756 if (ppGipR3)
2757 {
2758 if (pSession->GipMapObjR3 == NIL_RTR0MEMOBJ)
2759 rc = RTR0MemObjMapUser(&pSession->GipMapObjR3, pDevExt->GipMemObj, (RTR3PTR)-1, 0,
2760 RTMEM_PROT_READ, RTR0ProcHandleSelf());
2761 if (RT_SUCCESS(rc))
2762 {
2763 pGip = RTR0MemObjAddressR3(pSession->GipMapObjR3);
2764 rc = VINF_SUCCESS; /** @todo remove this and replace the !rc below with RT_SUCCESS(rc). */
2765 }
2766 }
2767
2768 /*
2769 * Get physical address.
2770 */
2771 if (pHCPhysGip && !rc)
2772 HCPhys = pDevExt->HCPhysGip;
2773
2774 /*
2775 * Reference globally.
2776 */
2777 if (!pSession->fGipReferenced && !rc)
2778 {
2779 pSession->fGipReferenced = 1;
2780 pDevExt->cGipUsers++;
2781 if (pDevExt->cGipUsers == 1)
2782 {
2783 PSUPGLOBALINFOPAGE pGip = pDevExt->pGip;
2784 unsigned i;
2785
2786 LogFlow(("SUPR0GipMap: Resumes GIP updating\n"));
2787
2788 for (i = 0; i < RT_ELEMENTS(pGip->aCPUs); i++)
2789 ASMAtomicXchgU32(&pGip->aCPUs[i].u32TransactionId, pGip->aCPUs[i].u32TransactionId & ~(GIP_UPDATEHZ_RECALC_FREQ * 2 - 1));
2790 ASMAtomicXchgU64(&pGip->u64NanoTSLastUpdateHz, 0);
2791
2792 rc = RTTimerStart(pDevExt->pGipTimer, 0);
2793 AssertRC(rc); rc = VINF_SUCCESS;
2794 }
2795 }
2796 }
2797 else
2798 {
2799 rc = SUPDRV_ERR_GENERAL_FAILURE;
2800 Log(("SUPR0GipMap: GIP is not available!\n"));
2801 }
2802 RTSemFastMutexRelease(pDevExt->mtxGip);
2803
2804 /*
2805 * Write returns.
2806 */
2807 if (pHCPhysGip)
2808 *pHCPhysGip = HCPhys;
2809 if (ppGipR3)
2810 *ppGipR3 = pGip;
2811
2812#ifdef DEBUG_DARWIN_GIP
2813 OSDBGPRINT(("SUPR0GipMap: returns %d *pHCPhysGip=%lx *ppGip=%p GipMapObjR3\n", rc, (unsigned long)HCPhys, pGip, pSession->GipMapObjR3));
2814#else
2815 LogFlow(("SUPR0GipMap: returns %d *pHCPhysGip=%lx *ppGipR3=%p\n", rc, (unsigned long)HCPhys, (void *)(uintptr_t)pGip));
2816#endif
2817 return rc;
2818}
2819
2820
2821/**
2822 * Unmaps any user mapping of the GIP and terminates all GIP access
2823 * from this session.
2824 *
2825 * @returns IPRT status code.
2826 * @param pSession Session to which the GIP mapping should belong.
2827 */
SUPR0DECL(int) SUPR0GipUnmap(PSUPDRVSESSION pSession)
{
    int             rc = VINF_SUCCESS;
    PSUPDRVDEVEXT   pDevExt = pSession->pDevExt;
#ifdef DEBUG_DARWIN_GIP
    OSDBGPRINT(("SUPR0GipUnmap: pSession=%p pGip=%p GipMapObjR3=%p\n",
                pSession,
                pSession->GipMapObjR3 != NIL_RTR0MEMOBJ ? RTR0MemObjAddress(pSession->GipMapObjR3) : NULL,
                pSession->GipMapObjR3));
#else
    LogFlow(("SUPR0GipUnmap: pSession=%p\n", pSession));
#endif
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);

    RTSemFastMutexRequest(pDevExt->mtxGip);

    /*
     * Unmap the ring-3 mapping, if any.  The mapping object is only reset
     * on successful free so a failed free can be retried.
     */
    if (pSession->GipMapObjR3 != NIL_RTR0MEMOBJ)
    {
        rc = RTR0MemObjFree(pSession->GipMapObjR3, false);
        AssertRC(rc);
        if (RT_SUCCESS(rc))
            pSession->GipMapObjR3 = NIL_RTR0MEMOBJ;
    }

    /*
     * Dereference global GIP.  When the last user goes away the update
     * timer is stopped; its status is asserted but not propagated.
     */
    if (pSession->fGipReferenced && !rc)
    {
        pSession->fGipReferenced = 0;
        if (    pDevExt->cGipUsers > 0
            &&  !--pDevExt->cGipUsers)
        {
            LogFlow(("SUPR0GipUnmap: Suspends GIP updating\n"));
            rc = RTTimerStop(pDevExt->pGipTimer); AssertRC(rc); rc = 0;
        }
    }

    RTSemFastMutexRelease(pDevExt->mtxGip);

    return rc;
}
2873
2874
2875/**
2876 * Register a component factory with the support driver.
2877 *
2878 * This is currently restricted to kernel sessions only.
2879 *
2880 * @returns VBox status code.
2881 * @retval VINF_SUCCESS on success.
2882 * @retval VERR_NO_MEMORY if we're out of memory.
2883 * @retval VERR_ALREADY_EXISTS if the factory has already been registered.
2884 * @retval VERR_ACCESS_DENIED if it isn't a kernel session.
2885 * @retval VERR_INVALID_PARAMETER on invalid parameter.
2886 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
2887 *
2888 * @param pSession The SUPDRV session (must be a ring-0 session).
2889 * @param pFactory Pointer to the component factory registration structure.
2890 *
2891 * @remarks This interface is also available via SUPR0IdcComponentRegisterFactory.
2892 */
2893SUPR0DECL(int) SUPR0ComponentRegisterFactory(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory)
2894{
2895 PSUPDRVFACTORYREG pNewReg;
2896 const char *psz;
2897 int rc;
2898
2899 /*
2900 * Validate parameters.
2901 */
2902 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2903 AssertReturn(pSession->R0Process == NIL_RTR0PROCESS, VERR_ACCESS_DENIED);
2904 AssertPtrReturn(pFactory, VERR_INVALID_POINTER);
2905 AssertPtrReturn(pFactory->pfnQueryFactoryInterface, VERR_INVALID_POINTER);
2906 psz = (const char *)memchr(pFactory->szName, '\0', sizeof(pFactory->szName));
2907 AssertReturn(psz, VERR_INVALID_PARAMETER);
2908
2909 /*
2910 * Allocate and initialize a new registration structure.
2911 */
2912 pNewReg = (PSUPDRVFACTORYREG)RTMemAlloc(sizeof(SUPDRVFACTORYREG));
2913 if (pNewReg)
2914 {
2915 pNewReg->pNext = NULL;
2916 pNewReg->pFactory = pFactory;
2917 pNewReg->pSession = pSession;
2918 pNewReg->cchName = psz - &pFactory->szName[0];
2919
2920 /*
2921 * Add it to the tail of the list after checking for prior registration.
2922 */
2923 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
2924 if (RT_SUCCESS(rc))
2925 {
2926 PSUPDRVFACTORYREG pPrev = NULL;
2927 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
2928 while (pCur && pCur->pFactory != pFactory)
2929 {
2930 pPrev = pCur;
2931 pCur = pCur->pNext;
2932 }
2933 if (!pCur)
2934 {
2935 if (pPrev)
2936 pPrev->pNext = pNewReg;
2937 else
2938 pSession->pDevExt->pComponentFactoryHead = pNewReg;
2939 rc = VINF_SUCCESS;
2940 }
2941 else
2942 rc = VERR_ALREADY_EXISTS;
2943
2944 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
2945 }
2946
2947 if (RT_FAILURE(rc))
2948 RTMemFree(pNewReg);
2949 }
2950 else
2951 rc = VERR_NO_MEMORY;
2952 return rc;
2953}
2954
2955
2956/**
2957 * Deregister a component factory.
2958 *
2959 * @returns VBox status code.
2960 * @retval VINF_SUCCESS on success.
2961 * @retval VERR_NOT_FOUND if the factory wasn't registered.
2962 * @retval VERR_ACCESS_DENIED if it isn't a kernel session.
2963 * @retval VERR_INVALID_PARAMETER on invalid parameter.
2964 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
2965 *
2966 * @param pSession The SUPDRV session (must be a ring-0 session).
2967 * @param pFactory Pointer to the component factory registration structure
2968 * previously passed SUPR0ComponentRegisterFactory().
2969 *
2970 * @remarks This interface is also available via SUPR0IdcComponentDeregisterFactory.
2971 */
2972SUPR0DECL(int) SUPR0ComponentDeregisterFactory(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory)
2973{
2974 int rc;
2975
2976 /*
2977 * Validate parameters.
2978 */
2979 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2980 AssertReturn(pSession->R0Process == NIL_RTR0PROCESS, VERR_ACCESS_DENIED);
2981 AssertPtrReturn(pFactory, VERR_INVALID_POINTER);
2982
2983 /*
2984 * Take the lock and look for the registration record.
2985 */
2986 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
2987 if (RT_SUCCESS(rc))
2988 {
2989 PSUPDRVFACTORYREG pPrev = NULL;
2990 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
2991 while (pCur && pCur->pFactory != pFactory)
2992 {
2993 pPrev = pCur;
2994 pCur = pCur->pNext;
2995 }
2996 if (pCur)
2997 {
2998 if (!pPrev)
2999 pSession->pDevExt->pComponentFactoryHead = pCur->pNext;
3000 else
3001 pPrev->pNext = pCur->pNext;
3002
3003 pCur->pNext = NULL;
3004 pCur->pFactory = NULL;
3005 pCur->pSession = NULL;
3006 rc = VINF_SUCCESS;
3007 }
3008 else
3009 rc = VERR_NOT_FOUND;
3010
3011 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
3012
3013 RTMemFree(pCur);
3014 }
3015 return rc;
3016}
3017
3018
3019/**
3020 * Queries a component factory.
3021 *
3022 * @returns VBox status code.
3023 * @retval VERR_INVALID_PARAMETER on invalid parameter.
3024 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
3025 * @retval VERR_SUPDRV_COMPONENT_NOT_FOUND if the component factory wasn't found.
3026 * @retval VERR_SUPDRV_INTERFACE_NOT_SUPPORTED if the interface wasn't supported.
3027 *
3028 * @param pSession The SUPDRV session.
3029 * @param pszName The name of the component factory.
3030 * @param pszInterfaceUuid The UUID of the factory interface (stringified).
3031 * @param ppvFactoryIf Where to store the factory interface.
3032 */
SUPR0DECL(int) SUPR0ComponentQueryFactory(PSUPDRVSESSION pSession, const char *pszName, const char *pszInterfaceUuid, void **ppvFactoryIf)
{
    const char     *pszEnd;
    size_t          cchName;
    int             rc;

    /*
     * Validate parameters.  Both strings must be terminated within their
     * respective maximum lengths.
     */
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);

    AssertPtrReturn(pszName, VERR_INVALID_POINTER);
    pszEnd = memchr(pszName, '\0', RT_SIZEOFMEMB(SUPDRVFACTORY, szName));
    AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
    cchName = pszEnd - pszName;

    AssertPtrReturn(pszInterfaceUuid, VERR_INVALID_POINTER);
    pszEnd = memchr(pszInterfaceUuid, '\0', RTUUID_STR_LENGTH);
    AssertReturn(pszEnd, VERR_INVALID_PARAMETER);

    AssertPtrReturn(ppvFactoryIf, VERR_INVALID_POINTER);
    *ppvFactoryIf = NULL;

    /*
     * Take the lock and try all factories by this name.  Several factories
     * may share a name; the first one supporting the interface wins.
     */
    rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
    if (RT_SUCCESS(rc))
    {
        PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
        rc = VERR_SUPDRV_COMPONENT_NOT_FOUND;
        while (pCur)
        {
            if (    pCur->cchName == cchName
                &&  !memcmp(pCur->pFactory->szName, pszName, cchName))
            {
#ifdef RT_WITH_W64_UNWIND_HACK
                void *pvFactory = supdrvNtWrapQueryFactoryInterface((PFNRT)pCur->pFactory->pfnQueryFactoryInterface, pCur->pFactory, pSession, pszInterfaceUuid);
#else
                void *pvFactory = pCur->pFactory->pfnQueryFactoryInterface(pCur->pFactory, pSession, pszInterfaceUuid);
#endif
                if (pvFactory)
                {
                    *ppvFactoryIf = pvFactory;
                    rc = VINF_SUCCESS;
                    break;
                }
                /* Name matched but the interface didn't - keep looking. */
                rc = VERR_SUPDRV_INTERFACE_NOT_SUPPORTED;
            }

            /* next */
            pCur = pCur->pNext;
        }

        RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
    }
    return rc;
}
3091
3092
3093/**
3094 * Adds a memory object to the session.
3095 *
3096 * @returns IPRT status code.
3097 * @param pMem Memory tracking structure containing the
3098 * information to track.
3099 * @param pSession The session.
3100 */
static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession)
{
    PSUPDRVBUNDLE pBundle;
    RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;

    /*
     * Find a free entry in one of the existing bundles and record the
     * allocation there.
     */
    RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
    for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
    {
        if (pBundle->cUsed < RT_ELEMENTS(pBundle->aMem))
        {
            unsigned i;
            for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
            {
                if (pBundle->aMem[i].MemObj == NIL_RTR0MEMOBJ)
                {
                    pBundle->cUsed++;
                    pBundle->aMem[i] = *pMem;
                    RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
                    return VINF_SUCCESS;
                }
            }
            /* cUsed said there was room, yet no free slot found - bookkeeping bug. */
            AssertFailed(); /* !!this can't be happening!!! */
        }
    }
    RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);

    /*
     * All bundles are full - allocate a new one.  This is done outside the
     * spinlock since RTMemAllocZ may not be callable at elevated IRQL.
     * Insert into the last entry in the bundle.
     */
    pBundle = (PSUPDRVBUNDLE)RTMemAllocZ(sizeof(*pBundle));
    if (!pBundle)
        return VERR_NO_MEMORY;

    /* take last entry. */
    pBundle->cUsed++;
    pBundle->aMem[RT_ELEMENTS(pBundle->aMem) - 1] = *pMem;

    /* insert into list. */
    RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
    pBundle->pNext = pSession->Bundle.pNext;
    pSession->Bundle.pNext = pBundle;
    RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);

    return VINF_SUCCESS;
}
3150
3151
3152/**
3153 * Releases a memory object referenced by pointer and type.
3154 *
3155 * @returns IPRT status code.
3156 * @param pSession Session data.
3157 * @param uPtr Pointer to memory. This is matched against both the R0 and R3 addresses.
3158 * @param eType Memory type.
3159 */
static int supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType)
{
    PSUPDRVBUNDLE pBundle;
    RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;

    /*
     * Validate input.
     */
    if (!uPtr)
    {
        Log(("Illegal address %p\n", (void *)uPtr));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Search for the address, matching either the ring-0 address of the
     * memory object or the ring-3 address of its user mapping.
     */
    RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
    for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
    {
        if (pBundle->cUsed > 0)
        {
            unsigned i;
            for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
            {
                if (    pBundle->aMem[i].eType == eType
                    &&  pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
                    &&  (   (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
                         || (   pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
                             && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == uPtr))
                    )
                {
                    /* Make a copy of it and release it outside the spinlock. */
                    SUPDRVMEMREF Mem = pBundle->aMem[i];
                    pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
                    pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
                    pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
                    RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);

                    /* Free the ring-3 mapping first, then the object itself
                       (with fFreeMappings so any kernel mappings go too). */
                    if (Mem.MapObjR3 != NIL_RTR0MEMOBJ)
                    {
                        int rc = RTR0MemObjFree(Mem.MapObjR3, false);
                        AssertRC(rc); /** @todo figure out how to handle this. */
                    }
                    if (Mem.MemObj != NIL_RTR0MEMOBJ)
                    {
                        int rc = RTR0MemObjFree(Mem.MemObj, true /* fFreeMappings */);
                        AssertRC(rc); /** @todo figure out how to handle this. */
                    }
                    return VINF_SUCCESS;
                }
            }
        }
    }
    RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
    Log(("Failed to find %p!!! (eType=%d)\n", (void *)uPtr, eType));
    return VERR_INVALID_PARAMETER;
}
3218
3219
3220/**
3221 * Opens an image. If it's the first time it's opened the call must upload
3222 * the bits using the supdrvIOCtl_LdrLoad() / SUPDRV_IOCTL_LDR_LOAD function.
3223 *
3224 * This is the 1st step of the loading.
3225 *
3226 * @returns IPRT status code.
3227 * @param pDevExt Device globals.
3228 * @param pSession Session data.
3229 * @param pReq The open request.
3230 */
static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN pReq)
{
    PSUPDRVLDRIMAGE pImage;
    unsigned        cb;
    void           *pv;
    LogFlow(("supdrvIOCtl_LdrOpen: szName=%s cbImage=%d\n", pReq->u.In.szName, pReq->u.In.cbImage));

    /*
     * Check if we got an instance of the image already.  If so, just bump
     * the usage counts and tell the caller whether the bits are needed.
     */
    RTSemFastMutexRequest(pDevExt->mtxLdr);
    for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
    {
        if (!strcmp(pImage->szName, pReq->u.In.szName))
        {
            pImage->cUsage++;
            pReq->u.Out.pvImageBase   = pImage->pvImage;
            pReq->u.Out.fNeedsLoading = pImage->uState == SUP_IOCTL_LDR_OPEN;
            supdrvLdrAddUsage(pSession, pImage);
            RTSemFastMutexRelease(pDevExt->mtxLdr);
            return VINF_SUCCESS;
        }
    }
    /* (not found - add it!) */

    /*
     * Allocate executable memory for the tracking structure followed by the
     * image bits; +31 leaves room to 32-byte align the image start.
     * NOTE(review): assumes the IOCtl layer has validated cbImage and the
     * termination of szName before this is called - confirm against caller.
     */
    cb = pReq->u.In.cbImage + sizeof(SUPDRVLDRIMAGE) + 31;
    pv = RTMemExecAlloc(cb);
    if (!pv)
    {
        RTSemFastMutexRelease(pDevExt->mtxLdr);
        Log(("supdrvIOCtl_LdrOpen: RTMemExecAlloc(%u) failed\n", cb));
        return VERR_NO_MEMORY;
    }

    /*
     * Setup and link in the LDR stuff.
     */
    pImage = (PSUPDRVLDRIMAGE)pv;
    pImage->pvImage = RT_ALIGN_P(pImage + 1, 32);
    pImage->cbImage = pReq->u.In.cbImage;
    pImage->pfnModuleInit = NULL;
    pImage->pfnModuleTerm = NULL;
    pImage->pfnServiceReqHandler = NULL;
    pImage->uState = SUP_IOCTL_LDR_OPEN;
    pImage->cUsage = 1;
    strcpy(pImage->szName, pReq->u.In.szName);

    pImage->pNext = pDevExt->pLdrImages;
    pDevExt->pLdrImages = pImage;

    supdrvLdrAddUsage(pSession, pImage);

    pReq->u.Out.pvImageBase = pImage->pvImage;
    pReq->u.Out.fNeedsLoading = true;
    RTSemFastMutexRelease(pDevExt->mtxLdr);

#if defined(RT_OS_WINDOWS) && defined(DEBUG)
    SUPR0Printf("VBoxDrv: windbg> .reload /f %s=%#p\n", pImage->szName, pImage->pvImage);
#endif
    return VINF_SUCCESS;
}
3295
3296
3297/**
3298 * Loads the image bits.
3299 *
3300 * This is the 2nd step of the loading.
3301 *
3302 * @returns IPRT status code.
3303 * @param pDevExt Device globals.
3304 * @param pSession Session data.
3305 * @param pReq The request.
3306 */
3307static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD pReq)
3308{
3309 PSUPDRVLDRUSAGE pUsage;
3310 PSUPDRVLDRIMAGE pImage;
3311 int rc;
3312 LogFlow(("supdrvIOCtl_LdrLoad: pvImageBase=%p cbImage=%d\n", pReq->u.In.pvImageBase, pReq->u.In.cbImage));
3313
3314 /*
3315 * Find the ldr image.
3316 */
3317 RTSemFastMutexRequest(pDevExt->mtxLdr);
3318 pUsage = pSession->pLdrUsage;
3319 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
3320 pUsage = pUsage->pNext;
3321 if (!pUsage)
3322 {
3323 RTSemFastMutexRelease(pDevExt->mtxLdr);
3324 Log(("SUP_IOCTL_LDR_LOAD: couldn't find image!\n"));
3325 return VERR_INVALID_HANDLE;
3326 }
3327 pImage = pUsage->pImage;
3328 if (pImage->cbImage != pReq->u.In.cbImage)
3329 {
3330 RTSemFastMutexRelease(pDevExt->mtxLdr);
3331 Log(("SUP_IOCTL_LDR_LOAD: image size mismatch!! %d(prep) != %d(load)\n", pImage->cbImage, pReq->u.In.cbImage));
3332 return VERR_INVALID_HANDLE;
3333 }
3334 if (pImage->uState != SUP_IOCTL_LDR_OPEN)
3335 {
3336 unsigned uState = pImage->uState;
3337 RTSemFastMutexRelease(pDevExt->mtxLdr);
3338 if (uState != SUP_IOCTL_LDR_LOAD)
3339 AssertMsgFailed(("SUP_IOCTL_LDR_LOAD: invalid image state %d (%#x)!\n", uState, uState));
3340 return SUPDRV_ERR_ALREADY_LOADED;
3341 }
3342 switch (pReq->u.In.eEPType)
3343 {
3344 case SUPLDRLOADEP_NOTHING:
3345 break;
3346
3347 case SUPLDRLOADEP_VMMR0:
3348 if ( !pReq->u.In.EP.VMMR0.pvVMMR0
3349 || !pReq->u.In.EP.VMMR0.pvVMMR0EntryInt
3350 || !pReq->u.In.EP.VMMR0.pvVMMR0EntryFast
3351 || !pReq->u.In.EP.VMMR0.pvVMMR0EntryEx)
3352 {
3353 RTSemFastMutexRelease(pDevExt->mtxLdr);
3354 Log(("NULL pointer: pvVMMR0=%p pvVMMR0EntryInt=%p pvVMMR0EntryFast=%p pvVMMR0EntryEx=%p!\n",
3355 pReq->u.In.EP.VMMR0.pvVMMR0, pReq->u.In.EP.VMMR0.pvVMMR0EntryInt,
3356 pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx));
3357 return VERR_INVALID_PARAMETER;
3358 }
3359 /** @todo validate pReq->u.In.EP.VMMR0.pvVMMR0 against pvImage! */
3360 if ( (uintptr_t)pReq->u.In.EP.VMMR0.pvVMMR0EntryInt - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage
3361 || (uintptr_t)pReq->u.In.EP.VMMR0.pvVMMR0EntryFast - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage
3362 || (uintptr_t)pReq->u.In.EP.VMMR0.pvVMMR0EntryEx - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage)
3363 {
3364 RTSemFastMutexRelease(pDevExt->mtxLdr);
3365 Log(("Out of range (%p LB %#x): pvVMMR0EntryInt=%p, pvVMMR0EntryFast=%p or pvVMMR0EntryEx=%p is NULL!\n",
3366 pImage->pvImage, pReq->u.In.cbImage, pReq->u.In.EP.VMMR0.pvVMMR0EntryInt,
3367 pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx));
3368 return VERR_INVALID_PARAMETER;
3369 }
3370 break;
3371
3372 case SUPLDRLOADEP_SERVICE:
3373 if (!pReq->u.In.EP.Service.pfnServiceReq)
3374 {
3375 RTSemFastMutexRelease(pDevExt->mtxLdr);
3376 Log(("NULL pointer: pfnServiceReq=%p!\n", pReq->u.In.EP.Service.pfnServiceReq));
3377 return VERR_INVALID_PARAMETER;
3378 }
3379 if ((uintptr_t)pReq->u.In.EP.Service.pfnServiceReq - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage)
3380 {
3381 RTSemFastMutexRelease(pDevExt->mtxLdr);
3382 Log(("Out of range (%p LB %#x): pfnServiceReq=%p, pvVMMR0EntryFast=%p or pvVMMR0EntryEx=%p is NULL!\n",
3383 pImage->pvImage, pReq->u.In.cbImage, pReq->u.In.EP.Service.pfnServiceReq));
3384 return VERR_INVALID_PARAMETER;
3385 }
3386 if ( pReq->u.In.EP.Service.apvReserved[0] != NIL_RTR0PTR
3387 || pReq->u.In.EP.Service.apvReserved[1] != NIL_RTR0PTR
3388 || pReq->u.In.EP.Service.apvReserved[2] != NIL_RTR0PTR)
3389 {
3390 RTSemFastMutexRelease(pDevExt->mtxLdr);
3391 Log(("Out of range (%p LB %#x): apvReserved={%p,%p,%p} MBZ!\n",
3392 pImage->pvImage, pReq->u.In.cbImage,
3393 pReq->u.In.EP.Service.apvReserved[0],
3394 pReq->u.In.EP.Service.apvReserved[1],
3395 pReq->u.In.EP.Service.apvReserved[2]));
3396 return VERR_INVALID_PARAMETER;
3397 }
3398 break;
3399
3400 default:
3401 RTSemFastMutexRelease(pDevExt->mtxLdr);
3402 Log(("Invalid eEPType=%d\n", pReq->u.In.eEPType));
3403 return VERR_INVALID_PARAMETER;
3404 }
3405 if ( pReq->u.In.pfnModuleInit
3406 && (uintptr_t)pReq->u.In.pfnModuleInit - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage)
3407 {
3408 RTSemFastMutexRelease(pDevExt->mtxLdr);
3409 Log(("SUP_IOCTL_LDR_LOAD: pfnModuleInit=%p is outside the image (%p %d bytes)\n",
3410 pReq->u.In.pfnModuleInit, pImage->pvImage, pReq->u.In.cbImage));
3411 return VERR_INVALID_PARAMETER;
3412 }
3413 if ( pReq->u.In.pfnModuleTerm
3414 && (uintptr_t)pReq->u.In.pfnModuleTerm - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage)
3415 {
3416 RTSemFastMutexRelease(pDevExt->mtxLdr);
3417 Log(("SUP_IOCTL_LDR_LOAD: pfnModuleTerm=%p is outside the image (%p %d bytes)\n",
3418 pReq->u.In.pfnModuleTerm, pImage->pvImage, pReq->u.In.cbImage));
3419 return VERR_INVALID_PARAMETER;
3420 }
3421
3422 /*
3423 * Copy the memory.
3424 */
3425 /* no need to do try/except as this is a buffered request. */
3426 memcpy(pImage->pvImage, &pReq->u.In.achImage[0], pImage->cbImage);
3427 pImage->uState = SUP_IOCTL_LDR_LOAD;
3428 pImage->pfnModuleInit = pReq->u.In.pfnModuleInit;
3429 pImage->pfnModuleTerm = pReq->u.In.pfnModuleTerm;
3430 pImage->offSymbols = pReq->u.In.offSymbols;
3431 pImage->cSymbols = pReq->u.In.cSymbols;
3432 pImage->offStrTab = pReq->u.In.offStrTab;
3433 pImage->cbStrTab = pReq->u.In.cbStrTab;
3434
3435 /*
3436 * Update any entry points.
3437 */
3438 switch (pReq->u.In.eEPType)
3439 {
3440 default:
3441 case SUPLDRLOADEP_NOTHING:
3442 rc = VINF_SUCCESS;
3443 break;
3444 case SUPLDRLOADEP_VMMR0:
3445 rc = supdrvLdrSetVMMR0EPs(pDevExt, pReq->u.In.EP.VMMR0.pvVMMR0, pReq->u.In.EP.VMMR0.pvVMMR0EntryInt,
3446 pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx);
3447 break;
3448 case SUPLDRLOADEP_SERVICE:
3449 pImage->pfnServiceReqHandler = pReq->u.In.EP.Service.pfnServiceReq;
3450 rc = VINF_SUCCESS;
3451 break;
3452 }
3453
3454 /*
3455 * On success call the module initialization.
3456 */
3457 LogFlow(("supdrvIOCtl_LdrLoad: pfnModuleInit=%p\n", pImage->pfnModuleInit));
3458 if (RT_SUCCESS(rc) && pImage->pfnModuleInit)
3459 {
3460 Log(("supdrvIOCtl_LdrLoad: calling pfnModuleInit=%p\n", pImage->pfnModuleInit));
3461#ifdef RT_WITH_W64_UNWIND_HACK
3462 rc = supdrvNtWrapModuleInit((PFNRT)pImage->pfnModuleInit);
3463#else
3464 rc = pImage->pfnModuleInit();
3465#endif
3466 if (rc && pDevExt->pvVMMR0 == pImage->pvImage)
3467 supdrvLdrUnsetVMMR0EPs(pDevExt);
3468 }
3469
3470 if (rc)
3471 pImage->uState = SUP_IOCTL_LDR_OPEN;
3472
3473 RTSemFastMutexRelease(pDevExt->mtxLdr);
3474 return rc;
3475}
3476
3477
/**
 * Frees a previously loaded (prep'ed) image.
 *
 * The image is only actually freed when the last usage reference (both the
 * image's global count and this session's count) is gone AND no registered
 * object has a destructor pointing into the image.
 *
 * @returns IPRT status code.
 * @param   pDevExt     Device globals.
 * @param   pSession    Session data.
 * @param   pReq        The request.
 */
static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE pReq)
{
    int             rc;
    PSUPDRVLDRUSAGE pUsagePrev;
    PSUPDRVLDRUSAGE pUsage;
    PSUPDRVLDRIMAGE pImage;
    LogFlow(("supdrvIOCtl_LdrFree: pvImageBase=%p\n", pReq->u.In.pvImageBase));

    /*
     * Find the ldr image in this session's usage list (singly linked,
     * so remember the predecessor for unlinking).
     */
    RTSemFastMutexRequest(pDevExt->mtxLdr);
    pUsagePrev = NULL;
    pUsage = pSession->pLdrUsage;
    while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
    {
        pUsagePrev = pUsage;
        pUsage = pUsage->pNext;
    }
    if (!pUsage)
    {
        RTSemFastMutexRelease(pDevExt->mtxLdr);
        Log(("SUP_IOCTL_LDR_FREE: couldn't find image!\n"));
        return VERR_INVALID_HANDLE;
    }

    /*
     * Check if we can remove anything.
     */
    rc = VINF_SUCCESS;
    pImage = pUsage->pImage;
    if (pImage->cUsage <= 1 || pUsage->cUsage <= 1)
    {
        /*
         * Check if there are any objects with destructors in the image, if
         * so leave it for the session cleanup routine so we get a chance to
         * clean things up in the right order and not leave them all dangling.
         *
         * When the image is about to die entirely we scan the global object
         * list; when only this session's reference dies we scan the session's
         * object usage list.
         */
        RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
        RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
        if (pImage->cUsage <= 1)
        {
            PSUPDRVOBJ pObj;
            for (pObj = pDevExt->pObjs; pObj; pObj = pObj->pNext)
                /* unsigned wrap trick: true iff pfnDestructor lies inside [pvImage, pvImage+cbImage). */
                if (RT_UNLIKELY((uintptr_t)pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImage))
                {
                    rc = VERR_SHARING_VIOLATION; /** @todo VERR_DANGLING_OBJECTS */
                    break;
                }
        }
        else
        {
            PSUPDRVUSAGE pGenUsage;
            for (pGenUsage = pSession->pUsage; pGenUsage; pGenUsage = pGenUsage->pNext)
                if (RT_UNLIKELY((uintptr_t)pGenUsage->pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImage))
                {
                    rc = VERR_SHARING_VIOLATION; /** @todo VERR_DANGLING_OBJECTS */
                    break;
                }
        }
        RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
        if (rc == VINF_SUCCESS)
        {
            /* unlink it */
            if (pUsagePrev)
                pUsagePrev->pNext = pUsage->pNext;
            else
                pSession->pLdrUsage = pUsage->pNext;

            /* free it */
            pUsage->pImage = NULL;
            pUsage->pNext = NULL;
            RTMemFree(pUsage);

            /*
             * Dereference the image.
             */
            if (pImage->cUsage <= 1)
                supdrvLdrFree(pDevExt, pImage);
            else
                pImage->cUsage--;
        }
        else
            Log(("supdrvIOCtl_LdrFree: Dangling objects in %p/%s!\n", pImage->pvImage, pImage->szName));
    }
    else
    {
        /*
         * Dereference both image and usage.
         */
        pImage->cUsage--;
        pUsage->cUsage--;
    }

    RTSemFastMutexRelease(pDevExt->mtxLdr);
    /* NOTE(review): VINF_SUCCESS is returned even when rc == VERR_SHARING_VIOLATION
       above (dangling objects); the caller cannot tell the image was kept alive.
       Looks deliberate (session cleanup will retry) -- confirm. */
    return VINF_SUCCESS;
}
3583
3584
3585/**
3586 * Gets the address of a symbol in an open image.
3587 *
3588 * @returns 0 on success.
3589 * @returns SUPDRV_ERR_* on failure.
3590 * @param pDevExt Device globals.
3591 * @param pSession Session data.
3592 * @param pReq The request buffer.
3593 */
3594static int supdrvIOCtl_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq)
3595{
3596 PSUPDRVLDRIMAGE pImage;
3597 PSUPDRVLDRUSAGE pUsage;
3598 uint32_t i;
3599 PSUPLDRSYM paSyms;
3600 const char *pchStrings;
3601 const size_t cbSymbol = strlen(pReq->u.In.szSymbol) + 1;
3602 void *pvSymbol = NULL;
3603 int rc = VERR_GENERAL_FAILURE;
3604 Log3(("supdrvIOCtl_LdrGetSymbol: pvImageBase=%p szSymbol=\"%s\"\n", pReq->u.In.pvImageBase, pReq->u.In.szSymbol));
3605
3606 /*
3607 * Find the ldr image.
3608 */
3609 RTSemFastMutexRequest(pDevExt->mtxLdr);
3610 pUsage = pSession->pLdrUsage;
3611 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
3612 pUsage = pUsage->pNext;
3613 if (!pUsage)
3614 {
3615 RTSemFastMutexRelease(pDevExt->mtxLdr);
3616 Log(("SUP_IOCTL_LDR_GET_SYMBOL: couldn't find image!\n"));
3617 return VERR_INVALID_HANDLE;
3618 }
3619 pImage = pUsage->pImage;
3620 if (pImage->uState != SUP_IOCTL_LDR_LOAD)
3621 {
3622 unsigned uState = pImage->uState;
3623 RTSemFastMutexRelease(pDevExt->mtxLdr);
3624 Log(("SUP_IOCTL_LDR_GET_SYMBOL: invalid image state %d (%#x)!\n", uState, uState)); NOREF(uState);
3625 return VERR_ALREADY_LOADED;
3626 }
3627
3628 /*
3629 * Search the symbol strings.
3630 */
3631 pchStrings = (const char *)((uint8_t *)pImage->pvImage + pImage->offStrTab);
3632 paSyms = (PSUPLDRSYM)((uint8_t *)pImage->pvImage + pImage->offSymbols);
3633 for (i = 0; i < pImage->cSymbols; i++)
3634 {
3635 if ( paSyms[i].offSymbol < pImage->cbImage /* paranoia */
3636 && paSyms[i].offName + cbSymbol <= pImage->cbStrTab
3637 && !memcmp(pchStrings + paSyms[i].offName, pReq->u.In.szSymbol, cbSymbol))
3638 {
3639 pvSymbol = (uint8_t *)pImage->pvImage + paSyms[i].offSymbol;
3640 rc = VINF_SUCCESS;
3641 break;
3642 }
3643 }
3644 RTSemFastMutexRelease(pDevExt->mtxLdr);
3645 pReq->u.Out.pvSymbol = pvSymbol;
3646 return rc;
3647}
3648
3649
3650/**
3651 * Gets the address of a symbol in an open image or the support driver.
3652 *
3653 * @returns VINF_SUCCESS on success.
3654 * @returns
3655 * @param pDevExt Device globals.
3656 * @param pSession Session data.
3657 * @param pReq The request buffer.
3658 */
3659static int supdrvIDC_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQGETSYM pReq)
3660{
3661 int rc = VINF_SUCCESS;
3662 const char *pszSymbol = pReq->u.In.pszSymbol;
3663 const char *pszModule = pReq->u.In.pszModule;
3664 size_t cbSymbol;
3665 char const *pszEnd;
3666 uint32_t i;
3667
3668 /*
3669 * Input validation.
3670 */
3671 AssertPtrReturn(pszSymbol, VERR_INVALID_POINTER);
3672 pszEnd = (char *)memchr(pszSymbol, '\0', 512);
3673 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
3674 cbSymbol = pszEnd - pszSymbol + 1;
3675
3676 if (pszModule)
3677 {
3678 AssertPtrReturn(pszModule, VERR_INVALID_POINTER);
3679 pszEnd = (char *)memchr(pszModule, '\0', 64);
3680 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
3681 }
3682 Log3(("supdrvIDC_LdrGetSymbol: pszModule=%p:{%s} pszSymbol=%p:{%s}\n", pszModule, pszModule, pszSymbol, pszSymbol));
3683
3684
3685 if ( !pszModule
3686 || !strcmp(pszModule, "SupDrv"))
3687 {
3688 /*
3689 * Search the support driver export table.
3690 */
3691 for (i = 0; i < RT_ELEMENTS(g_aFunctions); i++)
3692 if (!strcmp(g_aFunctions[i].szName, pszSymbol))
3693 {
3694 pReq->u.Out.pfnSymbol = g_aFunctions[i].pfn;
3695 break;
3696 }
3697 }
3698 else
3699 {
3700 /*
3701 * Find the loader image.
3702 */
3703 PSUPDRVLDRIMAGE pImage;
3704
3705 RTSemFastMutexRequest(pDevExt->mtxLdr);
3706
3707 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
3708 if (!strcmp(pImage->szName, pszModule))
3709 break;
3710 if (pImage && pImage->uState == SUP_IOCTL_LDR_LOAD)
3711 {
3712 /*
3713 * Search the symbol strings.
3714 */
3715 const char *pchStrings = (const char *)((uint8_t *)pImage->pvImage + pImage->offStrTab);
3716 PCSUPLDRSYM paSyms = (PCSUPLDRSYM)((uint8_t *)pImage->pvImage + pImage->offSymbols);
3717 for (i = 0; i < pImage->cSymbols; i++)
3718 {
3719 if ( paSyms[i].offSymbol < pImage->cbImage /* paranoia */
3720 && paSyms[i].offName + cbSymbol <= pImage->cbStrTab
3721 && !memcmp(pchStrings + paSyms[i].offName, pszSymbol, cbSymbol))
3722 {
3723 /*
3724 * Found it! Calc the symbol address and add a reference to the module.
3725 */
3726 pReq->u.Out.pfnSymbol = (PFNRT)((uint8_t *)pImage->pvImage + paSyms[i].offSymbol);
3727 rc = supdrvLdrAddUsage(pSession, pImage);
3728 break;
3729 }
3730 }
3731 }
3732 else
3733 rc = pImage ? VERR_WRONG_ORDER : VERR_MODULE_NOT_FOUND;
3734
3735 RTSemFastMutexRelease(pDevExt->mtxLdr);
3736 }
3737 return rc;
3738}
3739
3740
3741/**
3742 * Updates the VMMR0 entry point pointers.
3743 *
3744 * @returns IPRT status code.
3745 * @param pDevExt Device globals.
3746 * @param pSession Session data.
3747 * @param pVMMR0 VMMR0 image handle.
3748 * @param pvVMMR0EntryInt VMMR0EntryInt address.
3749 * @param pvVMMR0EntryFast VMMR0EntryFast address.
3750 * @param pvVMMR0EntryEx VMMR0EntryEx address.
3751 * @remark Caller must own the loader mutex.
3752 */
3753static int supdrvLdrSetVMMR0EPs(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0EntryInt, void *pvVMMR0EntryFast, void *pvVMMR0EntryEx)
3754{
3755 int rc = VINF_SUCCESS;
3756 LogFlow(("supdrvLdrSetR0EP pvVMMR0=%p pvVMMR0EntryInt=%p\n", pvVMMR0, pvVMMR0EntryInt));
3757
3758
3759 /*
3760 * Check if not yet set.
3761 */
3762 if (!pDevExt->pvVMMR0)
3763 {
3764 pDevExt->pvVMMR0 = pvVMMR0;
3765 pDevExt->pfnVMMR0EntryInt = pvVMMR0EntryInt;
3766 pDevExt->pfnVMMR0EntryFast = pvVMMR0EntryFast;
3767 pDevExt->pfnVMMR0EntryEx = pvVMMR0EntryEx;
3768 }
3769 else
3770 {
3771 /*
3772 * Return failure or success depending on whether the values match or not.
3773 */
3774 if ( pDevExt->pvVMMR0 != pvVMMR0
3775 || (void *)pDevExt->pfnVMMR0EntryInt != pvVMMR0EntryInt
3776 || (void *)pDevExt->pfnVMMR0EntryFast != pvVMMR0EntryFast
3777 || (void *)pDevExt->pfnVMMR0EntryEx != pvVMMR0EntryEx)
3778 {
3779 AssertMsgFailed(("SUP_IOCTL_LDR_SETR0EP: Already set pointing to a different module!\n"));
3780 rc = VERR_INVALID_PARAMETER;
3781 }
3782 }
3783 return rc;
3784}
3785
3786
3787/**
3788 * Unsets the VMMR0 entry point installed by supdrvLdrSetR0EP.
3789 *
3790 * @param pDevExt Device globals.
3791 */
3792static void supdrvLdrUnsetVMMR0EPs(PSUPDRVDEVEXT pDevExt)
3793{
3794 pDevExt->pvVMMR0 = NULL;
3795 pDevExt->pfnVMMR0EntryInt = NULL;
3796 pDevExt->pfnVMMR0EntryFast = NULL;
3797 pDevExt->pfnVMMR0EntryEx = NULL;
3798}
3799
3800
3801/**
3802 * Adds a usage reference in the specified session of an image.
3803 *
3804 * Called while owning the loader semaphore.
3805 *
3806 * @returns VINF_SUCCESS on success and VERR_NO_MEMORY on failure.
3807 * @param pSession Session in question.
3808 * @param pImage Image which the session is using.
3809 */
3810static int supdrvLdrAddUsage(PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage)
3811{
3812 PSUPDRVLDRUSAGE pUsage;
3813 LogFlow(("supdrvLdrAddUsage: pImage=%p\n", pImage));
3814
3815 /*
3816 * Referenced it already?
3817 */
3818 pUsage = pSession->pLdrUsage;
3819 while (pUsage)
3820 {
3821 if (pUsage->pImage == pImage)
3822 {
3823 pUsage->cUsage++;
3824 return VINF_SUCCESS;
3825 }
3826 pUsage = pUsage->pNext;
3827 }
3828
3829 /*
3830 * Allocate new usage record.
3831 */
3832 pUsage = (PSUPDRVLDRUSAGE)RTMemAlloc(sizeof(*pUsage));
3833 AssertReturn(pUsage, VERR_NO_MEMORY);
3834 pUsage->cUsage = 1;
3835 pUsage->pImage = pImage;
3836 pUsage->pNext = pSession->pLdrUsage;
3837 pSession->pLdrUsage = pUsage;
3838 return VINF_SUCCESS;
3839}
3840
3841
/**
 * Frees a load image.
 *
 * Unlinks the image from the global list, clears the VMMR0 entry points if
 * this was the VMMR0 image, neutralizes any object destructors pointing into
 * the image, calls the module termination function (if fully loaded), and
 * finally frees the executable memory.
 *
 * @param   pDevExt     Pointer to device extension.
 * @param   pImage      Pointer to the image we're gonna free.
 *                      This image must exist!
 * @remark  The caller MUST own SUPDRVDEVEXT::mtxLdr!
 */
static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage)
{
    PSUPDRVLDRIMAGE pImagePrev;
    LogFlow(("supdrvLdrFree: pImage=%p\n", pImage));

    /* find it - arg. should've used doubly linked list. */
    Assert(pDevExt->pLdrImages);
    pImagePrev = NULL;
    if (pDevExt->pLdrImages != pImage)
    {
        pImagePrev = pDevExt->pLdrImages;
        while (pImagePrev->pNext != pImage)
            pImagePrev = pImagePrev->pNext;
        Assert(pImagePrev->pNext == pImage);
    }

    /* unlink */
    if (pImagePrev)
        pImagePrev->pNext = pImage->pNext;
    else
        pDevExt->pLdrImages = pImage->pNext;

    /* check if this is VMMR0.r0; if so, unset its entry point pointers. */
    if (pDevExt->pvVMMR0 == pImage->pvImage)
        supdrvLdrUnsetVMMR0EPs(pDevExt);

    /* check for objects with destructors in this image. (Shouldn't happen.) */
    if (pDevExt->pObjs)
    {
        unsigned        cObjs = 0;
        PSUPDRVOBJ      pObj;
        RTSPINLOCKTMP   SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
        RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
        for (pObj = pDevExt->pObjs; pObj; pObj = pObj->pNext)
            /* unsigned wrap trick: true iff pfnDestructor lies inside [pvImage, pvImage+cbImage). */
            if (RT_UNLIKELY((uintptr_t)pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImage))
            {
                /* Neutralize it so we don't call into unmapped code later. */
                pObj->pfnDestructor = NULL;
                cObjs++;
            }
        RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
        if (cObjs)
            OSDBGPRINT(("supdrvLdrFree: Image '%s' has %d dangling objects!\n", pImage->szName, cObjs));
    }

    /* call termination function if fully loaded. */
    if (    pImage->pfnModuleTerm
        &&  pImage->uState == SUP_IOCTL_LDR_LOAD)
    {
        LogFlow(("supdrvIOCtl_LdrLoad: calling pfnModuleTerm=%p\n", pImage->pfnModuleTerm));
#ifdef RT_WITH_W64_UNWIND_HACK
        supdrvNtWrapModuleTerm(pImage->pfnModuleTerm);
#else
        pImage->pfnModuleTerm();
#endif
    }

    /* free the image */
    pImage->cUsage = 0;
    pImage->pNext  = 0;
    pImage->uState = SUP_IOCTL_LDR_FREE;
    RTMemExecFree(pImage);
}
3912
3913
/**
 * Implements the service call request.
 *
 * Looks up a service request handler registered by an image the calling
 * session is using (matched by image name) and invokes it. Compiled out
 * (returns VERR_NOT_IMPLEMENTED) on Windows release builds.
 *
 * @returns VBox status code.
 * @param   pDevExt     The device extension.
 * @param   pSession    The calling session.
 * @param   pReq        The request packet, valid.
 */
static int supdrvIOCtl_CallServiceModule(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPCALLSERVICE pReq)
{
#if !defined(RT_OS_WINDOWS) || defined(DEBUG)
    int rc;

    /*
     * Find the module first in the module referenced by the calling session.
     */
    rc = RTSemFastMutexRequest(pDevExt->mtxLdr);
    if (RT_SUCCESS(rc))
    {
        PFNSUPR0SERVICEREQHANDLER   pfnServiceReqHandler = NULL;
        PSUPDRVLDRUSAGE             pUsage;

        for (pUsage = pSession->pLdrUsage; pUsage; pUsage = pUsage->pNext)
            if (    pUsage->pImage->pfnServiceReqHandler
                &&  !strcmp(pUsage->pImage->szName, pReq->u.In.szName))
            {
                pfnServiceReqHandler = pUsage->pImage->pfnServiceReqHandler;
                break;
            }
        RTSemFastMutexRelease(pDevExt->mtxLdr);

        if (pfnServiceReqHandler)
        {
            /*
             * Call it. A request exactly the size of the header means there is
             * no request packet payload, so pass NULL for it; otherwise hand
             * over the embedded packet.
             */
            if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_SERVICE_SIZE(0))
#ifdef RT_WITH_W64_UNWIND_HACK
                rc = supdrvNtWrapServiceReqHandler((PFNRT)pfnServiceReqHandler, pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, NULL);
#else
                rc = pfnServiceReqHandler(pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, NULL);
#endif
            else
#ifdef RT_WITH_W64_UNWIND_HACK
                rc = supdrvNtWrapServiceReqHandler((PFNRT)pfnServiceReqHandler, pSession, pReq->u.In.uOperation,
                                                   pReq->u.In.u64Arg, (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0]);
#else
                rc = pfnServiceReqHandler(pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0]);
#endif
        }
        else
            rc = VERR_SUPDRV_SERVICE_NOT_FOUND;
    }

    /* log it - failures at Log level, except benign interruption/timeout; successes at Log4. */
    if (    RT_FAILURE(rc)
        &&  rc != VERR_INTERRUPTED
        &&  rc != VERR_TIMEOUT)
        Log(("SUP_IOCTL_CALL_SERVICE: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
             rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
    else
        Log4(("SUP_IOCTL_CALL_SERVICE: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
              rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
    return rc;
#else  /* RT_OS_WINDOWS && !DEBUG */
    return VERR_NOT_IMPLEMENTED;
#endif /* RT_OS_WINDOWS && !DEBUG */
}
3982
3983
/**
 * Gets the paging mode of the current CPU.
 *
 * Decodes CR0 (PG/PE), CR4 (PAE/PGE) and - when PAE is on and the CPU
 * advertises NX or long mode via CPUID 0x80000001 - the EFER NXE/LMA bits
 * into a SUPPAGINGMODE value.
 *
 * @returns Paging mode, SUPPAGINGMODE_INVALID on error.
 */
SUPR0DECL(SUPPAGINGMODE) SUPR0GetPagingMode(void)
{
    SUPPAGINGMODE enmMode;

    RTR0UINTREG cr0 = ASMGetCR0();
    if ((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
        enmMode = SUPPAGINGMODE_INVALID;
    else
    {
        RTR0UINTREG cr4 = ASMGetCR4();
        /* bit 0 = EFER.NXE active, bit 1 = EFER.LMA active (long mode). */
        uint32_t fNXEPlusLMA = 0;
        if (cr4 & X86_CR4_PAE)
        {
            uint32_t fAmdFeatures = ASMCpuId_EDX(0x80000001);
            if (fAmdFeatures & (X86_CPUID_AMD_FEATURE_EDX_NX | X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
            {
                /* Only read EFER when CPUID says the MSR bits can exist. */
                uint64_t efer = ASMRdMsr(MSR_K6_EFER);
                if ((fAmdFeatures & X86_CPUID_AMD_FEATURE_EDX_NX)        && (efer & MSR_K6_EFER_NXE))
                    fNXEPlusLMA |= RT_BIT(0);
                if ((fAmdFeatures & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE) && (efer & MSR_K6_EFER_LMA))
                    fNXEPlusLMA |= RT_BIT(1);
            }
        }

        /* Combine PAE, PGE and the NXE/LMA bits into a single switch value. */
        switch ((cr4 & (X86_CR4_PAE | X86_CR4_PGE)) | fNXEPlusLMA)
        {
            case 0:
                enmMode = SUPPAGINGMODE_32_BIT;
                break;

            case X86_CR4_PGE:
                enmMode = SUPPAGINGMODE_32_BIT_GLOBAL;
                break;

            case X86_CR4_PAE:
                enmMode = SUPPAGINGMODE_PAE;
                break;

            case X86_CR4_PAE | RT_BIT(0):
                enmMode = SUPPAGINGMODE_PAE_NX;
                break;

            case X86_CR4_PAE | X86_CR4_PGE:
                enmMode = SUPPAGINGMODE_PAE_GLOBAL;
                break;

            case X86_CR4_PAE | X86_CR4_PGE | RT_BIT(0):
                /* NOTE(review): NXE is set here yet this yields PAE_GLOBAL rather
                   than SUPPAGINGMODE_PAE_GLOBAL_NX - confirm this is intentional. */
                enmMode = SUPPAGINGMODE_PAE_GLOBAL;
                break;

            case RT_BIT(1) | X86_CR4_PAE:
                enmMode = SUPPAGINGMODE_AMD64;
                break;

            case RT_BIT(1) | X86_CR4_PAE | RT_BIT(0):
                enmMode = SUPPAGINGMODE_AMD64_NX;
                break;

            case RT_BIT(1) | X86_CR4_PAE | X86_CR4_PGE:
                enmMode = SUPPAGINGMODE_AMD64_GLOBAL;
                break;

            case RT_BIT(1) | X86_CR4_PAE | X86_CR4_PGE | RT_BIT(0):
                enmMode = SUPPAGINGMODE_AMD64_GLOBAL_NX;
                break;

            default:
                AssertMsgFailed(("Cannot happen! cr4=%#x fNXEPlusLMA=%d\n", cr4, fNXEPlusLMA));
                enmMode = SUPPAGINGMODE_INVALID;
                break;
        }
    }
    return enmMode;
}
4063
4064
4065/**
4066 * Enables or disabled hardware virtualization extensions using native OS APIs.
4067 *
4068 * @returns VBox status code.
4069 * @retval VINF_SUCCESS on success.
4070 * @retval VERR_NOT_SUPPORTED if not supported by the native OS.
4071 *
4072 * @param fEnable Whether to enable or disable.
4073 */
4074SUPR0DECL(int) SUPR0EnableVTx(bool fEnable)
4075{
4076#ifdef RT_OS_DARWIN
4077 return supdrvOSEnableVTx(fEnable);
4078#else
4079 return VERR_NOT_SUPPORTED;
4080#endif
4081}
4082
4083
/**
 * Creates the GIP (Global Information Page).
 *
 * Allocates a low-memory page for the GIP, initializes it, and starts the
 * periodic update timer - per-CPU (omni) timer in async TSC mode, a single
 * timer otherwise. Also registers for MP events in async mode so the GIP
 * master role can be handed over when a CPU goes offline.
 *
 * @returns VBox status code.
 * @param   pDevExt     Instance data. GIP stuff may be updated.
 */
static int supdrvGipCreate(PSUPDRVDEVEXT pDevExt)
{
    PSUPGLOBALINFOPAGE  pGip;
    RTHCPHYS            HCPhysGip;
    uint32_t            u32SystemResolution;
    uint32_t            u32Interval;
    int                 rc;

    LogFlow(("supdrvGipCreate:\n"));

    /* assert order */
    Assert(pDevExt->u32SystemTimerGranularityGrant == 0);
    Assert(pDevExt->GipMemObj == NIL_RTR0MEMOBJ);
    Assert(!pDevExt->pGipTimer);

    /*
     * Allocate a suitable page with a default kernel mapping.
     */
    rc = RTR0MemObjAllocLow(&pDevExt->GipMemObj, PAGE_SIZE, false);
    if (RT_FAILURE(rc))
    {
        OSDBGPRINT(("supdrvGipCreate: failed to allocate the GIP page. rc=%d\n", rc));
        return rc;
    }
    pGip = (PSUPGLOBALINFOPAGE)RTR0MemObjAddress(pDevExt->GipMemObj); AssertPtr(pGip);
    HCPhysGip = RTR0MemObjGetPagePhysAddr(pDevExt->GipMemObj, 0); Assert(HCPhysGip != NIL_RTHCPHYS);

#if 0 /** @todo Disabled this as we didn't used to do it before and causes unnecessary stress on laptops.
       * It only applies to Windows and should probably revisited later, if possible made part of the
       * timer code (return min granularity in RTTimerGetSystemGranularity and set it in RTTimerStart). */
    /*
     * Try bump up the system timer resolution.
     * The more interrupts the better...
     */
    if (    RT_SUCCESS(RTTimerRequestSystemGranularity(  488281 /* 2048 HZ */, &u32SystemResolution))
        ||  RT_SUCCESS(RTTimerRequestSystemGranularity(  500000 /* 2000 HZ */, &u32SystemResolution))
        ||  RT_SUCCESS(RTTimerRequestSystemGranularity(  976563 /* 1024 HZ */, &u32SystemResolution))
        ||  RT_SUCCESS(RTTimerRequestSystemGranularity( 1000000 /* 1000 HZ */, &u32SystemResolution))
        ||  RT_SUCCESS(RTTimerRequestSystemGranularity( 1953125 /*  512 HZ */, &u32SystemResolution))
        ||  RT_SUCCESS(RTTimerRequestSystemGranularity( 2000000 /*  500 HZ */, &u32SystemResolution))
        ||  RT_SUCCESS(RTTimerRequestSystemGranularity( 3906250 /*  256 HZ */, &u32SystemResolution))
        ||  RT_SUCCESS(RTTimerRequestSystemGranularity( 4000000 /*  250 HZ */, &u32SystemResolution))
        ||  RT_SUCCESS(RTTimerRequestSystemGranularity( 7812500 /*  128 HZ */, &u32SystemResolution))
        ||  RT_SUCCESS(RTTimerRequestSystemGranularity(10000000 /*  100 HZ */, &u32SystemResolution))
        ||  RT_SUCCESS(RTTimerRequestSystemGranularity(15625000 /*   64 HZ */, &u32SystemResolution))
        ||  RT_SUCCESS(RTTimerRequestSystemGranularity(31250000 /*   32 HZ */, &u32SystemResolution))
       )
    {
        Assert(RTTimerGetSystemGranularity() <= u32SystemResolution);
        pDevExt->u32SystemTimerGranularityGrant = u32SystemResolution;
    }
#endif

    /*
     * Find a reasonable update interval (a multiple of the system timer
     * granularity, at least 10 ms) and initialize the structure.
     */
    u32Interval = u32SystemResolution = RTTimerGetSystemGranularity();
    while (u32Interval < 10000000 /* 10 ms */)
        u32Interval += u32SystemResolution;

    supdrvGipInit(pDevExt, pGip, HCPhysGip, RTTimeSystemNanoTS(), 1000000000 / u32Interval /*=Hz*/);

    /*
     * Create the timer.
     * If CPU_ALL isn't supported we'll have to fall back to synchronous mode.
     */
    if (pGip->u32Mode == SUPGIPMODE_ASYNC_TSC)
    {
        rc = RTTimerCreateEx(&pDevExt->pGipTimer, u32Interval, RTTIMER_FLAGS_CPU_ALL, supdrvGipAsyncTimer, pDevExt);
        if (rc == VERR_NOT_SUPPORTED)
        {
            OSDBGPRINT(("supdrvGipCreate: omni timer not supported, falling back to synchronous mode\n"));
            pGip->u32Mode = SUPGIPMODE_SYNC_TSC;
        }
    }
    if (pGip->u32Mode != SUPGIPMODE_ASYNC_TSC)
        rc = RTTimerCreateEx(&pDevExt->pGipTimer, u32Interval, 0, supdrvGipSyncTimer, pDevExt);
    if (RT_SUCCESS(rc))
    {
        if (pGip->u32Mode == SUPGIPMODE_ASYNC_TSC)
            rc = RTMpNotificationRegister(supdrvGipMpEvent, pDevExt);
        if (RT_SUCCESS(rc))
        {
            /*
             * We're good.
             */
            dprintf(("supdrvGipCreate: %ld ns interval.\n", (long)u32Interval));
            return VINF_SUCCESS;
        }

        OSDBGPRINT(("supdrvGipCreate: failed register MP event notfication. rc=%d\n", rc));
    }
    else
    {
        OSDBGPRINT(("supdrvGipCreate: failed create GIP timer at %ld ns interval. rc=%d\n", (long)u32Interval, rc));
        Assert(!pDevExt->pGipTimer);
    }
    /* Failure: undo the partial setup (timer, mem obj, granularity grant). */
    supdrvGipDestroy(pDevExt);
    return rc;
}
4190
4191
/**
 * Terminates the GIP.
 *
 * Safe to call on a partially constructed GIP (used as the error path of
 * supdrvGipCreate): each resource is only torn down if it was set up.
 *
 * @param   pDevExt     Instance data. GIP stuff may be updated.
 */
static void supdrvGipDestroy(PSUPDRVDEVEXT pDevExt)
{
    int rc;
#ifdef DEBUG_DARWIN_GIP
    OSDBGPRINT(("supdrvGipDestroy: pDevExt=%p pGip=%p pGipTimer=%p GipMemObj=%p\n", pDevExt,
                pDevExt->GipMemObj != NIL_RTR0MEMOBJ ? RTR0MemObjAddress(pDevExt->GipMemObj) : NULL,
                pDevExt->pGipTimer, pDevExt->GipMemObj));
#endif

    /*
     * Invalidate the GIP data.
     */
    if (pDevExt->pGip)
    {
        supdrvGipTerm(pDevExt->pGip);
        pDevExt->pGip = NULL;
    }

    /*
     * Destroy the timer and free the GIP memory object.
     */
    if (pDevExt->pGipTimer)
    {
        rc = RTTimerDestroy(pDevExt->pGipTimer); AssertRC(rc);
        pDevExt->pGipTimer = NULL;
    }

    if (pDevExt->GipMemObj != NIL_RTR0MEMOBJ)
    {
        rc = RTR0MemObjFree(pDevExt->GipMemObj, true /* free mappings */); AssertRC(rc);
        pDevExt->GipMemObj = NIL_RTR0MEMOBJ;
    }

    /*
     * Finally, release the system timer resolution request if one succeeded.
     */
    if (pDevExt->u32SystemTimerGranularityGrant)
    {
        rc = RTTimerReleaseSystemGranularity(pDevExt->u32SystemTimerGranularityGrant); AssertRC(rc);
        pDevExt->u32SystemTimerGranularityGrant = 0;
    }
}
4239
4240
4241/**
4242 * Timer callback function sync GIP mode.
4243 * @param pTimer The timer.
4244 * @param pvUser The device extension.
4245 */
4246static DECLCALLBACK(void) supdrvGipSyncTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
4247{
4248 RTCCUINTREG fOldFlags = ASMIntDisableFlags(); /* No interruptions please (real problem on S10). */
4249 PSUPDRVDEVEXT pDevExt = (PSUPDRVDEVEXT)pvUser;
4250
4251 supdrvGipUpdate(pDevExt->pGip, RTTimeSystemNanoTS());
4252
4253 ASMSetFlags(fOldFlags);
4254}
4255
4256
4257/**
4258 * Timer callback function for async GIP mode.
4259 * @param pTimer The timer.
4260 * @param pvUser The device extension.
4261 */
4262static DECLCALLBACK(void) supdrvGipAsyncTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
4263{
4264 RTCCUINTREG fOldFlags = ASMIntDisableFlags(); /* No interruptions please (real problem on S10). */
4265 PSUPDRVDEVEXT pDevExt = (PSUPDRVDEVEXT)pvUser;
4266 RTCPUID idCpu = RTMpCpuId();
4267 uint64_t NanoTS = RTTimeSystemNanoTS();
4268
4269 /** @todo reset the transaction number and whatnot when iTick == 1. */
4270 if (pDevExt->idGipMaster == idCpu)
4271 supdrvGipUpdate(pDevExt->pGip, NanoTS);
4272 else
4273 supdrvGipUpdatePerCpu(pDevExt->pGip, NanoTS, ASMGetApicId());
4274
4275 ASMSetFlags(fOldFlags);
4276}
4277
4278
/**
 * Multiprocessor event notification callback.
 *
 * This is used to make sure that the GIP master gets passed on to
 * another CPU when the current master goes offline.
 *
 * @param   enmEvent    The event.
 * @param   idCpu       The cpu it applies to.
 * @param   pvUser      Pointer to the device extension.
 */
static DECLCALLBACK(void) supdrvGipMpEvent(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvUser)
{
    PSUPDRVDEVEXT pDevExt = (PSUPDRVDEVEXT)pvUser;
    if (enmEvent == RTMPEVENT_OFFLINE)
    {
        RTCPUID idGipMaster;
        ASMAtomicReadSize(&pDevExt->idGipMaster, &idGipMaster);
        if (idGipMaster == idCpu)
        {
            /*
             * Find a new GIP master: pick the first online CPU that isn't
             * the departing one. NIL_RTCPUID if none is found.
             */
            bool        fIgnored;
            unsigned    i;
            RTCPUID     idNewGipMaster = NIL_RTCPUID;
            RTCPUSET    OnlineCpus;
            RTMpGetOnlineSet(&OnlineCpus);

            for (i = 0; i < RTCPUSET_MAX_CPUS; i++)
            {
                RTCPUID idCurCpu = RTMpCpuIdFromSetIndex(i);
                if (    RTCpuSetIsMember(&OnlineCpus, idCurCpu)
                    &&  idCurCpu != idGipMaster)
                {
                    idNewGipMaster = idCurCpu;
                    break;
                }
            }

            dprintf(("supdrvGipMpEvent: Gip master %#lx -> %#lx\n", (long)idGipMaster, (long)idNewGipMaster));
            /* CmpXchg so we only take over if nobody else changed the master in the meantime. */
            ASMAtomicCmpXchgSize(&pDevExt->idGipMaster, idNewGipMaster, idGipMaster, fIgnored);
            NOREF(fIgnored);
        }
    }
}
4324
4325
/**
 * Initializes the GIP data.
 *
 * Zeros the whole page, fills in the header (magic, version, TSC mode,
 * update frequency/interval) and seeds every per-CPU entry, then links the
 * GIP into the device extension.
 *
 * @returns VINF_SUCCESS.
 * @param   pDevExt     Pointer to the device instance data.
 * @param   pGip        Pointer to the read-write kernel mapping of the GIP.
 * @param   HCPhys      The physical address of the GIP.
 * @param   u64NanoTS   The current nanosecond timestamp.
 * @param   uUpdateHz   The update frequency.
 */
int VBOXCALL supdrvGipInit(PSUPDRVDEVEXT pDevExt, PSUPGLOBALINFOPAGE pGip, RTHCPHYS HCPhys, uint64_t u64NanoTS, unsigned uUpdateHz)
{
    unsigned i;
#ifdef DEBUG_DARWIN_GIP
    OSDBGPRINT(("supdrvGipInit: pGip=%p HCPhys=%lx u64NanoTS=%llu uUpdateHz=%d\n", pGip, (long)HCPhys, u64NanoTS, uUpdateHz));
#else
    LogFlow(("supdrvGipInit: pGip=%p HCPhys=%lx u64NanoTS=%llu uUpdateHz=%d\n", pGip, (long)HCPhys, u64NanoTS, uUpdateHz));
#endif

    /*
     * Initialize the structure.
     */
    memset(pGip, 0, PAGE_SIZE);
    pGip->u32Magic              = SUPGLOBALINFOPAGE_MAGIC;
    pGip->u32Version            = SUPGLOBALINFOPAGE_VERSION;
    pGip->u32Mode               = supdrvGipDeterminTscMode(pDevExt);
    pGip->u32UpdateHz           = uUpdateHz;
    pGip->u32UpdateIntervalNS   = 1000000000 / uUpdateHz;
    pGip->u64NanoTSLastUpdateHz = u64NanoTS;

    for (i = 0; i < RT_ELEMENTS(pGip->aCPUs); i++)
    {
        /* Transaction id starts even (2) = "not in update". */
        pGip->aCPUs[i].u32TransactionId  = 2;
        pGip->aCPUs[i].u64NanoTS         = u64NanoTS;
        pGip->aCPUs[i].u64TSC            = ASMReadTSC();

        /*
         * We don't know the following values until we've executed updates.
         * So, we'll just insert very high values.
         */
        pGip->aCPUs[i].u64CpuHz          = _4G + 1;
        pGip->aCPUs[i].u32UpdateIntervalTSC = _2G / 4;
        pGip->aCPUs[i].au32TSCHistory[0] = _2G / 4;
        pGip->aCPUs[i].au32TSCHistory[1] = _2G / 4;
        pGip->aCPUs[i].au32TSCHistory[2] = _2G / 4;
        pGip->aCPUs[i].au32TSCHistory[3] = _2G / 4;
        pGip->aCPUs[i].au32TSCHistory[4] = _2G / 4;
        pGip->aCPUs[i].au32TSCHistory[5] = _2G / 4;
        pGip->aCPUs[i].au32TSCHistory[6] = _2G / 4;
        pGip->aCPUs[i].au32TSCHistory[7] = _2G / 4;
    }

    /*
     * Link it to the device extension.
     */
    pDevExt->pGip       = pGip;
    pDevExt->HCPhysGip  = HCPhys;
    pDevExt->cGipUsers  = 0;

    return VINF_SUCCESS;
}
4387
4388
4389/**
4390 * Callback used by supdrvDetermineAsyncTSC to read the TSC on a CPU.
4391 *
4392 * @param idCpu Ignored.
4393 * @param pvUser1 Where to put the TSC.
4394 * @param pvUser2 Ignored.
4395 */
4396static DECLCALLBACK(void) supdrvDetermineAsyncTscWorker(RTCPUID idCpu, void *pvUser1, void *pvUser2)
4397{
4398#if 1
4399 ASMAtomicWriteU64((uint64_t volatile *)pvUser1, ASMReadTSC());
4400#else
4401 *(uint64_t *)pvUser1 = ASMReadTSC();
4402#endif
4403}
4404
4405
/**
 * Determine if Async GIP mode is required because of TSC drift.
 *
 * When using the default/normal timer code it is essential that the time stamp counter
 * (TSC) runs never backwards, that is, a read operation to the counter should return
 * a bigger value than any previous read operation. This is guaranteed by the latest
 * AMD CPUs and by newer Intel CPUs which never enter the C2 state (P4). In any other
 * case we have to choose the asynchronous timer mode.
 *
 * @param   poffMin     Pointer to the determined difference between different cores.
 * @return  false if the time stamp counters appear to be in sync, true otherwise.
 */
bool VBOXCALL supdrvDetermineAsyncTsc(uint64_t *poffMin)
{
    /*
     * Just iterate all the cpus 8 times and make sure that the TSC is
     * ever increasing. We don't bother taking TSC rollover into account.
     */
    RTCPUSET    CpuSet;
    int         iLastCpu = RTCpuLastIndex(RTMpGetSet(&CpuSet));
    int         iCpu;
    int         cLoops = 8;
    bool        fAsync = false;
    int         rc = VINF_SUCCESS;
    uint64_t    offMax = 0;
    uint64_t    offMin = ~(uint64_t)0;
    uint64_t    PrevTsc = ASMReadTSC();

    while (cLoops-- > 0)
    {
        for (iCpu = 0; iCpu <= iLastCpu; iCpu++)
        {
            uint64_t CurTsc;
            /* Read the TSC on the target CPU via a cross call. */
            rc = RTMpOnSpecific(RTMpCpuIdFromSetIndex(iCpu), supdrvDetermineAsyncTscWorker, &CurTsc, NULL);
            if (RT_SUCCESS(rc))
            {
                if (CurTsc <= PrevTsc)
                {
                    /* Went backwards (or stood still) across CPUs -> async mode needed. */
                    fAsync = true;
                    offMin = offMax = PrevTsc - CurTsc;
                    dprintf(("supdrvDetermineAsyncTsc: iCpu=%d cLoops=%d CurTsc=%llx PrevTsc=%llx\n",
                             iCpu, cLoops, CurTsc, PrevTsc));
                    break;
                }

                /* Gather statistics (except the first time). */
                if (iCpu != 0 || cLoops != 7)
                {
                    uint64_t off = CurTsc - PrevTsc;
                    if (off < offMin)
                        offMin = off;
                    if (off > offMax)
                        offMax = off;
                    dprintf2(("%d/%d: off=%llx\n", cLoops, iCpu, off));
                }

                /* Next */
                PrevTsc = CurTsc;
            }
            else if (rc == VERR_NOT_SUPPORTED)
                break;
            else
                AssertMsg(rc == VERR_CPU_NOT_FOUND || rc == VERR_CPU_OFFLINE, ("%d\n", rc));
        }

        /* broke out of the loop. */
        if (iCpu <= iLastCpu)
            break;
    }

    *poffMin = offMin; /* Almost RTMpOnSpecific profiling. */
    dprintf(("supdrvDetermineAsyncTsc: returns %d; iLastCpu=%d rc=%d offMin=%llx offMax=%llx\n",
             fAsync, iLastCpu, rc, offMin, offMax));
#if !defined(RT_OS_SOLARIS) && !defined(RT_OS_OS2) && !defined(RT_OS_WINDOWS)
    OSDBGPRINT(("vboxdrv: fAsync=%d offMin=%#lx offMax=%#lx\n", fAsync, (long)offMin, (long)offMax));
#endif
    return fAsync;
}
4484
4485
4486/**
4487 * Determin the GIP TSC mode.
4488 *
4489 * @returns The most suitable TSC mode.
4490 * @param pDevExt Pointer to the device instance data.
4491 */
4492static SUPGIPMODE supdrvGipDeterminTscMode(PSUPDRVDEVEXT pDevExt)
4493{
4494 /*
4495 * On SMP we're faced with two problems:
4496 * (1) There might be a skew between the CPU, so that cpu0
4497 * returns a TSC that is sligtly different from cpu1.
4498 * (2) Power management (and other things) may cause the TSC
4499 * to run at a non-constant speed, and cause the speed
4500 * to be different on the cpus. This will result in (1).
4501 *
4502 * So, on SMP systems we'll have to select the ASYNC update method
4503 * if there are symphoms of these problems.
4504 */
4505 if (RTMpGetCount() > 1)
4506 {
4507 uint32_t uEAX, uEBX, uECX, uEDX;
4508 uint64_t u64DiffCoresIgnored;
4509
4510 /* Permit the user and/or the OS specfic bits to force async mode. */
4511 if (supdrvOSGetForcedAsyncTscMode(pDevExt))
4512 return SUPGIPMODE_ASYNC_TSC;
4513
4514 /* Try check for current differences between the cpus. */
4515 if (supdrvDetermineAsyncTsc(&u64DiffCoresIgnored))
4516 return SUPGIPMODE_ASYNC_TSC;
4517
4518 /*
4519 * If the CPU supports power management and is an AMD one we
4520 * won't trust it unless it has the TscInvariant bit is set.
4521 */
4522 /* Check for "AuthenticAMD" */
4523 ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
4524 if ( uEAX >= 1
4525 && uEBX == X86_CPUID_VENDOR_AMD_EBX
4526 && uECX == X86_CPUID_VENDOR_AMD_ECX
4527 && uEDX == X86_CPUID_VENDOR_AMD_EDX)
4528 {
4529 /* Check for APM support and that TscInvariant is cleared. */
4530 ASMCpuId(0x80000000, &uEAX, &uEBX, &uECX, &uEDX);
4531 if (uEAX >= 0x80000007)
4532 {
4533 ASMCpuId(0x80000007, &uEAX, &uEBX, &uECX, &uEDX);
4534 if ( !(uEDX & RT_BIT(8))/* TscInvariant */
4535 && (uEDX & 0x3e)) /* STC|TM|THERMTRIP|VID|FID. Ignore TS. */
4536 return SUPGIPMODE_ASYNC_TSC;
4537 }
4538 }
4539 }
4540 return SUPGIPMODE_SYNC_TSC;
4541}
4542
4543
4544/**
4545 * Invalidates the GIP data upon termination.
4546 *
4547 * @param pGip Pointer to the read-write kernel mapping of the GIP.
4548 */
4549void VBOXCALL supdrvGipTerm(PSUPGLOBALINFOPAGE pGip)
4550{
4551 unsigned i;
4552 pGip->u32Magic = 0;
4553 for (i = 0; i < RT_ELEMENTS(pGip->aCPUs); i++)
4554 {
4555 pGip->aCPUs[i].u64NanoTS = 0;
4556 pGip->aCPUs[i].u64TSC = 0;
4557 pGip->aCPUs[i].iTSCHistoryHead = 0;
4558 }
4559}
4560
4561
/**
 * Worker routine for supdrvGipUpdate and supdrvGipUpdatePerCpu that
 * updates all the per cpu data except the transaction id.
 *
 * The caller has already made the transaction id odd, so all the writes
 * below are seen by GIP readers as part of one in-progress update.
 *
 * @param   pGip            The GIP.
 * @param   pGipCpu         Pointer to the per cpu data.
 * @param   u64NanoTS       The current time stamp.
 */
static void supdrvGipDoUpdateCpu(PSUPGLOBALINFOPAGE pGip, PSUPGIPCPU pGipCpu, uint64_t u64NanoTS)
{
    uint64_t u64TSC;
    uint64_t u64TSCDelta;
    uint32_t u32UpdateIntervalTSC;
    uint32_t u32UpdateIntervalTSCSlack;
    unsigned iTSCHistoryHead;
    uint64_t u64CpuHz;

    /*
     * Update the NanoTS.
     */
    ASMAtomicXchgU64(&pGipCpu->u64NanoTS, u64NanoTS);

    /*
     * Calc TSC delta (ticks since the previous update on this entry).
     */
    /** @todo validate the NanoTS delta, don't trust the OS to call us when it should... */
    u64TSC = ASMReadTSC();
    u64TSCDelta = u64TSC - pGipCpu->u64TSC;
    ASMAtomicXchgU64(&pGipCpu->u64TSC, u64TSC);

    /* A delta that doesn't fit in 32 bits is implausible (missed updates,
       TSC jump); substitute the previous interval and count it as an error. */
    if (u64TSCDelta >> 32)
    {
        u64TSCDelta = pGipCpu->u32UpdateIntervalTSC;
        pGipCpu->cErrors++;
    }

    /*
     * TSC History: an 8-entry ring buffer -- the "& 7" masking below
     * relies on that size, hence the assertion.
     */
    Assert(RT_ELEMENTS(pGipCpu->au32TSCHistory) == 8);

    /* Publish the new head index before storing the sample into that slot. */
    iTSCHistoryHead = (pGipCpu->iTSCHistoryHead + 1) & 7;
    ASMAtomicXchgU32(&pGipCpu->iTSCHistoryHead, iTSCHistoryHead);
    ASMAtomicXchgU32(&pGipCpu->au32TSCHistory[iTSCHistoryHead], (uint32_t)u64TSCDelta);

    /*
     * UpdateIntervalTSC = average of last 8,2,1 intervals depending on update HZ.
     */
    if (pGip->u32UpdateHz >= 1000)
    {
        /* High update rate: smooth over the whole history as
           ((avg of entries 0..3) + (avg of entries 4..7)) / 2. */
        uint32_t u32;
        u32 = pGipCpu->au32TSCHistory[0];
        u32 += pGipCpu->au32TSCHistory[1];
        u32 += pGipCpu->au32TSCHistory[2];
        u32 += pGipCpu->au32TSCHistory[3];
        u32 >>= 2;
        u32UpdateIntervalTSC = pGipCpu->au32TSCHistory[4];
        u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[5];
        u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[6];
        u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[7];
        u32UpdateIntervalTSC >>= 2;
        u32UpdateIntervalTSC += u32;
        u32UpdateIntervalTSC >>= 1;

        /* Value chosen for a 2GHz Athlon64 running linux 2.6.10/11. */
        u32UpdateIntervalTSCSlack = u32UpdateIntervalTSC >> 14;
    }
    else if (pGip->u32UpdateHz >= 90)
    {
        /* Medium rate: average of this sample and the previous one. */
        u32UpdateIntervalTSC = (uint32_t)u64TSCDelta;
        u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[(iTSCHistoryHead - 1) & 7];
        u32UpdateIntervalTSC >>= 1;

        /* value chosen on a 2GHz thinkpad running windows */
        u32UpdateIntervalTSCSlack = u32UpdateIntervalTSC >> 7;
    }
    else
    {
        /* Low rate: use the latest sample as-is. */
        u32UpdateIntervalTSC = (uint32_t)u64TSCDelta;

        /* This value hasn't been checked yet.. waiting for OS/2 and 33Hz timers.. :-) */
        u32UpdateIntervalTSCSlack = u32UpdateIntervalTSC >> 6;
    }
    ASMAtomicXchgU32(&pGipCpu->u32UpdateIntervalTSC, u32UpdateIntervalTSC + u32UpdateIntervalTSCSlack);

    /*
     * CpuHz = interval ticks (without slack) times the update frequency.
     */
    u64CpuHz = ASMMult2xU32RetU64(u32UpdateIntervalTSC, pGip->u32UpdateHz);
    ASMAtomicXchgU64(&pGipCpu->u64CpuHz, u64CpuHz);
}
4653
4654
4655/**
4656 * Updates the GIP.
4657 *
4658 * @param pGip Pointer to the GIP.
4659 * @param u64NanoTS The current nanosecond timesamp.
4660 */
4661void VBOXCALL supdrvGipUpdate(PSUPGLOBALINFOPAGE pGip, uint64_t u64NanoTS)
4662{
4663 /*
4664 * Determin the relevant CPU data.
4665 */
4666 PSUPGIPCPU pGipCpu;
4667 if (pGip->u32Mode != SUPGIPMODE_ASYNC_TSC)
4668 pGipCpu = &pGip->aCPUs[0];
4669 else
4670 {
4671 unsigned iCpu = ASMGetApicId();
4672 if (RT_LIKELY(iCpu >= RT_ELEMENTS(pGip->aCPUs)))
4673 return;
4674 pGipCpu = &pGip->aCPUs[iCpu];
4675 }
4676
4677 /*
4678 * Start update transaction.
4679 */
4680 if (!(ASMAtomicIncU32(&pGipCpu->u32TransactionId) & 1))
4681 {
4682 /* this can happen on win32 if we're taking to long and there are more CPUs around. shouldn't happen though. */
4683 AssertMsgFailed(("Invalid transaction id, %#x, not odd!\n", pGipCpu->u32TransactionId));
4684 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4685 pGipCpu->cErrors++;
4686 return;
4687 }
4688
4689 /*
4690 * Recalc the update frequency every 0x800th time.
4691 */
4692 if (!(pGipCpu->u32TransactionId & (GIP_UPDATEHZ_RECALC_FREQ * 2 - 2)))
4693 {
4694 if (pGip->u64NanoTSLastUpdateHz)
4695 {
4696#ifdef RT_ARCH_AMD64 /** @todo fix 64-bit div here to work on x86 linux. */
4697 uint64_t u64Delta = u64NanoTS - pGip->u64NanoTSLastUpdateHz;
4698 uint32_t u32UpdateHz = (uint32_t)((UINT64_C(1000000000) * GIP_UPDATEHZ_RECALC_FREQ) / u64Delta);
4699 if (u32UpdateHz <= 2000 && u32UpdateHz >= 30)
4700 {
4701 ASMAtomicXchgU32(&pGip->u32UpdateHz, u32UpdateHz);
4702 ASMAtomicXchgU32(&pGip->u32UpdateIntervalNS, 1000000000 / u32UpdateHz);
4703 }
4704#endif
4705 }
4706 ASMAtomicXchgU64(&pGip->u64NanoTSLastUpdateHz, u64NanoTS);
4707 }
4708
4709 /*
4710 * Update the data.
4711 */
4712 supdrvGipDoUpdateCpu(pGip, pGipCpu, u64NanoTS);
4713
4714 /*
4715 * Complete transaction.
4716 */
4717 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4718}
4719
4720
4721/**
4722 * Updates the per cpu GIP data for the calling cpu.
4723 *
4724 * @param pGip Pointer to the GIP.
4725 * @param u64NanoTS The current nanosecond timesamp.
4726 * @param iCpu The CPU index.
4727 */
4728void VBOXCALL supdrvGipUpdatePerCpu(PSUPGLOBALINFOPAGE pGip, uint64_t u64NanoTS, unsigned iCpu)
4729{
4730 PSUPGIPCPU pGipCpu;
4731
4732 if (RT_LIKELY(iCpu < RT_ELEMENTS(pGip->aCPUs)))
4733 {
4734 pGipCpu = &pGip->aCPUs[iCpu];
4735
4736 /*
4737 * Start update transaction.
4738 */
4739 if (!(ASMAtomicIncU32(&pGipCpu->u32TransactionId) & 1))
4740 {
4741 AssertMsgFailed(("Invalid transaction id, %#x, not odd!\n", pGipCpu->u32TransactionId));
4742 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4743 pGipCpu->cErrors++;
4744 return;
4745 }
4746
4747 /*
4748 * Update the data.
4749 */
4750 supdrvGipDoUpdateCpu(pGip, pGipCpu, u64NanoTS);
4751
4752 /*
4753 * Complete transaction.
4754 */
4755 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4756 }
4757}
4758
注意: 瀏覽 TracBrowser 來幫助您使用儲存庫瀏覽器

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette