VirtualBox

source: vbox/trunk/src/VBox/VMM/PATM/CSAM.cpp@ 23777

Last change on this file since 23777 was 23777, checked in by vboxsync, 15 years ago

SSM,CSAM: More systematic naming SSMFIELDTRANS_HCPTR -> SSMFIELDTRANS_IGN_HCPTR and SSMFIELD_ENTRY_HCPTR -> SSMFIELD_ENTRY_IGN_HCPTR.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 96.3 KB
 
1/* $Id: CSAM.cpp 23777 2009-10-14 21:39:19Z vboxsync $ */
2/** @file
3 * CSAM - Guest OS Code Scanning and Analysis Manager
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_CSAM
26#include <VBox/cpum.h>
27#include <VBox/stam.h>
28#include <VBox/patm.h>
29#include <VBox/csam.h>
30#include <VBox/cpumdis.h>
31#include <VBox/pgm.h>
32#include <VBox/iom.h>
33#include <VBox/sup.h>
34#include <VBox/mm.h>
35#include <VBox/em.h>
36#include <VBox/rem.h>
37#include <VBox/selm.h>
38#include <VBox/trpm.h>
39#include <VBox/cfgm.h>
40#include <VBox/param.h>
41#include <iprt/avl.h>
42#include <iprt/asm.h>
43#include <iprt/thread.h>
44#include "CSAMInternal.h"
45#include <VBox/vm.h>
46#include <VBox/dbg.h>
47#include <VBox/err.h>
48#include <VBox/ssm.h>
49#include <VBox/log.h>
50#include <iprt/assert.h>
51#include <iprt/string.h>
52#include <VBox/dis.h>
53#include <VBox/disopcode.h>
54#include <stdlib.h>
55#include <stdio.h>
56
57
58/* Enabled by default */
59#define CSAM_ENABLE
60
61/* Enable to monitor code pages for self-modifying code. */
62#define CSAM_MONITOR_CODE_PAGES
63/* Enable to monitor all scanned pages
64#define CSAM_MONITOR_CSAM_CODE_PAGES */
65/* Enable to scan beyond ret instructions.
66#define CSAM_ANALYSE_BEYOND_RET */
67
68/*******************************************************************************
69* Internal Functions *
70*******************************************************************************/
71static DECLCALLBACK(int) csamr3Save(PVM pVM, PSSMHANDLE pSSM);
72static DECLCALLBACK(int) csamr3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
73static DECLCALLBACK(int) CSAMCodePageWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
74static DECLCALLBACK(int) CSAMCodePageInvalidate(PVM pVM, RTGCPTR GCPtr);
75
76bool csamIsCodeScanned(PVM pVM, RTRCPTR pInstr, PCSAMPAGE *pPage);
77int csamR3CheckPageRecord(PVM pVM, RTRCPTR pInstr);
78static PCSAMPAGE csamCreatePageRecord(PVM pVM, RTRCPTR GCPtr, CSAMTAG enmTag, bool fCode32, bool fMonitorInvalidation = false);
79static int csamRemovePageRecord(PVM pVM, RTRCPTR GCPtr);
80static int csamReinit(PVM pVM);
81static void csamMarkCode(PVM pVM, PCSAMPAGE pPage, RTRCPTR pInstr, uint32_t opsize, bool fScanned);
82static int csamAnalyseCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, bool fCode32,
83 PFN_CSAMR3ANALYSE pfnCSAMR3Analyse, void *pUserData, PCSAMP2GLOOKUPREC pCacheRec);
84
85/** @todo Temporary for debugging. */
86static bool fInCSAMCodePageInvalidate = false;
87
88/*******************************************************************************
89* Global Variables *
90*******************************************************************************/
91#ifdef VBOX_WITH_DEBUGGER
92static DECLCALLBACK(int) csamr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
93static DECLCALLBACK(int) csamr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
94
95/** Command descriptors. */
96static const DBGCCMD g_aCmds[] =
97{
98 /* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, pResultDesc, fFlags, pfnHandler pszSyntax, ....pszDescription */
99 { "csamon", 0, 0, NULL, 0, NULL, 0, csamr3CmdOn, "", "Enable CSAM code scanning." },
100 { "csamoff", 0, 0, NULL, 0, NULL, 0, csamr3CmdOff, "", "Disable CSAM code scanning." },
101};
102#endif
103
104/**
105 * SSM descriptor table for the CSAM structure.
106 */
107static const SSMFIELD g_aCsamFields[] =
108{
109 SSMFIELD_ENTRY_IGNORE( CSAM, offVM),
110 SSMFIELD_ENTRY_PAD_HC64( CSAM, Alignment0, sizeof(uint32_t)),
111 SSMFIELD_ENTRY_IGN_HCPTR( CSAM, pPageTree),
112 SSMFIELD_ENTRY( CSAM, aDangerousInstr),
113 SSMFIELD_ENTRY( CSAM, cDangerousInstr),
114 SSMFIELD_ENTRY( CSAM, iDangerousInstr),
115 SSMFIELD_ENTRY_RCPTR( CSAM, pPDBitmapGC),
116 SSMFIELD_ENTRY_RCPTR( CSAM, pPDHCBitmapGC),
117 SSMFIELD_ENTRY_IGN_HCPTR( CSAM, pPDBitmapHC),
118 SSMFIELD_ENTRY_IGN_HCPTR( CSAM, pPDGCBitmapHC),
119 SSMFIELD_ENTRY_IGN_HCPTR( CSAM, savedstate.pSSM),
120 SSMFIELD_ENTRY( CSAM, savedstate.cPageRecords),
121 SSMFIELD_ENTRY( CSAM, savedstate.cPatchPageRecords),
122 SSMFIELD_ENTRY( CSAM, cDirtyPages),
123 SSMFIELD_ENTRY_RCPTR_ARRAY( CSAM, pvDirtyBasePage),
124 SSMFIELD_ENTRY_RCPTR_ARRAY( CSAM, pvDirtyFaultPage),
125 SSMFIELD_ENTRY( CSAM, cPossibleCodePages),
126 SSMFIELD_ENTRY_RCPTR_ARRAY( CSAM, pvPossibleCodePage),
127 SSMFIELD_ENTRY_RCPTR_ARRAY( CSAM, pvCallInstruction),
128 SSMFIELD_ENTRY( CSAM, iCallInstruction),
129 SSMFIELD_ENTRY( CSAM, fScanningStarted),
130 SSMFIELD_ENTRY( CSAM, fGatesChecked),
131 SSMFIELD_ENTRY_PAD_HC( CSAM, Alignment1, 6, 2),
132 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrTraps),
133 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrPages),
134 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrPagesInv),
135 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrRemovedPages),
136 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrPatchPages),
137 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrPageNPHC),
138 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrPageNPGC),
139 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrFlushes),
140 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrFlushesSkipped),
141 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrKnownPagesHC),
142 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrKnownPagesGC),
143 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrInstr),
144 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrBytesRead),
145 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrOpcodeRead),
146 SSMFIELD_ENTRY_IGNORE( CSAM, StatTime),
147 SSMFIELD_ENTRY_IGNORE( CSAM, StatTimeCheckAddr),
148 SSMFIELD_ENTRY_IGNORE( CSAM, StatTimeAddrConv),
149 SSMFIELD_ENTRY_IGNORE( CSAM, StatTimeFlushPage),
150 SSMFIELD_ENTRY_IGNORE( CSAM, StatTimeDisasm),
151 SSMFIELD_ENTRY_IGNORE( CSAM, StatFlushDirtyPages),
152 SSMFIELD_ENTRY_IGNORE( CSAM, StatCheckGates),
153 SSMFIELD_ENTRY_IGNORE( CSAM, StatCodePageModified),
154 SSMFIELD_ENTRY_IGNORE( CSAM, StatDangerousWrite),
155 SSMFIELD_ENTRY_IGNORE( CSAM, StatInstrCacheHit),
156 SSMFIELD_ENTRY_IGNORE( CSAM, StatInstrCacheMiss),
157 SSMFIELD_ENTRY_IGNORE( CSAM, StatPagePATM),
158 SSMFIELD_ENTRY_IGNORE( CSAM, StatPageCSAM),
159 SSMFIELD_ENTRY_IGNORE( CSAM, StatPageREM),
160 SSMFIELD_ENTRY_IGNORE( CSAM, StatNrUserPages),
161 SSMFIELD_ENTRY_IGNORE( CSAM, StatPageMonitor),
162 SSMFIELD_ENTRY_IGNORE( CSAM, StatPageRemoveREMFlush),
163 SSMFIELD_ENTRY_IGNORE( CSAM, StatBitmapAlloc),
164 SSMFIELD_ENTRY_IGNORE( CSAM, StatScanNextFunction),
165 SSMFIELD_ENTRY_IGNORE( CSAM, StatScanNextFunctionFailed),
166 SSMFIELD_ENTRY_TERM()
167};
168
169/** Fake type to simplify g_aCsamPDBitmapArray construction. */
170typedef struct
171{
172 uint8_t *a[CSAM_PGDIRBMP_CHUNKS];
173} CSAMPDBITMAPARRAY;
174
175/**
176 * SSM descriptor table for the CSAM::pPDBitmapHC array.
177 */
178static SSMFIELD const g_aCsamPDBitmapArray[] =
179{
180 SSMFIELD_ENTRY_HCPTR_NI_ARRAY(CSAMPDBITMAPARRAY, a),
181 SSMFIELD_ENTRY_TERM()
182};
183
184/**
185 * SSM descriptor table for the CSAMPAGEREC structure.
186 */
187static const SSMFIELD g_aCsamPageRecFields[] =
188{
189 SSMFIELD_ENTRY_IGN_HCPTR( CSAMPAGEREC, Core.Key),
190 SSMFIELD_ENTRY_IGN_HCPTR( CSAMPAGEREC, Core.pLeft),
191 SSMFIELD_ENTRY_IGN_HCPTR( CSAMPAGEREC, Core.pRight),
192 SSMFIELD_ENTRY_IGNORE( CSAMPAGEREC, Core.uchHeight),
193 SSMFIELD_ENTRY_PAD_HC_AUTO( 3, 7),
194 SSMFIELD_ENTRY_RCPTR( CSAMPAGEREC, page.pPageGC),
195 SSMFIELD_ENTRY_PAD_HC_AUTO( 0, 4),
196 SSMFIELD_ENTRY_PAD_MSC32_AUTO( 4),
197 SSMFIELD_ENTRY_GCPHYS( CSAMPAGEREC, page.GCPhys),
198 SSMFIELD_ENTRY( CSAMPAGEREC, page.fFlags),
199 SSMFIELD_ENTRY( CSAMPAGEREC, page.uSize),
200 SSMFIELD_ENTRY_PAD_HC_AUTO( 0, 4),
201 SSMFIELD_ENTRY_HCPTR_NI( CSAMPAGEREC, page.pBitmap),
202 SSMFIELD_ENTRY( CSAMPAGEREC, page.fCode32),
203 SSMFIELD_ENTRY( CSAMPAGEREC, page.fMonitorActive),
204 SSMFIELD_ENTRY( CSAMPAGEREC, page.fMonitorInvalidation),
205 SSMFIELD_ENTRY_PAD_HC_AUTO( 1, 1),
206 SSMFIELD_ENTRY( CSAMPAGEREC, page.enmTag),
207 SSMFIELD_ENTRY( CSAMPAGEREC, page.u64Hash),
208 SSMFIELD_ENTRY_TERM()
209};
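
/*
 * Note on the descriptor tables above: csamr3Load() below feeds them to
 * SSMR3GetStructEx() together with SSMSTRUCT_FLAGS_MEM_BAND_AID, along the
 * lines of:
 *
 *     RT_ZERO(csamInfo);
 *     rc = SSMR3GetStructEx(pSSM, &csamInfo, sizeof(csamInfo),
 *                           SSMSTRUCT_FLAGS_MEM_BAND_AID, &g_aCsamFields[0], NULL);
 *
 * The IGNORE / IGN_HCPTR entries are skipped on restore and the PAD entries
 * soak up host pointer size and alignment differences between saved states.
 */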
210
211
212/**
213 * Initializes the CSAM.
214 *
215 * @returns VBox status code.
216 * @param pVM The VM to operate on.
217 */
218VMMR3DECL(int) CSAMR3Init(PVM pVM)
219{
220 int rc;
221
222 LogFlow(("CSAMR3Init\n"));
223
224 /* Allocate bitmap for the page directory. */
225 rc = MMR3HyperAllocOnceNoRel(pVM, CSAM_PGDIRBMP_CHUNKS*sizeof(RTHCPTR), 0, MM_TAG_CSAM, (void **)&pVM->csam.s.pPDBitmapHC);
226 AssertRCReturn(rc, rc);
227 rc = MMR3HyperAllocOnceNoRel(pVM, CSAM_PGDIRBMP_CHUNKS*sizeof(RTRCPTR), 0, MM_TAG_CSAM, (void **)&pVM->csam.s.pPDGCBitmapHC);
228 AssertRCReturn(rc, rc);
229 pVM->csam.s.pPDBitmapGC = MMHyperR3ToRC(pVM, pVM->csam.s.pPDGCBitmapHC);
230 pVM->csam.s.pPDHCBitmapGC = MMHyperR3ToRC(pVM, pVM->csam.s.pPDBitmapHC);
231
232 rc = csamReinit(pVM);
233 AssertRCReturn(rc, rc);
234
235 /*
236 * Register save and load state notificators.
237 */
238 rc = SSMR3RegisterInternal(pVM, "CSAM", 0, CSAM_SSM_VERSION, sizeof(pVM->csam.s) + PAGE_SIZE*16,
239 NULL, NULL, NULL,
240 NULL, csamr3Save, NULL,
241 NULL, csamr3Load, NULL);
242 AssertRCReturn(rc, rc);
243
244 STAM_REG(pVM, &pVM->csam.s.StatNrTraps, STAMTYPE_COUNTER, "/CSAM/PageTraps", STAMUNIT_OCCURENCES, "The number of CSAM page traps.");
245 STAM_REG(pVM, &pVM->csam.s.StatDangerousWrite, STAMTYPE_COUNTER, "/CSAM/DangerousWrites", STAMUNIT_OCCURENCES, "The number of dangerous writes that cause a context switch.");
246
247 STAM_REG(pVM, &pVM->csam.s.StatNrPageNPHC, STAMTYPE_COUNTER, "/CSAM/HC/PageNotPresent", STAMUNIT_OCCURENCES, "The number of CSAM pages marked not present.");
248 STAM_REG(pVM, &pVM->csam.s.StatNrPageNPGC, STAMTYPE_COUNTER, "/CSAM/GC/PageNotPresent", STAMUNIT_OCCURENCES, "The number of CSAM pages marked not present.");
249 STAM_REG(pVM, &pVM->csam.s.StatNrPages, STAMTYPE_COUNTER, "/CSAM/PageRec/AddedRW", STAMUNIT_OCCURENCES, "The number of CSAM page records (RW monitoring).");
250 STAM_REG(pVM, &pVM->csam.s.StatNrPagesInv, STAMTYPE_COUNTER, "/CSAM/PageRec/AddedRWI", STAMUNIT_OCCURENCES, "The number of CSAM page records (RW & invalidation monitoring).");
251 STAM_REG(pVM, &pVM->csam.s.StatNrRemovedPages, STAMTYPE_COUNTER, "/CSAM/PageRec/Removed", STAMUNIT_OCCURENCES, "The number of removed CSAM page records.");
252 STAM_REG(pVM, &pVM->csam.s.StatPageRemoveREMFlush,STAMTYPE_COUNTER, "/CSAM/PageRec/Removed/REMFlush", STAMUNIT_OCCURENCES, "The number of removed CSAM page records that caused a REM flush.");
253
254 STAM_REG(pVM, &pVM->csam.s.StatNrPatchPages, STAMTYPE_COUNTER, "/CSAM/PageRec/Patch", STAMUNIT_OCCURENCES, "The number of CSAM patch page records.");
255 STAM_REG(pVM, &pVM->csam.s.StatNrUserPages, STAMTYPE_COUNTER, "/CSAM/PageRec/Ignore/User", STAMUNIT_OCCURENCES, "The number of CSAM user page records (ignored).");
256 STAM_REG(pVM, &pVM->csam.s.StatPagePATM, STAMTYPE_COUNTER, "/CSAM/PageRec/Type/PATM", STAMUNIT_OCCURENCES, "The number of PATM page records.");
257 STAM_REG(pVM, &pVM->csam.s.StatPageCSAM, STAMTYPE_COUNTER, "/CSAM/PageRec/Type/CSAM", STAMUNIT_OCCURENCES, "The number of CSAM page records.");
258 STAM_REG(pVM, &pVM->csam.s.StatPageREM, STAMTYPE_COUNTER, "/CSAM/PageRec/Type/REM", STAMUNIT_OCCURENCES, "The number of REM page records.");
259 STAM_REG(pVM, &pVM->csam.s.StatPageMonitor, STAMTYPE_COUNTER, "/CSAM/PageRec/Monitored", STAMUNIT_OCCURENCES, "The number of monitored pages.");
260
261 STAM_REG(pVM, &pVM->csam.s.StatCodePageModified, STAMTYPE_COUNTER, "/CSAM/Monitor/DirtyPage", STAMUNIT_OCCURENCES, "The number of code page modifications.");
262
263 STAM_REG(pVM, &pVM->csam.s.StatNrFlushes, STAMTYPE_COUNTER, "/CSAM/PageFlushes", STAMUNIT_OCCURENCES, "The number of CSAM page flushes.");
264 STAM_REG(pVM, &pVM->csam.s.StatNrFlushesSkipped, STAMTYPE_COUNTER, "/CSAM/PageFlushesSkipped", STAMUNIT_OCCURENCES, "The number of CSAM page flushes that were skipped.");
265 STAM_REG(pVM, &pVM->csam.s.StatNrKnownPagesHC, STAMTYPE_COUNTER, "/CSAM/HC/KnownPageRecords", STAMUNIT_OCCURENCES, "The number of known CSAM page records.");
266 STAM_REG(pVM, &pVM->csam.s.StatNrKnownPagesGC, STAMTYPE_COUNTER, "/CSAM/GC/KnownPageRecords", STAMUNIT_OCCURENCES, "The number of known CSAM page records.");
267 STAM_REG(pVM, &pVM->csam.s.StatNrInstr, STAMTYPE_COUNTER, "/CSAM/ScannedInstr", STAMUNIT_OCCURENCES, "The number of scanned instructions.");
268 STAM_REG(pVM, &pVM->csam.s.StatNrBytesRead, STAMTYPE_COUNTER, "/CSAM/BytesRead", STAMUNIT_OCCURENCES, "The number of bytes read for scanning.");
269 STAM_REG(pVM, &pVM->csam.s.StatNrOpcodeRead, STAMTYPE_COUNTER, "/CSAM/OpcodeBytesRead", STAMUNIT_OCCURENCES, "The number of opcode bytes read by the recompiler.");
270
271 STAM_REG(pVM, &pVM->csam.s.StatBitmapAlloc, STAMTYPE_COUNTER, "/CSAM/Alloc/PageBitmap", STAMUNIT_OCCURENCES, "The number of page bitmap allocations.");
272
273 STAM_REG(pVM, &pVM->csam.s.StatInstrCacheHit, STAMTYPE_COUNTER, "/CSAM/Cache/Hit", STAMUNIT_OCCURENCES, "The number of dangerous instruction cache hits.");
274 STAM_REG(pVM, &pVM->csam.s.StatInstrCacheMiss, STAMTYPE_COUNTER, "/CSAM/Cache/Miss", STAMUNIT_OCCURENCES, "The number of dangerous instruction cache misses.");
275
276 STAM_REG(pVM, &pVM->csam.s.StatScanNextFunction, STAMTYPE_COUNTER, "/CSAM/Function/Scan/Success", STAMUNIT_OCCURENCES, "The number of found functions beyond the ret border.");
277 STAM_REG(pVM, &pVM->csam.s.StatScanNextFunctionFailed, STAMTYPE_COUNTER, "/CSAM/Function/Scan/Failed", STAMUNIT_OCCURENCES, "The number of refused functions beyond the ret border.");
278
279 STAM_REG(pVM, &pVM->csam.s.StatTime, STAMTYPE_PROFILE, "/PROF/CSAM/Scan", STAMUNIT_TICKS_PER_CALL, "Scanning overhead.");
280 STAM_REG(pVM, &pVM->csam.s.StatTimeCheckAddr, STAMTYPE_PROFILE, "/PROF/CSAM/CheckAddr", STAMUNIT_TICKS_PER_CALL, "Address check overhead.");
281 STAM_REG(pVM, &pVM->csam.s.StatTimeAddrConv, STAMTYPE_PROFILE, "/PROF/CSAM/AddrConv", STAMUNIT_TICKS_PER_CALL, "Address conversion overhead.");
282 STAM_REG(pVM, &pVM->csam.s.StatTimeFlushPage, STAMTYPE_PROFILE, "/PROF/CSAM/FlushPage", STAMUNIT_TICKS_PER_CALL, "Page flushing overhead.");
283 STAM_REG(pVM, &pVM->csam.s.StatTimeDisasm, STAMTYPE_PROFILE, "/PROF/CSAM/Disasm", STAMUNIT_TICKS_PER_CALL, "Disassembly overhead.");
284 STAM_REG(pVM, &pVM->csam.s.StatFlushDirtyPages, STAMTYPE_PROFILE, "/PROF/CSAM/FlushDirtyPage", STAMUNIT_TICKS_PER_CALL, "Dirty page flushing overhead.");
285 STAM_REG(pVM, &pVM->csam.s.StatCheckGates, STAMTYPE_PROFILE, "/PROF/CSAM/CheckGates", STAMUNIT_TICKS_PER_CALL, "CSAMR3CheckGates overhead.");
286
287 /*
288 * Check CFGM option and enable/disable CSAM.
289 */
290 bool fEnabled;
291 rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "CSAMEnabled", &fEnabled);
292 if (RT_FAILURE(rc))
293#ifdef CSAM_ENABLE
294 fEnabled = true;
295#else
296 fEnabled = false;
297#endif
298 if (fEnabled)
299 CSAMEnableScanning(pVM);
300
301#ifdef VBOX_WITH_DEBUGGER
302 /*
303 * Debugger commands.
304 */
305 static bool fRegisteredCmds = false;
306 if (!fRegisteredCmds)
307 {
308 int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
309 if (RT_SUCCESS(rc))
310 fRegisteredCmds = true;
311 }
312#endif
313
314 return VINF_SUCCESS;
315}
316
317/**
318 * (Re)initializes CSAM
319 *
320 * @param pVM The VM.
321 */
322static int csamReinit(PVM pVM)
323{
324 /*
325 * Assert alignment and sizes.
326 */
327 AssertRelease(!(RT_OFFSETOF(VM, csam.s) & 31));
328 AssertRelease(sizeof(pVM->csam.s) <= sizeof(pVM->csam.padding));
329
330 /*
331 * Setup any fixed pointers and offsets.
332 */
333 pVM->csam.s.offVM = RT_OFFSETOF(VM, patm);
334
335 pVM->csam.s.fGatesChecked = false;
336 pVM->csam.s.fScanningStarted = false;
337
338 PVMCPU pVCpu = &pVM->aCpus[0]; /* raw mode implies 1 VCPU */
339 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION);
340 pVM->csam.s.cDirtyPages = 0;
341 /* not necessary */
342 memset(pVM->csam.s.pvDirtyBasePage, 0, sizeof(pVM->csam.s.pvDirtyBasePage));
343 memset(pVM->csam.s.pvDirtyFaultPage, 0, sizeof(pVM->csam.s.pvDirtyFaultPage));
344
345 memset(&pVM->csam.s.aDangerousInstr, 0, sizeof(pVM->csam.s.aDangerousInstr));
346 pVM->csam.s.cDangerousInstr = 0;
347 pVM->csam.s.iDangerousInstr = 0;
348
349 memset(pVM->csam.s.pvCallInstruction, 0, sizeof(pVM->csam.s.pvCallInstruction));
350 pVM->csam.s.iCallInstruction = 0;
351
352 /** @note never mess with the pgdir bitmap here! */
353 return VINF_SUCCESS;
354}
355
356/**
357 * Applies relocations to data and code managed by this
358 * component. This function will be called at init and
359 * whenever the VMM needs to relocate itself inside the GC.
360 *
361 * The csam will update the addresses used by the switcher.
362 *
363 * @param pVM The VM.
364 * @param offDelta Relocation delta.
365 */
366VMMR3DECL(void) CSAMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
367{
368 if (offDelta)
369 {
370 /* Adjust pgdir and page bitmap pointers. */
371 pVM->csam.s.pPDBitmapGC = MMHyperR3ToRC(pVM, pVM->csam.s.pPDGCBitmapHC);
372 pVM->csam.s.pPDHCBitmapGC = MMHyperR3ToRC(pVM, pVM->csam.s.pPDBitmapHC);
373
374 for(int i=0;i<CSAM_PGDIRBMP_CHUNKS;i++)
375 {
376 if (pVM->csam.s.pPDGCBitmapHC[i])
377 {
378 pVM->csam.s.pPDGCBitmapHC[i] += offDelta;
379 }
380 }
381 }
382 return;
383}
384
385/**
386 * Terminates the csam.
387 *
388 * Termination means cleaning up and freeing all resources;
389 * the VM itself is at this point powered off or suspended.
390 *
391 * @returns VBox status code.
392 * @param pVM The VM to operate on.
393 */
394VMMR3DECL(int) CSAMR3Term(PVM pVM)
395{
396 int rc;
397
398 rc = CSAMR3Reset(pVM);
399 AssertRC(rc);
400
401 /* @todo triggers assertion in MMHyperFree */
402#if 0
403 for(int i=0;i<CSAM_PAGEBMP_CHUNKS;i++)
404 {
405 if (pVM->csam.s.pPDBitmapHC[i])
406 MMHyperFree(pVM, pVM->csam.s.pPDBitmapHC[i]);
407 }
408#endif
409
410 return VINF_SUCCESS;
411}
412
413/**
414 * CSAM reset callback.
415 *
416 * @returns VBox status code.
417 * @param pVM The VM which is reset.
418 */
419VMMR3DECL(int) CSAMR3Reset(PVM pVM)
420{
421 /* Clear page bitmaps. */
422 for(int i=0;i<CSAM_PGDIRBMP_CHUNKS;i++)
423 {
424 if (pVM->csam.s.pPDBitmapHC[i])
425 {
426 Assert((CSAM_PAGE_BITMAP_SIZE & 3) == 0);
427 ASMMemZero32(pVM->csam.s.pPDBitmapHC[i], CSAM_PAGE_BITMAP_SIZE);
428 }
429 }
430
431 /* Remove all CSAM page records. */
432 while(true)
433 {
434 PCSAMPAGEREC pPageRec = (PCSAMPAGEREC)RTAvlPVGetBestFit(&pVM->csam.s.pPageTree, 0, true);
435 if (pPageRec)
436 {
437 csamRemovePageRecord(pVM, pPageRec->page.pPageGC);
438 }
439 else
440 break;
441 }
442 Assert(!pVM->csam.s.pPageTree);
443
444 csamReinit(pVM);
445
446 return VINF_SUCCESS;
447}
448
449
450/**
451 * Callback function for RTAvlPVDoWithAll
452 *
453 * Counts the number of records in the tree
454 *
455 * @returns VBox status code.
456 * @param pNode Current node
457 * @param pcPatches Pointer to patch counter
458 */
459static DECLCALLBACK(int) CountRecord(PAVLPVNODECORE pNode, void *pcPatches)
460{
461 *(uint32_t *)pcPatches = *(uint32_t *)pcPatches + 1;
462 return VINF_SUCCESS;
463}
464
465/**
466 * Callback function for RTAvlPVDoWithAll
467 *
468 * Saves the state of the page record
469 *
470 * @returns VBox status code.
471 * @param pNode Current node
472 * @param pVM1 VM Handle
473 */
474static DECLCALLBACK(int) SavePageState(PAVLPVNODECORE pNode, void *pVM1)
475{
476 PVM pVM = (PVM)pVM1;
477 PCSAMPAGEREC pPage = (PCSAMPAGEREC)pNode;
478 CSAMPAGEREC page = *pPage;
479 PSSMHANDLE pSSM = pVM->csam.s.savedstate.pSSM;
480 int rc;
481
482 /* Save the page record itself */
483 rc = SSMR3PutMem(pSSM, &page, sizeof(page));
484 AssertRCReturn(rc, rc);
485
486 if (page.page.pBitmap)
487 {
488 rc = SSMR3PutMem(pSSM, page.page.pBitmap, CSAM_PAGE_BITMAP_SIZE);
489 AssertRCReturn(rc, rc);
490 }
491
492 return VINF_SUCCESS;
493}
494
495/**
496 * Execute state save operation.
497 *
498 * @returns VBox status code.
499 * @param pVM VM Handle.
500 * @param pSSM SSM operation handle.
501 */
502static DECLCALLBACK(int) csamr3Save(PVM pVM, PSSMHANDLE pSSM)
503{
504 CSAM csamInfo = pVM->csam.s;
505 int rc;
506
507 /*
508 * Count the number of page records in the tree (feeling lazy)
509 */
510 csamInfo.savedstate.cPageRecords = 0;
511 RTAvlPVDoWithAll(&pVM->csam.s.pPageTree, true, CountRecord, &csamInfo.savedstate.cPageRecords);
512
513 /*
514 * Save CSAM structure
515 */
516 pVM->csam.s.savedstate.pSSM = pSSM;
517 rc = SSMR3PutMem(pSSM, &csamInfo, sizeof(csamInfo));
518 AssertRCReturn(rc, rc);
519
520 /* Save pgdir bitmap */
521 rc = SSMR3PutMem(pSSM, csamInfo.pPDBitmapHC, CSAM_PGDIRBMP_CHUNKS*sizeof(RTHCPTR));
522 AssertRCReturn(rc, rc);
523
524 for (unsigned i=0;i<CSAM_PGDIRBMP_CHUNKS;i++)
525 {
526 if(csamInfo.pPDBitmapHC[i])
527 {
528 /* Save the page bitmap. */
529 rc = SSMR3PutMem(pSSM, csamInfo.pPDBitmapHC[i], CSAM_PAGE_BITMAP_SIZE);
530 AssertRCReturn(rc, rc);
531 }
532 }
533
534 /*
535 * Save page records
536 */
537 rc = RTAvlPVDoWithAll(&pVM->csam.s.pPageTree, true, SavePageState, pVM);
538 AssertRCReturn(rc, rc);
539
540 /** @note we don't restore aDangerousInstr; it will be recreated automatically. */
541 return VINF_SUCCESS;
542}
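
/*
 * Note: the saved unit layout produced above is consumed in the same order by
 * csamr3Load() below: the CSAM structure itself, the pgdir bitmap pointer
 * array, one CSAM_PAGE_BITMAP_SIZE chunk per non-NULL pointer, and finally the
 * page records (each optionally followed by its page bitmap).
 */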
543
544/**
545 * Execute state load operation.
546 *
547 * @returns VBox status code.
548 * @param pVM VM Handle.
549 * @param pSSM SSM operation handle.
550 * @param uVersion Data layout version.
551 * @param uPass The data pass.
552 */
553static DECLCALLBACK(int) csamr3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
554{
555 int rc;
556 CSAM csamInfo;
557
558 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
559 if (uVersion != CSAM_SSM_VERSION)
560 {
561 AssertMsgFailed(("csamR3Load: Invalid version uVersion=%d!\n", uVersion));
562 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
563 }
564
565 pVM->csam.s.savedstate.pSSM = pSSM;
566
567 /*
568 * Restore CSAM structure
569 */
570#if 0
571 rc = SSMR3GetMem(pSSM, &csamInfo, sizeof(csamInfo));
572#else
573 RT_ZERO(csamInfo);
574 rc = SSMR3GetStructEx(pSSM, &csamInfo, sizeof(csamInfo), SSMSTRUCT_FLAGS_MEM_BAND_AID, &g_aCsamFields[0], NULL);
575#endif
576 AssertRCReturn(rc, rc);
577
578 pVM->csam.s.fGatesChecked = csamInfo.fGatesChecked;
579 pVM->csam.s.fScanningStarted = csamInfo.fScanningStarted;
580
581 /* Restore dirty code page info. */
582 pVM->csam.s.cDirtyPages = csamInfo.cDirtyPages;
583 memcpy(pVM->csam.s.pvDirtyBasePage, csamInfo.pvDirtyBasePage, sizeof(pVM->csam.s.pvDirtyBasePage));
584 memcpy(pVM->csam.s.pvDirtyFaultPage, csamInfo.pvDirtyFaultPage, sizeof(pVM->csam.s.pvDirtyFaultPage));
585
586 /* Restore possible code page */
587 pVM->csam.s.cPossibleCodePages = csamInfo.cPossibleCodePages;
588 memcpy(pVM->csam.s.pvPossibleCodePage, csamInfo.pvPossibleCodePage, sizeof(pVM->csam.s.pvPossibleCodePage));
589
590 /* Restore pgdir bitmap (we'll change the pointers next). */
591#if 0
592 rc = SSMR3GetMem(pSSM, pVM->csam.s.pPDBitmapHC, CSAM_PGDIRBMP_CHUNKS*sizeof(RTHCPTR));
593#else
594 rc = SSMR3GetStructEx(pSSM, pVM->csam.s.pPDBitmapHC, sizeof(uint8_t *) * CSAM_PGDIRBMP_CHUNKS,
595 SSMSTRUCT_FLAGS_MEM_BAND_AID, &g_aCsamPDBitmapArray[0], NULL);
596#endif
597 AssertRCReturn(rc, rc);
598
599 /*
600 * Restore page bitmaps
601 */
602 for (unsigned i=0;i<CSAM_PGDIRBMP_CHUNKS;i++)
603 {
604 if(pVM->csam.s.pPDBitmapHC[i])
605 {
606 rc = MMHyperAlloc(pVM, CSAM_PAGE_BITMAP_SIZE, 0, MM_TAG_CSAM, (void **)&pVM->csam.s.pPDBitmapHC[i]);
607 if (RT_FAILURE(rc))
608 {
609 Log(("MMHyperAlloc failed with %Rrc\n", rc));
610 return rc;
611 }
612 /* Convert to GC pointer. */
613 pVM->csam.s.pPDGCBitmapHC[i] = MMHyperR3ToRC(pVM, pVM->csam.s.pPDBitmapHC[i]);
614 Assert(pVM->csam.s.pPDGCBitmapHC[i]);
615
616 /* Restore the bitmap. */
617 rc = SSMR3GetMem(pSSM, pVM->csam.s.pPDBitmapHC[i], CSAM_PAGE_BITMAP_SIZE);
618 AssertRCReturn(rc, rc);
619 }
620 else
621 {
622 Assert(!pVM->csam.s.pPDGCBitmapHC[i]);
623 pVM->csam.s.pPDGCBitmapHC[i] = 0;
624 }
625 }
626
627 /*
628 * Restore page records
629 */
630 for (uint32_t i=0;i<csamInfo.savedstate.cPageRecords + csamInfo.savedstate.cPatchPageRecords;i++)
631 {
632 CSAMPAGEREC page;
633 PCSAMPAGE pPage;
634
635#if 0
636 rc = SSMR3GetMem(pSSM, &page, sizeof(page));
637#else
638 RT_ZERO(page);
639 rc = SSMR3GetStructEx(pSSM, &page, sizeof(page), SSMSTRUCT_FLAGS_MEM_BAND_AID, &g_aCsamPageRecFields[0], NULL);
640#endif
641 AssertRCReturn(rc, rc);
642
643 /*
644 * Recreate the page record
645 */
646 pPage = csamCreatePageRecord(pVM, page.page.pPageGC, page.page.enmTag, page.page.fCode32, page.page.fMonitorInvalidation);
647 AssertReturn(pPage, VERR_NO_MEMORY);
648
649 pPage->GCPhys = page.page.GCPhys;
650 pPage->fFlags = page.page.fFlags;
651 pPage->u64Hash = page.page.u64Hash;
652
653 if (page.page.pBitmap)
654 {
655 rc = SSMR3GetMem(pSSM, pPage->pBitmap, CSAM_PAGE_BITMAP_SIZE);
656 AssertRCReturn(rc, rc);
657 }
658 else
659 {
660 MMR3HeapFree(pPage->pBitmap);
661 pPage->pBitmap = 0;
662 }
663 }
664
665 /* Note: we don't restore aDangerousInstr; it will be recreated automatically. */
666 memset(&pVM->csam.s.aDangerousInstr, 0, sizeof(pVM->csam.s.aDangerousInstr));
667 pVM->csam.s.cDangerousInstr = 0;
668 pVM->csam.s.iDangerousInstr = 0;
669 return VINF_SUCCESS;
670}
671
672/**
673 * Convert guest context address to host context pointer
674 *
676 * @param pVM The VM to operate on.
677 * @param pCacheRec Address conversion cache record
678 * @param pGCPtr Guest context pointer
679 *
680 * @returns Host context pointer or NULL in case of an error
681 *
682 */
683static R3PTRTYPE(void *) CSAMGCVirtToHCVirt(PVM pVM, PCSAMP2GLOOKUPREC pCacheRec, RCPTRTYPE(uint8_t *) pGCPtr)
684{
685 int rc;
686 R3PTRTYPE(void *) pHCPtr;
687 Assert(pVM->cCpus == 1);
688 PVMCPU pVCpu = VMMGetCpu0(pVM);
689
690 STAM_PROFILE_START(&pVM->csam.s.StatTimeAddrConv, a);
691
692 pHCPtr = PATMR3GCPtrToHCPtr(pVM, pGCPtr);
693 if (pHCPtr) return pHCPtr;
694
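 /* Fast path: the lookup record caches the HC mapping of the last guest page
    we translated, so repeated conversions within that page skip PGM. */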
695 if (pCacheRec->pPageLocStartHC)
696 {
697 uint32_t offset = pGCPtr & PAGE_OFFSET_MASK;
698 if (pCacheRec->pGuestLoc == (pGCPtr & PAGE_BASE_GC_MASK))
699 {
700 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeAddrConv, a);
701 return pCacheRec->pPageLocStartHC + offset;
702 }
703 }
704
705 rc = PGMPhysGCPtr2R3Ptr(pVCpu, pGCPtr, &pHCPtr);
706 if (rc != VINF_SUCCESS)
707 {
708//// AssertMsgRC(rc, ("MMR3PhysGCVirt2HCVirtEx failed for %RRv\n", pGCPtr));
709 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeAddrConv, a);
710 return NULL;
711 }
712
713 pCacheRec->pPageLocStartHC = (R3PTRTYPE(uint8_t*))((RTHCUINTPTR)pHCPtr & PAGE_BASE_HC_MASK);
714 pCacheRec->pGuestLoc = pGCPtr & PAGE_BASE_GC_MASK;
715 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeAddrConv, a);
716 return pHCPtr;
717}
718
719/**
720 * Read callback for disassembly function; supports reading bytes that cross a page boundary
721 *
722 * @returns VBox status code.
723 * @param pSrc GC source pointer
724 * @param pDest HC destination pointer
725 * @param size Number of bytes to read
726 * @param pvUserdata Callback specific user data (pCpu)
727 *
728 */
729static DECLCALLBACK(int) CSAMR3ReadBytes(RTUINTPTR pSrc, uint8_t *pDest, unsigned size, void *pvUserdata)
730{
731 DISCPUSTATE *pCpu = (DISCPUSTATE *)pvUserdata;
732 PVM pVM = (PVM)pCpu->apvUserData[0];
733 RTHCUINTPTR pInstrHC = (RTHCUINTPTR)pCpu->apvUserData[1];
734 RTGCUINTPTR32 pInstrGC = (uintptr_t)pCpu->apvUserData[2];
735 int orgsize = size;
736 Assert(pVM->cCpus == 1);
737 PVMCPU pVCpu = VMMGetCpu0(pVM);
738
739 /* We are not interested in patched instructions, so read the original opcode bytes. */
740 /** @note single instruction patches (int3) are checked in CSAMR3AnalyseCallback */
741 for (int i=0;i<orgsize;i++)
742 {
743 int rc = PATMR3QueryOpcode(pVM, (RTRCPTR)pSrc, pDest);
744 if (RT_SUCCESS(rc))
745 {
746 pSrc++;
747 pDest++;
748 size--;
749 }
750 else
751 break;
752 }
753 if (size == 0)
754 return VINF_SUCCESS;
755
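 /* If the remaining bytes spill into a different page than the one the
    instruction starts in (and we're not reading patch memory), go through
    PGM; otherwise copy straight from the already mapped HC page. */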
756 if (PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pSrc + size - 1) && !PATMIsPatchGCAddr(pVM, pSrc))
757 {
758 return PGMPhysSimpleReadGCPtr(pVCpu, pDest, pSrc, size);
759 }
760 else
761 {
762 Assert(pInstrHC);
763
764 /* pInstrHC is the base address; adjust according to the GC pointer. */
765 pInstrHC = pInstrHC + (pSrc - pInstrGC);
766
767 memcpy(pDest, (void *)pInstrHC, size);
768 }
769
770 return VINF_SUCCESS;
771}
772
773inline int CSAMR3DISInstr(PVM pVM, DISCPUSTATE *pCpu, RTRCPTR InstrGC, uint8_t *InstrHC, uint32_t *pOpsize, char *pszOutput)
774{
775 (pCpu)->pfnReadBytes = CSAMR3ReadBytes;
776 (pCpu)->apvUserData[0] = pVM;
777 (pCpu)->apvUserData[1] = InstrHC;
778 (pCpu)->apvUserData[2] = (void *)InstrGC; Assert(sizeof(InstrGC) <= sizeof(pCpu->apvUserData[0]));
779#ifdef DEBUG
780 return DISInstrEx(pCpu, InstrGC, 0, pOpsize, pszOutput, OPTYPE_ALL);
781#else
782 /* We are interested in everything except harmless stuff */
783 return DISInstrEx(pCpu, InstrGC, 0, pOpsize, pszOutput, ~(OPTYPE_INVALID | OPTYPE_HARMLESS | OPTYPE_RRM_MASK));
784#endif
785}
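
/*
 * Typical call pattern (sketch of how csamAnalyseCodeStream below drives it):
 *
 *     DISCPUSTATE cpu;
 *     uint32_t    opsize;
 *     cpu.mode = fCode32 ? CPUMODE_32BIT : CPUMODE_16BIT;
 *     rc = CSAMR3DISInstr(pVM, &cpu, pCurInstrGC, pCurInstrHC, &opsize, NULL);
 *     if (RT_SUCCESS(rc))
 *         pCurInstrGC += opsize;   /* advance to the next instruction */
 */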
786
787/**
788 * Analyses the instructions following a cli for compliance with our cli patching heuristics.
789 *
790 * @returns VBox status code.
791 * @param pVM The VM to operate on.
792 * @param pCpu CPU disassembly state
793 * @param pInstrGC Guest context pointer to privileged instruction
794 * @param pCurInstrGC Guest context pointer to the current instruction
795 * @param pCacheRec GC to HC cache record
796 * @param pUserData User pointer (callback specific)
797 *
798 */
799static int CSAMR3AnalyseCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC,
800 PCSAMP2GLOOKUPREC pCacheRec, void *pUserData)
801{
802 PCSAMPAGE pPage = (PCSAMPAGE)pUserData;
803 int rc;
804
805 switch(pCpu->pCurInstr->opcode)
806 {
807 case OP_INT:
808 Assert(pCpu->param1.flags & USE_IMMEDIATE8);
809 if (pCpu->param1.parval == 3)
810 {
811 //two byte int 3
812 return VINF_SUCCESS;
813 }
814 break;
815
816 case OP_ILLUD2:
817 /* This appears to be some kind of kernel panic in Linux 2.4; no point in continuing. */
818 case OP_RETN:
819 case OP_INT3:
820 case OP_INVALID:
821#if 1
822 /* removing breaks win2k guests? */
823 case OP_IRET:
824#endif
825 return VINF_SUCCESS;
826 }
827
828 // Check for exit points
829 switch (pCpu->pCurInstr->opcode)
830 {
831 /* It's not a good idea to patch pushf instructions:
832 * - increases the chance of conflicts (code jumping to the next instruction)
833 * - better to patch the cli
834 * - code that branches before the cli will likely hit an int 3
835 * - in general doesn't offer any benefits as we don't allow nested patch blocks (IF is always 1)
836 */
837 case OP_PUSHF:
838 case OP_POPF:
839 break;
840
841 case OP_CLI:
842 {
843 uint32_t cbInstr = 0;
844 uint32_t opsize = pCpu->opsize;
845 bool fCode32 = pPage->fCode32;
846
847 Assert(fCode32);
848
849 PATMR3AddHint(pVM, pCurInstrGC, (fCode32) ? PATMFL_CODE32 : 0);
850
851 /* Make sure the instructions that follow the cli have not been encountered before. */
852 while (true)
853 {
854 DISCPUSTATE cpu;
855 uint8_t *pCurInstrHC = 0;
856
857 if (cbInstr + opsize >= SIZEOF_NEARJUMP32)
858 break;
859
860 if (csamIsCodeScanned(pVM, pCurInstrGC + opsize, &pPage) == true)
861 {
862 /* We've scanned the next instruction(s) already. This means we've followed a branch that ended up there before -> dangerous!! */
863 PATMR3DetectConflict(pVM, pCurInstrGC, pCurInstrGC + opsize);
864 break;
865 }
866 pCurInstrGC += opsize;
867 cbInstr += opsize;
868
869 pCurInstrHC = (uint8_t *)CSAMGCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
870 if (pCurInstrHC == NULL)
871 {
872 Log(("CSAMGCVirtToHCVirt failed for %RRv\n", pCurInstrGC));
873 break;
874 }
875 Assert(VALID_PTR(pCurInstrHC));
876
877 cpu.mode = (fCode32) ? CPUMODE_32BIT : CPUMODE_16BIT;
878 rc = CSAMR3DISInstr(pVM, &cpu, pCurInstrGC, pCurInstrHC, &opsize, NULL);
879 Assert(RT_SUCCESS(rc));
880 if (RT_FAILURE(rc))
881 break;
882 }
883 break;
884 }
885
886 case OP_PUSH:
887 if (pCpu->pCurInstr->param1 != OP_PARM_REG_CS)
888 break;
889
890 /* no break */
891 case OP_STR:
892 case OP_LSL:
893 case OP_LAR:
894 case OP_SGDT:
895 case OP_SLDT:
896 case OP_SIDT:
897 case OP_SMSW:
898 case OP_VERW:
899 case OP_VERR:
900 case OP_CPUID:
901 case OP_IRET:
902#ifdef DEBUG
903 switch(pCpu->pCurInstr->opcode)
904 {
905 case OP_STR:
906 Log(("Privileged instruction at %RRv: str!!\n", pCurInstrGC));
907 break;
908 case OP_LSL:
909 Log(("Privileged instruction at %RRv: lsl!!\n", pCurInstrGC));
910 break;
911 case OP_LAR:
912 Log(("Privileged instruction at %RRv: lar!!\n", pCurInstrGC));
913 break;
914 case OP_SGDT:
915 Log(("Privileged instruction at %RRv: sgdt!!\n", pCurInstrGC));
916 break;
917 case OP_SLDT:
918 Log(("Privileged instruction at %RRv: sldt!!\n", pCurInstrGC));
919 break;
920 case OP_SIDT:
921 Log(("Privileged instruction at %RRv: sidt!!\n", pCurInstrGC));
922 break;
923 case OP_SMSW:
924 Log(("Privileged instruction at %RRv: smsw!!\n", pCurInstrGC));
925 break;
926 case OP_VERW:
927 Log(("Privileged instruction at %RRv: verw!!\n", pCurInstrGC));
928 break;
929 case OP_VERR:
930 Log(("Privileged instruction at %RRv: verr!!\n", pCurInstrGC));
931 break;
932 case OP_CPUID:
933 Log(("Privileged instruction at %RRv: cpuid!!\n", pCurInstrGC));
934 break;
935 case OP_PUSH:
936 Log(("Privileged instruction at %RRv: push cs!!\n", pCurInstrGC));
937 break;
938 case OP_IRET:
939 Log(("Privileged instruction at %RRv: iret!!\n", pCurInstrGC));
940 break;
941 }
942#endif
943
944 if (PATMR3HasBeenPatched(pVM, pCurInstrGC) == false)
945 {
946 rc = PATMR3InstallPatch(pVM, pCurInstrGC, (pPage->fCode32) ? PATMFL_CODE32 : 0);
947 if (RT_FAILURE(rc))
948 {
949 Log(("PATMR3InstallPatch failed with %d\n", rc));
950 return VWRN_CONTINUE_ANALYSIS;
951 }
952 }
953 if (pCpu->pCurInstr->opcode == OP_IRET)
954 return VINF_SUCCESS; /* Look no further in this branch. */
955
956 return VWRN_CONTINUE_ANALYSIS;
957
958 case OP_JMP:
959 case OP_CALL:
960 {
961 // return or jump/call through a jump table
962 if (OP_PARM_VTYPE(pCpu->pCurInstr->param1) != OP_PARM_J)
963 {
964#ifdef DEBUG
965 switch(pCpu->pCurInstr->opcode)
966 {
967 case OP_JMP:
968 Log(("Control Flow instruction at %RRv: jmp!!\n", pCurInstrGC));
969 break;
970 case OP_CALL:
971 Log(("Control Flow instruction at %RRv: call!!\n", pCurInstrGC));
972 break;
973 }
974#endif
975 return VWRN_CONTINUE_ANALYSIS;
976 }
977 return VWRN_CONTINUE_ANALYSIS;
978 }
979
980 }
981
982 return VWRN_CONTINUE_ANALYSIS;
983}
984
985#ifdef CSAM_ANALYSE_BEYOND_RET
986/**
987 * Wrapper for csamAnalyseCodeStream for call instructions.
988 *
989 * @returns VBox status code.
990 * @param pVM The VM to operate on.
991 * @param pInstrGC Guest context pointer to privileged instruction
992 * @param pCurInstrGC Guest context pointer to the current instruction
993 * @param fCode32 16 or 32 bits code
994 * @param pfnCSAMR3Analyse Callback for testing the disassembled instruction
995 * @param pUserData User pointer (callback specific)
996 *
997 */
998static int csamAnalyseCallCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, bool fCode32,
999 PFN_CSAMR3ANALYSE pfnCSAMR3Analyse, void *pUserData, PCSAMP2GLOOKUPREC pCacheRec)
1000{
1001 int rc;
1002 CSAMCALLEXITREC CallExitRec;
1003 PCSAMCALLEXITREC pOldCallRec;
1004 PCSAMPAGE pPage = 0;
1005 uint32_t i;
1006
1007 CallExitRec.cInstrAfterRet = 0;
1008
1009 pOldCallRec = pCacheRec->pCallExitRec;
1010 pCacheRec->pCallExitRec = &CallExitRec;
1011
1012 rc = csamAnalyseCodeStream(pVM, pInstrGC, pCurInstrGC, fCode32, pfnCSAMR3Analyse, pUserData, pCacheRec);
1013
1014 for (i=0;i<CallExitRec.cInstrAfterRet;i++)
1015 {
1016 PCSAMPAGE pPage = 0;
1017
1018 pCurInstrGC = CallExitRec.pInstrAfterRetGC[i];
1019
1020 /* Check if we've previously encountered the instruction after the ret. */
1021 if (csamIsCodeScanned(pVM, pCurInstrGC, &pPage) == false)
1022 {
1023 DISCPUSTATE cpu;
1024 uint32_t opsize;
1025 uint8_t *pCurInstrHC = 0;
1026 int rc2;
1027#ifdef DEBUG
1028 char szOutput[256];
1029#endif
1030 if (pPage == NULL)
1031 {
1032 /* New address; let's take a look at it. */
1033 pPage = csamCreatePageRecord(pVM, pCurInstrGC, CSAM_TAG_CSAM, fCode32);
1034 if (pPage == NULL)
1035 {
1036 rc = VERR_NO_MEMORY;
1037 goto done;
1038 }
1039 }
1040
1041 /**
1042 * Some generic requirements for recognizing an adjacent function:
1043 * - alignment fillers that consist of:
1044 * - nop
1045 * - lea genregX, [genregX (+ 0)]
1046 * - push ebp after the filler (can extend this later); aligned on at least a 4-byte boundary
1047 */
1048 for (int j=0;j<16;j++)
1049 {
1050 pCurInstrHC = (uint8_t *)CSAMGCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
1051 if (pCurInstrHC == NULL)
1052 {
1053 Log(("CSAMGCVirtToHCVirt failed for %RRv\n", pCurInstrGC));
1054 goto done;
1055 }
1056 Assert(VALID_PTR(pCurInstrHC));
1057
1058 cpu.mode = (fCode32) ? CPUMODE_32BIT : CPUMODE_16BIT;
1059 STAM_PROFILE_START(&pVM->csam.s.StatTimeDisasm, a);
1060#ifdef DEBUG
1061 rc2 = CSAMR3DISInstr(pVM, &cpu, pCurInstrGC, pCurInstrHC, &opsize, szOutput);
1062 if (RT_SUCCESS(rc2)) Log(("CSAM Call Analysis: %s", szOutput));
1063#else
1064 rc2 = CSAMR3DISInstr(pVM, &cpu, pCurInstrGC, pCurInstrHC, &opsize, NULL);
1065#endif
1066 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeDisasm, a);
1067 if (RT_FAILURE(rc2))
1068 {
1069 Log(("Disassembly failed at %RRv with %Rrc (probably page not present) -> return to caller\n", pCurInstrGC, rc2));
1070 goto done;
1071 }
1072
1073 STAM_COUNTER_ADD(&pVM->csam.s.StatNrBytesRead, opsize);
1074
1075 RCPTRTYPE(uint8_t *) addr = 0;
1076 PCSAMPAGE pJmpPage = NULL;
1077
1078 if (PAGE_ADDRESS(pCurInstrGC) != PAGE_ADDRESS(pCurInstrGC + opsize - 1))
1079 {
1080 if (!PGMGstIsPagePresent(VMMGetCpu0(pVM), pCurInstrGC + opsize - 1))
1081 {
1082 /// @todo fault in the page
1083 Log(("Page for current instruction %RRv is not present!!\n", pCurInstrGC));
1084 goto done;
1085 }
1086 //all is fine, let's continue
1087 csamR3CheckPageRecord(pVM, pCurInstrGC + opsize - 1);
1088 }
1089
1090 switch (cpu.pCurInstr->opcode)
1091 {
1092 case OP_NOP:
1093 case OP_INT3:
1094 break; /* acceptable */
1095
1096 case OP_LEA:
1097 /* Must be similar to:
1098 *
1099 * lea esi, [esi]
1100 * lea esi, [esi+0]
1101 * Any register is allowed as long as source and destination are identical.
1102 */
1103 if ( cpu.param1.flags != USE_REG_GEN32
1104 || ( cpu.param2.flags != USE_REG_GEN32
1105 && ( !(cpu.param2.flags & USE_REG_GEN32)
1106 || !(cpu.param2.flags & (USE_DISPLACEMENT8|USE_DISPLACEMENT16|USE_DISPLACEMENT32))
1107 || cpu.param2.parval != 0
1108 )
1109 )
1110 || cpu.param1.base.reg_gen32 != cpu.param2.base.reg_gen32
1111 )
1112 {
1113 STAM_COUNTER_INC(&pVM->csam.s.StatScanNextFunctionFailed);
1114 goto next_function;
1115 }
1116 break;
1117
1118 case OP_PUSH:
1119 {
1120 if ( (pCurInstrGC & 0x3) != 0
1121 || cpu.param1.flags != USE_REG_GEN32
1122 || cpu.param1.base.reg_gen32 != USE_REG_EBP
1123 )
1124 {
1125 STAM_COUNTER_INC(&pVM->csam.s.StatScanNextFunctionFailed);
1126 goto next_function;
1127 }
1128
1129 if (csamIsCodeScanned(pVM, pCurInstrGC, &pPage) == false)
1130 {
1131 CSAMCALLEXITREC CallExitRec2;
1132 CallExitRec2.cInstrAfterRet = 0;
1133
1134 pCacheRec->pCallExitRec = &CallExitRec2;
1135
1136 /* Analyse the function. */
1137 Log(("Found new function at %RRv\n", pCurInstrGC));
1138 STAM_COUNTER_INC(&pVM->csam.s.StatScanNextFunction);
1139 csamAnalyseCallCodeStream(pVM, pInstrGC, pCurInstrGC, fCode32, pfnCSAMR3Analyse, pUserData, pCacheRec);
1140 }
1141 goto next_function;
1142 }
1143
1144 case OP_SUB:
1145 {
1146 if ( (pCurInstrGC & 0x3) != 0
1147 || cpu.param1.flags != USE_REG_GEN32
1148 || cpu.param1.base.reg_gen32 != USE_REG_ESP
1149 )
1150 {
1151 STAM_COUNTER_INC(&pVM->csam.s.StatScanNextFunctionFailed);
1152 goto next_function;
1153 }
1154
1155 if (csamIsCodeScanned(pVM, pCurInstrGC, &pPage) == false)
1156 {
1157 CSAMCALLEXITREC CallExitRec2;
1158 CallExitRec2.cInstrAfterRet = 0;
1159
1160 pCacheRec->pCallExitRec = &CallExitRec2;
1161
1162 /* Analyse the function. */
1163 Log(("Found new function at %RRv\n", pCurInstrGC));
1164 STAM_COUNTER_INC(&pVM->csam.s.StatScanNextFunction);
1165 csamAnalyseCallCodeStream(pVM, pInstrGC, pCurInstrGC, fCode32, pfnCSAMR3Analyse, pUserData, pCacheRec);
1166 }
1167 goto next_function;
1168 }
1169
1170 default:
1171 STAM_COUNTER_INC(&pVM->csam.s.StatScanNextFunctionFailed);
1172 goto next_function;
1173 }
1174 /* Mark it as scanned. */
1175 csamMarkCode(pVM, pPage, pCurInstrGC, opsize, true);
1176 pCurInstrGC += opsize;
1177 } /* for at most 16 instructions */
1178next_function:
1179 ; /* MSVC complains otherwise */
1180 }
1181 }
1182done:
1183 pCacheRec->pCallExitRec = pOldCallRec;
1184 return rc;
1185}
1186#else
1187#define csamAnalyseCallCodeStream csamAnalyseCodeStream
1188#endif
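
/* Without CSAM_ANALYSE_BEYOND_RET the call-specific wrapper above simply aliases
   the generic code stream analysis, i.e. no scanning past ret instructions. */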
1189
1190/**
1191 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
1192 *
1193 * @returns VBox status code.
1194 * @param pVM The VM to operate on.
1195 * @param pInstrGC Guest context pointer to privileged instruction
1196 * @param pCurInstrGC Guest context pointer to the current instruction
1197 * @param fCode32 16 or 32 bits code
1198 * @param pfnCSAMR3Analyse Callback for testing the disassembled instruction
1199 * @param pUserData User pointer (callback specific)
1200 *
1201 */
1202static int csamAnalyseCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, bool fCode32,
1203 PFN_CSAMR3ANALYSE pfnCSAMR3Analyse, void *pUserData, PCSAMP2GLOOKUPREC pCacheRec)
1204{
1205 DISCPUSTATE cpu;
1206 PCSAMPAGE pPage = (PCSAMPAGE)pUserData;
1207 int rc = VWRN_CONTINUE_ANALYSIS;
1208 uint32_t opsize;
1209 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
1210 int rc2;
1211 Assert(pVM->cCpus == 1);
1212 PVMCPU pVCpu = VMMGetCpu0(pVM);
1213
1214#ifdef DEBUG
1215 char szOutput[256];
1216#endif
1217
1218 LogFlow(("csamAnalyseCodeStream: code at %RRv depth=%d\n", pCurInstrGC, pCacheRec->depth));
1219
1220 pVM->csam.s.fScanningStarted = true;
1221
1222 pCacheRec->depth++;
1223 /*
1224 * Limit the call depth. (rather arbitrary upper limit; too low and we won't detect certain
1225 * cpuid instructions in Linux kernels; too high and we waste too much time scanning code)
1226 * (512 is necessary to detect cpuid instructions in Red Hat EL4; see defect 1355)
1227 * @note we are using a lot of stack here; a couple of hundred KB when we go to the full depth (!)
1228 */
1229 if (pCacheRec->depth > 512)
1230 {
1231 LogFlow(("CSAM: maximum calldepth reached for %RRv\n", pCurInstrGC));
1232 pCacheRec->depth--;
1233 return VINF_SUCCESS; //let's not go on forever
1234 }
1235
1236 Assert(!PATMIsPatchGCAddr(pVM, pCurInstrGC));
1237 csamR3CheckPageRecord(pVM, pCurInstrGC);
1238
1239 while(rc == VWRN_CONTINUE_ANALYSIS)
1240 {
1241 if (csamIsCodeScanned(pVM, pCurInstrGC, &pPage) == false)
1242 {
1243 if (pPage == NULL)
1244 {
1245 /* New address; let's take a look at it. */
1246 pPage = csamCreatePageRecord(pVM, pCurInstrGC, CSAM_TAG_CSAM, fCode32);
1247 if (pPage == NULL)
1248 {
1249 rc = VERR_NO_MEMORY;
1250 goto done;
1251 }
1252 }
1253 }
1254 else
1255 {
1256 LogFlow(("Code at %RRv has been scanned before\n", pCurInstrGC));
1257 rc = VINF_SUCCESS;
1258 goto done;
1259 }
1260
1261 pCurInstrHC = (uint8_t *)CSAMGCVirtToHCVirt(pVM, pCacheRec, pCurInstrGC);
1262 if (pCurInstrHC == NULL)
1263 {
1264 Log(("CSAMGCVirtToHCVirt failed for %RRv\n", pCurInstrGC));
1265 rc = VERR_PATCHING_REFUSED;
1266 goto done;
1267 }
1268 Assert(VALID_PTR(pCurInstrHC));
1269
1270 cpu.mode = (fCode32) ? CPUMODE_32BIT : CPUMODE_16BIT;
1271 STAM_PROFILE_START(&pVM->csam.s.StatTimeDisasm, a);
1272#ifdef DEBUG
1273 rc2 = CSAMR3DISInstr(pVM, &cpu, pCurInstrGC, pCurInstrHC, &opsize, szOutput);
1274 if (RT_SUCCESS(rc2)) Log(("CSAM Analysis: %s", szOutput));
1275#else
1276 rc2 = CSAMR3DISInstr(pVM, &cpu, pCurInstrGC, pCurInstrHC, &opsize, NULL);
1277#endif
1278 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeDisasm, a);
1279 if (RT_FAILURE(rc2))
1280 {
1281 Log(("Disassembly failed at %RRv with %Rrc (probably page not present) -> return to caller\n", pCurInstrGC, rc2));
1282 rc = VINF_SUCCESS;
1283 goto done;
1284 }
1285
1286 STAM_COUNTER_ADD(&pVM->csam.s.StatNrBytesRead, opsize);
1287
1288 csamMarkCode(pVM, pPage, pCurInstrGC, opsize, true);
1289
1290 RCPTRTYPE(uint8_t *) addr = 0;
1291 PCSAMPAGE pJmpPage = NULL;
1292
1293 if (PAGE_ADDRESS(pCurInstrGC) != PAGE_ADDRESS(pCurInstrGC + opsize - 1))
1294 {
1295 if (!PGMGstIsPagePresent(pVCpu, pCurInstrGC + opsize - 1))
1296 {
1297 /// @todo fault in the page
1298 Log(("Page for current instruction %RRv is not present!!\n", pCurInstrGC));
1299 rc = VWRN_CONTINUE_ANALYSIS;
1300 goto next_please;
1301 }
1302 //all is fine, let's continue
1303 csamR3CheckPageRecord(pVM, pCurInstrGC + opsize - 1);
1304 }
1305 /*
1306 * If it's harmless, then don't bother checking it (the disasm tables had better be accurate!)
1307 */
1308 if ((cpu.pCurInstr->optype & ~OPTYPE_RRM_MASK) == OPTYPE_HARMLESS)
1309 {
1310 AssertMsg(pfnCSAMR3Analyse(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec, (void *)pPage) == VWRN_CONTINUE_ANALYSIS, ("Instruction incorrectly marked harmless?!?!?\n"));
1311 rc = VWRN_CONTINUE_ANALYSIS;
1312 goto next_please;
1313 }
1314
1315#ifdef CSAM_ANALYSE_BEYOND_RET
1316 /* Remember the address of the instruction following the ret in case the parent instruction was a call. */
1317 if ( pCacheRec->pCallExitRec
1318 && cpu.pCurInstr->opcode == OP_RETN
1319 && pCacheRec->pCallExitRec->cInstrAfterRet < CSAM_MAX_CALLEXIT_RET)
1320 {
1321 pCacheRec->pCallExitRec->pInstrAfterRetGC[pCacheRec->pCallExitRec->cInstrAfterRet] = pCurInstrGC + opsize;
1322 pCacheRec->pCallExitRec->cInstrAfterRet++;
1323 }
1324#endif
1325
1326 rc = pfnCSAMR3Analyse(pVM, &cpu, pInstrGC, pCurInstrGC, pCacheRec, (void *)pPage);
1327 if (rc == VINF_SUCCESS)
1328 goto done;
1329
1330 // For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction)
1331 if ( ((cpu.pCurInstr->optype & OPTYPE_CONTROLFLOW) && (OP_PARM_VTYPE(cpu.pCurInstr->param1) == OP_PARM_J))
1332 || (cpu.pCurInstr->opcode == OP_CALL && cpu.param1.flags == USE_DISPLACEMENT32)) /* simple indirect call (call dword ptr [address]) */
1333 {
1334 /* We need to parse 'call dword ptr [address]' type of calls to catch cpuid instructions in some recent Linux distributions (e.g. OpenSuse 10.3) */
1335 if ( cpu.pCurInstr->opcode == OP_CALL
1336 && cpu.param1.flags == USE_DISPLACEMENT32)
1337 {
1338 addr = 0;
1339 PGMPhysSimpleReadGCPtr(pVCpu, &addr, (RTRCUINTPTR)cpu.param1.disp32, sizeof(addr));
1340 }
1341 else
1342 addr = CSAMResolveBranch(&cpu, pCurInstrGC);
1343
1344 if (addr == 0)
1345 {
1346 Log(("We don't support far jumps here!! (%08X)\n", cpu.param1.flags));
1347 rc = VINF_SUCCESS;
1348 break;
1349 }
1350 Assert(!PATMIsPatchGCAddr(pVM, addr));
1351
1352 /* If the target address lies in a patch generated jump, then special action needs to be taken. */
1353 PATMR3DetectConflict(pVM, pCurInstrGC, addr);
1354
1355 /* Same page? */
1356 if (PAGE_ADDRESS(addr) != PAGE_ADDRESS(pCurInstrGC ))
1357 {
1358 if (!PGMGstIsPagePresent(pVCpu, addr))
1359 {
1360 Log(("Page for current instruction %RRv is not present!!\n", addr));
1361 rc = VWRN_CONTINUE_ANALYSIS;
1362 goto next_please;
1363 }
1364
1365 /* All is fine, let's continue. */
1366 csamR3CheckPageRecord(pVM, addr);
1367 }
1368
1369 pJmpPage = NULL;
1370 if (csamIsCodeScanned(pVM, addr, &pJmpPage) == false)
1371 {
1372 if (pJmpPage == NULL)
1373 {
1374 /* New branch target; let's take a look at it. */
1375 pJmpPage = csamCreatePageRecord(pVM, addr, CSAM_TAG_CSAM, fCode32);
1376 if (pJmpPage == NULL)
1377 {
1378 rc = VERR_NO_MEMORY;
1379 goto done;
1380 }
1381 Assert(pPage);
1382 }
1383 if (cpu.pCurInstr->opcode == OP_CALL)
1384 rc = csamAnalyseCallCodeStream(pVM, pInstrGC, addr, fCode32, pfnCSAMR3Analyse, (void *)pJmpPage, pCacheRec);
1385 else
1386 rc = csamAnalyseCodeStream(pVM, pInstrGC, addr, fCode32, pfnCSAMR3Analyse, (void *)pJmpPage, pCacheRec);
1387
1388 if (rc != VINF_SUCCESS) {
1389 goto done;
1390 }
1391 }
1392 if (cpu.pCurInstr->opcode == OP_JMP)
1393 {//unconditional jump; return to caller
1394 rc = VINF_SUCCESS;
1395 goto done;
1396 }
1397
1398 rc = VWRN_CONTINUE_ANALYSIS;
1399 } //if ((cpu.pCurInstr->optype & OPTYPE_CONTROLFLOW) && (OP_PARM_VTYPE(cpu.pCurInstr->param1) == OP_PARM_J))
1400#ifdef CSAM_SCAN_JUMP_TABLE
1401 else
1402 if ( cpu.pCurInstr->opcode == OP_JMP
1403 && (cpu.param1.flags & (USE_DISPLACEMENT32|USE_INDEX|USE_SCALE)) == (USE_DISPLACEMENT32|USE_INDEX|USE_SCALE)
1404 )
1405 {
1406 RTRCPTR pJumpTableGC = (RTRCPTR)cpu.param1.disp32;
1407 uint8_t *pJumpTableHC;
1408 int rc2;
1409
1410 Log(("Jump through jump table\n"));
1411
1412 rc2 = PGMPhysGCPtr2R3Ptr(pVCpu, pJumpTableGC, (PRTHCPTR)&pJumpTableHC);
1413 if (rc2 == VINF_SUCCESS)
1414 {
1415 for (uint32_t i=0;i<2;i++)
1416 {
1417 uint64_t fFlags;
1418
1419 addr = pJumpTableGC + cpu.param1.scale * i;
1420 /* Same page? */
1421 if (PAGE_ADDRESS(addr) != PAGE_ADDRESS(pJumpTableGC))
1422 break;
1423
1424 addr = *(RTRCPTR *)(pJumpTableHC + cpu.param1.scale * i);
1425
1426 rc2 = PGMGstGetPage(pVCpu, addr, &fFlags, NULL);
1427 if ( rc2 != VINF_SUCCESS
1428 || (fFlags & X86_PTE_US)
1429 || !(fFlags & X86_PTE_P)
1430 )
1431 break;
1432
1433 Log(("Jump to %RRv\n", addr));
1434
1435 pJmpPage = NULL;
1436 if (csamIsCodeScanned(pVM, addr, &pJmpPage) == false)
1437 {
1438 if (pJmpPage == NULL)
1439 {
1440 /* New branch target; let's take a look at it. */
1441 pJmpPage = csamCreatePageRecord(pVM, addr, CSAM_TAG_CSAM, fCode32);
1442 if (pJmpPage == NULL)
1443 {
1444 rc = VERR_NO_MEMORY;
1445 goto done;
1446 }
1447 Assert(pPage);
1448 }
1449 rc = csamAnalyseCodeStream(pVM, pInstrGC, addr, fCode32, pfnCSAMR3Analyse, (void *)pJmpPage, pCacheRec);
1450 if (rc != VINF_SUCCESS) {
1451 goto done;
1452 }
1453 }
1454 }
1455 }
1456 }
1457#endif
1458 if (rc != VWRN_CONTINUE_ANALYSIS) {
1459 break; //done!
1460 }
1461next_please:
1462 if (cpu.pCurInstr->opcode == OP_JMP)
1463 {
1464 rc = VINF_SUCCESS;
1465 goto done;
1466 }
1467 pCurInstrGC += opsize;
1468 }
1469done:
1470 pCacheRec->depth--;
1471 return rc;
1472}
1473
1474
1475/**
1476 * Calculates the 64-bit hash value for the current page
1477 *
1478 * @returns hash value
1479 * @param pVM The VM to operate on.
1480 * @param pInstr Page address
1481 */
1482uint64_t csamR3CalcPageHash(PVM pVM, RTRCPTR pInstr)
1483{
1484 uint64_t hash = 0;
1485 uint32_t val[5];
1486 int rc;
1487 Assert(pVM->cCpus == 1);
1488 PVMCPU pVCpu = VMMGetCpu0(pVM);
1489
1490 Assert((pInstr & PAGE_OFFSET_MASK) == 0);
1491
1492 rc = PGMPhysSimpleReadGCPtr(pVCpu, &val[0], pInstr, sizeof(val[0]));
1493 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc = %Rrc\n", rc));
1494 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
1495 {
1496 Log(("csamR3CalcPageHash: page %RRv not present!!\n", pInstr));
1497 return ~0ULL;
1498 }
1499
1500 rc = PGMPhysSimpleReadGCPtr(pVCpu, &val[1], pInstr+1024, sizeof(val[0]));
1501 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc = %Rrc\n", rc));
1502 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
1503 {
1504 Log(("csamR3CalcPageHash: page %RRv not present!!\n", pInstr));
1505 return ~0ULL;
1506 }
1507
1508 rc = PGMPhysSimpleReadGCPtr(pVCpu, &val[2], pInstr+2048, sizeof(val[0]));
1509 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc = %Rrc\n", rc));
1510 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
1511 {
1512 Log(("csamR3CalcPageHash: page %RRv not present!!\n", pInstr));
1513 return ~0ULL;
1514 }
1515
1516 rc = PGMPhysSimpleReadGCPtr(pVCpu, &val[3], pInstr+3072, sizeof(val[0]));
1517 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc = %Rrc\n", rc));
1518 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
1519 {
1520 Log(("csamR3CalcPageHash: page %RRv not present!!\n", pInstr));
1521 return ~0ULL;
1522 }
1523
1524 rc = PGMPhysSimpleReadGCPtr(pVCpu, &val[4], pInstr+4092, sizeof(val[0]));
1525 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc = %Rrc\n", rc));
1526 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
1527 {
1528 Log(("csamR3CalcPageHash: page %RRv not present!!\n", pInstr));
1529 return ~0ULL;
1530 }
1531
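 /* The hash mixes five dwords sampled across the page (offsets 0, 1024, 2048,
    3072 and 4092).  ~0ULL is reserved as the "page not present" marker, hence
    the final result is nudged away from it below. */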
1532 // don't want to get division by zero traps
1533 val[2] |= 1;
1534 val[4] |= 1;
1535
1536 hash = (uint64_t)val[0] * (uint64_t)val[1] / (uint64_t)val[2] + (val[3]%val[4]);
1537 return (hash == ~0ULL) ? hash - 1 : hash;
1538}
1539
1540
1541/**
1542 * Notify CSAM of a page flush
1543 *
1544 * @returns VBox status code
1545 * @param pVM The VM to operate on.
1546 * @param addr GC address of the page to flush
1547 * @param fRemovePage Page removal flag
1548 */
1549static int csamFlushPage(PVM pVM, RTRCPTR addr, bool fRemovePage)
1550{
1551 PCSAMPAGEREC pPageRec;
1552 int rc;
1553 RTGCPHYS GCPhys = 0;
1554 uint64_t fFlags = 0;
1555 Assert(pVM->cCpus == 1 || !CSAMIsEnabled(pVM));
1556
1557 if (!CSAMIsEnabled(pVM))
1558 return VINF_SUCCESS;
1559
1560 PVMCPU pVCpu = VMMGetCpu0(pVM);
1561
1562 STAM_PROFILE_START(&pVM->csam.s.StatTimeFlushPage, a);
1563
1564 addr = addr & PAGE_BASE_GC_MASK;
1565
1566 /*
1567 * Note: searching for the page in our tree first is more expensive (skipped flushes are two orders of magnitude more common)
1568 */
1569 if (pVM->csam.s.pPageTree == NULL)
1570 {
1571 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeFlushPage, a);
1572 return VWRN_CSAM_PAGE_NOT_FOUND;
1573 }
1574
1575 rc = PGMGstGetPage(pVCpu, addr, &fFlags, &GCPhys);
1576 /* Returned at a very early stage (no paging yet presumably). */
1577 if (rc == VERR_NOT_SUPPORTED)
1578 {
1579 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeFlushPage, a);
1580 return rc;
1581 }
1582
1583 if (RT_SUCCESS(rc))
1584 {
1585 if ( (fFlags & X86_PTE_US)
1586 || rc == VERR_PGM_PHYS_PAGE_RESERVED
1587 )
1588 {
1589 /* User page -> not relevant for us. */
1590 STAM_COUNTER_ADD(&pVM->csam.s.StatNrFlushesSkipped, 1);
1591 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeFlushPage, a);
1592 return VINF_SUCCESS;
1593 }
1594 }
1595 else
1596 if (rc != VERR_PAGE_NOT_PRESENT && rc != VERR_PAGE_TABLE_NOT_PRESENT)
1597 AssertMsgFailed(("PGMR3GetPage %RRv failed with %Rrc\n", addr, rc));
1598
1599 pPageRec = (PCSAMPAGEREC)RTAvlPVGet(&pVM->csam.s.pPageTree, (AVLPVKEY)addr);
1600 if (pPageRec)
1601 {
1602 if ( GCPhys == pPageRec->page.GCPhys
1603 && (fFlags & X86_PTE_P))
1604 {
1605 STAM_COUNTER_ADD(&pVM->csam.s.StatNrFlushesSkipped, 1);
1606 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeFlushPage, a);
1607 return VINF_SUCCESS;
1608 }
1609
1610 Log(("CSAMR3FlushPage: page %RRv has changed -> FLUSH (rc=%Rrc) (Phys: %RGp vs %RGp)\n", addr, rc, GCPhys, pPageRec->page.GCPhys));
1611
1612 STAM_COUNTER_ADD(&pVM->csam.s.StatNrFlushes, 1);
1613
1614 if (fRemovePage)
1615 csamRemovePageRecord(pVM, addr);
1616 else
1617 {
1618 CSAMMarkPage(pVM, addr, false);
1619 pPageRec->page.GCPhys = 0;
1620 pPageRec->page.fFlags = 0;
1621 rc = PGMGstGetPage(pVCpu, addr, &pPageRec->page.fFlags, &pPageRec->page.GCPhys);
1622 if (rc == VINF_SUCCESS)
1623 pPageRec->page.u64Hash = csamR3CalcPageHash(pVM, addr);
1624
1625 if (pPageRec->page.pBitmap == NULL)
1626 {
1627 pPageRec->page.pBitmap = (uint8_t *)MMR3HeapAllocZ(pVM, MM_TAG_CSAM_PATCH, CSAM_PAGE_BITMAP_SIZE);
1628 Assert(pPageRec->page.pBitmap);
1629 if (pPageRec->page.pBitmap == NULL)
1630 return VERR_NO_MEMORY;
1631 }
1632 else
1633 memset(pPageRec->page.pBitmap, 0, CSAM_PAGE_BITMAP_SIZE);
1634 }
1635
1636
1637 /*
1638 * Inform patch manager about the flush; no need to repeat the above check twice.
1639 */
1640 PATMR3FlushPage(pVM, addr);
1641
1642 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeFlushPage, a);
1643 return VINF_SUCCESS;
1644 }
1645 else
1646 {
1647 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeFlushPage, a);
1648 return VWRN_CSAM_PAGE_NOT_FOUND;
1649 }
1650}
1651
1652/**
1653 * Notify CSAM of a page flush
1654 *
1655 * @returns VBox status code
1656 * @param pVM The VM to operate on.
1657 * @param addr GC address of the page to flush
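 *
 * Illustrative use (hypothetical snippet for documentation purposes only;
 * GCPtrPage stands for whatever page address the caller tracks):
 * @code
 *      // A guest mapping changed - drop CSAM's knowledge of that page.
 *      int rc = CSAMR3FlushPage(pVM, GCPtrPage);
 *      AssertRC(rc);   // VWRN_CSAM_PAGE_NOT_FOUND also counts as success.
 * @endcode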
1658 */
1659VMMR3DECL(int) CSAMR3FlushPage(PVM pVM, RTRCPTR addr)
1660{
1661 return csamFlushPage(pVM, addr, true /* remove page record */);
1662}
1663
1664/**
1665 * Remove a CSAM monitored page. Use with care!
1666 *
1667 * @returns VBox status code
1668 * @param pVM The VM to operate on.
1669 * @param addr GC address of the page to remove
1670 */
1671VMMR3DECL(int) CSAMR3RemovePage(PVM pVM, RTRCPTR addr)
1672{
1673 PCSAMPAGEREC pPageRec;
1674 int rc;
1675
1676 addr = addr & PAGE_BASE_GC_MASK;
1677
1678 pPageRec = (PCSAMPAGEREC)RTAvlPVGet(&pVM->csam.s.pPageTree, (AVLPVKEY)addr);
1679 if (pPageRec)
1680 {
1681 rc = csamRemovePageRecord(pVM, addr);
1682 if (RT_SUCCESS(rc))
1683 PATMR3FlushPage(pVM, addr);
1684 return VINF_SUCCESS;
1685 }
1686 return VWRN_CSAM_PAGE_NOT_FOUND;
1687}
1688
1689/**
1690 * Check a page record in case a page has been changed
1691 *
1692 * @returns VBox status code (VWRN_CSAM_PAGE_NOT_FOUND if no record exists for the page).
1693 * @param pVM The VM to operate on.
1694 * @param pInstrGC GC instruction pointer
1695 */
1696int csamR3CheckPageRecord(PVM pVM, RTRCPTR pInstrGC)
1697{
1698 PCSAMPAGEREC pPageRec;
1699 uint64_t u64hash;
1700
1701 pInstrGC = pInstrGC & PAGE_BASE_GC_MASK;
1702
1703 pPageRec = (PCSAMPAGEREC)RTAvlPVGet(&pVM->csam.s.pPageTree, (AVLPVKEY)pInstrGC);
1704 if (pPageRec)
1705 {
1706 u64hash = csamR3CalcPageHash(pVM, pInstrGC);
1707 if (u64hash != pPageRec->page.u64Hash)
1708 csamFlushPage(pVM, pInstrGC, false /* don't remove page record */);
1709 }
1710 else
1711 return VWRN_CSAM_PAGE_NOT_FOUND;
1712
1713 return VINF_SUCCESS;
1714}
1715
1716/**
1717 * Returns monitor description based on CSAM tag
1718 *
1719 * @return description string
1720 * @param enmTag Owner tag
1721 */
1722const char *csamGetMonitorDescription(CSAMTAG enmTag)
1723{
1724 if (enmTag == CSAM_TAG_PATM)
1725 return "CSAM-PATM self-modifying code monitor handler";
1726 else
1727 if (enmTag == CSAM_TAG_REM)
1728 return "CSAM-REM self-modifying code monitor handler";
1729 Assert(enmTag == CSAM_TAG_CSAM);
1730 return "CSAM self-modifying code monitor handler";
1731}
1732
1733/**
1734 * Adds page record to our lookup tree
1735 *
1736 * @returns CSAMPAGE ptr or NULL if failure
1737 * @param pVM The VM to operate on.
1738 * @param GCPtr Page address
1739 * @param enmTag Owner tag
1740 * @param fCode32 Set for 32-bit code, clear for 16-bit code
1741 * @param fMonitorInvalidation Monitor page invalidation flag
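 * @remarks The record is keyed on the page aligned address. For PATM and REM
 *          tagged pages a virtual write handler is registered immediately and
 *          the shadow PTE is made read-only.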
1742 */
1743static PCSAMPAGE csamCreatePageRecord(PVM pVM, RTRCPTR GCPtr, CSAMTAG enmTag, bool fCode32, bool fMonitorInvalidation)
1744{
1745 PCSAMPAGEREC pPage;
1746 int rc;
1747 bool ret;
1748 Assert(pVM->cCpus == 1);
1749 PVMCPU pVCpu = VMMGetCpu0(pVM);
1750
1751 Log(("New page record for %RRv\n", GCPtr & PAGE_BASE_GC_MASK));
1752
1753 pPage = (PCSAMPAGEREC)MMR3HeapAllocZ(pVM, MM_TAG_CSAM_PATCH, sizeof(CSAMPAGEREC));
1754 if (pPage == NULL)
1755 {
1756 AssertMsgFailed(("csamCreatePageRecord: Out of memory!!!!\n"));
1757 return NULL;
1758 }
1759 /* Round down to page boundary. */
1760 GCPtr = (GCPtr & PAGE_BASE_GC_MASK);
1761 pPage->Core.Key = (AVLPVKEY)GCPtr;
1762 pPage->page.pPageGC = GCPtr;
1763 pPage->page.fCode32 = fCode32;
1764 pPage->page.fMonitorInvalidation = fMonitorInvalidation;
1765 pPage->page.enmTag = enmTag;
1766 pPage->page.fMonitorActive = false;
1767 pPage->page.pBitmap = (uint8_t *)MMR3HeapAllocZ(pVM, MM_TAG_CSAM_PATCH, PAGE_SIZE/sizeof(uint8_t));
1768 rc = PGMGstGetPage(pVCpu, GCPtr, &pPage->page.fFlags, &pPage->page.GCPhys);
1769 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc = %Rrc\n", rc));
1770
1771 pPage->page.u64Hash = csamR3CalcPageHash(pVM, GCPtr);
1772 ret = RTAvlPVInsert(&pVM->csam.s.pPageTree, &pPage->Core);
1773 Assert(ret);
1774
1775#ifdef CSAM_MONITOR_CODE_PAGES
1776 AssertRelease(!fInCSAMCodePageInvalidate);
1777
1778 switch (enmTag)
1779 {
1780 case CSAM_TAG_PATM:
1781 case CSAM_TAG_REM:
1782#ifdef CSAM_MONITOR_CSAM_CODE_PAGES
1783 case CSAM_TAG_CSAM:
1784#endif
1785 {
1786 int rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE, GCPtr, GCPtr + (PAGE_SIZE - 1) /* inclusive! */,
1787 (fMonitorInvalidation) ? CSAMCodePageInvalidate : 0, CSAMCodePageWriteHandler, "CSAMGCCodePageWriteHandler", 0,
1788 csamGetMonitorDescription(enmTag));
1789 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT, ("PGMR3HandlerVirtualRegisterEx %RRv failed with %Rrc\n", GCPtr, rc));
1790 if (RT_FAILURE(rc))
1791 Log(("PGMR3HandlerVirtualRegisterEx for %RRv failed with %Rrc\n", GCPtr, rc));
1792
1793 /* Could fail, because it's already monitored. Don't treat that condition as fatal. */
1794
1795 /* Prefetch it in case it's not there yet. */
1796 rc = PGMPrefetchPage(pVCpu, GCPtr);
1797 AssertRC(rc);
1798
1799 rc = PGMShwModifyPage(pVCpu, GCPtr, 1, 0, ~(uint64_t)X86_PTE_RW);
1800 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
1801
1802 pPage->page.fMonitorActive = true;
1803 STAM_COUNTER_INC(&pVM->csam.s.StatPageMonitor);
1804 break;
1805 }
1806 default:
1807 break; /* to shut up GCC */
1808 }
1809
1810 Log(("csamCreatePageRecord %RRv GCPhys=%RGp\n", GCPtr, pPage->page.GCPhys));
1811
1812#ifdef VBOX_WITH_STATISTICS
1813 switch (enmTag)
1814 {
1815 case CSAM_TAG_CSAM:
1816 STAM_COUNTER_INC(&pVM->csam.s.StatPageCSAM);
1817 break;
1818 case CSAM_TAG_PATM:
1819 STAM_COUNTER_INC(&pVM->csam.s.StatPagePATM);
1820 break;
1821 case CSAM_TAG_REM:
1822 STAM_COUNTER_INC(&pVM->csam.s.StatPageREM);
1823 break;
1824 default:
1825 break; /* to shut up GCC */
1826 }
1827#endif
1828
1829#endif
1830
1831 STAM_COUNTER_INC(&pVM->csam.s.StatNrPages);
1832 if (fMonitorInvalidation)
1833 STAM_COUNTER_INC(&pVM->csam.s.StatNrPagesInv);
1834
1835 return &pPage->page;
1836}
1837
1838/**
1839 * Monitors a code page (if not already monitored)
1840 *
1841 * @returns VBox status code
1842 * @param pVM The VM to operate on.
1843 * @param pPageAddrGC The page to monitor
1844 * @param enmTag Monitor tag
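 * @remarks Creates a page record on demand (user pages are ignored), registers a
 *          virtual write handler - plus an invalidation callback for PATM pages -
 *          and forces the shadow PTE read-only so that writes by self-modifying
 *          code trap.
 *
 * Illustrative call site (hypothetical snippet for documentation purposes only;
 * GCPtrCode stands for the supervisor code address the caller just scanned):
 * @code
 *      int rc = CSAMR3MonitorPage(pVM, GCPtrCode, CSAM_TAG_REM);
 *      AssertRC(rc);
 * @endcode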
1845 */
1846VMMR3DECL(int) CSAMR3MonitorPage(PVM pVM, RTRCPTR pPageAddrGC, CSAMTAG enmTag)
1847{
1848 PCSAMPAGEREC pPageRec = NULL;
1849 int rc;
1850 bool fMonitorInvalidation;
1851 Assert(pVM->cCpus == 1);
1852 PVMCPU pVCpu = VMMGetCpu0(pVM);
1853
1854    /* Dirty pages must be handled before calling this function! */
1855 Assert(!pVM->csam.s.cDirtyPages);
1856
1857 if (pVM->csam.s.fScanningStarted == false)
1858 return VINF_SUCCESS; /* too early */
1859
1860 pPageAddrGC &= PAGE_BASE_GC_MASK;
1861
1862 Log(("CSAMR3MonitorPage %RRv %d\n", pPageAddrGC, enmTag));
1863
1864 /** @todo implicit assumption */
1865 fMonitorInvalidation = (enmTag == CSAM_TAG_PATM);
1866
1867 pPageRec = (PCSAMPAGEREC)RTAvlPVGet(&pVM->csam.s.pPageTree, (AVLPVKEY)pPageAddrGC);
1868 if (pPageRec == NULL)
1869 {
1870 uint64_t fFlags;
1871
1872 rc = PGMGstGetPage(pVCpu, pPageAddrGC, &fFlags, NULL);
1873 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc = %Rrc\n", rc));
1874 if ( rc == VINF_SUCCESS
1875 && (fFlags & X86_PTE_US))
1876 {
1877 /* We don't care about user pages. */
1878 STAM_COUNTER_INC(&pVM->csam.s.StatNrUserPages);
1879 return VINF_SUCCESS;
1880 }
1881
1882 csamCreatePageRecord(pVM, pPageAddrGC, enmTag, true /* 32 bits code */, fMonitorInvalidation);
1883
1884 pPageRec = (PCSAMPAGEREC)RTAvlPVGet(&pVM->csam.s.pPageTree, (AVLPVKEY)pPageAddrGC);
1885 Assert(pPageRec);
1886 }
1887 /** @todo reference count */
1888
1889#ifdef CSAM_MONITOR_CSAM_CODE_PAGES
1890 Assert(pPageRec->page.fMonitorActive);
1891#endif
1892
1893#ifdef CSAM_MONITOR_CODE_PAGES
1894 if (!pPageRec->page.fMonitorActive)
1895 {
1896 Log(("CSAMR3MonitorPage: activate monitoring for %RRv\n", pPageAddrGC));
1897
1898 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE, pPageAddrGC, pPageAddrGC + (PAGE_SIZE - 1) /* inclusive! */,
1899 (fMonitorInvalidation) ? CSAMCodePageInvalidate : 0, CSAMCodePageWriteHandler, "CSAMGCCodePageWriteHandler", 0,
1900 csamGetMonitorDescription(enmTag));
1901 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT, ("PGMR3HandlerVirtualRegisterEx %RRv failed with %Rrc\n", pPageAddrGC, rc));
1902 if (RT_FAILURE(rc))
1903 Log(("PGMR3HandlerVirtualRegisterEx for %RRv failed with %Rrc\n", pPageAddrGC, rc));
1904
1905 /* Could fail, because it's already monitored. Don't treat that condition as fatal. */
1906
1907 /* Prefetch it in case it's not there yet. */
1908 rc = PGMPrefetchPage(pVCpu, pPageAddrGC);
1909 AssertRC(rc);
1910
1911 rc = PGMShwModifyPage(pVCpu, pPageAddrGC, 1, 0, ~(uint64_t)X86_PTE_RW);
1912 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
1913
1914 STAM_COUNTER_INC(&pVM->csam.s.StatPageMonitor);
1915
1916 pPageRec->page.fMonitorActive = true;
1917 pPageRec->page.fMonitorInvalidation = fMonitorInvalidation;
1918 }
1919 else
1920 if ( !pPageRec->page.fMonitorInvalidation
1921 && fMonitorInvalidation)
1922 {
1923 Assert(pPageRec->page.fMonitorActive);
1924 PGMHandlerVirtualChangeInvalidateCallback(pVM, pPageRec->page.pPageGC, CSAMCodePageInvalidate);
1925 pPageRec->page.fMonitorInvalidation = true;
1926 STAM_COUNTER_INC(&pVM->csam.s.StatNrPagesInv);
1927
1928 /* Prefetch it in case it's not there yet. */
1929 rc = PGMPrefetchPage(pVCpu, pPageAddrGC);
1930 AssertRC(rc);
1931
1932 /* Make sure it's readonly. Page invalidation may have modified the attributes. */
1933 rc = PGMShwModifyPage(pVCpu, pPageAddrGC, 1, 0, ~(uint64_t)X86_PTE_RW);
1934 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
1935 }
1936
1937#if 0 /* def VBOX_STRICT -> very annoying */
1938 if (pPageRec->page.fMonitorActive)
1939 {
1940 uint64_t fPageShw;
1941 RTHCPHYS GCPhys;
1942 rc = PGMShwGetPage(pVCpu, pPageAddrGC, &fPageShw, &GCPhys);
1943// AssertMsg( (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
1944// || !(fPageShw & X86_PTE_RW)
1945// || (pPageRec->page.GCPhys == 0), ("Shadow page flags for %RRv (%RHp) aren't readonly (%RX64)!!\n", pPageAddrGC, GCPhys, fPageShw));
1946 }
1947#endif
1948
1949 if (pPageRec->page.GCPhys == 0)
1950 {
1951 /* Prefetch it in case it's not there yet. */
1952 rc = PGMPrefetchPage(pVCpu, pPageAddrGC);
1953 AssertRC(rc);
1954 /* The page was changed behind our back. It won't be made read-only until the next SyncCR3, so force it here. */
1955 rc = PGMShwModifyPage(pVCpu, pPageAddrGC, 1, 0, ~(uint64_t)X86_PTE_RW);
1956 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
1957 }
1958#endif /* CSAM_MONITOR_CODE_PAGES */
1959 return VINF_SUCCESS;
1960}
1961
1962/**
1963 * Unmonitors a code page
1964 *
1965 * @returns VBox status code
1966 * @param pVM The VM to operate on.
1967 * @param pPageAddrGC The page to stop monitoring
1968 * @param enmTag Monitor tag
1969 */
1970VMMR3DECL(int) CSAMR3UnmonitorPage(PVM pVM, RTRCPTR pPageAddrGC, CSAMTAG enmTag)
1971{
1972 pPageAddrGC &= PAGE_BASE_GC_MASK;
1973
1974 Log(("CSAMR3UnmonitorPage %RRv %d\n", pPageAddrGC, enmTag));
1975
1976 Assert(enmTag == CSAM_TAG_REM);
1977
1978#ifdef VBOX_STRICT
1979 PCSAMPAGEREC pPageRec;
1980
1981 pPageRec = (PCSAMPAGEREC)RTAvlPVGet(&pVM->csam.s.pPageTree, (AVLPVKEY)pPageAddrGC);
1982 Assert(pPageRec && pPageRec->page.enmTag == enmTag);
1983#endif
1984 return CSAMR3RemovePage(pVM, pPageAddrGC);
1985}
1986
1987/**
1988 * Removes a page record from our lookup tree
1989 *
1990 * @returns VBox status code
1991 * @param pVM The VM to operate on.
1992 * @param GCPtr Page address
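 * @remarks Deregistering the virtual write handler is expensive (CR3 reload). For
 *          PATM tagged pages a global TLB flush is flagged so the recompiler drops
 *          its cached code for the page.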
1993 */
1994static int csamRemovePageRecord(PVM pVM, RTRCPTR GCPtr)
1995{
1996 PCSAMPAGEREC pPageRec;
1997 Assert(pVM->cCpus == 1);
1998 PVMCPU pVCpu = VMMGetCpu0(pVM);
1999
2000 Log(("csamRemovePageRecord %RRv\n", GCPtr));
2001 pPageRec = (PCSAMPAGEREC)RTAvlPVRemove(&pVM->csam.s.pPageTree, (AVLPVKEY)GCPtr);
2002
2003 if (pPageRec)
2004 {
2005 STAM_COUNTER_INC(&pVM->csam.s.StatNrRemovedPages);
2006
2007#ifdef CSAM_MONITOR_CODE_PAGES
2008 if (pPageRec->page.fMonitorActive)
2009 {
2010            /** @todo This is expensive (CR3 reload)! If it happens often,
2011             *        reuse the monitoring instead of deregistering it.
2012             */
2013 Assert(!fInCSAMCodePageInvalidate);
2014 STAM_COUNTER_DEC(&pVM->csam.s.StatPageMonitor);
2015 PGMHandlerVirtualDeregister(pVM, GCPtr);
2016 }
2017 if (pPageRec->page.enmTag == CSAM_TAG_PATM)
2018 {
2019 /* Make sure the recompiler flushes its cache as this page is no longer monitored. */
2020 STAM_COUNTER_INC(&pVM->csam.s.StatPageRemoveREMFlush);
2021 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
2022 }
2023#endif
2024
2025#ifdef VBOX_WITH_STATISTICS
2026 switch (pPageRec->page.enmTag)
2027 {
2028 case CSAM_TAG_CSAM:
2029 STAM_COUNTER_DEC(&pVM->csam.s.StatPageCSAM);
2030 break;
2031 case CSAM_TAG_PATM:
2032 STAM_COUNTER_DEC(&pVM->csam.s.StatPagePATM);
2033 break;
2034 case CSAM_TAG_REM:
2035 STAM_COUNTER_DEC(&pVM->csam.s.StatPageREM);
2036 break;
2037 default:
2038 break; /* to shut up GCC */
2039 }
2040#endif
2041
2042 if (pPageRec->page.pBitmap) MMR3HeapFree(pPageRec->page.pBitmap);
2043 MMR3HeapFree(pPageRec);
2044 }
2045 else
2046 AssertFailed();
2047
2048 return VINF_SUCCESS;
2049}
2050
2051/**
2052 * Callback for delayed writes from non-EMT threads
2053 *
2054 * @param pVM VM Handle.
2055 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
2056 * @param cbBuf How much it's reading/writing.
2057 */
2058static DECLCALLBACK(void) CSAMDelayedWriteHandler(PVM pVM, RTRCPTR GCPtr, size_t cbBuf)
2059{
2060 int rc = PATMR3PatchWrite(pVM, GCPtr, (uint32_t)cbBuf);
2061 AssertRC(rc);
2062}
2063
2064/**
2065 * #PF Handler callback for virtual access handler ranges.
2066 *
2067 * Important to realize that a physical page in a range can have aliases, and
2068 * for ALL and WRITE handlers these will also trigger.
2069 *
2070 * @returns VINF_SUCCESS if the handler has carried out the operation.
2071 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
2072 * @param pVM VM Handle.
2073 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
2074 * @param pvPtr The HC mapping of that address.
2075 * @param pvBuf What the guest is reading/writing.
2076 * @param cbBuf How much it's reading/writing.
2077 * @param enmAccessType The access type.
2078 * @param pvUser User argument.
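 * @remarks Writes performed on the EMT are forwarded to PATMR3PatchWrite directly;
 *          writes from other threads are queued with VMR3ReqCallVoidNoWait and
 *          handled later by CSAMDelayedWriteHandler. Writes that leave the bytes
 *          unchanged are treated as dummy writes and ignored.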
2079 */
2080static DECLCALLBACK(int) CSAMCodePageWriteHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
2081{
2082 int rc;
2083
2084 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
2085 Log(("CSAMCodePageWriteHandler: write to %RGv size=%zu\n", GCPtr, cbBuf));
2086
2087 if ( PAGE_ADDRESS(pvPtr) == PAGE_ADDRESS((uintptr_t)pvPtr + cbBuf - 1)
2088 && !memcmp(pvPtr, pvBuf, cbBuf))
2089 {
2090 Log(("CSAMCodePageWriteHandler: dummy write -> ignore\n"));
2091 return VINF_PGM_HANDLER_DO_DEFAULT;
2092 }
2093
2094 if (VM_IS_EMT(pVM))
2095 {
2096 rc = PATMR3PatchWrite(pVM, GCPtr, (uint32_t)cbBuf);
2097 }
2098 else
2099 {
2100        /* Queue the write instead, otherwise we'd run into concurrency issues. */
2101        /** @note In theory it's not correct to let the guest write the data before the patch is disabled!
2102         *        (If it writes the same bytes as the patch jump and we then replace them with obsolete opcodes.)
2103         */
2104 Log(("CSAMCodePageWriteHandler: delayed write!\n"));
2105 AssertCompileSize(RTRCPTR, 4);
2106 rc = VMR3ReqCallVoidNoWait(pVM, VMCPUID_ANY, (PFNRT)CSAMDelayedWriteHandler, 3, pVM, (RTRCPTR)GCPtr, cbBuf);
2107 }
2108 AssertRC(rc);
2109
2110 return VINF_PGM_HANDLER_DO_DEFAULT;
2111}
2112
2113/**
2114 * #PF Handler callback for invalidation of virtual access handler ranges.
2115 *
2116 * @param pVM VM Handle.
2117 * @param GCPtr The virtual address the guest has changed.
2118 */
2119static DECLCALLBACK(int) CSAMCodePageInvalidate(PVM pVM, RTGCPTR GCPtr)
2120{
2121 fInCSAMCodePageInvalidate = true;
2122 LogFlow(("CSAMCodePageInvalidate %RGv\n", GCPtr));
2123 /** @todo We can't remove the page (which unregisters the virtual handler) as we are called from a DoWithAll on the virtual handler tree. Argh. */
2124 csamFlushPage(pVM, GCPtr, false /* don't remove page! */);
2125 fInCSAMCodePageInvalidate = false;
2126 return VINF_SUCCESS;
2127}
2128
2129/**
2130 * Check if the current instruction has already been checked before
2131 *
2132 * @returns true if the instruction has already been scanned, false otherwise.
2133 * @param pVM The VM to operate on.
2134 * @param pInstr Instruction pointer
2135 * @param pPage Where to return the CSAM page record pointer
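 * @remarks A page record without a bitmap represents a fully scanned page, so any
 *          offset within it is reported as scanned.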
2136 */
2137bool csamIsCodeScanned(PVM pVM, RTRCPTR pInstr, PCSAMPAGE *pPage)
2138{
2139 PCSAMPAGEREC pPageRec;
2140 uint32_t offset;
2141
2142 STAM_PROFILE_START(&pVM->csam.s.StatTimeCheckAddr, a);
2143
2144 offset = pInstr & PAGE_OFFSET_MASK;
2145 pInstr = pInstr & PAGE_BASE_GC_MASK;
2146
2147 Assert(pPage);
2148
2149 if (*pPage && (*pPage)->pPageGC == pInstr)
2150 {
2151 if ((*pPage)->pBitmap == NULL || ASMBitTest((*pPage)->pBitmap, offset))
2152 {
2153 STAM_COUNTER_ADD(&pVM->csam.s.StatNrKnownPagesHC, 1);
2154 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeCheckAddr, a);
2155 return true;
2156 }
2157 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeCheckAddr, a);
2158 return false;
2159 }
2160
2161 pPageRec = (PCSAMPAGEREC)RTAvlPVGet(&pVM->csam.s.pPageTree, (AVLPVKEY)pInstr);
2162 if (pPageRec)
2163 {
2164        if (pPage) *pPage = &pPageRec->page;
2165 if (pPageRec->page.pBitmap == NULL || ASMBitTest(pPageRec->page.pBitmap, offset))
2166 {
2167 STAM_COUNTER_ADD(&pVM->csam.s.StatNrKnownPagesHC, 1);
2168 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeCheckAddr, a);
2169 return true;
2170 }
2171 }
2172 else
2173 {
2174 if (pPage) *pPage = NULL;
2175 }
2176 STAM_PROFILE_STOP(&pVM->csam.s.StatTimeCheckAddr, a);
2177 return false;
2178}
2179
2180/**
2181 * Mark an instruction in a page as scanned/not scanned
2182 *
2183 * @param pVM The VM to operate on.
2184 * @param pPage CSAM page record pointer
2185 * @param pInstr Instruction pointer
2186 * @param opsize Instruction size
2187 * @param fScanned Mark as scanned or not
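 * @remarks Once the scanned bytes cover the whole page the bitmap is freed;
 *          csamIsCodeScanned then treats every offset in the page as scanned.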
2188 */
2189static void csamMarkCode(PVM pVM, PCSAMPAGE pPage, RTRCPTR pInstr, uint32_t opsize, bool fScanned)
2190{
2191 LogFlow(("csamMarkCodeAsScanned %RRv opsize=%d\n", pInstr, opsize));
2192 CSAMMarkPage(pVM, pInstr, fScanned);
2193
2194 /** @todo should recreate empty bitmap if !fScanned */
2195 if (pPage->pBitmap == NULL)
2196 return;
2197
2198 if (fScanned)
2199 {
2200 // retn instructions can be scanned more than once
2201 if (ASMBitTest(pPage->pBitmap, pInstr & PAGE_OFFSET_MASK) == 0)
2202 {
2203 pPage->uSize += opsize;
2204 STAM_COUNTER_ADD(&pVM->csam.s.StatNrInstr, 1);
2205 }
2206 if (pPage->uSize >= PAGE_SIZE)
2207 {
2208 Log(("Scanned full page (%RRv) -> free bitmap\n", pInstr & PAGE_BASE_GC_MASK));
2209 MMR3HeapFree(pPage->pBitmap);
2210 pPage->pBitmap = NULL;
2211 }
2212 else
2213 ASMBitSet(pPage->pBitmap, pInstr & PAGE_OFFSET_MASK);
2214 }
2215 else
2216 ASMBitClear(pPage->pBitmap, pInstr & PAGE_OFFSET_MASK);
2217}
2218
2219/**
2220 * Mark an instruction in a page as scanned/not scanned
2221 *
2222 * @returns VBox status code.
2223 * @param pVM The VM to operate on.
2224 * @param pInstr Instruction pointer
2225 * @param opsize Instruction size
2226 * @param fScanned Mark as scanned or not
2227 */
2228VMMR3DECL(int) CSAMR3MarkCode(PVM pVM, RTRCPTR pInstr, uint32_t opsize, bool fScanned)
2229{
2230 PCSAMPAGE pPage = 0;
2231
2232 Assert(!fScanned); /* other case not implemented. */
2233 Assert(!PATMIsPatchGCAddr(pVM, pInstr));
2234
2235 if (csamIsCodeScanned(pVM, pInstr, &pPage) == false)
2236 {
2237 Assert(fScanned == true); /* other case should not be possible */
2238 return VINF_SUCCESS;
2239 }
2240
2241 Log(("CSAMR3MarkCode: %RRv size=%d fScanned=%d\n", pInstr, opsize, fScanned));
2242 csamMarkCode(pVM, pPage, pInstr, opsize, fScanned);
2243 return VINF_SUCCESS;
2244}
2245
2246
2247/**
2248 * Scan and analyse code
2249 *
2250 * @returns VBox status code.
2251 * @param pVM The VM to operate on.
2252 * @param pCtxCore CPU context
2253 * @param pInstrGC Instruction pointer
2254 */
2255VMMR3DECL(int) CSAMR3CheckCodeEx(PVM pVM, PCPUMCTXCORE pCtxCore, RTRCPTR pInstrGC)
2256{
2257 if (EMIsRawRing0Enabled(pVM) == false || PATMIsPatchGCAddr(pVM, pInstrGC) == true)
2258 {
2259        // Nothing to do: raw ring-0 execution is disabled or the address is patch code.
2260 return VINF_SUCCESS;
2261 }
2262
2263 if (CSAMIsEnabled(pVM))
2264 {
2265 /* Assuming 32 bits code for now. */
2266 Assert(SELMGetCpuModeFromSelector(pVM, pCtxCore->eflags, pCtxCore->cs, &pCtxCore->csHid) == CPUMODE_32BIT);
2267
2268 pInstrGC = SELMToFlat(pVM, DIS_SELREG_CS, pCtxCore, pInstrGC);
2269 return CSAMR3CheckCode(pVM, pInstrGC);
2270 }
2271 return VINF_SUCCESS;
2272}
2273
2274/**
2275 * Scan and analyse code
2276 *
2277 * @returns VBox status code.
2278 * @param pVM The VM to operate on.
2279 * @param pInstrGC Instruction pointer (0:32 virtual address)
2280 */
2281VMMR3DECL(int) CSAMR3CheckCode(PVM pVM, RTRCPTR pInstrGC)
2282{
2283 int rc;
2284 PCSAMPAGE pPage = NULL;
2285
2286 if (EMIsRawRing0Enabled(pVM) == false || PATMIsPatchGCAddr(pVM, pInstrGC) == true)
2287 {
2288        // Nothing to do: raw ring-0 execution is disabled or the address is patch code.
2289 return VINF_SUCCESS;
2290 }
2291
2292 if (CSAMIsEnabled(pVM))
2293 {
2294 // Cache record for PATMGCVirtToHCVirt
2295 CSAMP2GLOOKUPREC cacheRec = {0};
2296
2297 STAM_PROFILE_START(&pVM->csam.s.StatTime, a);
2298 rc = csamAnalyseCallCodeStream(pVM, pInstrGC, pInstrGC, true /* 32 bits code */, CSAMR3AnalyseCallback, pPage, &cacheRec);
2299 STAM_PROFILE_STOP(&pVM->csam.s.StatTime, a);
2300 if (rc != VINF_SUCCESS)
2301 {
2302 Log(("csamAnalyseCodeStream failed with %d\n", rc));
2303 return rc;
2304 }
2305 }
2306 return VINF_SUCCESS;
2307}
2308
2309/**
2310 * Flush dirty code pages
2311 *
2312 * @returns VBox status code.
2313 * @param pVM The VM to operate on.
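 * @remarks For every queued dirty page the recompiler is notified, write protection
 *          is re-established on the recorded fault address, and REM tagged records
 *          that turn out to be user pages are removed.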
2314 */
2315static int csamR3FlushDirtyPages(PVM pVM)
2316{
2317 Assert(pVM->cCpus == 1);
2318 PVMCPU pVCpu = VMMGetCpu0(pVM);
2319
2320 STAM_PROFILE_START(&pVM->csam.s.StatFlushDirtyPages, a);
2321
2322 for (uint32_t i=0;i<pVM->csam.s.cDirtyPages;i++)
2323 {
2324 int rc;
2325 PCSAMPAGEREC pPageRec;
2326 RTRCPTR GCPtr = pVM->csam.s.pvDirtyBasePage[i];
2327
2328 GCPtr = GCPtr & PAGE_BASE_GC_MASK;
2329
2330 /* Notify the recompiler that this page has been changed. */
2331 REMR3NotifyCodePageChanged(pVM, pVCpu, GCPtr);
2332
2333 /* Enable write protection again. (use the fault address as it might be an alias) */
2334 rc = PGMShwModifyPage(pVCpu, pVM->csam.s.pvDirtyFaultPage[i], 1, 0, ~(uint64_t)X86_PTE_RW);
2335 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2336
2337 Log(("CSAMR3FlushDirtyPages: flush %RRv (modifypage rc=%Rrc)\n", pVM->csam.s.pvDirtyBasePage[i], rc));
2338
2339 pPageRec = (PCSAMPAGEREC)RTAvlPVGet(&pVM->csam.s.pPageTree, (AVLPVKEY)GCPtr);
2340 if (pPageRec && pPageRec->page.enmTag == CSAM_TAG_REM)
2341 {
2342 uint64_t fFlags;
2343
2344 rc = PGMGstGetPage(pVCpu, GCPtr, &fFlags, NULL);
2345 AssertMsg(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc = %Rrc\n", rc));
2346 if ( rc == VINF_SUCCESS
2347 && (fFlags & X86_PTE_US))
2348 {
2349 /* We don't care about user pages. */
2350 csamRemovePageRecord(pVM, GCPtr);
2351 STAM_COUNTER_INC(&pVM->csam.s.StatNrUserPages);
2352 }
2353 }
2354 }
2355 pVM->csam.s.cDirtyPages = 0;
2356 STAM_PROFILE_STOP(&pVM->csam.s.StatFlushDirtyPages, a);
2357 return VINF_SUCCESS;
2358}
2359
2360/**
2361 * Flush potential new code pages
2362 *
2363 * @returns VBox status code.
2364 * @param pVM The VM to operate on.
2365 */
2366static int csamR3FlushCodePages(PVM pVM)
2367{
2368 Assert(pVM->cCpus == 1);
2369 PVMCPU pVCpu = VMMGetCpu0(pVM);
2370
2371 for (uint32_t i=0;i<pVM->csam.s.cPossibleCodePages;i++)
2372 {
2373 RTRCPTR GCPtr = pVM->csam.s.pvPossibleCodePage[i];
2374
2375 GCPtr = GCPtr & PAGE_BASE_GC_MASK;
2376
2377 Log(("csamR3FlushCodePages: %RRv\n", GCPtr));
2378 PGMShwSetPage(pVCpu, GCPtr, 1, 0);
2379 /* Resync the page to make sure instruction fetch will fault */
2380 CSAMMarkPage(pVM, GCPtr, false);
2381 }
2382 pVM->csam.s.cPossibleCodePages = 0;
2383 return VINF_SUCCESS;
2384}
2385
2386/**
2387 * Perform any pending actions
2388 *
2389 * @returns VBox status code.
2390 * @param pVM The VM to operate on.
2391 * @param pVCpu The VMCPU to operate on.
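 * @remarks Flushes the queued dirty pages and possible new code pages, then clears
 *          the VMCPU_FF_CSAM_PENDING_ACTION force action flag.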
2392 */
2393VMMR3DECL(int) CSAMR3DoPendingAction(PVM pVM, PVMCPU pVCpu)
2394{
2395 csamR3FlushDirtyPages(pVM);
2396 csamR3FlushCodePages(pVM);
2397
2398 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION);
2399 return VINF_SUCCESS;
2400}
2401
2402/**
2403 * Analyse interrupt and trap gates
2404 *
2405 * @returns VBox status code.
2406 * @param pVM The VM to operate on.
2407 * @param iGate Start gate
2408 * @param cGates Number of gates to check
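 * @remarks The first invocation must cover all 256 gates; after that only single
 *          gate updates are processed. Present 32-bit trap and interrupt gates with
 *          DPL 0 or 3 are analysed and handed to PATM for patching; successfully
 *          patched handlers are registered with TRPM.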
2409 */
2410VMMR3DECL(int) CSAMR3CheckGates(PVM pVM, uint32_t iGate, uint32_t cGates)
2411{
2412 Assert(pVM->cCpus == 1);
2413 PVMCPU pVCpu = VMMGetCpu0(pVM);
2414 uint16_t cbIDT;
2415 RTRCPTR GCPtrIDT = CPUMGetGuestIDTR(pVCpu, &cbIDT);
2416 uint32_t iGateEnd;
2417 uint32_t maxGates;
2418 VBOXIDTE aIDT[256];
2419 PVBOXIDTE pGuestIdte;
2420 int rc;
2421
2422 if (EMIsRawRing0Enabled(pVM) == false)
2423 {
2424 /* Enabling interrupt gates only works when raw ring 0 is enabled. */
2425 //AssertFailed();
2426 return VINF_SUCCESS;
2427 }
2428
2429    /* The initial check must cover the complete IDT (256 gates); partial checks before that are too early. */
2430 if ( !pVM->csam.s.fGatesChecked
2431 && cGates != 256)
2432 return VINF_SUCCESS; /* too early */
2433
2434    /* After the initial full check only single gate updates are processed. */
2435 if ( pVM->csam.s.fGatesChecked
2436 && cGates != 1)
2437 return VINF_SUCCESS; /* ignored */
2438
2439 Assert(cGates <= 256);
2440 if (!GCPtrIDT || cGates > 256)
2441 return VERR_INVALID_PARAMETER;
2442
2443 if (cGates != 1)
2444 {
2445 pVM->csam.s.fGatesChecked = true;
2446 for (unsigned i=0;i<RT_ELEMENTS(pVM->csam.s.pvCallInstruction);i++)
2447 {
2448 RTRCPTR pHandler = pVM->csam.s.pvCallInstruction[i];
2449
2450 if (pHandler)
2451 {
2452 CSAMP2GLOOKUPREC cacheRec = {0}; /* Cache record for PATMGCVirtToHCVirt. */
2453 PCSAMPAGE pPage = NULL;
2454
2455 Log(("CSAMCheckGates: checking previous call instruction %RRv\n", pHandler));
2456 STAM_PROFILE_START(&pVM->csam.s.StatTime, a);
2457 rc = csamAnalyseCodeStream(pVM, pHandler, pHandler, true, CSAMR3AnalyseCallback, pPage, &cacheRec);
2458 STAM_PROFILE_STOP(&pVM->csam.s.StatTime, a);
2459 if (rc != VINF_SUCCESS)
2460 {
2461 Log(("CSAMCheckGates: csamAnalyseCodeStream failed with %d\n", rc));
2462 continue;
2463 }
2464 }
2465 }
2466 }
2467
2468 /* Determine valid upper boundary. */
2469 maxGates = (cbIDT+1) / sizeof(VBOXIDTE);
2470 Assert(iGate < maxGates);
2471 if (iGate > maxGates)
2472 return VERR_INVALID_PARAMETER;
2473
2474 if (iGate + cGates > maxGates)
2475 cGates = maxGates - iGate;
2476
2477 GCPtrIDT = GCPtrIDT + iGate * sizeof(VBOXIDTE);
2478 iGateEnd = iGate + cGates;
2479
2480 STAM_PROFILE_START(&pVM->csam.s.StatCheckGates, a);
2481
2482 /*
2483 * Get IDT entries.
2484 */
2485 if (PAGE_ADDRESS(GCPtrIDT) == PAGE_ADDRESS(GCPtrIDT+cGates*sizeof(VBOXIDTE)))
2486 {
2487 /* Just convert the IDT address to a R3 pointer. The whole IDT fits in one page. */
2488 rc = PGMPhysGCPtr2R3Ptr(pVCpu, GCPtrIDT, (PRTR3PTR)&pGuestIdte);
2489 if (RT_FAILURE(rc))
2490 {
2491 AssertMsgRC(rc, ("Failed to read IDTE! rc=%Rrc\n", rc));
2492 STAM_PROFILE_STOP(&pVM->csam.s.StatCheckGates, a);
2493 return rc;
2494 }
2495 }
2496 else
2497 {
2498 /* Slow method when it crosses a page boundary. */
2499 rc = PGMPhysSimpleReadGCPtr(pVCpu, aIDT, GCPtrIDT, cGates*sizeof(VBOXIDTE));
2500 if (RT_FAILURE(rc))
2501 {
2502 AssertMsgRC(rc, ("Failed to read IDTE! rc=%Rrc\n", rc));
2503 STAM_PROFILE_STOP(&pVM->csam.s.StatCheckGates, a);
2504 return rc;
2505 }
2506 pGuestIdte = &aIDT[0];
2507 }
2508
2509 for (/*iGate*/; iGate<iGateEnd; iGate++, pGuestIdte++)
2510 {
2511 Assert(TRPMR3GetGuestTrapHandler(pVM, iGate) == TRPM_INVALID_HANDLER);
2512
2513 if ( pGuestIdte->Gen.u1Present
2514 && (pGuestIdte->Gen.u5Type2 == VBOX_IDTE_TYPE2_TRAP_32 || pGuestIdte->Gen.u5Type2 == VBOX_IDTE_TYPE2_INT_32)
2515 && (pGuestIdte->Gen.u2DPL == 3 || pGuestIdte->Gen.u2DPL == 0)
2516 )
2517 {
2518 RTRCPTR pHandler;
2519 CSAMP2GLOOKUPREC cacheRec = {0}; /* Cache record for PATMGCVirtToHCVirt. */
2520 PCSAMPAGE pPage = NULL;
2521 DBGFSELINFO selInfo;
2522
2523 pHandler = VBOXIDTE_OFFSET(*pGuestIdte);
2524 pHandler = SELMToFlatBySel(pVM, pGuestIdte->Gen.u16SegSel, pHandler);
2525
2526 rc = SELMR3GetSelectorInfo(pVM, pVCpu, pGuestIdte->Gen.u16SegSel, &selInfo);
2527 if ( RT_FAILURE(rc)
2528 || (selInfo.fFlags & (DBGFSELINFO_FLAGS_NOT_PRESENT | DBGFSELINFO_FLAGS_INVALID))
2529 || selInfo.GCPtrBase != 0
2530 || selInfo.cbLimit != ~0U
2531 )
2532 {
2533                /* Refuse to patch a handler whose IDT CS selector isn't wide open (flat base, 4GB limit). */
2534 Log(("CSAMCheckGates: check gate %d failed due to rc %Rrc GCPtrBase=%RRv limit=%x\n", iGate, rc, selInfo.GCPtrBase, selInfo.cbLimit));
2535 continue;
2536 }
2537
2538
2539 if (pGuestIdte->Gen.u5Type2 == VBOX_IDTE_TYPE2_TRAP_32)
2540 {
2541 Log(("CSAMCheckGates: check trap gate %d at %04X:%08X (flat %RRv)\n", iGate, pGuestIdte->Gen.u16SegSel, VBOXIDTE_OFFSET(*pGuestIdte), pHandler));
2542 }
2543 else
2544 {
2545 Log(("CSAMCheckGates: check interrupt gate %d at %04X:%08X (flat %RRv)\n", iGate, pGuestIdte->Gen.u16SegSel, VBOXIDTE_OFFSET(*pGuestIdte), pHandler));
2546 }
2547
2548 STAM_PROFILE_START(&pVM->csam.s.StatTime, a);
2549 rc = csamAnalyseCodeStream(pVM, pHandler, pHandler, true, CSAMR3AnalyseCallback, pPage, &cacheRec);
2550 STAM_PROFILE_STOP(&pVM->csam.s.StatTime, a);
2551 if (rc != VINF_SUCCESS)
2552 {
2553 Log(("CSAMCheckGates: csamAnalyseCodeStream failed with %d\n", rc));
2554 continue;
2555 }
2556 /* OpenBSD guest specific patch test. */
2557 if (iGate >= 0x20)
2558 {
2559 PCPUMCTX pCtx;
2560 DISCPUSTATE cpu;
2561 RTGCUINTPTR32 aOpenBsdPushCSOffset[3] = {0x03, /* OpenBSD 3.7 & 3.8 */
2562 0x2B, /* OpenBSD 4.0 installation ISO */
2563 0x2F}; /* OpenBSD 4.0 after install */
2564
2565 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
2566
2567 for (unsigned i=0;i<RT_ELEMENTS(aOpenBsdPushCSOffset);i++)
2568 {
2569 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pHandler - aOpenBsdPushCSOffset[i], &cpu, NULL);
2570 if ( rc == VINF_SUCCESS
2571 && cpu.pCurInstr->opcode == OP_PUSH
2572 && cpu.pCurInstr->param1 == OP_PARM_REG_CS)
2573 {
2574 rc = PATMR3InstallPatch(pVM, pHandler - aOpenBsdPushCSOffset[i], PATMFL_CODE32 | PATMFL_GUEST_SPECIFIC);
2575 if (RT_SUCCESS(rc))
2576 Log(("Installed OpenBSD interrupt handler prefix instruction (push cs) patch\n"));
2577 }
2578 }
2579 }
2580
2581 /* Trap gates and certain interrupt gates. */
2582 uint32_t fPatchFlags = PATMFL_CODE32 | PATMFL_IDTHANDLER;
2583
2584 if (pGuestIdte->Gen.u5Type2 == VBOX_IDTE_TYPE2_TRAP_32)
2585 fPatchFlags |= PATMFL_TRAPHANDLER;
2586 else
2587 fPatchFlags |= PATMFL_INTHANDLER;
2588
2589 switch (iGate) {
2590 case 8:
2591 case 10:
2592 case 11:
2593 case 12:
2594 case 13:
2595 case 14:
2596 case 17:
2597 fPatchFlags |= PATMFL_TRAPHANDLER_WITH_ERRORCODE;
2598 break;
2599 default:
2600 /* No error code. */
2601 break;
2602 }
2603
2604 Log(("Installing %s gate handler for 0x%X at %RRv\n", (pGuestIdte->Gen.u5Type2 == VBOX_IDTE_TYPE2_TRAP_32) ? "trap" : "intr", iGate, pHandler));
2605
2606 rc = PATMR3InstallPatch(pVM, pHandler, fPatchFlags);
2607 if (RT_SUCCESS(rc) || rc == VERR_PATM_ALREADY_PATCHED)
2608 {
2609 Log(("Gate handler 0x%X is SAFE!\n", iGate));
2610
2611 RTRCPTR pNewHandlerGC = PATMR3QueryPatchGCPtr(pVM, pHandler);
2612 if (pNewHandlerGC)
2613 {
2614 rc = TRPMR3SetGuestTrapHandler(pVM, iGate, pNewHandlerGC);
2615 if (RT_FAILURE(rc))
2616 Log(("TRPMR3SetGuestTrapHandler %d failed with %Rrc\n", iGate, rc));
2617 }
2618 }
2619 }
2620 } /* for */
2621 STAM_PROFILE_STOP(&pVM->csam.s.StatCheckGates, a);
2622 return VINF_SUCCESS;
2623}
2624
2625/**
2626 * Record previous call instruction addresses
2627 *
2628 * @returns VBox status code.
2629 * @param pVM The VM to operate on.
2630 * @param GCPtrCall Call address
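 * @remarks The addresses are kept in a small wrap-around buffer and re-analysed
 *          when CSAMR3CheckGates performs its full gate check.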
2631 */
2632VMMR3DECL(int) CSAMR3RecordCallAddress(PVM pVM, RTRCPTR GCPtrCall)
2633{
2634 for (unsigned i=0;i<RT_ELEMENTS(pVM->csam.s.pvCallInstruction);i++)
2635 {
2636 if (pVM->csam.s.pvCallInstruction[i] == GCPtrCall)
2637 return VINF_SUCCESS;
2638 }
2639
2640 Log(("CSAMR3RecordCallAddress %RRv\n", GCPtrCall));
2641
2642 pVM->csam.s.pvCallInstruction[pVM->csam.s.iCallInstruction++] = GCPtrCall;
2643 if (pVM->csam.s.iCallInstruction >= RT_ELEMENTS(pVM->csam.s.pvCallInstruction))
2644 pVM->csam.s.iCallInstruction = 0;
2645
2646 return VINF_SUCCESS;
2647}
2648
2649
2650/**
2651 * Query CSAM state (enabled/disabled)
2652 *
2653 * @returns 0 - disabled, 1 - enabled
2654 * @param pVM The VM to operate on.
2655 */
2656VMMR3DECL(int) CSAMR3IsEnabled(PVM pVM)
2657{
2658 return pVM->fCSAMEnabled;
2659}
2660
2661#ifdef VBOX_WITH_DEBUGGER
2662/**
2663 * The '.csamoff' command.
2664 *
2665 * @returns VBox status.
2666 * @param pCmd Pointer to the command descriptor (as registered).
2667 * @param pCmdHlp Pointer to command helper functions.
2668 * @param pVM Pointer to the current VM (if any).
2669 * @param paArgs Pointer to (readonly) array of arguments.
2670 * @param cArgs Number of arguments in the array.
2671 */
2672static DECLCALLBACK(int) csamr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
2673{
2674 /*
2675 * Validate input.
2676 */
2677 if (!pVM)
2678 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires VM to be selected.\n");
2679
2680 CSAMDisableScanning(pVM);
2681 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "CSAM Scanning disabled\n");
2682}
2683
2684/**
2685 * The '.csamon' command.
2686 *
2687 * @returns VBox status.
2688 * @param pCmd Pointer to the command descriptor (as registered).
2689 * @param pCmdHlp Pointer to command helper functions.
2690 * @param pVM Pointer to the current VM (if any).
2691 * @param paArgs Pointer to (readonly) array of arguments.
2692 * @param cArgs Number of arguments in the array.
2693 */
2694static DECLCALLBACK(int) csamr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
2695{
2696 /*
2697 * Validate input.
2698 */
2699 if (!pVM)
2700 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires VM to be selected.\n");
2701
2702 CSAMEnableScanning(pVM);
2703 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "CSAM Scanning enabled\n");
2704}
2705#endif