VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PATMSSM.cpp@54761

Last change on this file since 54761 was 54761, checked in by vboxsync, 10 years ago

PATM: Only fix up constants when loading state.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 68.7 KB
 
1/* $Id: PATMSSM.cpp 54761 2015-03-13 21:35:30Z vboxsync $ */
2/** @file
3 * PATMSSM - Dynamic Guest OS Patching Manager; Save and load state
4 *
5 * NOTE: CSAM assumes patch memory is never reused!!
6 */
7
8/*
9 * Copyright (C) 2006-2014 Oracle Corporation
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 */
19
20/*******************************************************************************
21* Header Files *
22*******************************************************************************/
23#define LOG_GROUP LOG_GROUP_PATM
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/cpum.h>
26#include <VBox/vmm/cpumctx-v1_6.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/ssm.h>
29#include <VBox/param.h>
30#include <iprt/avl.h>
31#include "PATMInternal.h"
32#include "PATMPatch.h"
33#include "PATMA.h"
34#include <VBox/vmm/vm.h>
35#include <VBox/vmm/csam.h>
36#include "internal/pgm.h"
37#include <VBox/dbg.h>
38#include <VBox/err.h>
39#include <VBox/log.h>
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#include <iprt/string.h>
43#include <VBox/dis.h>
44#include <VBox/disopcode.h>
45#include <VBox/version.h>
46
/**
 * Patch information - SSM version.
 *
 * The difference from the live PATCHINFO structure is the missing
 * pTrampolinePatchesHead member, kept out to avoid changing the saved
 * state version for now (will come later).
 *
 * NOTE(review): the member order and sizes define the saved state layout
 * (see g_aPatmPatchRecFields); do not reorder or resize members without
 * bumping the saved state version.
 */
typedef struct PATCHINFOSSM
{
    /** Current patch state (PATCH_*). */
    uint32_t uState;
    /** Previous patch state; kept so a disabled patch can be re-enabled. */
    uint32_t uOldState;
    /** Disassembler CPU mode for the patched code. */
    DISCPUMODE uOpMode;

    /* GC pointer of privileged instruction */
    RCPTRTYPE(uint8_t *) pPrivInstrGC;
    R3PTRTYPE(uint8_t *) unusedHC; /** @todo Can't remove due to structure size dependencies in saved states. */
    /** Copy of the original (privileged) instruction bytes. */
    uint8_t aPrivInstr[MAX_INSTR_SIZE];
    /** Number of valid bytes in aPrivInstr. */
    uint32_t cbPrivInstr;
    uint32_t opcode;      //opcode for priv instr (OP_*)
    uint32_t cbPatchJump; //patch jump size

    /* Only valid for PATMFL_JUMP_CONFLICT patches */
    RTRCPTR pPatchJumpDestGC;

    /** Offset of the generated patch code relative to the patch memory base. */
    RTGCUINTPTR32 pPatchBlockOffset;
    /** Size of the generated patch code in bytes. */
    uint32_t cbPatchBlockSize;
    /** Current (next free) offset within the patch block. */
    uint32_t uCurPatchOffset;
#if HC_ARCH_BITS == 64
    uint32_t Alignment0; /**< Align flags correctly. */
#endif

    /** Patch flags (PATMFL_*). */
    uint64_t flags;

    /**
     * Lowest and highest patched GC instruction address. To optimize searches.
     */
    RTRCPTR pInstrGCLowest;
    RTRCPTR pInstrGCHighest;

    /* Tree of fixup records for the patch. */
    R3PTRTYPE(PAVLPVNODECORE) FixupTree;
    uint32_t nrFixups;

    /* Tree of jumps inside the generated patch code. */
    uint32_t nrJumpRecs;
    R3PTRTYPE(PAVLPVNODECORE) JumpTree;

    /**
     * Lookup trees for determining the corresponding guest address of an
     * instruction in the patch block.
     */
    R3PTRTYPE(PAVLU32NODECORE) Patch2GuestAddrTree;
    R3PTRTYPE(PAVLU32NODECORE) Guest2PatchAddrTree;
    uint32_t nrPatch2GuestRecs;
#if HC_ARCH_BITS == 64
    uint32_t Alignment1;
#endif

    /* Unused, but can't remove due to structure size dependencies in the saved state. */
    PATMP2GLOOKUPREC_OBSOLETE unused;

    /* Temporary information during patch creation. Don't waste hypervisor memory for this. */
    R3PTRTYPE(PPATCHINFOTEMP) pTempInfo;

    /* Count the number of writes to the corresponding guest code. */
    uint32_t cCodeWrites;

    /* Count the number of traps taken in the patch code; some statistics to
       determine if we should keep this patch activated. */
    uint32_t cTraps;

    /* Count the number of invalid writes to pages monitored for the patch. */
    uint32_t cInvalidWrites;

    // Index into the uPatchRun and uPatchTrap arrays (0..MAX_PATCHES-1)
    uint32_t uPatchIdx;

    /* First opcode byte, that's overwritten when a patch is marked dirty. */
    uint8_t bDirtyOpcode;
    uint8_t Alignment2[7]; /**< Align the structure size on a 8-byte boundary. */
} PATCHINFOSSM, *PPATCHINFOSSM;
126
/**
 * Lookup record for patches - SSM version.
 *
 * Same shape as PATMPATCHREC but embeds the SSM variant of the patch
 * information so the saved state layout stays stable.
 */
typedef struct PATMPATCHRECSSM
{
    /** The key is a GC virtual address. */
    AVLOU32NODECORE Core;
    /** The key is a patch offset. */
    AVLOU32NODECORE CoreOffset;

    /** The patch information (saved state layout). */
    PATCHINFOSSM patch;
} PATMPATCHRECSSM, *PPATMPATCHRECSSM;
139
140
141/*******************************************************************************
142* Internal Functions *
143*******************************************************************************/
144static int patmCorrectFixup(PVM pVM, unsigned ulSSMVersion, PATM &patmInfo, PPATCHINFO pPatch, PRELOCREC pRec,
145 int32_t offset, RTRCPTR *pFixup);
146
147
148/*******************************************************************************
149* Global Variables *
150*******************************************************************************/
/**
 * SSM descriptor table for the PATM structure.
 *
 * Used for saving and for loading saved states produced before r86139
 * (the hDbgModPatchMem member was added in that revision, see
 * g_aPatmFields86139).  Host-context pointers and statistics are not
 * stored; they are recalculated / reset on load.
 */
static SSMFIELD const g_aPatmFields[] =
{
    /** @todo there are a bunch more fields here which can be marked as ignored. */
    SSMFIELD_ENTRY_IGNORE(          PATM, offVM),
    SSMFIELD_ENTRY_RCPTR(           PATM, pPatchMemGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pPatchMemHC),
    SSMFIELD_ENTRY(                 PATM, cbPatchMem),
    SSMFIELD_ENTRY(                 PATM, offPatchMem),
    SSMFIELD_ENTRY(                 PATM, fOutOfMemory),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 3),
    SSMFIELD_ENTRY(                 PATM, deltaReloc),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pGCStateHC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pGCStateGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pGCStackGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pGCStackHC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pCPUMCtxGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pStatsGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pStatsHC),
    SSMFIELD_ENTRY(                 PATM, uCurrentPatchIdx),
    SSMFIELD_ENTRY(                 PATM, ulCallDepth),
    SSMFIELD_ENTRY(                 PATM, cPageRecords),
    SSMFIELD_ENTRY_RCPTR(           PATM, pPatchedInstrGCLowest),
    SSMFIELD_ENTRY_RCPTR(           PATM, pPatchedInstrGCHighest),
    SSMFIELD_ENTRY_RCPTR(           PATM, PatchLookupTreeGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, PatchLookupTreeHC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperCallGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperRetGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperJumpGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperIretGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pGlobalPatchRec),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnSysEnterGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnSysEnterPatchGC),
    SSMFIELD_ENTRY(                 PATM, uSysEnterPatchIdx),
    SSMFIELD_ENTRY_RCPTR(           PATM, pvFaultMonitor),
    SSMFIELD_ENTRY_GCPHYS(          PATM, mmio.GCPhys),
    SSMFIELD_ENTRY_RCPTR(           PATM, mmio.pCachedData),
    SSMFIELD_ENTRY_IGN_RCPTR(       PATM, mmio.Alignment0),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, savedstate.pSSM),
    SSMFIELD_ENTRY(                 PATM, savedstate.cPatches),
    SSMFIELD_ENTRY_PAD_HC64(        PATM, savedstate.Alignment0, sizeof(uint32_t)),
    /* Statistics below are not restored; ignored on load. */
    SSMFIELD_ENTRY_IGNORE(          PATM, StatNrOpcodeRead),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDisabled),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatUnusable),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatEnabled),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalled),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalledFunctionPatches),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalledTrampoline),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalledJump),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInt3Callable),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInt3BlockRun),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatOverwritten),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFixedConflicts),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFlushed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPageBoundaryCrossed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatMonitored),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatHandleTrap),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSwitchBack),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSwitchBackFail),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPATMMemoryUsed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDuplicateREQSuccess),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDuplicateREQFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDuplicateUseExisting),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionFound),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionNotFound),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWrite),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWriteDetect),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDirty),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPushTrap),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWriteInterpreted),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWriteInterpretedFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSysEnter),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSysExit),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatEmulIret),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatEmulIretFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstrDirty),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstrDirtyGood),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstrDirtyBad),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchPageInserted),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchPageRemoved),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchRefreshSuccess),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchRefreshFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenRet),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenRetReused),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenJump),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenCall),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenPopf),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatCheckPendingIRQ),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionLookupReplace),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionLookupInsert),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatU32FunctionMaxSlotsUsed),
    SSMFIELD_ENTRY_IGNORE(          PATM, Alignment0),
    SSMFIELD_ENTRY_TERM()
};
247
/**
 * SSM descriptor table for the PATM structure starting with r86139.
 *
 * Identical to g_aPatmFields except for the hDbgModPatchMem member (and its
 * 32-bit host padding) that r86139 inserted after the savedstate members.
 */
static SSMFIELD const g_aPatmFields86139[] =
{
    /** @todo there are a bunch more fields here which can be marked as ignored. */
    SSMFIELD_ENTRY_IGNORE(          PATM, offVM),
    SSMFIELD_ENTRY_RCPTR(           PATM, pPatchMemGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pPatchMemHC),
    SSMFIELD_ENTRY(                 PATM, cbPatchMem),
    SSMFIELD_ENTRY(                 PATM, offPatchMem),
    SSMFIELD_ENTRY(                 PATM, fOutOfMemory),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 3),
    SSMFIELD_ENTRY(                 PATM, deltaReloc),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pGCStateHC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pGCStateGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pGCStackGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pGCStackHC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pCPUMCtxGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pStatsGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pStatsHC),
    SSMFIELD_ENTRY(                 PATM, uCurrentPatchIdx),
    SSMFIELD_ENTRY(                 PATM, ulCallDepth),
    SSMFIELD_ENTRY(                 PATM, cPageRecords),
    SSMFIELD_ENTRY_RCPTR(           PATM, pPatchedInstrGCLowest),
    SSMFIELD_ENTRY_RCPTR(           PATM, pPatchedInstrGCHighest),
    SSMFIELD_ENTRY_RCPTR(           PATM, PatchLookupTreeGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, PatchLookupTreeHC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperCallGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperRetGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperJumpGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnHelperIretGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, pGlobalPatchRec),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnSysEnterGC),
    SSMFIELD_ENTRY_RCPTR(           PATM, pfnSysEnterPatchGC),
    SSMFIELD_ENTRY(                 PATM, uSysEnterPatchIdx),
    SSMFIELD_ENTRY_RCPTR(           PATM, pvFaultMonitor),
    SSMFIELD_ENTRY_GCPHYS(          PATM, mmio.GCPhys),
    SSMFIELD_ENTRY_RCPTR(           PATM, mmio.pCachedData),
    SSMFIELD_ENTRY_IGN_RCPTR(       PATM, mmio.Alignment0),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, savedstate.pSSM),
    SSMFIELD_ENTRY(                 PATM, savedstate.cPatches),
    SSMFIELD_ENTRY_PAD_HC64(        PATM, savedstate.Alignment0, sizeof(uint32_t)),
    /* New in r86139: */
    SSMFIELD_ENTRY_IGN_HCPTR(       PATM, hDbgModPatchMem),
    SSMFIELD_ENTRY_PAD_HC32(        PATM, Alignment0, sizeof(uint32_t)),
    /* Statistics below are not restored; ignored on load. */
    SSMFIELD_ENTRY_IGNORE(          PATM, StatNrOpcodeRead),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDisabled),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatUnusable),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatEnabled),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalled),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalledFunctionPatches),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalledTrampoline),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstalledJump),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInt3Callable),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInt3BlockRun),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatOverwritten),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFixedConflicts),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFlushed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPageBoundaryCrossed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatMonitored),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatHandleTrap),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSwitchBack),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSwitchBackFail),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPATMMemoryUsed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDuplicateREQSuccess),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDuplicateREQFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDuplicateUseExisting),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionFound),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionNotFound),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWrite),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWriteDetect),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatDirty),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPushTrap),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWriteInterpreted),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchWriteInterpretedFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSysEnter),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatSysExit),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatEmulIret),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatEmulIretFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstrDirty),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstrDirtyGood),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatInstrDirtyBad),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchPageInserted),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchPageRemoved),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchRefreshSuccess),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatPatchRefreshFailed),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenRet),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenRetReused),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenJump),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenCall),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatGenPopf),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatCheckPendingIRQ),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionLookupReplace),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatFunctionLookupInsert),
    SSMFIELD_ENTRY_IGNORE(          PATM, StatU32FunctionMaxSlotsUsed),
    SSMFIELD_ENTRY_IGNORE(          PATM, Alignment0),
    SSMFIELD_ENTRY_TERM()
};
346
/**
 * SSM descriptor table for the PATMGCSTATE structure.
 *
 * Describes the guest-context state block that is saved/restored verbatim
 * (all members are stored; none are ignored).
 */
static SSMFIELD const g_aPatmGCStateFields[] =
{
    SSMFIELD_ENTRY(                 PATMGCSTATE, uVMFlags),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uPendingAction),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uPatchCalls),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uScratch),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uIretEFlags),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uIretCS),
    SSMFIELD_ENTRY(                 PATMGCSTATE, uIretEIP),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Psp),
    SSMFIELD_ENTRY(                 PATMGCSTATE, fPIF),
    SSMFIELD_ENTRY_RCPTR(           PATMGCSTATE, GCPtrInhibitInterrupts),
    SSMFIELD_ENTRY_RCPTR(           PATMGCSTATE, GCCallPatchTargetAddr),
    SSMFIELD_ENTRY_RCPTR(           PATMGCSTATE, GCCallReturnAddr),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Restore.uEAX),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Restore.uECX),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Restore.uEDI),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Restore.eFlags),
    SSMFIELD_ENTRY(                 PATMGCSTATE, Restore.uFlags),
    SSMFIELD_ENTRY_TERM()
};
371
/**
 * SSM descriptor table for the PATMPATCHREC structure.
 *
 * Note that it actually describes PATMPATCHRECSSM (the saved-state layout of
 * the patch record); the AVL tree linkage and host-context pointers are
 * ignored and rebuilt on load.
 */
static SSMFIELD const g_aPatmPatchRecFields[] =
{
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, Core.Key),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHRECSSM, Core.pLeft),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHRECSSM, Core.pRight),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHRECSSM, Core.uchHeight),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 3),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, CoreOffset.Key),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHRECSSM, CoreOffset.pLeft),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHRECSSM, CoreOffset.pRight),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHRECSSM, CoreOffset.uchHeight),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 3),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.uState),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.uOldState),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.uOpMode),
    SSMFIELD_ENTRY_RCPTR(           PATMPATCHRECSSM, patch.pPrivInstrGC),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHRECSSM, patch.unusedHC),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.aPrivInstr),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.cbPrivInstr),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.opcode),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.cbPatchJump),
    SSMFIELD_ENTRY_RCPTR(           PATMPATCHRECSSM, patch.pPatchJumpDestGC),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.pPatchBlockOffset),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.cbPatchBlockSize),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.uCurPatchOffset),
    SSMFIELD_ENTRY_PAD_HC64(        PATMPATCHRECSSM, patch.Alignment0, sizeof(uint32_t)),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.flags),
    SSMFIELD_ENTRY_RCPTR(           PATMPATCHRECSSM, patch.pInstrGCLowest),
    SSMFIELD_ENTRY_RCPTR(           PATMPATCHRECSSM, patch.pInstrGCHighest),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHRECSSM, patch.FixupTree),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.nrFixups),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.nrJumpRecs), // should be zero?
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHRECSSM, patch.JumpTree),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHRECSSM, patch.Patch2GuestAddrTree),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHRECSSM, patch.Guest2PatchAddrTree),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.nrPatch2GuestRecs),
    SSMFIELD_ENTRY_PAD_HC64(        PATMPATCHRECSSM, patch.Alignment1, sizeof(uint32_t)),
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHRECSSM, patch.unused.pPatchLocStartHC), // saved as zero
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHRECSSM, patch.unused.pPatchLocEndHC), // ditto
    SSMFIELD_ENTRY_IGN_RCPTR(       PATMPATCHRECSSM, patch.unused.pGuestLoc), // ditto
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHRECSSM, patch.unused.opsize), // ditto
    SSMFIELD_ENTRY_IGN_HCPTR(       PATMPATCHRECSSM, patch.pTempInfo),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.cCodeWrites),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.cTraps),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.cInvalidWrites),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.uPatchIdx),
    SSMFIELD_ENTRY(                 PATMPATCHRECSSM, patch.bDirtyOpcode),
    SSMFIELD_ENTRY_IGNORE(          PATMPATCHRECSSM, patch.Alignment2),
    SSMFIELD_ENTRY_TERM()
};
425
/**
 * SSM descriptor table for the RELOCREC structure.
 *
 * Host pointers are squeezed into 32 bits via the HCPTR_HACK_U32 entries:
 * Core.Key carries the relocation type and pRelocPos is stored as an offset
 * into patch memory (see patmSaveFixupRecords).
 */
static SSMFIELD const g_aPatmRelocRec[] =
{
    SSMFIELD_ENTRY_HCPTR_HACK_U32(  RELOCREC, Core.Key),        // Used to store the relocation type
    SSMFIELD_ENTRY_IGN_HCPTR(       RELOCREC, Core.pLeft),
    SSMFIELD_ENTRY_IGN_HCPTR(       RELOCREC, Core.pRight),
    SSMFIELD_ENTRY_IGNORE(          RELOCREC, Core.uchHeight),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 7),
    SSMFIELD_ENTRY(                 RELOCREC, uType),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     0, 4),
    SSMFIELD_ENTRY_HCPTR_HACK_U32(  RELOCREC, pRelocPos),       // converted to a patch member offset.
    SSMFIELD_ENTRY_RCPTR(           RELOCREC, pSource),
    SSMFIELD_ENTRY_RCPTR(           RELOCREC, pDest),
    SSMFIELD_ENTRY_TERM()
};
443
/**
 * SSM descriptor table for the RECPATCHTOGUEST structure.
 *
 * Patch-offset to guest-address lookup records; the AVL linkage is ignored
 * and the tree is rebuilt on load.
 */
static SSMFIELD const g_aPatmRecPatchToGuest[] =
{
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, Core.Key),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     0, 4),
    SSMFIELD_ENTRY_IGN_HCPTR(       RECPATCHTOGUEST, Core.pLeft),
    SSMFIELD_ENTRY_IGN_HCPTR(       RECPATCHTOGUEST, Core.pRight),
    SSMFIELD_ENTRY_IGNORE(          RECPATCHTOGUEST, Core.uchHeight),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     3, 7),
    SSMFIELD_ENTRY_RCPTR(           RECPATCHTOGUEST, pOrgInstrGC),
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, enmType),
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, fDirty),
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, fJumpTarget),
    SSMFIELD_ENTRY(                 RECPATCHTOGUEST, u8DirtyOpcode),
    SSMFIELD_ENTRY_PAD_HC_AUTO(     1, 5),
    SSMFIELD_ENTRY_TERM()
};
463
464#ifdef VBOX_STRICT
465
466/**
467 * Callback function for RTAvlPVDoWithAll
468 *
469 * Counts the number of patches in the tree
470 *
471 * @returns VBox status code.
472 * @param pNode Current node
473 * @param pcPatches Pointer to patch counter (uint32_t)
474 */
475static DECLCALLBACK(int) patmCountLeafPV(PAVLPVNODECORE pNode, void *pcPatches)
476{
477 NOREF(pNode);
478 *(uint32_t *)pcPatches = *(uint32_t *)pcPatches + 1;
479 return VINF_SUCCESS;
480}
481
482/**
483 * Callback function for RTAvlU32DoWithAll
484 *
485 * Counts the number of patches in the tree
486 *
487 * @returns VBox status code.
488 * @param pNode Current node
489 * @param pcPatches Pointer to patch counter (uint32_t)
490 */
491static DECLCALLBACK(int) patmCountLeaf(PAVLU32NODECORE pNode, void *pcPatches)
492{
493 NOREF(pNode);
494 *(uint32_t *)pcPatches = *(uint32_t *)pcPatches + 1;
495 return VINF_SUCCESS;
496}
497
498#endif /* VBOX_STRICT */
499
500/**
501 * Callback function for RTAvloU32DoWithAll
502 *
503 * Counts the number of patches in the tree
504 *
505 * @returns VBox status code.
506 * @param pNode Current node
507 * @param pcPatches Pointer to patch counter
508 */
509static DECLCALLBACK(int) patmCountPatch(PAVLOU32NODECORE pNode, void *pcPatches)
510{
511 NOREF(pNode);
512 *(uint32_t *)pcPatches = *(uint32_t *)pcPatches + 1;
513 return VINF_SUCCESS;
514}
515
516/**
517 * Callback function for RTAvlU32DoWithAll
518 *
519 * Saves all patch to guest lookup records.
520 *
521 * @returns VBox status code.
522 * @param pNode Current node
523 * @param pVM1 Pointer to the VM
524 */
525static DECLCALLBACK(int) patmSaveP2GLookupRecords(PAVLU32NODECORE pNode, void *pVM1)
526{
527 PVM pVM = (PVM)pVM1;
528 PSSMHANDLE pSSM = pVM->patm.s.savedstate.pSSM;
529 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)pNode;
530
531 /* Save the lookup record. */
532 int rc = SSMR3PutStructEx(pSSM, pPatchToGuestRec, sizeof(RECPATCHTOGUEST), 0 /*fFlags*/, &g_aPatmRecPatchToGuest[0], NULL);
533 AssertRCReturn(rc, rc);
534
535 return VINF_SUCCESS;
536}
537
538/**
539 * Callback function for RTAvlPVDoWithAll
540 *
541 * Saves all patch to guest lookup records.
542 *
543 * @returns VBox status code.
544 * @param pNode Current node
545 * @param pVM1 Pointer to the VM
546 */
547static DECLCALLBACK(int) patmSaveFixupRecords(PAVLPVNODECORE pNode, void *pVM1)
548{
549 PVM pVM = (PVM)pVM1;
550 PSSMHANDLE pSSM = pVM->patm.s.savedstate.pSSM;
551 RELOCREC rec = *(PRELOCREC)pNode;
552 RTRCPTR *pFixup = (RTRCPTR *)rec.pRelocPos;
553
554 /* Convert pointer to an offset into patch memory. May not be applicable
555 to all fixup types, thus the UINT32_MAX. */
556 Assert(rec.pRelocPos);
557 uintptr_t offRelocPos = (uintptr_t)rec.pRelocPos - (uintptr_t)pVM->patm.s.pPatchMemHC;
558 if (offRelocPos > pVM->patm.s.cbPatchMem)
559 offRelocPos = UINT32_MAX;
560 rec.pRelocPos = (uint8_t *)offRelocPos;
561
562 /* Zero rec.Core.Key since it's unused and may trigger SSM check due to the hack below. */
563 rec.Core.Key = 0;
564
565
566 /* Save the lookup record. */
567 int rc = SSMR3PutStructEx(pSSM, &rec, sizeof(rec), 0 /*fFlags*/, &g_aPatmRelocRec[0], NULL);
568 AssertRCReturn(rc, rc);
569
570 return VINF_SUCCESS;
571}
572
/**
 * Converts a saved state patch record to the memory record.
 *
 * Copies only the patch payload and the two tree keys; the AVL linkage and
 * other internal data of the destination are left untouched, and the
 * pTrampolinePatchesHead member (absent from the saved state) is reset.
 *
 * @returns nothing.
 * @param   pPatch      The memory record (destination).
 * @param   pPatchSSM   The SSM version of the patch record (source).
 */
static void patmR3PatchConvertSSM2Mem(PPATMPATCHREC pPatch, PPATMPATCHRECSSM pPatchSSM)
{
    /*
     * Only restore the patch part of the tree record; not the internal data (except the key of course)
     */
    pPatch->Core.Key                  = pPatchSSM->Core.Key;
    pPatch->CoreOffset.Key            = pPatchSSM->CoreOffset.Key;
    pPatch->patch.uState              = pPatchSSM->patch.uState;
    pPatch->patch.uOldState           = pPatchSSM->patch.uOldState;
    pPatch->patch.uOpMode             = pPatchSSM->patch.uOpMode;
    pPatch->patch.pPrivInstrGC        = pPatchSSM->patch.pPrivInstrGC;
    pPatch->patch.unusedHC            = pPatchSSM->patch.unusedHC;
    memcpy(&pPatch->patch.aPrivInstr[0], &pPatchSSM->patch.aPrivInstr[0], MAX_INSTR_SIZE);
    pPatch->patch.cbPrivInstr         = pPatchSSM->patch.cbPrivInstr;
    pPatch->patch.opcode              = pPatchSSM->patch.opcode;
    pPatch->patch.cbPatchJump         = pPatchSSM->patch.cbPatchJump;
    pPatch->patch.pPatchJumpDestGC    = pPatchSSM->patch.pPatchJumpDestGC;
    pPatch->patch.pPatchBlockOffset   = pPatchSSM->patch.pPatchBlockOffset;
    pPatch->patch.cbPatchBlockSize    = pPatchSSM->patch.cbPatchBlockSize;
    pPatch->patch.uCurPatchOffset     = pPatchSSM->patch.uCurPatchOffset;
    pPatch->patch.flags               = pPatchSSM->patch.flags;
    pPatch->patch.pInstrGCLowest      = pPatchSSM->patch.pInstrGCLowest;
    pPatch->patch.pInstrGCHighest     = pPatchSSM->patch.pInstrGCHighest;
    pPatch->patch.FixupTree           = pPatchSSM->patch.FixupTree;
    pPatch->patch.nrFixups            = pPatchSSM->patch.nrFixups;
    pPatch->patch.nrJumpRecs          = pPatchSSM->patch.nrJumpRecs;
    pPatch->patch.JumpTree            = pPatchSSM->patch.JumpTree;
    pPatch->patch.Patch2GuestAddrTree = pPatchSSM->patch.Patch2GuestAddrTree;
    pPatch->patch.Guest2PatchAddrTree = pPatchSSM->patch.Guest2PatchAddrTree;
    pPatch->patch.nrPatch2GuestRecs   = pPatchSSM->patch.nrPatch2GuestRecs;
    pPatch->patch.unused              = pPatchSSM->patch.unused;
    pPatch->patch.pTempInfo           = pPatchSSM->patch.pTempInfo;
    pPatch->patch.cCodeWrites         = pPatchSSM->patch.cCodeWrites;
    pPatch->patch.cTraps              = pPatchSSM->patch.cTraps;
    pPatch->patch.cInvalidWrites      = pPatchSSM->patch.cInvalidWrites;
    pPatch->patch.uPatchIdx           = pPatchSSM->patch.uPatchIdx;
    pPatch->patch.bDirtyOpcode        = pPatchSSM->patch.bDirtyOpcode;
    /* Not part of the saved state; rebuilt later as trampolines are registered. */
    pPatch->patch.pTrampolinePatchesHead = NULL;
}
619
/**
 * Converts a memory patch record to the saved state version.
 *
 * Copies the tree node cores verbatim and the patch payload field by field;
 * PATCHINFOSSM has no pTrampolinePatchesHead member, which is the reason
 * this conversion exists.
 *
 * @returns nothing.
 * @param   pPatchSSM   The saved state record (destination).
 * @param   pPatch      The memory version to save (source).
 */
static void patmR3PatchConvertMem2SSM(PPATMPATCHRECSSM pPatchSSM, PPATMPATCHREC pPatch)
{
    pPatchSSM->Core                      = pPatch->Core;
    pPatchSSM->CoreOffset                = pPatch->CoreOffset;
    pPatchSSM->patch.uState              = pPatch->patch.uState;
    pPatchSSM->patch.uOldState           = pPatch->patch.uOldState;
    pPatchSSM->patch.uOpMode             = pPatch->patch.uOpMode;
    pPatchSSM->patch.pPrivInstrGC        = pPatch->patch.pPrivInstrGC;
    pPatchSSM->patch.unusedHC            = pPatch->patch.unusedHC;
    memcpy(&pPatchSSM->patch.aPrivInstr[0], &pPatch->patch.aPrivInstr[0], MAX_INSTR_SIZE);
    pPatchSSM->patch.cbPrivInstr         = pPatch->patch.cbPrivInstr;
    pPatchSSM->patch.opcode              = pPatch->patch.opcode;
    pPatchSSM->patch.cbPatchJump         = pPatch->patch.cbPatchJump;
    pPatchSSM->patch.pPatchJumpDestGC    = pPatch->patch.pPatchJumpDestGC;
    pPatchSSM->patch.pPatchBlockOffset   = pPatch->patch.pPatchBlockOffset;
    pPatchSSM->patch.cbPatchBlockSize    = pPatch->patch.cbPatchBlockSize;
    pPatchSSM->patch.uCurPatchOffset     = pPatch->patch.uCurPatchOffset;
    pPatchSSM->patch.flags               = pPatch->patch.flags;
    pPatchSSM->patch.pInstrGCLowest      = pPatch->patch.pInstrGCLowest;
    pPatchSSM->patch.pInstrGCHighest     = pPatch->patch.pInstrGCHighest;
    pPatchSSM->patch.FixupTree           = pPatch->patch.FixupTree;
    pPatchSSM->patch.nrFixups            = pPatch->patch.nrFixups;
    pPatchSSM->patch.nrJumpRecs          = pPatch->patch.nrJumpRecs;
    pPatchSSM->patch.JumpTree            = pPatch->patch.JumpTree;
    pPatchSSM->patch.Patch2GuestAddrTree = pPatch->patch.Patch2GuestAddrTree;
    pPatchSSM->patch.Guest2PatchAddrTree = pPatch->patch.Guest2PatchAddrTree;
    pPatchSSM->patch.nrPatch2GuestRecs   = pPatch->patch.nrPatch2GuestRecs;
    pPatchSSM->patch.unused              = pPatch->patch.unused;
    pPatchSSM->patch.pTempInfo           = pPatch->patch.pTempInfo;
    pPatchSSM->patch.cCodeWrites         = pPatch->patch.cCodeWrites;
    pPatchSSM->patch.cTraps              = pPatch->patch.cTraps;
    pPatchSSM->patch.cInvalidWrites      = pPatch->patch.cInvalidWrites;
    pPatchSSM->patch.uPatchIdx           = pPatch->patch.uPatchIdx;
    pPatchSSM->patch.bDirtyOpcode        = pPatch->patch.bDirtyOpcode;
}
662
/**
 * Callback function for RTAvloU32DoWithAll
 *
 * Saves the state of the patch that's being enumerated: first the patch
 * record itself, then its fixup records, then its patch-to-guest lookup
 * records.
 *
 * @returns VBox status code.
 * @param   pNode   Current node (a PATMPATCHREC).
 * @param   pVM1    Pointer to the VM.
 */
static DECLCALLBACK(int) patmSavePatchState(PAVLOU32NODECORE pNode, void *pVM1)
{
    PVM pVM = (PVM)pVM1;
    PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
    PATMPATCHRECSSM patch;
    PSSMHANDLE pSSM = pVM->patm.s.savedstate.pSSM;
    int rc;

    /* The global-functions pseudo patch is saved separately, never through this enumeration. */
    Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));

    /* Convert to the saved-state layout (drops pTrampolinePatchesHead). */
    patmR3PatchConvertMem2SSM(&patch, pPatch);
    Log4(("patmSavePatchState: cbPatchJump=%u uCurPathOffset=%#x pInstrGCLowest/Higest=%#x/%#x nrFixups=%#x nrJumpRecs=%#x\n",
          patch.patch.cbPatchJump, patch.patch.uCurPatchOffset, patch.patch.pInstrGCLowest, patch.patch.pInstrGCHighest,
          patch.patch.nrFixups, patch.patch.nrJumpRecs));

    /*
     * Reset HC pointers that need to be recalculated when loading the state
     */
    AssertMsg(patch.patch.uState == PATCH_REFUSED || (patch.patch.pPatchBlockOffset || (patch.patch.flags & (PATMFL_SYSENTER_XP|PATMFL_INT3_REPLACEMENT))),
              ("State = %x pPatchBlockHC=%08x flags=%x\n", patch.patch.uState, PATCHCODE_PTR_HC(&patch.patch), patch.patch.flags));
    Assert(pPatch->patch.JumpTree == 0);
    Assert(!pPatch->patch.pTempInfo || pPatch->patch.pTempInfo->DisasmJumpTree == 0);
    Assert(!pPatch->patch.pTempInfo || pPatch->patch.pTempInfo->IllegalInstrTree == 0);

    /* Save the patch record itself */
    rc = SSMR3PutStructEx(pSSM, &patch, sizeof(patch), 0 /*fFlags*/, &g_aPatmPatchRecFields[0], NULL);
    AssertRCReturn(rc, rc);

    /*
     * Reset HC pointers in fixup records and save them.
     */
#ifdef VBOX_STRICT
    /* Sanity: the counter saved above must match the actual tree size. */
    uint32_t nrFixupRecs = 0;
    RTAvlPVDoWithAll(&pPatch->patch.FixupTree, true, patmCountLeafPV, &nrFixupRecs);
    AssertMsg(nrFixupRecs == pPatch->patch.nrFixups, ("Fixup inconsistency! counted %d vs %d\n", nrFixupRecs, pPatch->patch.nrFixups));
#endif
    rc = RTAvlPVDoWithAll(&pPatch->patch.FixupTree, true, patmSaveFixupRecords, pVM);
    AssertRCReturn(rc, rc);

#ifdef VBOX_STRICT
    /* Sanity: same check for the patch-to-guest lookup records. */
    uint32_t nrLookupRecords = 0;
    RTAvlU32DoWithAll(&pPatch->patch.Patch2GuestAddrTree, true, patmCountLeaf, &nrLookupRecords);
    Assert(nrLookupRecords == pPatch->patch.nrPatch2GuestRecs);
#endif

    rc = RTAvlU32DoWithAll(&pPatch->patch.Patch2GuestAddrTree, true, patmSaveP2GLookupRecords, pVM);
    AssertRCReturn(rc, rc);

    return VINF_SUCCESS;
}
722
/**
 * Execute state save operation.
 *
 * Saves a scrubbed copy of the PATM structure, the raw patch memory, the GC
 * state block, the PATM stack page and finally every patch record.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 * @param   pSSM    SSM operation handle.
 */
DECLCALLBACK(int) patmR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    /* Work on a local copy so the live structure isn't disturbed by the
       pointer scrubbing below. */
    PATM patmInfo = pVM->patm.s;
    int  rc;

    pVM->patm.s.savedstate.pSSM = pSSM;

    /*
     * Reset HC pointers that need to be recalculated when loading the state
     */
    patmInfo.pPatchMemHC = NULL;
    patmInfo.pGCStateHC = 0;
    patmInfo.pvFaultMonitor = 0;

    Assert(patmInfo.ulCallDepth == 0);

    /*
     * Count the number of patches in the tree (feeling lazy)
     */
    patmInfo.savedstate.cPatches = 0;
    RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, patmCountPatch, &patmInfo.savedstate.cPatches);

    /*
     * Save PATM structure
     */
    rc = SSMR3PutStructEx(pSSM, &patmInfo, sizeof(patmInfo), 0 /*fFlags*/, &g_aPatmFields[0], NULL);
    AssertRCReturn(rc, rc);

    /*
     * Save patch memory contents
     */
    rc = SSMR3PutMem(pSSM, pVM->patm.s.pPatchMemHC, pVM->patm.s.cbPatchMem);
    AssertRCReturn(rc, rc);

    /*
     * Save GC state memory
     */
    rc = SSMR3PutStructEx(pSSM, pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE), 0 /*fFlags*/, &g_aPatmGCStateFields[0], NULL);
    AssertRCReturn(rc, rc);

    /*
     * Save PATM stack page
     */
    /* NOTE(review): SSMR3PutU32 status is not checked here; presumably relies
       on the checked SSMR3PutMem right after to surface any stream error —
       confirm SSM error behavior before changing. */
    SSMR3PutU32(pSSM, PATM_STACK_TOTAL_SIZE);
    rc = SSMR3PutMem(pSSM, pVM->patm.s.pGCStackHC, PATM_STACK_TOTAL_SIZE);
    AssertRCReturn(rc, rc);

    /*
     * Save all patches
     */
    rc = RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, patmSavePatchState, pVM);
    AssertRCReturn(rc, rc);

    /** @note patch statistics are not saved. */

    return VINF_SUCCESS;
}
787
788
789/**
790 * Execute state load operation.
791 *
792 * @returns VBox status code.
793 * @param pVM Pointer to the VM.
794 * @param pSSM SSM operation handle.
795 * @param uVersion Data layout version.
796 * @param uPass The data pass.
797 */
798DECLCALLBACK(int) patmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
799{
800 PATM patmInfo;
801 int rc;
802
803 if ( uVersion != PATM_SAVED_STATE_VERSION
804 && uVersion != PATM_SAVED_STATE_VERSION_MEM
805 && uVersion != PATM_SAVED_STATE_VERSION_FIXUP_HACK
806 && uVersion != PATM_SAVED_STATE_VERSION_VER16
807 )
808 {
809 AssertMsgFailed(("patmR3Load: Invalid version uVersion=%d!\n", uVersion));
810 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
811 }
812 uint32_t const fStructRestoreFlags = uVersion <= PATM_SAVED_STATE_VERSION_MEM ? SSMSTRUCT_FLAGS_MEM_BAND_AID_RELAXED : 0;
813 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
814
815 pVM->patm.s.savedstate.pSSM = pSSM;
816
817 /*
818 * Restore PATM structure
819 */
820 RT_ZERO(patmInfo);
821 if ( uVersion == PATM_SAVED_STATE_VERSION_MEM
822 && SSMR3HandleRevision(pSSM) >= 86139
823 && SSMR3HandleVersion(pSSM) >= VBOX_FULL_VERSION_MAKE(4, 2, 51))
824 rc = SSMR3GetStructEx(pSSM, &patmInfo, sizeof(patmInfo), SSMSTRUCT_FLAGS_MEM_BAND_AID_RELAXED,
825 &g_aPatmFields86139[0], NULL);
826 else
827 rc = SSMR3GetStructEx(pSSM, &patmInfo, sizeof(patmInfo), fStructRestoreFlags, &g_aPatmFields[0], NULL);
828 AssertRCReturn(rc, rc);
829
830 /* Relative calls are made to the helper functions. Therefor their relative location must not change! */
831 /* Note: we reuse the saved global helpers and assume they are identical, which is kind of dangerous. */
832 AssertLogRelReturn((pVM->patm.s.pfnHelperCallGC - pVM->patm.s.pPatchMemGC) == (patmInfo.pfnHelperCallGC - patmInfo.pPatchMemGC),
833 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
834 AssertLogRelReturn((pVM->patm.s.pfnHelperRetGC - pVM->patm.s.pPatchMemGC) == (patmInfo.pfnHelperRetGC - patmInfo.pPatchMemGC),
835 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
836 AssertLogRelReturn((pVM->patm.s.pfnHelperJumpGC - pVM->patm.s.pPatchMemGC) == (patmInfo.pfnHelperJumpGC - patmInfo.pPatchMemGC),
837 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
838 AssertLogRelReturn((pVM->patm.s.pfnHelperIretGC - pVM->patm.s.pPatchMemGC) == (patmInfo.pfnHelperIretGC - patmInfo.pPatchMemGC),
839 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
840 AssertLogRelReturn(pVM->patm.s.cbPatchMem == patmInfo.cbPatchMem, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
841
842 pVM->patm.s.offPatchMem = patmInfo.offPatchMem;
843 pVM->patm.s.deltaReloc = patmInfo.deltaReloc;
844 pVM->patm.s.uCurrentPatchIdx = patmInfo.uCurrentPatchIdx;
845 pVM->patm.s.fOutOfMemory = patmInfo.fOutOfMemory;
846
847 /* Lowest and highest patched instruction */
848 pVM->patm.s.pPatchedInstrGCLowest = patmInfo.pPatchedInstrGCLowest;
849 pVM->patm.s.pPatchedInstrGCHighest = patmInfo.pPatchedInstrGCHighest;
850
851 /* Sysenter handlers */
852 pVM->patm.s.pfnSysEnterGC = patmInfo.pfnSysEnterGC;
853 pVM->patm.s.pfnSysEnterPatchGC = patmInfo.pfnSysEnterPatchGC;
854 pVM->patm.s.uSysEnterPatchIdx = patmInfo.uSysEnterPatchIdx;
855
856 Assert(patmInfo.ulCallDepth == 0 && pVM->patm.s.ulCallDepth == 0);
857
858 Log(("pPatchMemGC %RRv vs old %RRv\n", pVM->patm.s.pPatchMemGC, patmInfo.pPatchMemGC));
859 Log(("pGCStateGC %RRv vs old %RRv\n", pVM->patm.s.pGCStateGC, patmInfo.pGCStateGC));
860 Log(("pGCStackGC %RRv vs old %RRv\n", pVM->patm.s.pGCStackGC, patmInfo.pGCStackGC));
861 Log(("pCPUMCtxGC %RRv vs old %RRv\n", pVM->patm.s.pCPUMCtxGC, patmInfo.pCPUMCtxGC));
862
863
864 /** @note patch statistics are not restored. */
865
866 /*
867 * Restore patch memory contents
868 */
869 Log(("Restore patch memory: new %RRv old %RRv\n", pVM->patm.s.pPatchMemGC, patmInfo.pPatchMemGC));
870 rc = SSMR3GetMem(pSSM, pVM->patm.s.pPatchMemHC, pVM->patm.s.cbPatchMem);
871 AssertRCReturn(rc, rc);
872
873 /*
874 * Restore GC state memory
875 */
876 RT_BZERO(pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE));
877 rc = SSMR3GetStructEx(pSSM, pVM->patm.s.pGCStateHC, sizeof(PATMGCSTATE), fStructRestoreFlags, &g_aPatmGCStateFields[0], NULL);
878 AssertRCReturn(rc, rc);
879
880 /*
881 * Restore PATM stack page
882 */
883 uint32_t cbStack = PATM_STACK_TOTAL_SIZE;
884 if (uVersion > PATM_SAVED_STATE_VERSION_MEM)
885 {
886 rc = SSMR3GetU32(pSSM, &cbStack);
887 AssertRCReturn(rc, rc);
888 }
889 AssertCompile(!(PATM_STACK_TOTAL_SIZE & 31));
890 AssertLogRelMsgReturn(cbStack > 0 && cbStack <= PATM_STACK_TOTAL_SIZE && !(cbStack & 31),
891 ("cbStack=%#x vs %#x", cbStack, PATM_STACK_TOTAL_SIZE),
892 VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
893 rc = SSMR3GetMem(pSSM, pVM->patm.s.pGCStackHC, cbStack);
894 AssertRCReturn(rc, rc);
895 if (cbStack < PATM_STACK_TOTAL_SIZE)
896 memset((uint8_t *)pVM->patm.s.pGCStackHC + cbStack, 0, PATM_STACK_TOTAL_SIZE - cbStack);
897
898 /*
899 * Load all patches
900 */
901 for (unsigned i = 0; i < patmInfo.savedstate.cPatches; i++)
902 {
903 PATMPATCHRECSSM patch;
904 PATMPATCHREC *pPatchRec;
905
906 RT_ZERO(patch);
907 rc = SSMR3GetStructEx(pSSM, &patch, sizeof(patch), fStructRestoreFlags, &g_aPatmPatchRecFields[0], NULL);
908 AssertRCReturn(rc, rc);
909 Log4(("patmR3Load: cbPatchJump=%u uCurPathOffset=%#x pInstrGCLowest/Higest=%#x/%#x nrFixups=%#x nrJumpRecs=%#x\n",
910 patch.patch.cbPatchJump, patch.patch.uCurPatchOffset, patch.patch.pInstrGCLowest, patch.patch.pInstrGCHighest,
911 patch.patch.nrFixups, patch.patch.nrJumpRecs));
912
913 Assert(!(patch.patch.flags & PATMFL_GLOBAL_FUNCTIONS));
914
915 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pPatchRec);
916 if (RT_FAILURE(rc))
917 {
918 AssertMsgFailed(("Out of memory!!!!\n"));
919 return VERR_NO_MEMORY;
920 }
921
922 /* Convert SSM version to memory. */
923 patmR3PatchConvertSSM2Mem(pPatchRec, &patch);
924
925 Log(("Restoring patch %RRv -> %RRv state %x\n", pPatchRec->patch.pPrivInstrGC, patmInfo.pPatchMemGC + pPatchRec->patch.pPatchBlockOffset, pPatchRec->patch.uState));
926 bool ret = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
927 Assert(ret);
928 if (pPatchRec->patch.uState != PATCH_REFUSED)
929 {
930 if (pPatchRec->patch.pPatchBlockOffset)
931 {
932 /* We actually generated code for this patch. */
933 ret = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
934 AssertMsg(ret, ("Inserting patch %RRv offset %08RX32 failed!!\n", pPatchRec->patch.pPrivInstrGC, pPatchRec->CoreOffset.Key));
935 }
936 }
937 /* Set to zero as we don't need it anymore. */
938 pPatchRec->patch.pTempInfo = 0;
939
940 PATMP2GLOOKUPREC cacheRec;
941 RT_ZERO(cacheRec);
942 cacheRec.pPatch = &pPatchRec->patch;
943
944 uint8_t *pPrivInstrHC = patmR3GCVirtToHCVirt(pVM, &cacheRec, pPatchRec->patch.pPrivInstrGC);
945 /* Can fail due to page or page table not present. */
946
947 /*
948 * Restore fixup records and correct HC pointers in fixup records
949 */
950 pPatchRec->patch.FixupTree = 0;
951 pPatchRec->patch.nrFixups = 0; /* increased by patmPatchAddReloc32 */
952 for (unsigned j = 0; j < patch.patch.nrFixups; j++)
953 {
954 RELOCREC rec;
955 int32_t offset;
956 RTRCPTR *pFixup;
957
958 RT_ZERO(rec);
959 rc = SSMR3GetStructEx(pSSM, &rec, sizeof(rec), fStructRestoreFlags, &g_aPatmRelocRec[0], NULL);
960 AssertRCReturn(rc, rc);
961
962 if (pPrivInstrHC)
963 {
964 /* rec.pRelocPos now contains the relative position inside the hypervisor area. */
965 offset = (int32_t)(intptr_t)rec.pRelocPos;
966 /* Convert to HC pointer again. */
967 if ((uintptr_t)rec.pRelocPos < pVM->patm.s.cbPatchMem)
968 rec.pRelocPos = pVM->patm.s.pPatchMemHC + (uintptr_t)rec.pRelocPos;
969 else
970 rec.pRelocPos = NULL;
971 pFixup = (RTRCPTR *)rec.pRelocPos;
972
973 if (pPatchRec->patch.uState != PATCH_REFUSED)
974 {
975 if ( rec.uType == FIXUP_REL_JMPTOPATCH
976 && (pPatchRec->patch.flags & PATMFL_PATCHED_GUEST_CODE))
977 {
978 Assert(pPatchRec->patch.cbPatchJump == SIZEOF_NEARJUMP32 || pPatchRec->patch.cbPatchJump == SIZEOF_NEAR_COND_JUMP32);
979 unsigned offset2 = (pPatchRec->patch.cbPatchJump == SIZEOF_NEARJUMP32) ? 1 : 2;
980
981 rec.pRelocPos = pPrivInstrHC + offset2;
982 pFixup = (RTRCPTR *)rec.pRelocPos;
983 }
984
985 rc = patmCorrectFixup(pVM, uVersion, patmInfo, &pPatchRec->patch, &rec, offset, pFixup);
986 AssertRCReturn(rc, rc);
987 }
988
989 rc = patmPatchAddReloc32(pVM, &pPatchRec->patch, rec.pRelocPos, rec.uType, rec.pSource, rec.pDest);
990 AssertRCReturn(rc, rc);
991 }
992 }
993 /* Release previous lock if any. */
994 if (cacheRec.Lock.pvMap)
995 PGMPhysReleasePageMappingLock(pVM, &cacheRec.Lock);
996
997 /* And all patch to guest lookup records */
998 Assert(pPatchRec->patch.nrPatch2GuestRecs || pPatchRec->patch.uState == PATCH_REFUSED || (pPatchRec->patch.flags & (PATMFL_SYSENTER_XP | PATMFL_IDTHANDLER | PATMFL_TRAPHANDLER | PATMFL_INT3_REPLACEMENT)));
999
1000 pPatchRec->patch.Patch2GuestAddrTree = 0;
1001 pPatchRec->patch.Guest2PatchAddrTree = 0;
1002 if (pPatchRec->patch.nrPatch2GuestRecs)
1003 {
1004 RECPATCHTOGUEST rec;
1005 uint32_t nrPatch2GuestRecs = pPatchRec->patch.nrPatch2GuestRecs;
1006
1007 pPatchRec->patch.nrPatch2GuestRecs = 0; /* incremented by patmr3AddP2GLookupRecord */
1008 for (uint32_t j=0;j<nrPatch2GuestRecs;j++)
1009 {
1010 RT_ZERO(rec);
1011 rc = SSMR3GetStructEx(pSSM, &rec, sizeof(rec), fStructRestoreFlags, &g_aPatmRecPatchToGuest[0], NULL);
1012 AssertRCReturn(rc, rc);
1013
1014 patmR3AddP2GLookupRecord(pVM, &pPatchRec->patch, (uintptr_t)rec.Core.Key + pVM->patm.s.pPatchMemHC, rec.pOrgInstrGC, rec.enmType, rec.fDirty);
1015 }
1016 Assert(pPatchRec->patch.Patch2GuestAddrTree);
1017 }
1018
1019 if (pPatchRec->patch.flags & PATMFL_CODE_MONITORED)
1020 {
1021 /* Insert the guest page lookup records (for detection self-modifying code) */
1022 rc = patmInsertPatchPages(pVM, &pPatchRec->patch);
1023 AssertRCReturn(rc, rc);
1024 }
1025
1026#if 0 /* can fail def LOG_ENABLED */
1027 if ( pPatchRec->patch.uState != PATCH_REFUSED
1028 && !(pPatchRec->patch.flags & PATMFL_INT3_REPLACEMENT))
1029 {
1030 pPatchRec->patch.pTempInfo = (PPATCHINFOTEMP)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(PATCHINFOTEMP));
1031 Log(("Patch code ----------------------------------------------------------\n"));
1032 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(&pPatchRec->patch), PATCHCODE_PTR_GC(&pPatchRec->patch), patmr3DisasmCallback, &pPatchRec->patch);
1033 Log(("Patch code ends -----------------------------------------------------\n"));
1034 MMR3HeapFree(pPatchRec->patch.pTempInfo);
1035 pPatchRec->patch.pTempInfo = NULL;
1036 }
1037#endif
1038 /* Remove the patch in case the gc mapping is not present. */
1039 if ( !pPrivInstrHC
1040 && pPatchRec->patch.uState == PATCH_ENABLED)
1041 {
1042 Log(("Remove patch %RGv due to failed HC address translation\n", pPatchRec->patch.pPrivInstrGC));
1043 PATMR3RemovePatch(pVM, pPatchRec->patch.pPrivInstrGC);
1044 }
1045 }
1046
1047 /*
1048 * Correct absolute fixups in the global patch. (helper functions)
1049 * Bit of a mess. Uses the new patch record, but restored patch functions.
1050 */
1051 PRELOCREC pRec = 0;
1052 AVLPVKEY key = 0;
1053
1054 Log(("Correct fixups in global helper functions\n"));
1055 while (true)
1056 {
1057 int32_t offset;
1058 RTRCPTR *pFixup;
1059
1060 /* Get the record that's closest from above */
1061 pRec = (PRELOCREC)RTAvlPVGetBestFit(&pVM->patm.s.pGlobalPatchRec->patch.FixupTree, key, true);
1062 if (pRec == 0)
1063 break;
1064
1065 key = (AVLPVKEY)(pRec->pRelocPos + 1); /* search for the next record during the next round. */
1066
1067 /* rec.pRelocPos now contains the relative position inside the hypervisor area. */
1068 offset = (int32_t)(pRec->pRelocPos - pVM->patm.s.pPatchMemHC);
1069 pFixup = (RTRCPTR *)pRec->pRelocPos;
1070
1071 /* Correct fixups that refer to PATM structures in the hypervisor region (their addresses might have changed). */
1072 rc = patmCorrectFixup(pVM, uVersion, patmInfo, &pVM->patm.s.pGlobalPatchRec->patch, pRec, offset, pFixup);
1073 AssertRCReturn(rc, rc);
1074 }
1075
1076#ifdef VBOX_WITH_STATISTICS
1077 /*
1078 * Restore relevant old statistics
1079 */
1080 pVM->patm.s.StatDisabled = patmInfo.StatDisabled;
1081 pVM->patm.s.StatUnusable = patmInfo.StatUnusable;
1082 pVM->patm.s.StatEnabled = patmInfo.StatEnabled;
1083 pVM->patm.s.StatInstalled = patmInfo.StatInstalled;
1084#endif
1085
1086 return VINF_SUCCESS;
1087}
1088
1089/**
1090 * Correct fixups to predefined hypervisor PATM regions. (their addresses might have changed)
1091 *
1092 * @returns VBox status code.
1093 * @param pVM Pointer to the VM.
1094 * @param uVersion Saved state version.
1095 * @param patmInfo Saved PATM structure
1096 * @param pPatch Patch record
1097 * @param pRec Relocation record
1098 * @param offset Offset of referenced data/code
1099 * @param pFixup Fixup address
1100 */
1101static int patmCorrectFixup(PVM pVM, unsigned uVersion, PATM &patmInfo, PPATCHINFO pPatch, PRELOCREC pRec,
1102 int32_t offset, RTRCPTR *pFixup)
1103{
1104 int32_t delta = pVM->patm.s.pPatchMemGC - patmInfo.pPatchMemGC;
1105
1106 switch (pRec->uType)
1107 {
1108 case FIXUP_ABSOLUTE:
1109 case FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL:
1110 {
1111 Assert( pRec->uType != PATM_SAVED_STATE_VERSION_NO_RAW_MEM
1112 || (pRec->pSource == pRec->pDest && PATM_IS_FIXUP_TYPE(pRec->pSource)) );
1113
1114 /* bird: What is this for exactly? Only the MMIO fixups used to have pSource set. */
1115 if ( pRec->pSource
1116 && !PATMIsPatchGCAddr(pVM, (RTRCUINTPTR)pRec->pSource)
1117 && pRec->uType != FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL)
1118 break;
1119
1120 RTRCPTR const uFixup = *pFixup;
1121 if ( uFixup >= patmInfo.pGCStateGC
1122 && uFixup < patmInfo.pGCStateGC + sizeof(PATMGCSTATE))
1123 {
1124 LogFlow(("Changing absolute GCState at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, uFixup, (uFixup - patmInfo.pGCStateGC) + pVM->patm.s.pGCStateGC));
1125 *pFixup = (uFixup - patmInfo.pGCStateGC) + pVM->patm.s.pGCStateGC;
1126 }
1127 else if ( uFixup >= patmInfo.pCPUMCtxGC
1128 && uFixup < patmInfo.pCPUMCtxGC + sizeof(CPUMCTX))
1129 {
1130 LogFlow(("Changing absolute CPUMCTX at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, uFixup, (uFixup - patmInfo.pCPUMCtxGC) + pVM->patm.s.pCPUMCtxGC));
1131
1132 /* The CPUMCTX structure has completely changed, so correct the offsets too. */
1133 if (uVersion == PATM_SAVED_STATE_VERSION_VER16)
1134 {
1135 unsigned offCpumCtx = uFixup - patmInfo.pCPUMCtxGC;
1136
1137 /* ''case RT_OFFSETOF()'' does not work as gcc refuses to use & as a constant expression.
1138 * Defining RT_OFFSETOF as __builtin_offsetof for gcc would make this possible. But this
1139 * function is not available in older gcc versions, at least not in gcc-3.3 */
1140 if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr0))
1141 {
1142 LogFlow(("Changing dr[0] offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, dr[0])));
1143 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[0]);
1144 }
1145 else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr1))
1146 {
1147 LogFlow(("Changing dr[1] offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, dr[1])));
1148 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[1]);
1149 }
1150 else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr2))
1151 {
1152 LogFlow(("Changing dr[2] offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, dr[2])));
1153 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[2]);
1154 }
1155 else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr3))
1156 {
1157 LogFlow(("Changing dr[3] offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, dr[3])));
1158 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[3]);
1159 }
1160 else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr4))
1161 {
1162 LogFlow(("Changing dr[4] offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, dr[4])));
1163 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[4]);
1164 }
1165 else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr5))
1166 {
1167 LogFlow(("Changing dr[5] offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, dr[5])));
1168 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[5]);
1169 }
1170 else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr6))
1171 {
1172 LogFlow(("Changing dr[6] offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, dr[6])));
1173 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[6]);
1174 }
1175 else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, dr7))
1176 {
1177 LogFlow(("Changing dr[7] offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, dr[7])));
1178 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[7]);
1179 }
1180 else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr0))
1181 {
1182 LogFlow(("Changing cr0 offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, cr0)));
1183 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr0);
1184 }
1185 else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr2))
1186 {
1187 LogFlow(("Changing cr2 offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, cr2)));
1188 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr2);
1189 }
1190 else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr3))
1191 {
1192 LogFlow(("Changing cr3 offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, cr3)));
1193 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr3);
1194 }
1195 else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, cr4))
1196 {
1197 LogFlow(("Changing cr4 offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, cr4)));
1198 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, cr4);
1199 }
1200 else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, tr))
1201 {
1202 LogFlow(("Changing tr offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, tr)));
1203 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, tr);
1204 }
1205 else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, ldtr))
1206 {
1207 LogFlow(("Changing ldtr offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, ldtr)));
1208 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, ldtr);
1209 }
1210 else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, gdtr.pGdt))
1211 {
1212 LogFlow(("Changing pGdt offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, gdtr.pGdt)));
1213 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, gdtr.pGdt);
1214 }
1215 else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, gdtr.cbGdt))
1216 {
1217 LogFlow(("Changing cbGdt offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, gdtr.cbGdt)));
1218 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, gdtr.cbGdt);
1219 }
1220 else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, idtr.pIdt))
1221 {
1222 LogFlow(("Changing pIdt offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, idtr.pIdt)));
1223 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, idtr.pIdt);
1224 }
1225 else if (offCpumCtx == (unsigned)RT_OFFSETOF(CPUMCTX_VER1_6, idtr.cbIdt))
1226 {
1227 LogFlow(("Changing cbIdt offset from %x to %x\n", offCpumCtx, RT_OFFSETOF(CPUMCTX, idtr.cbIdt)));
1228 *pFixup = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, idtr.cbIdt);
1229 }
1230 else
1231 AssertMsgFailed(("Unexpected CPUMCTX offset %x\n", offCpumCtx));
1232 }
1233 else
1234 *pFixup = (uFixup - patmInfo.pCPUMCtxGC) + pVM->patm.s.pCPUMCtxGC;
1235 }
1236 else if ( uFixup >= patmInfo.pStatsGC
1237 && uFixup < patmInfo.pStatsGC + PATM_STAT_MEMSIZE)
1238 {
1239 LogFlow(("Changing absolute Stats at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, uFixup, (uFixup - patmInfo.pStatsGC) + pVM->patm.s.pStatsGC));
1240 *pFixup = (uFixup - patmInfo.pStatsGC) + pVM->patm.s.pStatsGC;
1241 }
1242 else if ( uFixup >= patmInfo.pGCStackGC
1243 && uFixup < patmInfo.pGCStackGC + PATM_STACK_TOTAL_SIZE)
1244 {
1245 LogFlow(("Changing absolute Stack at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, uFixup, (uFixup - patmInfo.pGCStackGC) + pVM->patm.s.pGCStackGC));
1246 *pFixup = (uFixup - patmInfo.pGCStackGC) + pVM->patm.s.pGCStackGC;
1247 }
1248 else if ( uFixup >= patmInfo.pPatchMemGC
1249 && uFixup < patmInfo.pPatchMemGC + patmInfo.cbPatchMem)
1250 {
1251 LogFlow(("Changing absolute PatchMem at %RRv from %RRv to %RRv\n", patmInfo.pPatchMemGC + offset, uFixup, (uFixup - patmInfo.pPatchMemGC) + pVM->patm.s.pPatchMemGC));
1252 *pFixup = (uFixup - patmInfo.pPatchMemGC) + pVM->patm.s.pPatchMemGC;
1253 }
1254 /*
1255 * For PATM_SAVED_STATE_VERSION_FIXUP_HACK and earlier boldly ASSUME:
1256 * 1. That pCPUMCtxGC is in the VM structure and that its location is
1257 * at the first page of the same 4 MB chunk.
1258 * 2. That the forced actions were in the first 32 bytes of the VM
1259 * structure.
1260 * 3. That the CPUM leaves are less than 8KB into the structure.
1261 */
1262 else if ( uVersion <= PATM_SAVED_STATE_VERSION_FIXUP_HACK
1263 && uFixup - (patmInfo.pCPUMCtxGC & UINT32_C(0xffc00000)) < UINT32_C(32))
1264 {
1265 LogFlow(("Changing fLocalForcedActions fixup from %RRv to %RRv\n", uFixup, pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions)));
1266 *pFixup = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions);
1267 pRec->pSource = pRec->pDest = PATM_VM_FORCEDACTIONS;
1268 pRec->uType = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
1269 }
1270 else if ( uVersion <= PATM_SAVED_STATE_VERSION_FIXUP_HACK
1271 && uFixup - (patmInfo.pCPUMCtxGC & UINT32_C(0xffc00000)) < UINT32_C(8192))
1272 {
1273 static int cCpuidFixup = 0;
1274
1275 /* Very dirty assumptions about the cpuid patch and cpuid ordering. */
1276 switch (cCpuidFixup & 3)
1277 {
1278 case 0:
1279 *pFixup = CPUMR3GetGuestCpuIdPatmDefRCPtr(pVM);
1280 pRec->pSource = pRec->pDest = PATM_CPUID_DEF_PTR;
1281 pRec->uType = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
1282 break;
1283 case 1:
1284 *pFixup = CPUMR3GetGuestCpuIdPatmStdRCPtr(pVM);
1285 pRec->pSource = pRec->pDest = PATM_CPUID_STD_PTR;
1286 pRec->uType = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
1287 break;
1288 case 2:
1289 *pFixup = CPUMR3GetGuestCpuIdPatmExtRCPtr(pVM);
1290 pRec->pSource = pRec->pDest = PATM_CPUID_EXT_PTR;
1291 pRec->uType = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
1292 break;
1293 case 3:
1294 *pFixup = CPUMR3GetGuestCpuIdPatmCentaurRCPtr(pVM);
1295 pRec->pSource = pRec->pDest = PATM_CPUID_CENTAUR_PTR;
1296 pRec->uType = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
1297 break;
1298 }
1299 LogFlow(("Changing cpuid fixup %d from %RRv to %RRv\n", cCpuidFixup, uFixup, *pFixup));
1300 cCpuidFixup++;
1301 }
1302 /*
1303 * For PATM_SAVED_STATE_VERSION_MEM thru PATM_SAVED_STATE_VERSION_NO_RAW_MEM
1304 * we abused Core.Key to store the type for fixups needing correcting on load.
1305 */
1306 else if ( uVersion >= PATM_SAVED_STATE_VERSION_MEM
1307 && uVersion <= PATM_SAVED_STATE_VERSION_NO_RAW_MEM)
1308 {
1309 /* Core.Key abused to store the type of fixup. */
1310 switch ((uintptr_t)pRec->Core.Key)
1311 {
1312 case PATM_FIXUP_CPU_FF_ACTION:
1313 *pFixup = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions);
1314 pRec->pSource = pRec->pDest = PATM_VM_FORCEDACTIONS;
1315 pRec->uType = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
1316 LogFlow(("Changing cpu ff action fixup from %x to %x\n", uFixup, *pFixup));
1317 break;
1318 case PATM_FIXUP_CPUID_DEFAULT:
1319 *pFixup = CPUMR3GetGuestCpuIdPatmDefRCPtr(pVM);
1320 pRec->pSource = pRec->pDest = PATM_CPUID_DEF_PTR;
1321 pRec->uType = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
1322 LogFlow(("Changing cpuid def fixup from %x to %x\n", uFixup, *pFixup));
1323 break;
1324 case PATM_FIXUP_CPUID_STANDARD:
1325 *pFixup = CPUMR3GetGuestCpuIdPatmStdRCPtr(pVM);
1326 pRec->pSource = pRec->pDest = PATM_CPUID_STD_PTR;
1327 pRec->uType = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
1328 LogFlow(("Changing cpuid std fixup from %x to %x\n", uFixup, *pFixup));
1329 break;
1330 case PATM_FIXUP_CPUID_EXTENDED:
1331 *pFixup = CPUMR3GetGuestCpuIdPatmExtRCPtr(pVM);
1332 pRec->pSource = pRec->pDest = PATM_CPUID_EXT_PTR;
1333 pRec->uType = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
1334 LogFlow(("Changing cpuid ext fixup from %x to %x\n", uFixup, *pFixup));
1335 break;
1336 case PATM_FIXUP_CPUID_CENTAUR:
1337 *pFixup = CPUMR3GetGuestCpuIdPatmCentaurRCPtr(pVM);
1338 pRec->pSource = pRec->pDest = PATM_CPUID_CENTAUR_PTR;
1339 pRec->uType = FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL;
1340 LogFlow(("Changing cpuid centaur fixup from %x to %x\n", uFixup, *pFixup));
1341 break;
1342 default:
1343 AssertMsgFailed(("Unexpected fixup value %p\n", (uintptr_t)pRec->Core.Key));
1344 break;
1345 }
1346 }
1347 /*
1348 * After PATM_SAVED_STATE_VERSION_NO_RAW_MEM we changed the fixup type
1349 * and instead put the patch fixup code in the source and target addresses.
1350 */
1351 else if ( uVersion > PATM_SAVED_STATE_VERSION_NO_RAW_MEM
1352 && pRec->uType == FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL)
1353 {
1354 Assert(pRec->pSource == pRec->pDest); Assert(PATM_IS_FIXUP_TYPE(pRec->pSource));
1355 switch (pRec->pSource)
1356 {
1357 case PATM_VM_FORCEDACTIONS:
1358 *pFixup = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions);
1359 break;
1360 case PATM_CPUID_DEF_PTR:
1361 *pFixup = CPUMR3GetGuestCpuIdPatmDefRCPtr(pVM);
1362 break;
1363 case PATM_CPUID_ARRAY_PTR:
1364 *pFixup = CPUMR3GetGuestCpuIdPatmArrayRCPtr(pVM);
1365 break;
1366 case PATM_CPUID_ARRAY_END_PTR:
1367 *pFixup = CPUMR3GetGuestCpuIdPatmArrayEndRCPtr(pVM);
1368 break;
1369 case PATM_CPUID_STD_PTR: /* Saved again patches only. */
1370 *pFixup = CPUMR3GetGuestCpuIdPatmStdRCPtr(pVM);
1371 break;
1372 case PATM_CPUID_EXT_PTR: /* Saved again patches only. */
1373 *pFixup = CPUMR3GetGuestCpuIdPatmExtRCPtr(pVM);
1374 break;
1375 case PATM_CPUID_CENTAUR_PTR: /* Saved again patches only. */
1376 *pFixup = CPUMR3GetGuestCpuIdPatmCentaurRCPtr(pVM);
1377 break;
1378 }
1379 }
1380 /*
1381 * Constant that may change between VM version needs fixing up.
1382 */
1383 else if (pRec->uType == FIXUP_CONSTANT_IN_PATCH_ASM_TMPL)
1384 {
1385 AssertLogRelReturn(uVersion > PATM_SAVED_STATE_VERSION_NO_RAW_MEM, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
1386 Assert(pRec->pSource == pRec->pDest); Assert(PATM_IS_FIXUP_TYPE(pRec->pSource));
1387 switch (pRec->pSource)
1388 {
1389 case PATM_CPUID_ARRAY_ENTRY_SIZE:
1390 *pFixup = sizeof(CPUMCPUIDLEAF);
1391 break;
1392 case PATM_CPUID_UNKNOWN_METHOD:
1393 *pFixup = CPUMR3GetGuestCpuIdPatmUnknownLeafMethod(pVM);
1394 break;
1395 default:
1396 AssertLogRelMsgFailed(("Unknown FIXUP_CONSTANT_IN_PATCH_ASM_TMPL fixup: %#x\n", pRec->pSource));
1397 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
1398 }
1399 }
1400
1401#ifdef RT_OS_WINDOWS
1402 AssertCompile(RT_OFFSETOF(VM, fGlobalForcedActions) < 32);
1403#endif
1404 break;
1405 }
1406
1407 case FIXUP_REL_JMPTOPATCH:
1408 {
1409 RTRCPTR pTarget = (RTRCPTR)((RTRCINTPTR)pRec->pDest + delta);
1410
1411 if ( pPatch->uState == PATCH_ENABLED
1412 && (pPatch->flags & PATMFL_PATCHED_GUEST_CODE))
1413 {
1414 uint8_t oldJump[SIZEOF_NEAR_COND_JUMP32];
1415 uint8_t temp[SIZEOF_NEAR_COND_JUMP32];
1416 RTRCPTR pJumpOffGC;
1417 RTRCINTPTR displ = (RTRCINTPTR)pTarget - (RTRCINTPTR)pRec->pSource;
1418 RTRCINTPTR displOld= (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pRec->pSource;
1419
1420 Log(("Relative fixup (g2p) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
1421
1422 Assert(pRec->pSource - pPatch->cbPatchJump == pPatch->pPrivInstrGC);
1423#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
1424 if (pPatch->cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
1425 {
1426 Assert(pPatch->flags & PATMFL_JUMP_CONFLICT);
1427
1428 pJumpOffGC = pPatch->pPrivInstrGC + 2; //two byte opcode
1429 oldJump[0] = pPatch->aPrivInstr[0];
1430 oldJump[1] = pPatch->aPrivInstr[1];
1431 *(RTRCUINTPTR *)&oldJump[2] = displOld;
1432 }
1433 else
1434#endif
1435 if (pPatch->cbPatchJump == SIZEOF_NEARJUMP32)
1436 {
1437 pJumpOffGC = pPatch->pPrivInstrGC + 1; //one byte opcode
1438 oldJump[0] = 0xE9;
1439 *(RTRCUINTPTR *)&oldJump[1] = displOld;
1440 }
1441 else
1442 {
1443 AssertMsgFailed(("Invalid patch jump size %d\n", pPatch->cbPatchJump));
1444 break;
1445 }
1446 Assert(pPatch->cbPatchJump <= sizeof(temp));
1447
1448 /*
1449 * Read old patch jump and compare it to the one we previously installed
1450 */
1451 int rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
1452 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
1453
1454 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
1455 {
1456 RTRCPTR pPage = pPatch->pPrivInstrGC & PAGE_BASE_GC_MASK;
1457
1458 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
1459 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
1460 }
1461 else
1462 if (memcmp(temp, oldJump, pPatch->cbPatchJump))
1463 {
1464 Log(("PATM: Patch jump was overwritten -> disabling patch!!\n"));
1465 /*
1466 * Disable patch; this is not a good solution
1467 */
1468 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
1469 pPatch->uState = PATCH_DISABLED;
1470 }
1471 else
1472 if (RT_SUCCESS(rc))
1473 {
1474 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pJumpOffGC, &displ, sizeof(displ));
1475 AssertRC(rc);
1476 }
1477 else
1478 AssertMsgFailed(("Unexpected error %d from MMR3PhysReadGCVirt\n", rc));
1479 }
1480 else
1481 Log(("Skip the guest jump to patch code for this disabled patch %08X\n", pRec->pRelocPos));
1482
1483 pRec->pDest = pTarget;
1484 break;
1485 }
1486
1487 case FIXUP_REL_JMPTOGUEST:
1488 {
1489 RTRCPTR pSource = (RTRCPTR)((RTRCINTPTR)pRec->pSource + delta);
1490 RTRCINTPTR displ = (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pSource;
1491
1492 Assert(!(pPatch->flags & PATMFL_GLOBAL_FUNCTIONS));
1493 Log(("Relative fixup (p2g) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
1494 *(RTRCUINTPTR *)pRec->pRelocPos = displ;
1495 pRec->pSource = pSource;
1496 break;
1497
1498 }
1499}
1500 return VINF_SUCCESS;
1501}
1502
注意: 瀏覽 TracBrowser 來幫助您使用儲存庫瀏覽器

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette