VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/PATMPatch.cpp@54746

Last change on this file since 54746 was 54746, checked in by vboxsync, 10 years ago

Changed PATCHGEN_PROLOG to take the expected output size as input instead of making an educated guess at max 256 bytes. Adjusting patch stats a little.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 56.7 KB
 
1/* $Id: PATMPatch.cpp 54746 2015-03-13 15:53:54Z vboxsync $ */
2/** @file
3 * PATMPatch - Dynamic Guest OS Instruction patches
4 *
5 * NOTE: CSAM assumes patch memory is never reused!!
6 */
7
8/*
9 * Copyright (C) 2006-2015 Oracle Corporation
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
12 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 */
19
20/*******************************************************************************
21* Header Files *
22*******************************************************************************/
23#define LOG_GROUP LOG_GROUP_PATM
24#include <VBox/vmm/patm.h>
25#include <VBox/vmm/pgm.h>
26#include <VBox/vmm/cpum.h>
27#include <VBox/vmm/mm.h>
28#include <VBox/vmm/em.h>
29#include <VBox/vmm/trpm.h>
30#include <VBox/vmm/csam.h>
31#include "PATMInternal.h"
32#include <VBox/vmm/vm.h>
33#include <VBox/param.h>
34
35#include <VBox/err.h>
36#include <VBox/log.h>
37#include <VBox/dis.h>
38#include <VBox/disopcode.h>
39
40#include <iprt/assert.h>
41#include <iprt/asm.h>
42#include <iprt/string.h>
43
44#include "PATMA.h"
45#include "PATMPatch.h"
46
47
48/*******************************************************************************
49* Structures and Typedefs *
50*******************************************************************************/
51/**
52 * Internal structure for passing more information about call fixups to
53 * patmPatchGenCode.
54 */
55typedef struct
56{
57 RTRCPTR pTargetGC;
58 RTRCPTR pCurInstrGC;
59 RTRCPTR pNextInstrGC;
60 RTRCPTR pReturnGC;
61} PATMCALLINFO, *PPATMCALLINFO;
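/* Editor's note: these members feed the PATM_RETURNADDR, PATM_CALLTARGET,
 * PATM_CURINSTRADDR and PATM_NEXTINSTRADDR fixup cases in patmPatchGenCode
 * below; a caller fills in only the members its chosen assembly template
 * consumes. Illustrative pattern (cf. patmPatchGenCall further down):
 *
 *     PATMCALLINFO callInfo;
 *     callInfo.pReturnGC = pCurInstrGC + pCpu->cbInstr;   // PATM_RETURNADDR
 *     callInfo.pTargetGC = pTargetGC;                     // PATM_CALLTARGET
 *     size = patmPatchGenCode(pVM, pPatch, pPB, pPatchAsmRec, 0, false, &callInfo);
 */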
62
63
64/*******************************************************************************
65* Defined Constants And Macros *
66*******************************************************************************/
67/** Value to use when not sure about the patch size. */
68#define PATCHGEN_DEF_SIZE 256
69
70#define PATCHGEN_PROLOG_NODEF(pVM, pPatch, a_cbMaxEmit) \
71 do { \
72 cbGivenPatchSize = (a_cbMaxEmit) + 16U /*jmp++*/; \
73 if (RT_LIKELY((pPatch)->pPatchBlockOffset + pPatch->uCurPatchOffset + cbGivenPatchSize < pVM->patm.s.cbPatchMem)) \
74 pPB = PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset; \
75 else \
76 { \
77 pVM->patm.s.fOutOfMemory = true; \
78 AssertMsgFailed(("offPatch=%#x + offEmit=%#x + a_cbMaxEmit=%#x + jmp --> cbTotalWithFudge=%#x >= cbPatchMem=%#x", \
79 (pPatch)->pPatchBlockOffset, pPatch->uCurPatchOffset, a_cbMaxEmit, \
80 (pPatch)->pPatchBlockOffset + pPatch->uCurPatchOffset + cbGivenPatchSize, pVM->patm.s.cbPatchMem)); \
81 return VERR_NO_MEMORY; \
82 } \
83 } while (0)
84
85#define PATCHGEN_PROLOG(pVM, pPatch, a_cbMaxEmit) \
86 uint8_t *pPB; \
87 uint32_t cbGivenPatchSize; \
88 PATCHGEN_PROLOG_NODEF(pVM, pPatch, a_cbMaxEmit)
89
90#define PATCHGEN_EPILOG(pPatch, a_cbActual) \
91 do { \
92 AssertMsg((a_cbActual) <= cbGivenPatchSize, ("a_cbActual=%#x cbGivenPatchSize=%#x\n", a_cbActual, cbGivenPatchSize)); \
93 Assert((a_cbActual) <= 640); \
94 pPatch->uCurPatchOffset += (a_cbActual); \
95 } while (0)
96
97
98
99
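/* Editor's note: the macros above define the emission protocol every
 * generator below follows: PATCHGEN_PROLOG reserves a worst-case byte count
 * in the patch memory block (returning VERR_NO_MEMORY when the block is
 * exhausted) and declares the write cursor pPB; PATCHGEN_EPILOG commits the
 * bytes actually emitted by advancing uCurPatchOffset. A minimal sketch of
 * the pattern, modelled on patmPatchGenIllegalInstr further down:
 *
 *     int patmPatchGenExample(PVM pVM, PPATCHINFO pPatch)
 *     {
 *         PATCHGEN_PROLOG(pVM, pPatch, 1);   // reserve 1 byte; declares pPB
 *         pPB[0] = 0xCC;                     // int3
 *         PATCHGEN_EPILOG(pPatch, 1);        // commit exactly what was written
 *         return VINF_SUCCESS;
 *     }
 */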
100int patmPatchAddReloc32(PVM pVM, PPATCHINFO pPatch, uint8_t *pRelocHC, uint32_t uType,
101 RTRCPTR pSource /*= 0*/, RTRCPTR pDest /*= 0*/)
102{
103 PRELOCREC pRec;
104
105 Assert( uType == FIXUP_ABSOLUTE
106 || (uType == FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL && pSource == pDest && PATM_IS_FIXUP_TYPE(pSource))
107 || ((uType == FIXUP_REL_JMPTOPATCH || uType == FIXUP_REL_JMPTOGUEST) && pSource && pDest));
108
109 LogFlow(("patmPatchAddReloc32 type=%d pRelocGC=%RRv source=%RRv dest=%RRv\n", uType, pRelocHC - pVM->patm.s.pPatchMemHC + pVM->patm.s.pPatchMemGC, pSource, pDest));
110
111 pRec = (PRELOCREC)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
112 Assert(pRec);
113 pRec->Core.Key = (AVLPVKEY)pRelocHC;
114 pRec->pRelocPos = pRelocHC; /* @todo redundant. */
115 pRec->pSource = pSource;
116 pRec->pDest = pDest;
117 pRec->uType = uType;
118
119 bool ret = RTAvlPVInsert(&pPatch->FixupTree, &pRec->Core);
120 Assert(ret); NOREF(ret);
121 pPatch->nrFixups++;
122
123 return VINF_SUCCESS;
124}
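/* Editor's note: a fixup record is what lets PATM re-resolve absolute
 * addresses embedded in patch code when a saved state is restored with a
 * different hypervisor layout. The typical call pattern, as used by the
 * generators below (off and offCtxMember are illustrative names):
 *
 *     *(RTRCPTR *)&pPB[off] = pVM->patm.s.pCPUMCtxGC + offCtxMember;
 *     patmPatchAddReloc32(pVM, pPatch, &pPB[off], FIXUP_ABSOLUTE);
 */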
125
126int patmPatchAddJump(PVM pVM, PPATCHINFO pPatch, uint8_t *pJumpHC, uint32_t offset, RTRCPTR pTargetGC, uint32_t opcode)
127{
128 PJUMPREC pRec;
129
130 pRec = (PJUMPREC)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
131 Assert(pRec);
132
133 pRec->Core.Key = (AVLPVKEY)pJumpHC;
134 pRec->pJumpHC = pJumpHC; /* @todo redundant. */
135 pRec->offDispl = offset;
136 pRec->pTargetGC = pTargetGC;
137 pRec->opcode = opcode;
138
139 bool ret = RTAvlPVInsert(&pPatch->JumpTree, &pRec->Core);
140 Assert(ret); NOREF(ret);
141 pPatch->nrJumpRecs++;
142
143 return VINF_SUCCESS;
144}
145
146static uint32_t patmPatchGenCode(PVM pVM, PPATCHINFO pPatch, uint8_t *pPB, PCPATCHASMRECORD pAsmRecord,
147 RCPTRTYPE(uint8_t *) pReturnAddrGC, bool fGenJump,
148 PPATMCALLINFO pCallInfo = 0)
149{
150 Assert(fGenJump == false || pReturnAddrGC);
151 Assert(fGenJump == false || pAsmRecord->offJump);
152 Assert(pAsmRecord);
153 Assert(pAsmRecord->cbFunction > sizeof(pAsmRecord->aRelocs[0].uType) * pAsmRecord->cRelocs);
154
155 // Copy the code block
156 memcpy(pPB, pAsmRecord->pbFunction, pAsmRecord->cbFunction);
157
158 // Process all fixups
159 uint32_t i, j;
160 for (j = 0, i = 0; i < pAsmRecord->cRelocs; i++)
161 {
162 for (; j < pAsmRecord->cbFunction; j++)
163 {
164 if (*(uint32_t*)&pPB[j] == pAsmRecord->aRelocs[i].uType)
165 {
166 RCPTRTYPE(uint32_t *) dest;
167
168#ifdef VBOX_STRICT
169 if (pAsmRecord->aRelocs[i].uType == PATM_FIXUP)
170 Assert(pAsmRecord->aRelocs[i].uInfo != 0);
171 else
172 Assert(pAsmRecord->aRelocs[i].uInfo == 0);
173#endif
174
175 /*
176 * BE VERY CAREFUL WITH THESE FIXUPS. TAKE INTO ACCOUNT THAT PROBLEMS MAY ARISE WHEN RESTORING
177 * A SAVED STATE WITH A DIFFERENT HYPERVISOR LAYOUT.
178 */
179 switch (pAsmRecord->aRelocs[i].uType)
180 {
181 /*
182 * PATMGCSTATE member fixups.
183 */
184 case PATM_VMFLAGS:
185 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uVMFlags);
186 break;
187 case PATM_PENDINGACTION:
188 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uPendingAction);
189 break;
190 case PATM_STACKPTR:
191 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Psp);
192 break;
193 case PATM_INTERRUPTFLAG:
194 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, fPIF);
195 break;
196 case PATM_INHIBITIRQADDR:
197 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, GCPtrInhibitInterrupts);
198 break;
199 case PATM_TEMP_EAX:
200 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uEAX);
201 break;
202 case PATM_TEMP_ECX:
203 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uECX);
204 break;
205 case PATM_TEMP_EDI:
206 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uEDI);
207 break;
208 case PATM_TEMP_EFLAGS:
209 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.eFlags);
210 break;
211 case PATM_TEMP_RESTORE_FLAGS:
212 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, Restore.uFlags);
213 break;
214 case PATM_CALL_PATCH_TARGET_ADDR:
215 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, GCCallPatchTargetAddr);
216 break;
217 case PATM_CALL_RETURN_ADDR:
218 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, GCCallReturnAddr);
219 break;
220#ifdef VBOX_WITH_STATISTICS
221 case PATM_ALLPATCHCALLS:
222 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uPatchCalls);
223 break;
224 case PATM_IRETEFLAGS:
225 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uIretEFlags);
226 break;
227 case PATM_IRETCS:
228 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uIretCS);
229 break;
230 case PATM_IRETEIP:
231 dest = pVM->patm.s.pGCStateGC + RT_OFFSETOF(PATMGCSTATE, uIretEIP);
232 break;
233#endif
234
235
236 case PATM_FIXUP:
237 /* Offset in aRelocs[i].uInfo is from the base of the function. */
238 dest = (RTGCUINTPTR32)pVM->patm.s.pPatchMemGC + pAsmRecord->aRelocs[i].uInfo
239 + (RTGCUINTPTR32)(pPB - pVM->patm.s.pPatchMemHC);
240 break;
241
242#ifdef VBOX_WITH_STATISTICS
243 case PATM_PERPATCHCALLS:
244 dest = patmPatchQueryStatAddress(pVM, pPatch);
245 break;
246#endif
247
248 /* The first part of our PATM stack is used to store offsets of patch return addresses; the 2nd
249 * part to store the original return addresses.
250 */
251 case PATM_STACKBASE:
252 dest = pVM->patm.s.pGCStackGC;
253 break;
254
255 case PATM_STACKBASE_GUEST:
256 dest = pVM->patm.s.pGCStackGC + PATM_STACK_SIZE;
257 break;
258
259 case PATM_RETURNADDR: /* absolute guest address; no fixup required */
260 Assert(pCallInfo && pAsmRecord->aRelocs[i].uType >= PATM_NO_FIXUP);
261 dest = pCallInfo->pReturnGC;
262 break;
263
264 case PATM_PATCHNEXTBLOCK: /* relative address of instruction following this block */
265 Assert(pCallInfo && pAsmRecord->aRelocs[i].uType >= PATM_NO_FIXUP);
266
267 /** @note hardcoded assumption that we must return to the instruction following this block */
268 dest = (uintptr_t)pPB - (uintptr_t)pVM->patm.s.pPatchMemHC + pAsmRecord->cbFunction;
269 break;
270
271 case PATM_CALLTARGET: /* relative to patch address; no fixup required */
272 Assert(pCallInfo && pAsmRecord->aRelocs[i].uType >= PATM_NO_FIXUP);
273
274 /* Address must be filled in later. (see patmr3SetBranchTargets) */
275 patmPatchAddJump(pVM, pPatch, &pPB[j-1], 1, pCallInfo->pTargetGC, OP_CALL);
276 dest = PATM_ILLEGAL_DESTINATION;
277 break;
278
279 case PATM_PATCHBASE: /* Patch GC base address */
280 dest = pVM->patm.s.pPatchMemGC;
281 break;
282
283 case PATM_NEXTINSTRADDR:
284 Assert(pCallInfo);
285 /* pNextInstrGC can be 0 if several instructions that inhibit irqs follow each other */
286 dest = pCallInfo->pNextInstrGC;
287 break;
288
289 case PATM_CURINSTRADDR:
290 Assert(pCallInfo);
291 dest = pCallInfo->pCurInstrGC;
292 break;
293
294 /* Relative address of global patm lookup and call function. */
295 case PATM_LOOKUP_AND_CALL_FUNCTION:
296 {
297 RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC
298 + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
299 Assert(pVM->patm.s.pfnHelperCallGC);
300 Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
301
302 /* Relative value is target minus address of instruction after the actual call instruction. */
303 dest = pVM->patm.s.pfnHelperCallGC - pInstrAfterCall;
304 break;
305 }
306
307 case PATM_RETURN_FUNCTION:
308 {
309 RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC
310 + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
311 Assert(pVM->patm.s.pfnHelperRetGC);
312 Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
313
314 /* Relative value is target minus address of instruction after the actual call instruction. */
315 dest = pVM->patm.s.pfnHelperRetGC - pInstrAfterCall;
316 break;
317 }
318
319 case PATM_IRET_FUNCTION:
320 {
321 RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC
322 + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
323 Assert(pVM->patm.s.pfnHelperIretGC);
324 Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
325
326 /* Relative value is target minus address of instruction after the actual call instruction. */
327 dest = pVM->patm.s.pfnHelperIretGC - pInstrAfterCall;
328 break;
329 }
330
331 case PATM_LOOKUP_AND_JUMP_FUNCTION:
332 {
333 RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC
334 + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
335 Assert(pVM->patm.s.pfnHelperJumpGC);
336 Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
337
338 /* Relative value is target minus address of instruction after the actual call instruction. */
339 dest = pVM->patm.s.pfnHelperJumpGC - pInstrAfterCall;
340 break;
341 }
342
343 case PATM_CPUID_STD_MAX: /* saved state only */
344 dest = CPUMR3GetGuestCpuIdPatmStdMax(pVM);
345 break;
346 case PATM_CPUID_EXT_MAX: /* saved state only */
347 dest = CPUMR3GetGuestCpuIdPatmExtMax(pVM);
348 break;
349 case PATM_CPUID_CENTAUR_MAX: /* saved state only */
350 dest = CPUMR3GetGuestCpuIdPatmCentaurMax(pVM);
351 break;
352
353 /*
354 * The following fixups needs to be recalculated when loading saved state
355 * Note! Earlier saved state versions had different hacks for detecting these.
356 */
357 case PATM_VM_FORCEDACTIONS:
358 dest = pVM->pVMRC + RT_OFFSETOF(VM, aCpus[0].fLocalForcedActions);
359 break;
360 case PATM_CPUID_DEF_PTR:
361 dest = CPUMR3GetGuestCpuIdPatmDefRCPtr(pVM);
362 break;
363 case PATM_CPUID_ARRAY_PTR:
364 dest = CPUMR3GetGuestCpuIdPatmArrayRCPtr(pVM);
365 break;
366 case PATM_CPUID_ARRAY_END_PTR:
367 dest = CPUMR3GetGuestCpuIdPatmArrayEndRCPtr(pVM);
368 break;
369 case PATM_CPUID_ARRAY_ENTRY_SIZE:
370 dest = sizeof(CPUMCPUIDLEAF);
371 break;
372 case PATM_CPUID_UNKNOWN_METHOD:
373 dest = CPUMR3GetGuestCpuIdPatmUnknownLeafMethod(pVM);
374 break;
375
376 case PATM_CPUID_STD_PTR: /* saved state only */
377 dest = CPUMR3GetGuestCpuIdPatmStdRCPtr(pVM);
378 break;
379 case PATM_CPUID_EXT_PTR: /* saved state only */
380 dest = CPUMR3GetGuestCpuIdPatmExtRCPtr(pVM);
381 break;
382 case PATM_CPUID_CENTAUR_PTR: /* saved state only */
383 dest = CPUMR3GetGuestCpuIdPatmCentaurRCPtr(pVM);
384 break;
385
386 default:
387 dest = PATM_ILLEGAL_DESTINATION;
388 AssertReleaseFailed();
389 break;
390 }
391
392 *(RTRCPTR *)&pPB[j] = dest;
393 if (pAsmRecord->aRelocs[i].uType < PATM_NO_FIXUP)
394 {
395 patmPatchAddReloc32(pVM, pPatch, &pPB[j], FIXUP_ABSOLUTE_IN_PATCH_ASM_TMPL,
396 pAsmRecord->aRelocs[i].uType /*pSources*/, pAsmRecord->aRelocs[i].uType /*pDest*/);
397 }
398 break;
399 }
400 }
401 Assert(j < pAsmRecord->cbFunction);
402 }
403 Assert(pAsmRecord->aRelocs[i].uInfo == 0xffffffff);
404
405 /* Add the jump back to guest code (if required) */
406 if (fGenJump)
407 {
408 int32_t displ = pReturnAddrGC - (PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset + pAsmRecord->offJump - 1 + SIZEOF_NEARJUMP32);
409
410 /* Add lookup record for patch to guest address translation */
411 Assert(pPB[pAsmRecord->offJump - 1] == 0xE9);
412 patmR3AddP2GLookupRecord(pVM, pPatch, &pPB[pAsmRecord->offJump - 1], pReturnAddrGC, PATM_LOOKUP_PATCH2GUEST);
413
414 *(uint32_t *)&pPB[pAsmRecord->offJump] = displ;
415 patmPatchAddReloc32(pVM, pPatch, &pPB[pAsmRecord->offJump], FIXUP_REL_JMPTOGUEST,
416 PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset + pAsmRecord->offJump - 1 + SIZEOF_NEARJUMP32,
417 pReturnAddrGC);
418 }
419
420 // Calculate the right size of this patch block
421 if ((fGenJump && pAsmRecord->offJump) || (!fGenJump && !pAsmRecord->offJump))
422 return pAsmRecord->cbFunction;
423 // if a jump instruction is present and we don't want one, then subtract SIZEOF_NEARJUMP32
424 return pAsmRecord->cbFunction - SIZEOF_NEARJUMP32;
425}
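/* Editor's note: the fixup loop above scans the copied template for each
 * relocation's 32-bit magic marker (the aRelocs[i].uType values stamped into
 * the assembly templates in PATMA.asm) and overwrites it in place. Because j
 * is deliberately not reset between relocations, aRelocs[] must be ordered by
 * ascending offset within the template. A standalone sketch of the scan,
 * using hypothetical plain-C types and memcmp/memcpy from <string.h>:
 *
 *     static void resolveMarkers(uint8_t *pb, uint32_t cb,
 *                                const uint32_t *paMarker, const uint32_t *paValue,
 *                                uint32_t cMarkers)
 *     {
 *         uint32_t j = 0;                          // not reset: markers are ordered
 *         for (uint32_t i = 0; i < cMarkers; i++)
 *             for (; j < cb; j++)
 *                 if (memcmp(&pb[j], &paMarker[i], sizeof(uint32_t)) == 0)
 *                 {
 *                     memcpy(&pb[j], &paValue[i], sizeof(uint32_t));
 *                     break;
 *                 }
 *     }
 */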
426
427/* Read bytes and check for overwritten instructions. */
428static int patmPatchReadBytes(PVM pVM, uint8_t *pDest, RTRCPTR pSrc, uint32_t cb)
429{
430 int rc = PGMPhysSimpleReadGCPtr(&pVM->aCpus[0], pDest, pSrc, cb);
431 AssertRCReturn(rc, rc);
432 /*
433 * Could be patched already; make sure this is checked!
434 */
435 for (uint32_t i=0;i<cb;i++)
436 {
437 uint8_t temp;
438
439 int rc2 = PATMR3QueryOpcode(pVM, pSrc+i, &temp);
440 if (RT_SUCCESS(rc2))
441 {
442 pDest[i] = temp;
443 }
444 else
445 break; /* no more */
446 }
447 return VINF_SUCCESS;
448}
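/* Editor's note: the override loop above matters because the guest bytes may
 * already contain a PATM patch jump; PATMR3QueryOpcode returns the original
 * opcode byte for patched locations, so duplicated code is built from the
 * unpatched instruction stream. Illustrative use (cf. patmPatchGenDuplicate):
 *
 *     uint8_t abInstr[16];
 *     int rc = patmPatchReadBytes(pVM, &abInstr[0], pCurInstrGC, pCpu->cbInstr);
 */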
449
450int patmPatchGenDuplicate(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pCurInstrGC)
451{
452 uint32_t const cbInstrShutUpGcc = pCpu->cbInstr;
453 PATCHGEN_PROLOG(pVM, pPatch, cbInstrShutUpGcc);
454
455 int rc = patmPatchReadBytes(pVM, pPB, pCurInstrGC, cbInstrShutUpGcc);
456 AssertRC(rc);
457 PATCHGEN_EPILOG(pPatch, cbInstrShutUpGcc);
458 return rc;
459}
460
461int patmPatchGenIret(PVM pVM, PPATCHINFO pPatch, RTRCPTR pCurInstrGC, bool fSizeOverride)
462{
463 uint32_t size;
464 PATMCALLINFO callInfo;
465 PCPATCHASMRECORD pPatchAsmRec = EMIsRawRing1Enabled(pVM) ? &g_patmIretRing1Record : &g_patmIretRecord;
466
467 PATCHGEN_PROLOG(pVM, pPatch, pPatchAsmRec->cbFunction);
468
469 AssertMsg(fSizeOverride == false, ("operand size override!!\n"));
470 callInfo.pCurInstrGC = pCurInstrGC;
471
472 size = patmPatchGenCode(pVM, pPatch, pPB, pPatchAsmRec, 0, false, &callInfo);
473
474 PATCHGEN_EPILOG(pPatch, size);
475 return VINF_SUCCESS;
476}
477
478int patmPatchGenCli(PVM pVM, PPATCHINFO pPatch)
479{
480 uint32_t size;
481 PATCHGEN_PROLOG(pVM, pPatch, g_patmCliRecord.cbFunction);
482
483 size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmCliRecord, 0, false);
484
485 PATCHGEN_EPILOG(pPatch, size);
486 return VINF_SUCCESS;
487}
488
489/*
490 * Generate an STI patch
491 */
492int patmPatchGenSti(PVM pVM, PPATCHINFO pPatch, RTRCPTR pCurInstrGC, RTRCPTR pNextInstrGC)
493{
494 PATMCALLINFO callInfo;
495 uint32_t size;
496
497 Log(("patmPatchGenSti at %RRv; next %RRv\n", pCurInstrGC, pNextInstrGC));
498 PATCHGEN_PROLOG(pVM, pPatch, g_patmStiRecord.cbFunction);
499 callInfo.pNextInstrGC = pNextInstrGC;
500 size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmStiRecord, 0, false, &callInfo);
501 PATCHGEN_EPILOG(pPatch, size);
502
503 return VINF_SUCCESS;
504}
505
506
507int patmPatchGenPopf(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pReturnAddrGC, bool fSizeOverride, bool fGenJumpBack)
508{
509 uint32_t size;
510 PATMCALLINFO callInfo;
511 PCPATCHASMRECORD pPatchAsmRec;
512 if (fSizeOverride == true)
513 pPatchAsmRec = fGenJumpBack ? &g_patmPopf16Record : &g_patmPopf16Record_NoExit;
514 else
515 pPatchAsmRec = fGenJumpBack ? &g_patmPopf32Record : &g_patmPopf32Record_NoExit;
516
517 PATCHGEN_PROLOG(pVM, pPatch, pPatchAsmRec->cbFunction);
518
519 callInfo.pNextInstrGC = pReturnAddrGC;
520
521 Log(("patmPatchGenPopf at %RRv\n", pReturnAddrGC));
522
523 /* Note: keep IOPL in mind when changing any of this!! (see comments in PATMA.asm, PATMPopf32Replacement) */
524 if (fSizeOverride == true)
525 Log(("operand size override!!\n"));
526 size = patmPatchGenCode(pVM, pPatch, pPB, pPatchAsmRec, pReturnAddrGC, fGenJumpBack, &callInfo);
527
528 PATCHGEN_EPILOG(pPatch, size);
529 STAM_COUNTER_INC(&pVM->patm.s.StatGenPopf);
530 return VINF_SUCCESS;
531}
532
533int patmPatchGenPushf(PVM pVM, PPATCHINFO pPatch, bool fSizeOverride)
534{
535 uint32_t size;
536 PCPATCHASMRECORD pPatchAsmRec = fSizeOverride == true ? &g_patmPushf16Record : &g_patmPushf32Record;
537 PATCHGEN_PROLOG(pVM, pPatch, pPatchAsmRec->cbFunction);
538
539 size = patmPatchGenCode(pVM, pPatch, pPB, pPatchAsmRec, 0, false);
540
541 PATCHGEN_EPILOG(pPatch, size);
542 return VINF_SUCCESS;
543}
544
545int patmPatchGenPushCS(PVM pVM, PPATCHINFO pPatch)
546{
547 uint32_t size;
548 PATCHGEN_PROLOG(pVM, pPatch, g_patmPushCSRecord.cbFunction);
549 size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmPushCSRecord, 0, false);
550 PATCHGEN_EPILOG(pPatch, size);
551 return VINF_SUCCESS;
552}
553
554int patmPatchGenLoop(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pTargetGC, uint32_t opcode, bool fSizeOverride)
555{
556 uint32_t size = 0;
557 PCPATCHASMRECORD pPatchAsmRec;
558 switch (opcode)
559 {
560 case OP_LOOP:
561 pPatchAsmRec = &g_patmLoopRecord;
562 break;
563 case OP_LOOPNE:
564 pPatchAsmRec = &g_patmLoopNZRecord;
565 break;
566 case OP_LOOPE:
567 pPatchAsmRec = &g_patmLoopZRecord;
568 break;
569 case OP_JECXZ:
570 pPatchAsmRec = &g_patmJEcxRecord;
571 break;
572 default:
573 AssertMsgFailed(("PatchGenLoop: invalid opcode %d\n", opcode));
574 return VERR_INVALID_PARAMETER;
575 }
576 Assert(pPatchAsmRec->offSizeOverride && pPatchAsmRec->offRelJump);
577
578 PATCHGEN_PROLOG(pVM, pPatch, pPatchAsmRec->cbFunction);
579 Log(("PatchGenLoop %d jump %d to %08x offrel=%d\n", opcode, pPatch->nrJumpRecs, pTargetGC, pPatchAsmRec->offRelJump));
580
581 // Generate the patch code
582 size = patmPatchGenCode(pVM, pPatch, pPB, pPatchAsmRec, 0, false);
583
584 if (fSizeOverride)
585 {
586 pPB[pPatchAsmRec->offSizeOverride] = 0x66; // ecx -> cx or vice versa
587 }
588
589 *(RTRCPTR *)&pPB[pPatchAsmRec->offRelJump] = 0xDEADBEEF;
590
591 patmPatchAddJump(pVM, pPatch, &pPB[pPatchAsmRec->offRelJump - 1], 1, pTargetGC, opcode);
592
593 PATCHGEN_EPILOG(pPatch, size);
594 return VINF_SUCCESS;
595}
596
597int patmPatchGenRelJump(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pTargetGC, uint32_t opcode, bool fSizeOverride)
598{
599 uint32_t offset = 0;
600 PATCHGEN_PROLOG(pVM, pPatch, PATCHGEN_DEF_SIZE);
601
602 // internal relative jumps from patch code to patch code; no relocation record required
603
604 Assert(PATMIsPatchGCAddr(pVM, pTargetGC) == false);
605
606 switch (opcode)
607 {
608 case OP_JO:
609 pPB[1] = 0x80;
610 break;
611 case OP_JNO:
612 pPB[1] = 0x81;
613 break;
614 case OP_JC:
615 pPB[1] = 0x82;
616 break;
617 case OP_JNC:
618 pPB[1] = 0x83;
619 break;
620 case OP_JE:
621 pPB[1] = 0x84;
622 break;
623 case OP_JNE:
624 pPB[1] = 0x85;
625 break;
626 case OP_JBE:
627 pPB[1] = 0x86;
628 break;
629 case OP_JNBE:
630 pPB[1] = 0x87;
631 break;
632 case OP_JS:
633 pPB[1] = 0x88;
634 break;
635 case OP_JNS:
636 pPB[1] = 0x89;
637 break;
638 case OP_JP:
639 pPB[1] = 0x8A;
640 break;
641 case OP_JNP:
642 pPB[1] = 0x8B;
643 break;
644 case OP_JL:
645 pPB[1] = 0x8C;
646 break;
647 case OP_JNL:
648 pPB[1] = 0x8D;
649 break;
650 case OP_JLE:
651 pPB[1] = 0x8E;
652 break;
653 case OP_JNLE:
654 pPB[1] = 0x8F;
655 break;
656
657 case OP_JMP:
658 /* If interrupted here, then jump to the target instruction. Used by PATM.cpp for jumping to known instructions. */
659 /* Add lookup record for patch to guest address translation */
660 patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pTargetGC, PATM_LOOKUP_PATCH2GUEST);
661
662 pPB[0] = 0xE9;
663 break;
664
665 case OP_JECXZ:
666 case OP_LOOP:
667 case OP_LOOPNE:
668 case OP_LOOPE:
669 return patmPatchGenLoop(pVM, pPatch, pTargetGC, opcode, fSizeOverride);
670
671 default:
672 AssertMsg(0, ("Invalid jump opcode %d\n", opcode));
673 return VERR_PATCHING_REFUSED;
674 }
675 if (opcode != OP_JMP)
676 {
677 pPB[0] = 0xF;
678 offset += 2;
679 }
680 else offset++;
681
682 *(RTRCPTR *)&pPB[offset] = 0xDEADBEEF;
683
684 patmPatchAddJump(pVM, pPatch, pPB, offset, pTargetGC, opcode);
685
686 offset += sizeof(RTRCPTR);
687
688 PATCHGEN_EPILOG(pPatch, offset);
689 return VINF_SUCCESS;
690}
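/* Editor's note: the emitted bytes are the 32-bit near forms: E9 rel32 for
 * jmp, and 0F 8x rel32 for conditional jumps (the 0x8x condition bytes set
 * above). The rel32 slot is left as the 0xDEADBEEF placeholder here and is
 * resolved later from the jump records (see patmr3SetBranchTargets). The
 * displacement rule used throughout this file, as a sketch:
 *
 *     // rel32 = target - address of the byte AFTER the jump instruction
 *     int32_t displ = (int32_t)(uAddrTarget - (uAddrJump + cbJumpInstr));
 *     // cbJumpInstr is 5 for E9 rel32 and 6 for 0F 8x rel32
 */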
691
692/*
693 * Rewrite call to dynamic or currently unknown function (on-demand patching of function)
694 */
695int patmPatchGenCall(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RTRCPTR pCurInstrGC, RTRCPTR pTargetGC, bool fIndirect)
696{
697 PATMCALLINFO callInfo;
698 uint32_t offset;
699 uint32_t i, size;
700 int rc;
701
702 /** @note Don't check for IF=1 here. The ret instruction will do this. */
703 /** @note It's dangerous to do this for 'normal' patches; the jump target might be inside the generated patch jump (seen this!). */
704
705 /* 1: Clear PATM interrupt flag on entry. */
706 rc = patmPatchGenClearPIF(pVM, pPatch, pCurInstrGC);
707 if (rc == VERR_NO_MEMORY)
708 return rc;
709 AssertRCReturn(rc, rc);
710
711 PATCHGEN_PROLOG(pVM, pPatch, PATCHGEN_DEF_SIZE);
712 /* 2: We must push the target address onto the stack before appending the indirect call code. */
713
714 if (fIndirect)
715 {
716 Log(("patmPatchGenIndirectCall\n"));
717 Assert(pCpu->Param1.cb == 4);
718 Assert(OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) != OP_PARM_J);
719
720 /* We push it onto the stack here, so the guest's context isn't ruined when this happens to cause
721 * a page fault. The assembly code restores the stack afterwards.
722 */
723 offset = 0;
724 /* include prefix byte to make sure we don't use the incorrect selector register. */
725 if (pCpu->fPrefix & DISPREFIX_SEG)
726 pPB[offset++] = DISQuerySegPrefixByte(pCpu);
727 pPB[offset++] = 0xFF; // push r/m32
728 pPB[offset++] = MAKE_MODRM(pCpu->ModRM.Bits.Mod, 6 /* group 5 */, pCpu->ModRM.Bits.Rm);
729 i = 2; /* standard offset of modrm bytes */
730 if (pCpu->fPrefix & DISPREFIX_OPSIZE)
731 i++; //skip operand prefix
732 if (pCpu->fPrefix & DISPREFIX_SEG)
733 i++; //skip segment prefix
734
735 rc = patmPatchReadBytes(pVM, &pPB[offset], (RTRCPTR)((RTGCUINTPTR32)pCurInstrGC + i), pCpu->cbInstr - i);
736 AssertRCReturn(rc, rc);
737 offset += (pCpu->cbInstr - i);
738 }
739 else
740 {
741 AssertMsg(PATMIsPatchGCAddr(pVM, pTargetGC) == false, ("Target is already a patch address (%RRv)?!?\n", pTargetGC));
742 Assert(pTargetGC);
743 Assert(OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) == OP_PARM_J);
744
745 /** @todo wasting memory as the complex search is overkill and we need only one lookup slot... */
746
747 /* Relative call to patch code (patch to patch -> no fixup). */
748 Log(("PatchGenCall from %RRv (next=%RRv) to %RRv\n", pCurInstrGC, pCurInstrGC + pCpu->cbInstr, pTargetGC));
749
750 /* We push it onto the stack here, so the guest's context isn't ruined when this happens to cause
751 * a page fault. The assembly code restores the stack afterwards.
752 */
753 offset = 0;
754 pPB[offset++] = 0x68; // push %Iv
755 *(RTRCPTR *)&pPB[offset] = pTargetGC;
756 offset += sizeof(RTRCPTR);
757 }
758
759 /* align this block properly to make sure the jump table will not be misaligned. */
760 size = (RTHCUINTPTR)&pPB[offset] & 3;
761 if (size)
762 size = 4 - size;
763
764 for (i=0;i<size;i++)
765 {
766 pPB[offset++] = 0x90; /* nop */
767 }
768 PATCHGEN_EPILOG(pPatch, offset);
769
770 /* 3: Generate code to lookup address in our local cache; call hypervisor PATM code if it can't be located. */
771 PCPATCHASMRECORD pPatchAsmRec = fIndirect ? &g_patmCallIndirectRecord : &g_patmCallRecord;
772 PATCHGEN_PROLOG_NODEF(pVM, pPatch, pPatchAsmRec->cbFunction);
773 callInfo.pReturnGC = pCurInstrGC + pCpu->cbInstr;
774 callInfo.pTargetGC = (fIndirect) ? 0xDEADBEEF : pTargetGC;
775 size = patmPatchGenCode(pVM, pPatch, pPB, pPatchAsmRec, 0, false, &callInfo);
776 PATCHGEN_EPILOG(pPatch, size);
777
778 /* Need to set PATM_INTERRUPTFLAG after the patched ret returns here. */
779 rc = patmPatchGenSetPIF(pVM, pPatch, pCurInstrGC);
780 if (rc == VERR_NO_MEMORY)
781 return rc;
782 AssertRCReturn(rc, rc);
783
784 STAM_COUNTER_INC(&pVM->patm.s.StatGenCall);
785 return VINF_SUCCESS;
786}
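/* Editor's note: the nop padding in step 2 above keeps the code that follows
 * 4-byte aligned so the jump table used by the call templates is not
 * misaligned. An equivalent compact form of the padding logic, as a sketch:
 *
 *     uint32_t cPad = (4 - ((RTHCUINTPTR)&pPB[offset] & 3)) & 3;
 *     while (cPad--)
 *         pPB[offset++] = 0x90;  // nop
 */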
787
788/**
789 * Generate indirect jump to unknown destination
790 *
791 * @returns VBox status code.
792 * @param pVM Pointer to the VM.
793 * @param pPatch Patch record
794 * @param pCpu Disassembly state
795 * @param pCurInstrGC Current instruction address
796 */
797int patmPatchGenJump(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RTRCPTR pCurInstrGC)
798{
799 PATMCALLINFO callInfo;
800 uint32_t offset;
801 uint32_t i, size;
802 int rc;
803
804 /* 1: Clear PATM interrupt flag on entry. */
805 rc = patmPatchGenClearPIF(pVM, pPatch, pCurInstrGC);
806 if (rc == VERR_NO_MEMORY)
807 return rc;
808 AssertRCReturn(rc, rc);
809
810 PATCHGEN_PROLOG(pVM, pPatch, PATCHGEN_DEF_SIZE);
811 /* 2: We must push the target address onto the stack before appending the indirect call code. */
812
813 Log(("patmPatchGenIndirectJump\n"));
814 Assert(pCpu->Param1.cb == 4);
815 Assert(OP_PARM_VTYPE(pCpu->pCurInstr->fParam1) != OP_PARM_J);
816
817 /* We push it onto the stack here, so the guest's context isn't ruined when this happens to cause
818 * a page fault. The assembly code restores the stack afterwards.
819 */
820 offset = 0;
821 /* include prefix byte to make sure we don't use the incorrect selector register. */
822 if (pCpu->fPrefix & DISPREFIX_SEG)
823 pPB[offset++] = DISQuerySegPrefixByte(pCpu);
824
825 pPB[offset++] = 0xFF; // push r/m32
826 pPB[offset++] = MAKE_MODRM(pCpu->ModRM.Bits.Mod, 6 /* group 5 */, pCpu->ModRM.Bits.Rm);
827 i = 2; /* standard offset of modrm bytes */
828 if (pCpu->fPrefix & DISPREFIX_OPSIZE)
829 i++; //skip operand prefix
830 if (pCpu->fPrefix & DISPREFIX_SEG)
831 i++; //skip segment prefix
832
833 rc = patmPatchReadBytes(pVM, &pPB[offset], (RTRCPTR)((RTGCUINTPTR32)pCurInstrGC + i), pCpu->cbInstr - i);
834 AssertRCReturn(rc, rc);
835 offset += (pCpu->cbInstr - i);
836
837 /* align this block properly to make sure the jump table will not be misaligned. */
838 size = (RTHCUINTPTR)&pPB[offset] & 3;
839 if (size)
840 size = 4 - size;
841
842 for (i=0;i<size;i++)
843 {
844 pPB[offset++] = 0x90; /* nop */
845 }
846 PATCHGEN_EPILOG(pPatch, offset);
847
848 /* 3: Generate code to lookup address in our local cache; call hypervisor PATM code if it can't be located. */
849 PATCHGEN_PROLOG_NODEF(pVM, pPatch, g_patmJumpIndirectRecord.cbFunction);
850 callInfo.pReturnGC = pCurInstrGC + pCpu->cbInstr;
851 callInfo.pTargetGC = 0xDEADBEEF;
852 size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmJumpIndirectRecord, 0, false, &callInfo);
853 PATCHGEN_EPILOG(pPatch, size);
854
855 STAM_COUNTER_INC(&pVM->patm.s.StatGenJump);
856 return VINF_SUCCESS;
857}
858
859/**
860 * Generate return instruction
861 *
862 * @returns VBox status code.
863 * @param pVM Pointer to the VM.
864 * @param pPatch Patch structure
865 * @param pCpu Disassembly struct
866 * @param pCurInstrGC Current instruction pointer
867 *
868 */
869int patmPatchGenRet(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pCurInstrGC)
870{
871 RTRCPTR pPatchRetInstrGC;
872
873 /* Remember start of this patch for below. */
874 pPatchRetInstrGC = PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset;
875
876 Log(("patmPatchGenRet %RRv\n", pCurInstrGC));
877
878 /** @note optimization: multiple identical ret instructions in a single patch can share a single patched ret. */
879 if ( pPatch->pTempInfo->pPatchRetInstrGC
880 && pPatch->pTempInfo->uPatchRetParam1 == (uint32_t)pCpu->Param1.uValue) /* nr of bytes popped off the stack should be identical of course! */
881 {
882 Assert(pCpu->pCurInstr->uOpcode == OP_RETN);
883 STAM_COUNTER_INC(&pVM->patm.s.StatGenRetReused);
884
885 return patmPatchGenPatchJump(pVM, pPatch, pCurInstrGC, pPatch->pTempInfo->pPatchRetInstrGC);
886 }
887
888 /* Jump back to the original instruction if IF is set again. */
889 Assert(!patmFindActivePatchByEntrypoint(pVM, pCurInstrGC));
890 int rc = patmPatchGenCheckIF(pVM, pPatch, pCurInstrGC);
891 AssertRCReturn(rc, rc);
892
893 /* align this block properly to make sure the jump table will not be misaligned. */
894 PATCHGEN_PROLOG(pVM, pPatch, 4);
895 uint32_t size = (RTHCUINTPTR)pPB & 3;
896 if (size)
897 size = 4 - size;
898
899 for (uint32_t i = 0; i < size; i++)
900 pPB[i] = 0x90; /* nop */
901 PATCHGEN_EPILOG(pPatch, size);
902
903 PATCHGEN_PROLOG_NODEF(pVM, pPatch, g_patmRetRecord.cbFunction);
904 size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmRetRecord, 0, false);
905 PATCHGEN_EPILOG(pPatch, size);
906
907 STAM_COUNTER_INC(&pVM->patm.s.StatGenRet);
908 /* Duplicate the ret or ret n instruction; it will use the PATM return address */
909 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
910
911 if (rc == VINF_SUCCESS)
912 {
913 pPatch->pTempInfo->pPatchRetInstrGC = pPatchRetInstrGC;
914 pPatch->pTempInfo->uPatchRetParam1 = pCpu->Param1.uValue;
915 }
916 return rc;
917}
918
919/**
920 * Generate all global patm functions
921 *
922 * @returns VBox status code.
923 * @param pVM Pointer to the VM.
924 * @param pPatch Patch structure
925 *
926 */
927int patmPatchGenGlobalFunctions(PVM pVM, PPATCHINFO pPatch)
928{
929 pVM->patm.s.pfnHelperCallGC = PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset;
930 PATCHGEN_PROLOG(pVM, pPatch, g_patmLookupAndCallRecord.cbFunction);
931 uint32_t size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmLookupAndCallRecord, 0, false);
932 PATCHGEN_EPILOG(pPatch, size);
933
934 /* Round to next 8 byte boundary. */
935 pPatch->uCurPatchOffset = RT_ALIGN_32(pPatch->uCurPatchOffset, 8);
936
937 pVM->patm.s.pfnHelperRetGC = PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset;
938 PATCHGEN_PROLOG_NODEF(pVM, pPatch, g_patmRetFunctionRecord.cbFunction);
939 size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmRetFunctionRecord, 0, false);
940 PATCHGEN_EPILOG(pPatch, size);
941
942 /* Round to next 8 byte boundary. */
943 pPatch->uCurPatchOffset = RT_ALIGN_32(pPatch->uCurPatchOffset, 8);
944
945 pVM->patm.s.pfnHelperJumpGC = PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset;
946 PATCHGEN_PROLOG_NODEF(pVM, pPatch, g_patmLookupAndJumpRecord.cbFunction);
947 size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmLookupAndJumpRecord, 0, false);
948 PATCHGEN_EPILOG(pPatch, size);
949
950 /* Round to next 8 byte boundary. */
951 pPatch->uCurPatchOffset = RT_ALIGN_32(pPatch->uCurPatchOffset, 8);
952
953 pVM->patm.s.pfnHelperIretGC = PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset;
954 PATCHGEN_PROLOG_NODEF(pVM, pPatch, g_patmIretFunctionRecord.cbFunction);
955 size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmIretFunctionRecord, 0, false);
956 PATCHGEN_EPILOG(pPatch, size);
957
958 Log(("pfnHelperCallGC %RRv\n", pVM->patm.s.pfnHelperCallGC));
959 Log(("pfnHelperRetGC %RRv\n", pVM->patm.s.pfnHelperRetGC));
960 Log(("pfnHelperJumpGC %RRv\n", pVM->patm.s.pfnHelperJumpGC));
961 Log(("pfnHelperIretGC %RRv\n", pVM->patm.s.pfnHelperIretGC));
962
963 return VINF_SUCCESS;
964}
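/* Editor's note: the helpers above are separated on 8-byte boundaries purely
 * for alignment; RT_ALIGN_32(x, 8) rounds x up to the next multiple of 8,
 * e.g. (illustrative compile-time checks):
 *
 *     AssertCompile(RT_ALIGN_32(13, 8) == 16);
 *     AssertCompile(RT_ALIGN_32(16, 8) == 16);
 */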
965
966/**
967 * Generate illegal instruction (int 3)
968 *
969 * @returns VBox status code.
970 * @param pVM Pointer to the VM.
971 * @param pPatch Patch structure
972 *
973 */
974int patmPatchGenIllegalInstr(PVM pVM, PPATCHINFO pPatch)
975{
976 PATCHGEN_PROLOG(pVM, pPatch, 1);
977
978 pPB[0] = 0xCC;
979
980 PATCHGEN_EPILOG(pPatch, 1);
981 return VINF_SUCCESS;
982}
983
984/**
985 * Check virtual IF flag and jump back to original guest code if set
986 *
987 * @returns VBox status code.
988 * @param pVM Pointer to the VM.
989 * @param pPatch Patch structure
990 * @param pCurInstrGC Guest context pointer to the current instruction
991 *
992 */
993int patmPatchGenCheckIF(PVM pVM, PPATCHINFO pPatch, RTRCPTR pCurInstrGC)
994{
995 uint32_t size;
996
997 PATCHGEN_PROLOG(pVM, pPatch, g_patmCheckIFRecord.cbFunction);
998
999 /* Add lookup record for patch to guest address translation */
1000 patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pCurInstrGC, PATM_LOOKUP_PATCH2GUEST);
1001
1002 /* Generate code to check for IF=1 before executing the call to the duplicated function. */
1003 size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmCheckIFRecord, pCurInstrGC, true);
1004
1005 PATCHGEN_EPILOG(pPatch, size);
1006 return VINF_SUCCESS;
1007}
1008
1009/**
1010 * Set PATM interrupt flag
1011 *
1012 * @returns VBox status code.
1013 * @param pVM Pointer to the VM.
1014 * @param pPatch Patch structure
1015 * @param pInstrGC Corresponding guest instruction
1016 *
1017 */
1018int patmPatchGenSetPIF(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1019{
1020 PATCHGEN_PROLOG(pVM, pPatch, g_patmSetPIFRecord.cbFunction);
1021
1022 /* Add lookup record for patch to guest address translation */
1023 patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pInstrGC, PATM_LOOKUP_PATCH2GUEST);
1024
1025 uint32_t size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmSetPIFRecord, 0, false);
1026 PATCHGEN_EPILOG(pPatch, size);
1027 return VINF_SUCCESS;
1028}
1029
1030/**
1031 * Clear PATM interrupt flag
1032 *
1033 * @returns VBox status code.
1034 * @param pVM Pointer to the VM.
1035 * @param pPatch Patch structure
1036 * @param pInstrGC Corresponding guest instruction
1037 *
1038 */
1039int patmPatchGenClearPIF(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1040{
1041 PATCHGEN_PROLOG(pVM, pPatch, g_patmSetPIFRecord.cbFunction);
1042
1043 /* Add lookup record for patch to guest address translation */
1044 patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pInstrGC, PATM_LOOKUP_PATCH2GUEST);
1045
1046 uint32_t size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmClearPIFRecord, 0, false);
1047 PATCHGEN_EPILOG(pPatch, size);
1048 return VINF_SUCCESS;
1049}
1050
1051
1052/**
1053 * Clear PATM inhibit irq flag
1054 *
1055 * @returns VBox status code.
1056 * @param pVM Pointer to the VM.
1057 * @param pPatch Patch structure
1058 * @param pNextInstrGC Next guest instruction
1059 */
1060int patmPatchGenClearInhibitIRQ(PVM pVM, PPATCHINFO pPatch, RTRCPTR pNextInstrGC)
1061{
1062 PATMCALLINFO callInfo;
1063 PCPATCHASMRECORD pPatchAsmRec = pPatch->flags & PATMFL_DUPLICATE_FUNCTION
1064 ? &g_patmClearInhibitIRQContIF0Record : &g_patmClearInhibitIRQFaultIF0Record;
1065 PATCHGEN_PROLOG(pVM, pPatch, pPatchAsmRec->cbFunction);
1066
1067 Assert((pPatch->flags & (PATMFL_GENERATE_JUMPTOGUEST|PATMFL_DUPLICATE_FUNCTION)) != (PATMFL_GENERATE_JUMPTOGUEST|PATMFL_DUPLICATE_FUNCTION));
1068
1069 /* Add lookup record for patch to guest address translation */
1070 patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pNextInstrGC, PATM_LOOKUP_PATCH2GUEST);
1071
1072 callInfo.pNextInstrGC = pNextInstrGC;
1073
1074 uint32_t size = patmPatchGenCode(pVM, pPatch, pPB, pPatchAsmRec, 0, false, &callInfo);
1075
1076 PATCHGEN_EPILOG(pPatch, size);
1077 return VINF_SUCCESS;
1078}
1079
1080/**
1081 * Generate an interrupt handler entrypoint
1082 *
1083 * @returns VBox status code.
1084 * @param pVM Pointer to the VM.
1085 * @param pPatch Patch record
1086 * @param pIntHandlerGC IDT handler address
1087 *
1088 ** @todo must check if virtual IF is already cleared on entry!!!!!!!!!!!!!!!!!!!!!!!
1089 */
1090int patmPatchGenIntEntry(PVM pVM, PPATCHINFO pPatch, RTRCPTR pIntHandlerGC)
1091{
1092 int rc = VINF_SUCCESS;
1093
1094 if (!EMIsRawRing1Enabled(pVM)) /* direct passthru of interrupts is not allowed in the ring-1 support case as we can't
1095 deal with the ring-1/2 ambiguity in the patm asm code and we don't need it either as
1096 TRPMForwardTrap takes care of the details. */
1097 {
1098 uint32_t size;
1099 PCPATCHASMRECORD pPatchAsmRec = pPatch->flags & PATMFL_INTHANDLER_WITH_ERRORCODE
1100 ? &g_patmIntEntryRecordErrorCode : &g_patmIntEntryRecord;
1101 PATCHGEN_PROLOG(pVM, pPatch, pPatchAsmRec->cbFunction);
1102
1103 /* Add lookup record for patch to guest address translation */
1104 patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pIntHandlerGC, PATM_LOOKUP_PATCH2GUEST);
1105
1106 /* Generate entrypoint for the interrupt handler (correcting CS in the interrupt stack frame) */
1107 size = patmPatchGenCode(pVM, pPatch, pPB, pPatchAsmRec, 0, false);
1108
1109 PATCHGEN_EPILOG(pPatch, size);
1110 }
1111
1112 // Interrupt gates set IF to 0
1113 rc = patmPatchGenCli(pVM, pPatch);
1114 AssertRCReturn(rc, rc);
1115
1116 return rc;
1117}
1118
1119/**
1120 * Generate a trap handler entrypoint
1121 *
1122 * @returns VBox status code.
1123 * @param pVM Pointer to the VM.
1124 * @param pPatch Patch record
1125 * @param pTrapHandlerGC IDT handler address
1126 */
1127int patmPatchGenTrapEntry(PVM pVM, PPATCHINFO pPatch, RTRCPTR pTrapHandlerGC)
1128{
1129 uint32_t size;
1130 PCPATCHASMRECORD pPatchAsmRec = (pPatch->flags & PATMFL_TRAPHANDLER_WITH_ERRORCODE)
1131 ? &g_patmTrapEntryRecordErrorCode : &g_patmTrapEntryRecord;
1132
1133 Assert(!EMIsRawRing1Enabled(pVM));
1134
1135 PATCHGEN_PROLOG(pVM, pPatch, pPatchAsmRec->cbFunction);
1136
1137 /* Add lookup record for patch to guest address translation */
1138 patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pTrapHandlerGC, PATM_LOOKUP_PATCH2GUEST);
1139
1140 /* Generate entrypoint for the trap handler (correcting CS in the interrupt stack frame) */
1141 size = patmPatchGenCode(pVM, pPatch, pPB, pPatchAsmRec, pTrapHandlerGC, true);
1142 PATCHGEN_EPILOG(pPatch, size);
1143
1144 return VINF_SUCCESS;
1145}
1146
1147#ifdef VBOX_WITH_STATISTICS
1148int patmPatchGenStats(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1149{
1150 uint32_t size;
1151
1152 PATCHGEN_PROLOG(pVM, pPatch, g_patmStatsRecord.cbFunction);
1153
1154 /* Add lookup record for stats code -> guest handler. */
1155 patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pInstrGC, PATM_LOOKUP_PATCH2GUEST);
1156
1157 /* Generate code to keep calling statistics for this patch */
1158 size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmStatsRecord, pInstrGC, false);
1159 PATCHGEN_EPILOG(pPatch, size);
1160
1161 return VINF_SUCCESS;
1162}
1163#endif
1164
1165/**
1166 * Debug register moves to or from general purpose registers
1167 * mov GPR, DRx
1168 * mov DRx, GPR
1169 *
1170 * @todo: if we ever want to support hardware debug registers natively, then
1171 * this will need to be changed!
1172 */
1173int patmPatchGenMovDebug(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu)
1174{
1175 int rc = VINF_SUCCESS;
1176 unsigned reg, mod, rm, dbgreg;
1177 uint32_t offset;
1178
1179 PATCHGEN_PROLOG(pVM, pPatch, PATCHGEN_DEF_SIZE);
1180
1181 mod = 0; //effective address (only)
1182 rm = 5; //disp32
1183 if (pCpu->pCurInstr->fParam1 == OP_PARM_Dd)
1184 {
1185 Assert(0); // You should not come here. Illegal!
1186
1187 // mov DRx, GPR
1188 pPB[0] = 0x89; //mov disp32, GPR
1189 Assert(pCpu->Param1.fUse & DISUSE_REG_DBG);
1190 Assert(pCpu->Param2.fUse & DISUSE_REG_GEN32);
1191
1192 dbgreg = pCpu->Param1.Base.idxDbgReg;
1193 reg = pCpu->Param2.Base.idxGenReg;
1194 }
1195 else
1196 {
1197 // mov GPR, DRx
1198 Assert(pCpu->Param1.fUse & DISUSE_REG_GEN32);
1199 Assert(pCpu->Param2.fUse & DISUSE_REG_DBG);
1200
1201 pPB[0] = 0x8B; // mov GPR, disp32
1202 reg = pCpu->Param1.Base.idxGenReg;
1203 dbgreg = pCpu->Param2.Base.idxDbgReg;
1204 }
1205
1206 pPB[1] = MAKE_MODRM(mod, reg, rm);
1207
1208 AssertReturn(dbgreg <= DISDREG_DR7, VERR_INVALID_PARAMETER);
1209 offset = RT_OFFSETOF(CPUMCTX, dr[dbgreg]);
1210
1211 *(RTRCPTR *)&pPB[2] = pVM->patm.s.pCPUMCtxGC + offset;
1212 patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_ABSOLUTE);
1213
1214 PATCHGEN_EPILOG(pPatch, 2 + sizeof(RTRCPTR));
1215 return rc;
1216}
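/* Editor's note: the rewrite above turns a privileged register move into a
 * plain memory access against the shared CPUM context. MAKE_MODRM(0, reg, 5)
 * selects the [disp32] addressing form, so e.g. a guest "mov eax, dr7"
 * becomes, illustratively:
 *
 *     8B 05 xx xx xx xx    mov eax, dword ptr [pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, dr[7])]
 *
 * with the xx bytes registered as a FIXUP_ABSOLUTE relocation.
 */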
1217
1218/*
1219 * Control register moves to or from general purpose registers
1220 * mov GPR, CRx
1221 * mov CRx, GPR
1222 */
1223int patmPatchGenMovControl(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu)
1224{
1225 int rc = VINF_SUCCESS;
1226 int reg, mod, rm, ctrlreg;
1227 uint32_t offset;
1228
1229 PATCHGEN_PROLOG(pVM, pPatch, PATCHGEN_DEF_SIZE);
1230
1231 mod = 0; //effective address (only)
1232 rm = 5; //disp32
1233 if (pCpu->pCurInstr->fParam1 == OP_PARM_Cd)
1234 {
1235 Assert(0); // You should not come here. Illegal!
1236
1237 // mov CRx, GPR
1238 pPB[0] = 0x89; //mov disp32, GPR
1239 ctrlreg = pCpu->Param1.Base.idxCtrlReg;
1240 reg = pCpu->Param2.Base.idxGenReg;
1241 Assert(pCpu->Param1.fUse & DISUSE_REG_CR);
1242 Assert(pCpu->Param2.fUse & DISUSE_REG_GEN32);
1243 }
1244 else
1245 {
1246 // mov GPR, CRx
1247 Assert(pCpu->Param1.fUse & DISUSE_REG_GEN32);
1248 Assert(pCpu->Param2.fUse & DISUSE_REG_CR);
1249
1250 pPB[0] = 0x8B; // mov GPR, disp32
1251 reg = pCpu->Param1.Base.idxGenReg;
1252 ctrlreg = pCpu->Param2.Base.idxCtrlReg;
1253 }
1254
1255 pPB[1] = MAKE_MODRM(mod, reg, rm);
1256
1257 /// @todo: make this an array in the context structure
1258 switch (ctrlreg)
1259 {
1260 case DISCREG_CR0:
1261 offset = RT_OFFSETOF(CPUMCTX, cr0);
1262 break;
1263 case DISCREG_CR2:
1264 offset = RT_OFFSETOF(CPUMCTX, cr2);
1265 break;
1266 case DISCREG_CR3:
1267 offset = RT_OFFSETOF(CPUMCTX, cr3);
1268 break;
1269 case DISCREG_CR4:
1270 offset = RT_OFFSETOF(CPUMCTX, cr4);
1271 break;
1272 default: /* Shut up compiler warning. */
1273 AssertFailed();
1274 offset = 0;
1275 break;
1276 }
1277 *(RTRCPTR *)&pPB[2] = pVM->patm.s.pCPUMCtxGC + offset;
1278 patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_ABSOLUTE);
1279
1280 PATCHGEN_EPILOG(pPatch, 2 + sizeof(RTRCPTR));
1281 return rc;
1282}
1283
1284/*
1285 * mov GPR, SS
1286 */
1287int patmPatchGenMovFromSS(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RTRCPTR pCurInstrGC)
1288{
1289 uint32_t size, offset;
1290
1291 Log(("patmPatchGenMovFromSS %RRv\n", pCurInstrGC));
1292
1293 Assert(pPatch->flags & PATMFL_CODE32);
1294
1295 PATCHGEN_PROLOG(pVM, pPatch, g_patmClearPIFRecord.cbFunction + 2 + g_patmMovFromSSRecord.cbFunction + 2 + g_patmSetPIFRecord.cbFunction);
1296 size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmClearPIFRecord, 0, false);
1297 PATCHGEN_EPILOG(pPatch, size);
1298
1299 /* push ss */
1300 PATCHGEN_PROLOG_NODEF(pVM, pPatch, 2);
1301 offset = 0;
1302 if (pCpu->fPrefix & DISPREFIX_OPSIZE)
1303 pPB[offset++] = 0x66; /* size override -> 16 bits push */
1304 pPB[offset++] = 0x16;
1305 PATCHGEN_EPILOG(pPatch, offset);
1306
1307 /* Checks and corrects the RPL of the pushed ss. */
1308 PATCHGEN_PROLOG_NODEF(pVM, pPatch, g_patmMovFromSSRecord.cbFunction);
1309 size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmMovFromSSRecord, 0, false);
1310 PATCHGEN_EPILOG(pPatch, size);
1311
1312 /* pop general purpose register */
1313 PATCHGEN_PROLOG_NODEF(pVM, pPatch, 2);
1314 offset = 0;
1315 if (pCpu->fPrefix & DISPREFIX_OPSIZE)
1316 pPB[offset++] = 0x66; /* size override -> 16 bits pop */
1317 pPB[offset++] = 0x58 + pCpu->Param1.Base.idxGenReg;
1318 PATCHGEN_EPILOG(pPatch, offset);
1319
1320
1321 PATCHGEN_PROLOG_NODEF(pVM, pPatch, g_patmSetPIFRecord.cbFunction);
1322 size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmSetPIFRecord, 0, false);
1323 PATCHGEN_EPILOG(pPatch, size);
1324
1325 return VINF_SUCCESS;
1326}
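/* Editor's note: the full sequence emitted by patmPatchGenMovFromSS is thus
 * (operand-size prefix 66h only when the guest instruction had one):
 *
 *     <ClearPIF template>      ; clear PATM interrupt flag around the sequence
 *     66? 16                   ; push ss
 *     <MovFromSS template>     ; corrects the RPL bits of the pushed selector
 *     66? 58+r                 ; pop r16/r32 (original destination register)
 *     <SetPIF template>
 */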
1327
1328
1329/**
1330 * Generate an sldt or str patch instruction
1331 *
1332 * @returns VBox status code.
1333 * @param pVM Pointer to the VM.
1334 * @param pPatch Patch record
1335 * @param pCpu Disassembly state
1336 * @param pCurInstrGC Guest instruction address
1337 */
1338int patmPatchGenSldtStr(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RTRCPTR pCurInstrGC)
1339{
1340 // sldt %Ew
1341 int rc = VINF_SUCCESS;
1342 uint32_t offset = 0;
1343 uint32_t i;
1344
1345 /** @todo segment prefix (untested) */
1346 Assert(pCpu->fPrefix == DISPREFIX_NONE || pCpu->fPrefix == DISPREFIX_OPSIZE);
1347
1348 PATCHGEN_PROLOG(pVM, pPatch, PATCHGEN_DEF_SIZE);
1349
1350 if (pCpu->Param1.fUse == DISUSE_REG_GEN32 || pCpu->Param1.fUse == DISUSE_REG_GEN16)
1351 {
1352 /* Register operand */
1353 // 8B 15 [32 bits addr] mov edx, CPUMCTX.tr/ldtr
1354
1355 if (pCpu->fPrefix == DISPREFIX_OPSIZE)
1356 pPB[offset++] = 0x66;
1357
1358 pPB[offset++] = 0x8B; // mov destreg, CPUMCTX.tr/ldtr
1359 /* Modify REG part according to destination of original instruction */
1360 pPB[offset++] = MAKE_MODRM(0, pCpu->Param1.Base.idxGenReg, 5);
1361 if (pCpu->pCurInstr->uOpcode == OP_STR)
1362 {
1363 *(RTRCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, tr);
1364 }
1365 else
1366 {
1367 *(RTRCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, ldtr);
1368 }
1369 patmPatchAddReloc32(pVM, pPatch, &pPB[offset], FIXUP_ABSOLUTE);
1370 offset += sizeof(RTRCPTR);
1371 }
1372 else
1373 {
1374 /* Memory operand */
1375 //50 push eax
1376 //52 push edx
1377 //8D 15 48 7C 42 00 lea edx, dword ptr [dest]
1378 //66 A1 48 7C 42 00 mov ax, CPUMCTX.tr/ldtr
1379 //66 89 02 mov word ptr [edx],ax
1380 //5A pop edx
1381 //58 pop eax
1382
1383 pPB[offset++] = 0x50; // push eax
1384 pPB[offset++] = 0x52; // push edx
1385
1386 if (pCpu->fPrefix == DISPREFIX_SEG)
1387 {
1388 pPB[offset++] = DISQuerySegPrefixByte(pCpu);
1389 }
1390 pPB[offset++] = 0x8D; // lea edx, dword ptr [dest]
1391 // duplicate and modify modrm byte and additional bytes if present (e.g. direct address)
1392 pPB[offset++] = MAKE_MODRM(pCpu->ModRM.Bits.Mod, DISGREG_EDX , pCpu->ModRM.Bits.Rm);
1393
1394 i = 3; /* standard offset of modrm bytes */
1395 if (pCpu->fPrefix == DISPREFIX_OPSIZE)
1396 i++; //skip operand prefix
1397 if (pCpu->fPrefix == DISPREFIX_SEG)
1398 i++; //skip segment prefix
1399
1400 rc = patmPatchReadBytes(pVM, &pPB[offset], (RTRCPTR)((RTGCUINTPTR32)pCurInstrGC + i), pCpu->cbInstr - i);
1401 AssertRCReturn(rc, rc);
1402 offset += (pCpu->cbInstr - i);
1403
1404 pPB[offset++] = 0x66; // mov ax, CPUMCTX.tr/ldtr
1405 pPB[offset++] = 0xA1;
1406 if (pCpu->pCurInstr->uOpcode == OP_STR)
1407 {
1408 *(RTRCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, tr);
1409 }
1410 else
1411 {
1412 *(RTRCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + RT_OFFSETOF(CPUMCTX, ldtr);
1413 }
1414 patmPatchAddReloc32(pVM, pPatch, &pPB[offset], FIXUP_ABSOLUTE);
1415 offset += sizeof(RTRCPTR);
1416
1417 pPB[offset++] = 0x66; // mov word ptr [edx],ax
1418 pPB[offset++] = 0x89;
1419 pPB[offset++] = 0x02;
1420
1421 pPB[offset++] = 0x5A; // pop edx
1422 pPB[offset++] = 0x58; // pop eax
1423 }
1424
1425 PATCHGEN_EPILOG(pPatch, offset);
1426
1427 return rc;
1428}
1429
1430/**
1431 * Generate an sgdt or sidt patch instruction
1432 *
1433 * @returns VBox status code.
1434 * @param pVM Pointer to the VM.
1435 * @param pPatch Patch record
1436 * @param pCpu Disassembly state
1437 * @param pCurInstrGC Guest instruction address
1438 */
1439int patmPatchGenSxDT(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RTRCPTR pCurInstrGC)
1440{
1441 int rc = VINF_SUCCESS;
1442 uint32_t offset = 0, offset_base, offset_limit;
1443 uint32_t i;
1444
1445 /** @todo segment prefix (untested) */
1446 Assert(pCpu->fPrefix == DISPREFIX_NONE);
1447
1448 // sgdt %Ms
1449 // sidt %Ms
1450
1451 switch (pCpu->pCurInstr->uOpcode)
1452 {
1453 case OP_SGDT:
1454 offset_base = RT_OFFSETOF(CPUMCTX, gdtr.pGdt);
1455 offset_limit = RT_OFFSETOF(CPUMCTX, gdtr.cbGdt);
1456 break;
1457
1458 case OP_SIDT:
1459 offset_base = RT_OFFSETOF(CPUMCTX, idtr.pIdt);
1460 offset_limit = RT_OFFSETOF(CPUMCTX, idtr.cbIdt);
1461 break;
1462
1463 default:
1464 return VERR_INVALID_PARAMETER;
1465 }
1466
1467//50 push eax
1468//52 push edx
1469//8D 15 48 7C 42 00 lea edx, dword ptr [dest]
1470//66 A1 48 7C 42 00 mov ax, CPUMCTX.gdtr.limit
1471//66 89 02 mov word ptr [edx],ax
1472//A1 48 7C 42 00 mov eax, CPUMCTX.gdtr.base
1473//89 42 02 mov dword ptr [edx+2],eax
1474//5A pop edx
1475//58 pop eax
1476
1477 PATCHGEN_PROLOG(pVM, pPatch, PATCHGEN_DEF_SIZE);
1478 pPB[offset++] = 0x50; // push eax
1479 pPB[offset++] = 0x52; // push edx
1480
1481 if (pCpu->fPrefix == DISPREFIX_SEG)
1482 {
1483 pPB[offset++] = DISQuerySegPrefixByte(pCpu);
1484 }
1485 pPB[offset++] = 0x8D; // lea edx, dword ptr [dest]
1486 // duplicate and modify modrm byte and additional bytes if present (e.g. direct address)
1487 pPB[offset++] = MAKE_MODRM(pCpu->ModRM.Bits.Mod, DISGREG_EDX , pCpu->ModRM.Bits.Rm);
1488
1489 i = 3; /* standard offset of modrm bytes */
1490 if (pCpu->fPrefix == DISPREFIX_OPSIZE)
1491 i++; //skip operand prefix
1492 if (pCpu->fPrefix == DISPREFIX_SEG)
1493 i++; //skip segment prefix
1494 rc = patmPatchReadBytes(pVM, &pPB[offset], (RTRCPTR)((RTGCUINTPTR32)pCurInstrGC + i), pCpu->cbInstr - i);
1495 AssertRCReturn(rc, rc);
1496 offset += (pCpu->cbInstr - i);
1497
1498 pPB[offset++] = 0x66; // mov ax, CPUMCTX.gdtr.limit
1499 pPB[offset++] = 0xA1;
1500 *(RTRCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + offset_limit;
1501 patmPatchAddReloc32(pVM, pPatch, &pPB[offset], FIXUP_ABSOLUTE);
1502 offset += sizeof(RTRCPTR);
1503
1504 pPB[offset++] = 0x66; // mov word ptr [edx],ax
1505 pPB[offset++] = 0x89;
1506 pPB[offset++] = 0x02;
1507
1508 pPB[offset++] = 0xA1; // mov eax, CPUMCTX.gdtr.base
1509 *(RTRCPTR *)&pPB[offset] = pVM->patm.s.pCPUMCtxGC + offset_base;
1510 patmPatchAddReloc32(pVM, pPatch, &pPB[offset], FIXUP_ABSOLUTE);
1511 offset += sizeof(RTRCPTR);
1512
1513 pPB[offset++] = 0x89; // mov dword ptr [edx+2],eax
1514 pPB[offset++] = 0x42;
1515 pPB[offset++] = 0x02;
1516
1517 pPB[offset++] = 0x5A; // pop edx
1518 pPB[offset++] = 0x58; // pop eax
1519
1520 PATCHGEN_EPILOG(pPatch, offset);
1521
1522 return rc;
1523}
1524
1525/**
1526 * Generate a cpuid patch instruction
1527 *
1528 * @returns VBox status code.
1529 * @param pVM Pointer to the VM.
1530 * @param pPatch Patch record
1531 * @param pCurInstrGC Guest instruction address
1532 */
1533int patmPatchGenCpuid(PVM pVM, PPATCHINFO pPatch, RTRCPTR pCurInstrGC)
1534{
1535 uint32_t size;
1536 PATCHGEN_PROLOG(pVM, pPatch, g_patmCpuidRecord.cbFunction);
1537
1538 size = patmPatchGenCode(pVM, pPatch, pPB, &g_patmCpuidRecord, 0, false);
1539
1540 PATCHGEN_EPILOG(pPatch, size);
1541 NOREF(pCurInstrGC);
1542 return VINF_SUCCESS;
1543}
1544
1545/**
1546 * Generate the jump from guest to patch code
1547 *
1548 * @returns VBox status code.
1549 * @param pVM Pointer to the VM.
1550 * @param pPatch Patch record
1551 * @param pReturnAddrGC Guest address to jump back to
1552 * @param fClearInhibitIRQs Clear inhibit irq flag
1553 */
1554int patmPatchGenJumpToGuest(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pReturnAddrGC, bool fClearInhibitIRQs)
1555{
1556 int rc = VINF_SUCCESS;
1557 uint32_t size;
1558
1559 if (fClearInhibitIRQs)
1560 {
1561 rc = patmPatchGenClearInhibitIRQ(pVM, pPatch, pReturnAddrGC);
1562 if (rc == VERR_NO_MEMORY)
1563 return rc;
1564 AssertRCReturn(rc, rc);
1565 }
1566
1567 PATCHGEN_PROLOG(pVM, pPatch, PATMJumpToGuest_IF1Record.cbFunction);
1568
1569 /* Add lookup record for patch to guest address translation */
1570 patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pReturnAddrGC, PATM_LOOKUP_PATCH2GUEST);
1571
1572 /* Generate code to jump to guest code if IF=1, else fault. */
1573 size = patmPatchGenCode(pVM, pPatch, pPB, &PATMJumpToGuest_IF1Record, pReturnAddrGC, true);
1574 PATCHGEN_EPILOG(pPatch, size);
1575
1576 return rc;
1577}
1578
1579/*
1580 * Relative jump from patch code to patch code (no fixup required)
1581 */
1582int patmPatchGenPatchJump(PVM pVM, PPATCHINFO pPatch, RTRCPTR pCurInstrGC, RCPTRTYPE(uint8_t *) pPatchAddrGC, bool fAddLookupRecord)
1583{
1584 int32_t displ;
1585 int rc = VINF_SUCCESS;
1586
1587 Assert(PATMIsPatchGCAddr(pVM, pPatchAddrGC));
1588 PATCHGEN_PROLOG(pVM, pPatch, SIZEOF_NEARJUMP32);
1589
1590 if (fAddLookupRecord)
1591 {
1592 /* Add lookup record for patch to guest address translation */
1593 patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pCurInstrGC, PATM_LOOKUP_PATCH2GUEST);
1594 }
1595
1596 pPB[0] = 0xE9; //JMP
1597
1598 displ = pPatchAddrGC - (PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset + SIZEOF_NEARJUMP32);
1599
1600 *(uint32_t *)&pPB[1] = displ;
1601
1602 PATCHGEN_EPILOG(pPatch, SIZEOF_NEARJUMP32);
1603
1604 return rc;
1605}