VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/NEMAllNativeTemplate-win.cpp.h@ 71152

最後變更（自修訂版 71152 起）：71152，由 vboxsync 於 7 年前提交

VMM/NEM/win: Refactoring... bugref:9044

  • 屬性 svn:eol-style 設為 native
  • 屬性 svn:keywords 設為 Author Date Id Revision
檔案大小: 94.1 KB
 
1/* $Id: NEMAllNativeTemplate-win.cpp.h 71152 2018-02-28 12:36:04Z vboxsync $ */
2/** @file
3 * NEM - Native execution manager, Windows code template ring-0/3.
4 */
5
6/*
7 * Copyright (C) 2018 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.alldomusa.eu.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Global Variables *
21*********************************************************************************************************************************/
/** Human readable names for the NEM_WIN_PAGE_STATE_XXX values, indexed by
 *  state value (used for logging/diagnostics). */
NEM_TMPL_STATIC const char * const g_apszPageStates[4] = { "not-set", "unmapped", "readable", "writable" };
24
25
26/*********************************************************************************************************************************
27* Internal Functions *
28*********************************************************************************************************************************/
/** Forward declaration of the worker that (re)establishes the mapping state
 *  of a single guest physical page; defined later in this template. */
NEM_TMPL_STATIC int nemHCNativeSetPhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
                                           uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged);
31
32
33#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
34
35/**
36 * Wrapper around VMMR0_DO_NEM_MAP_PAGES for a single page.
37 *
38 * @returns VBox status code.
39 * @param pVM The cross context VM structure.
40 * @param pVCpu The cross context virtual CPU structure of the caller.
41 * @param GCPhysSrc The source page. Does not need to be page aligned.
42 * @param GCPhysDst The destination page. Same as @a GCPhysSrc except for
43 * when A20 is disabled.
44 * @param fFlags HV_MAP_GPA_XXX.
45 */
46DECLINLINE(int) nemHCWinHypercallMapPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst, uint32_t fFlags)
47{
48#ifdef IN_RING0
49 /** @todo optimize further, caller generally has the physical address. */
50 PGVM pGVM = GVMMR0FastGetGVMByVM(pVM);
51 AssertReturn(pGVM, VERR_INVALID_VM_HANDLE);
52 return nemR0WinMapPages(pGVM, pVM, &pGVM->aCpus[pVCpu->idCpu], GCPhysSrc, GCPhysDst, 1, fFlags);
53#else
54 pVCpu->nem.s.Hypercall.MapPages.GCPhysSrc = GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
55 pVCpu->nem.s.Hypercall.MapPages.GCPhysDst = GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
56 pVCpu->nem.s.Hypercall.MapPages.cPages = 1;
57 pVCpu->nem.s.Hypercall.MapPages.fFlags = fFlags;
58 return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_MAP_PAGES, 0, NULL);
59#endif
60}
61
62
/**
 * Wrapper around VMMR0_DO_NEM_UNMAP_PAGES for a single page.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the caller.
 * @param   GCPhys      The page to unmap.  Does not need to be page aligned.
 */
DECLINLINE(int) nemHCWinHypercallUnmapPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
{
# ifdef IN_RING0
    /* Ring-0: resolve the global VM handle and unmap directly. */
    PGVM pGVM = GVMMR0FastGetGVMByVM(pVM);
    AssertReturn(pGVM, VERR_INVALID_VM_HANDLE);
    return nemR0WinUnmapPages(pGVM, &pGVM->aCpus[pVCpu->idCpu], GCPhys, 1);
# else
    /* Ring-3: stash the page-aligned request and have ring-0 do the work. */
    pVCpu->nem.s.Hypercall.UnmapPages.GCPhys   = GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
    pVCpu->nem.s.Hypercall.UnmapPages.cPages   = 1;
    return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_UNMAP_PAGES, 0, NULL);
# endif
}
83
84#endif /* NEM_WIN_USE_HYPERCALLS_FOR_PAGES */
85
86
87#ifndef IN_RING0
88
/**
 * Copies the guest CPU state (CPUMCTX) over to the Hyper-V partition.
 *
 * With NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS the export is delegated to
 * ring-0 via VMMR0_DO_NEM_EXPORT_STATE; otherwise the state is pushed
 * through WHvSetVirtualProcessorRegisters using two parallel arrays
 * (register names and values).
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pCtx    The guest CPU context to copy from.  Not referenced in
 *                  the hypercall configuration.
 */
NEM_TMPL_STATIC int nemHCWinCopyStateToHyperV(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
#ifdef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS
    NOREF(pCtx);
    /* NOTE(review): UINT64_MAX presumably selects "everything" — confirm against the ring-0 handler. */
    int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_EXPORT_STATE, UINT64_MAX, NULL);
    AssertLogRelRCReturn(rc, rc);
    return rc;

#else
    /* Parallel arrays: aenmNames[i] names the register held in aValues[i]. */
    WHV_REGISTER_NAME  aenmNames[128];
    WHV_REGISTER_VALUE aValues[128];

    /* GPRs */
    aenmNames[0]  = WHvX64RegisterRax;
    aValues[0].Reg64 = pCtx->rax;
    aenmNames[1]  = WHvX64RegisterRcx;
    aValues[1].Reg64 = pCtx->rcx;
    aenmNames[2]  = WHvX64RegisterRdx;
    aValues[2].Reg64 = pCtx->rdx;
    aenmNames[3]  = WHvX64RegisterRbx;
    aValues[3].Reg64 = pCtx->rbx;
    aenmNames[4]  = WHvX64RegisterRsp;
    aValues[4].Reg64 = pCtx->rsp;
    aenmNames[5]  = WHvX64RegisterRbp;
    aValues[5].Reg64 = pCtx->rbp;
    aenmNames[6]  = WHvX64RegisterRsi;
    aValues[6].Reg64 = pCtx->rsi;
    aenmNames[7]  = WHvX64RegisterRdi;
    aValues[7].Reg64 = pCtx->rdi;
    aenmNames[8]  = WHvX64RegisterR8;
    aValues[8].Reg64 = pCtx->r8;
    aenmNames[9]  = WHvX64RegisterR9;
    aValues[9].Reg64 = pCtx->r9;
    aenmNames[10] = WHvX64RegisterR10;
    aValues[10].Reg64 = pCtx->r10;
    aenmNames[11] = WHvX64RegisterR11;
    aValues[11].Reg64 = pCtx->r11;
    aenmNames[12] = WHvX64RegisterR12;
    aValues[12].Reg64 = pCtx->r12;
    aenmNames[13] = WHvX64RegisterR13;
    aValues[13].Reg64 = pCtx->r13;
    aenmNames[14] = WHvX64RegisterR14;
    aValues[14].Reg64 = pCtx->r14;
    aenmNames[15] = WHvX64RegisterR15;
    aValues[15].Reg64 = pCtx->r15;

    /* RIP & Flags */
    aenmNames[16] = WHvX64RegisterRip;
    aValues[16].Reg64 = pCtx->rip;
    aenmNames[17] = WHvX64RegisterRflags;
    aValues[17].Reg64 = pCtx->rflags.u;

    /* Segments */
#define COPY_OUT_SEG(a_idx, a_enmName, a_SReg) \
        do { \
            aenmNames[a_idx]                  = a_enmName; \
            aValues[a_idx].Segment.Base       = (a_SReg).u64Base; \
            aValues[a_idx].Segment.Limit      = (a_SReg).u32Limit; \
            aValues[a_idx].Segment.Selector   = (a_SReg).Sel; \
            aValues[a_idx].Segment.Attributes = (a_SReg).Attr.u; \
        } while (0)
    COPY_OUT_SEG(18, WHvX64RegisterEs,   pCtx->es);
    COPY_OUT_SEG(19, WHvX64RegisterCs,   pCtx->cs);
    COPY_OUT_SEG(20, WHvX64RegisterSs,   pCtx->ss);
    COPY_OUT_SEG(21, WHvX64RegisterDs,   pCtx->ds);
    COPY_OUT_SEG(22, WHvX64RegisterFs,   pCtx->fs);
    COPY_OUT_SEG(23, WHvX64RegisterGs,   pCtx->gs);
    COPY_OUT_SEG(24, WHvX64RegisterLdtr, pCtx->ldtr);
    COPY_OUT_SEG(25, WHvX64RegisterTr,   pCtx->tr);

    /* From here on iReg tracks the next free slot in the parallel arrays. */
    uintptr_t iReg = 26;
    /* Descriptor tables. */
    aenmNames[iReg] = WHvX64RegisterIdtr;
    aValues[iReg].Table.Limit = pCtx->idtr.cbIdt;
    aValues[iReg].Table.Base  = pCtx->idtr.pIdt;
    iReg++;
    aenmNames[iReg] = WHvX64RegisterGdtr;
    aValues[iReg].Table.Limit = pCtx->gdtr.cbGdt;
    aValues[iReg].Table.Base  = pCtx->gdtr.pGdt;
    iReg++;

    /* Control registers. */
    aenmNames[iReg] = WHvX64RegisterCr0;
    aValues[iReg].Reg64 = pCtx->cr0;
    iReg++;
    aenmNames[iReg] = WHvX64RegisterCr2;
    aValues[iReg].Reg64 = pCtx->cr2;
    iReg++;
    aenmNames[iReg] = WHvX64RegisterCr3;
    aValues[iReg].Reg64 = pCtx->cr3;
    iReg++;
    aenmNames[iReg] = WHvX64RegisterCr4;
    aValues[iReg].Reg64 = pCtx->cr4;
    iReg++;
    aenmNames[iReg] = WHvX64RegisterCr8;
    aValues[iReg].Reg64 = CPUMGetGuestCR8(pVCpu);
    iReg++;

    /* Debug registers. */
/** @todo fixme. Figure out what the hyper-v version of KVM_SET_GUEST_DEBUG would be. */
    aenmNames[iReg] = WHvX64RegisterDr0;
    //aValues[iReg].Reg64 = CPUMGetHyperDR0(pVCpu);
    aValues[iReg].Reg64 = pCtx->dr[0];
    iReg++;
    aenmNames[iReg] = WHvX64RegisterDr1;
    //aValues[iReg].Reg64 = CPUMGetHyperDR1(pVCpu);
    aValues[iReg].Reg64 = pCtx->dr[1];
    iReg++;
    aenmNames[iReg] = WHvX64RegisterDr2;
    //aValues[iReg].Reg64 = CPUMGetHyperDR2(pVCpu);
    aValues[iReg].Reg64 = pCtx->dr[2];
    iReg++;
    aenmNames[iReg] = WHvX64RegisterDr3;
    //aValues[iReg].Reg64 = CPUMGetHyperDR3(pVCpu);
    aValues[iReg].Reg64 = pCtx->dr[3];
    iReg++;
    aenmNames[iReg] = WHvX64RegisterDr6;
    //aValues[iReg].Reg64 = CPUMGetHyperDR6(pVCpu);
    aValues[iReg].Reg64 = pCtx->dr[6];
    iReg++;
    aenmNames[iReg] = WHvX64RegisterDr7;
    //aValues[iReg].Reg64 = CPUMGetHyperDR7(pVCpu);
    aValues[iReg].Reg64 = pCtx->dr[7];
    iReg++;

    /* Vector state. */
    aenmNames[iReg] = WHvX64RegisterXmm0;
    aValues[iReg].Reg128.Low64  = pCtx->pXStateR3->x87.aXMM[0].uXmm.s.Lo;
    aValues[iReg].Reg128.High64 = pCtx->pXStateR3->x87.aXMM[0].uXmm.s.Hi;
    iReg++;
    aenmNames[iReg] = WHvX64RegisterXmm1;
    aValues[iReg].Reg128.Low64  = pCtx->pXStateR3->x87.aXMM[1].uXmm.s.Lo;
    aValues[iReg].Reg128.High64 = pCtx->pXStateR3->x87.aXMM[1].uXmm.s.Hi;
    iReg++;
    aenmNames[iReg] = WHvX64RegisterXmm2;
    aValues[iReg].Reg128.Low64  = pCtx->pXStateR3->x87.aXMM[2].uXmm.s.Lo;
    aValues[iReg].Reg128.High64 = pCtx->pXStateR3->x87.aXMM[2].uXmm.s.Hi;
    iReg++;
    aenmNames[iReg] = WHvX64RegisterXmm3;
    aValues[iReg].Reg128.Low64  = pCtx->pXStateR3->x87.aXMM[3].uXmm.s.Lo;
    aValues[iReg].Reg128.High64 = pCtx->pXStateR3->x87.aXMM[3].uXmm.s.Hi;
    iReg++;
    aenmNames[iReg] = WHvX64RegisterXmm4;
    aValues[iReg].Reg128.Low64  = pCtx->pXStateR3->x87.aXMM[4].uXmm.s.Lo;
    aValues[iReg].Reg128.High64 = pCtx->pXStateR3->x87.aXMM[4].uXmm.s.Hi;
    iReg++;
    aenmNames[iReg] = WHvX64RegisterXmm5;
    aValues[iReg].Reg128.Low64  = pCtx->pXStateR3->x87.aXMM[5].uXmm.s.Lo;
    aValues[iReg].Reg128.High64 = pCtx->pXStateR3->x87.aXMM[5].uXmm.s.Hi;
    iReg++;
    aenmNames[iReg] = WHvX64RegisterXmm6;
    aValues[iReg].Reg128.Low64  = pCtx->pXStateR3->x87.aXMM[6].uXmm.s.Lo;
    aValues[iReg].Reg128.High64 = pCtx->pXStateR3->x87.aXMM[6].uXmm.s.Hi;
    iReg++;
    aenmNames[iReg] = WHvX64RegisterXmm7;
    aValues[iReg].Reg128.Low64  = pCtx->pXStateR3->x87.aXMM[7].uXmm.s.Lo;
    aValues[iReg].Reg128.High64 = pCtx->pXStateR3->x87.aXMM[7].uXmm.s.Hi;
    iReg++;
    aenmNames[iReg] = WHvX64RegisterXmm8;
    aValues[iReg].Reg128.Low64  = pCtx->pXStateR3->x87.aXMM[8].uXmm.s.Lo;
    aValues[iReg].Reg128.High64 = pCtx->pXStateR3->x87.aXMM[8].uXmm.s.Hi;
    iReg++;
    aenmNames[iReg] = WHvX64RegisterXmm9;
    aValues[iReg].Reg128.Low64  = pCtx->pXStateR3->x87.aXMM[9].uXmm.s.Lo;
    aValues[iReg].Reg128.High64 = pCtx->pXStateR3->x87.aXMM[9].uXmm.s.Hi;
    iReg++;
    aenmNames[iReg] = WHvX64RegisterXmm10;
    aValues[iReg].Reg128.Low64  = pCtx->pXStateR3->x87.aXMM[10].uXmm.s.Lo;
    aValues[iReg].Reg128.High64 = pCtx->pXStateR3->x87.aXMM[10].uXmm.s.Hi;
    iReg++;
    aenmNames[iReg] = WHvX64RegisterXmm11;
    aValues[iReg].Reg128.Low64  = pCtx->pXStateR3->x87.aXMM[11].uXmm.s.Lo;
    aValues[iReg].Reg128.High64 = pCtx->pXStateR3->x87.aXMM[11].uXmm.s.Hi;
    iReg++;
    aenmNames[iReg] = WHvX64RegisterXmm12;
    aValues[iReg].Reg128.Low64  = pCtx->pXStateR3->x87.aXMM[12].uXmm.s.Lo;
    aValues[iReg].Reg128.High64 = pCtx->pXStateR3->x87.aXMM[12].uXmm.s.Hi;
    iReg++;
    aenmNames[iReg] = WHvX64RegisterXmm13;
    aValues[iReg].Reg128.Low64  = pCtx->pXStateR3->x87.aXMM[13].uXmm.s.Lo;
    aValues[iReg].Reg128.High64 = pCtx->pXStateR3->x87.aXMM[13].uXmm.s.Hi;
    iReg++;
    aenmNames[iReg] = WHvX64RegisterXmm14;
    aValues[iReg].Reg128.Low64  = pCtx->pXStateR3->x87.aXMM[14].uXmm.s.Lo;
    aValues[iReg].Reg128.High64 = pCtx->pXStateR3->x87.aXMM[14].uXmm.s.Hi;
    iReg++;
    aenmNames[iReg] = WHvX64RegisterXmm15;
    aValues[iReg].Reg128.Low64  = pCtx->pXStateR3->x87.aXMM[15].uXmm.s.Lo;
    aValues[iReg].Reg128.High64 = pCtx->pXStateR3->x87.aXMM[15].uXmm.s.Hi;
    iReg++;

    /* Floating point state. */
    aenmNames[iReg] = WHvX64RegisterFpMmx0;
    aValues[iReg].Fp.AsUINT128.Low64  = pCtx->pXStateR3->x87.aRegs[0].au64[0];
    aValues[iReg].Fp.AsUINT128.High64 = pCtx->pXStateR3->x87.aRegs[0].au64[1];
    iReg++;
    aenmNames[iReg] = WHvX64RegisterFpMmx1;
    aValues[iReg].Fp.AsUINT128.Low64  = pCtx->pXStateR3->x87.aRegs[1].au64[0];
    aValues[iReg].Fp.AsUINT128.High64 = pCtx->pXStateR3->x87.aRegs[1].au64[1];
    iReg++;
    aenmNames[iReg] = WHvX64RegisterFpMmx2;
    aValues[iReg].Fp.AsUINT128.Low64  = pCtx->pXStateR3->x87.aRegs[2].au64[0];
    aValues[iReg].Fp.AsUINT128.High64 = pCtx->pXStateR3->x87.aRegs[2].au64[1];
    iReg++;
    aenmNames[iReg] = WHvX64RegisterFpMmx3;
    aValues[iReg].Fp.AsUINT128.Low64  = pCtx->pXStateR3->x87.aRegs[3].au64[0];
    aValues[iReg].Fp.AsUINT128.High64 = pCtx->pXStateR3->x87.aRegs[3].au64[1];
    iReg++;
    aenmNames[iReg] = WHvX64RegisterFpMmx4;
    aValues[iReg].Fp.AsUINT128.Low64  = pCtx->pXStateR3->x87.aRegs[4].au64[0];
    aValues[iReg].Fp.AsUINT128.High64 = pCtx->pXStateR3->x87.aRegs[4].au64[1];
    iReg++;
    aenmNames[iReg] = WHvX64RegisterFpMmx5;
    aValues[iReg].Fp.AsUINT128.Low64  = pCtx->pXStateR3->x87.aRegs[5].au64[0];
    aValues[iReg].Fp.AsUINT128.High64 = pCtx->pXStateR3->x87.aRegs[5].au64[1];
    iReg++;
    aenmNames[iReg] = WHvX64RegisterFpMmx6;
    aValues[iReg].Fp.AsUINT128.Low64  = pCtx->pXStateR3->x87.aRegs[6].au64[0];
    aValues[iReg].Fp.AsUINT128.High64 = pCtx->pXStateR3->x87.aRegs[6].au64[1];
    iReg++;
    aenmNames[iReg] = WHvX64RegisterFpMmx7;
    aValues[iReg].Fp.AsUINT128.Low64  = pCtx->pXStateR3->x87.aRegs[7].au64[0];
    aValues[iReg].Fp.AsUINT128.High64 = pCtx->pXStateR3->x87.aRegs[7].au64[1];
    iReg++;

    aenmNames[iReg] = WHvX64RegisterFpControlStatus;
    aValues[iReg].FpControlStatus.FpControl = pCtx->pXStateR3->x87.FCW;
    aValues[iReg].FpControlStatus.FpStatus  = pCtx->pXStateR3->x87.FSW;
    aValues[iReg].FpControlStatus.FpTag     = pCtx->pXStateR3->x87.FTW;
    aValues[iReg].FpControlStatus.Reserved  = pCtx->pXStateR3->x87.FTW >> 8;
    aValues[iReg].FpControlStatus.LastFpOp  = pCtx->pXStateR3->x87.FOP;
    /* Pack IP/CS/Rsrvd1 into the 64-bit last-FP-instruction-pointer field. */
    aValues[iReg].FpControlStatus.LastFpRip = (pCtx->pXStateR3->x87.FPUIP)
                                            | ((uint64_t)pCtx->pXStateR3->x87.CS << 32)
                                            | ((uint64_t)pCtx->pXStateR3->x87.Rsrvd1 << 48);
    iReg++;

    aenmNames[iReg] = WHvX64RegisterXmmControlStatus;
    aValues[iReg].XmmControlStatus.LastFpRdp = (pCtx->pXStateR3->x87.FPUDP)
                                             | ((uint64_t)pCtx->pXStateR3->x87.DS << 32)
                                             | ((uint64_t)pCtx->pXStateR3->x87.Rsrvd2 << 48);
    aValues[iReg].XmmControlStatus.XmmStatusControl     = pCtx->pXStateR3->x87.MXCSR;
    aValues[iReg].XmmControlStatus.XmmStatusControlMask = pCtx->pXStateR3->x87.MXCSR_MASK; /** @todo ??? (Isn't this an output field?) */
    iReg++;

    /* MSRs */
    // WHvX64RegisterTsc - don't touch
    aenmNames[iReg] = WHvX64RegisterEfer;
    aValues[iReg].Reg64 = pCtx->msrEFER;
    iReg++;
    aenmNames[iReg] = WHvX64RegisterKernelGsBase;
    aValues[iReg].Reg64 = pCtx->msrKERNELGSBASE;
    iReg++;
    aenmNames[iReg] = WHvX64RegisterApicBase;
    aValues[iReg].Reg64 = APICGetBaseMsrNoCheck(pVCpu);
    iReg++;
    aenmNames[iReg] = WHvX64RegisterPat;
    aValues[iReg].Reg64 = pCtx->msrPAT;
    iReg++;
    /// @todo WHvX64RegisterSysenterCs
    /// @todo WHvX64RegisterSysenterEip
    /// @todo WHvX64RegisterSysenterEsp
    aenmNames[iReg] = WHvX64RegisterStar;
    aValues[iReg].Reg64 = pCtx->msrSTAR;
    iReg++;
    aenmNames[iReg] = WHvX64RegisterLstar;
    aValues[iReg].Reg64 = pCtx->msrLSTAR;
    iReg++;
    aenmNames[iReg] = WHvX64RegisterCstar;
    aValues[iReg].Reg64 = pCtx->msrCSTAR;
    iReg++;
    aenmNames[iReg] = WHvX64RegisterSfmask;
    aValues[iReg].Reg64 = pCtx->msrSFMASK;
    iReg++;

    /* event injection (always clear it). */
    aenmNames[iReg] = WHvRegisterPendingInterruption;
    aValues[iReg].Reg64 = 0;
    iReg++;
    /// @todo WHvRegisterInterruptState
    /// @todo WHvRegisterPendingEvent0
    /// @todo WHvRegisterPendingEvent1

    /*
     * Set the registers.
     */
    Assert(iReg < RT_ELEMENTS(aValues));
    Assert(iReg < RT_ELEMENTS(aenmNames));
#ifdef NEM_WIN_INTERCEPT_NT_IO_CTLS
    Log12(("Calling WHvSetVirtualProcessorRegisters(%p, %u, %p, %u, %p)\n",
           pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues));
#endif
    HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues);
    if (SUCCEEDED(hrc))
        return VINF_SUCCESS;
    AssertLogRelMsgFailed(("WHvSetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
                           pVM->nem.s.hPartition, pVCpu->idCpu, iReg,
                           hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
    return VERR_INTERNAL_ERROR;
#endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
}
389
390
391NEM_TMPL_STATIC int nemHCWinCopyStateFromHyperV(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
392{
393#ifdef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS
394 NOREF(pCtx);
395 int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_IMPORT_STATE, UINT64_MAX, NULL);
396 if (RT_SUCCESS(rc))
397 return rc;
398 if (rc == VERR_NEM_FLUSH_TLB)
399 return PGMFlushTLB(pVCpu, pCtx->cr3, true /*fGlobal*/);
400 if (rc == VERR_NEM_CHANGE_PGM_MODE)
401 return PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
402 AssertLogRelRCReturn(rc, rc);
403 return rc;
404
405#else
406 WHV_REGISTER_NAME aenmNames[128];
407
408 /* GPRs */
409 aenmNames[0] = WHvX64RegisterRax;
410 aenmNames[1] = WHvX64RegisterRcx;
411 aenmNames[2] = WHvX64RegisterRdx;
412 aenmNames[3] = WHvX64RegisterRbx;
413 aenmNames[4] = WHvX64RegisterRsp;
414 aenmNames[5] = WHvX64RegisterRbp;
415 aenmNames[6] = WHvX64RegisterRsi;
416 aenmNames[7] = WHvX64RegisterRdi;
417 aenmNames[8] = WHvX64RegisterR8;
418 aenmNames[9] = WHvX64RegisterR9;
419 aenmNames[10] = WHvX64RegisterR10;
420 aenmNames[11] = WHvX64RegisterR11;
421 aenmNames[12] = WHvX64RegisterR12;
422 aenmNames[13] = WHvX64RegisterR13;
423 aenmNames[14] = WHvX64RegisterR14;
424 aenmNames[15] = WHvX64RegisterR15;
425
426 /* RIP & Flags */
427 aenmNames[16] = WHvX64RegisterRip;
428 aenmNames[17] = WHvX64RegisterRflags;
429
430 /* Segments */
431 aenmNames[18] = WHvX64RegisterEs;
432 aenmNames[19] = WHvX64RegisterCs;
433 aenmNames[20] = WHvX64RegisterSs;
434 aenmNames[21] = WHvX64RegisterDs;
435 aenmNames[22] = WHvX64RegisterFs;
436 aenmNames[23] = WHvX64RegisterGs;
437 aenmNames[24] = WHvX64RegisterLdtr;
438 aenmNames[25] = WHvX64RegisterTr;
439
440 /* Descriptor tables. */
441 aenmNames[26] = WHvX64RegisterIdtr;
442 aenmNames[27] = WHvX64RegisterGdtr;
443
444 /* Control registers. */
445 aenmNames[28] = WHvX64RegisterCr0;
446 aenmNames[29] = WHvX64RegisterCr2;
447 aenmNames[30] = WHvX64RegisterCr3;
448 aenmNames[31] = WHvX64RegisterCr4;
449 aenmNames[32] = WHvX64RegisterCr8;
450
451 /* Debug registers. */
452 aenmNames[33] = WHvX64RegisterDr0;
453 aenmNames[34] = WHvX64RegisterDr1;
454 aenmNames[35] = WHvX64RegisterDr2;
455 aenmNames[36] = WHvX64RegisterDr3;
456 aenmNames[37] = WHvX64RegisterDr6;
457 aenmNames[38] = WHvX64RegisterDr7;
458
459 /* Vector state. */
460 aenmNames[39] = WHvX64RegisterXmm0;
461 aenmNames[40] = WHvX64RegisterXmm1;
462 aenmNames[41] = WHvX64RegisterXmm2;
463 aenmNames[42] = WHvX64RegisterXmm3;
464 aenmNames[43] = WHvX64RegisterXmm4;
465 aenmNames[44] = WHvX64RegisterXmm5;
466 aenmNames[45] = WHvX64RegisterXmm6;
467 aenmNames[46] = WHvX64RegisterXmm7;
468 aenmNames[47] = WHvX64RegisterXmm8;
469 aenmNames[48] = WHvX64RegisterXmm9;
470 aenmNames[49] = WHvX64RegisterXmm10;
471 aenmNames[50] = WHvX64RegisterXmm11;
472 aenmNames[51] = WHvX64RegisterXmm12;
473 aenmNames[52] = WHvX64RegisterXmm13;
474 aenmNames[53] = WHvX64RegisterXmm14;
475 aenmNames[54] = WHvX64RegisterXmm15;
476
477 /* Floating point state. */
478 aenmNames[55] = WHvX64RegisterFpMmx0;
479 aenmNames[56] = WHvX64RegisterFpMmx1;
480 aenmNames[57] = WHvX64RegisterFpMmx2;
481 aenmNames[58] = WHvX64RegisterFpMmx3;
482 aenmNames[59] = WHvX64RegisterFpMmx4;
483 aenmNames[60] = WHvX64RegisterFpMmx5;
484 aenmNames[61] = WHvX64RegisterFpMmx6;
485 aenmNames[62] = WHvX64RegisterFpMmx7;
486 aenmNames[63] = WHvX64RegisterFpControlStatus;
487 aenmNames[64] = WHvX64RegisterXmmControlStatus;
488
489 /* MSRs */
490 // WHvX64RegisterTsc - don't touch
491 aenmNames[65] = WHvX64RegisterEfer;
492 aenmNames[66] = WHvX64RegisterKernelGsBase;
493 aenmNames[67] = WHvX64RegisterApicBase;
494 aenmNames[68] = WHvX64RegisterPat;
495 aenmNames[69] = WHvX64RegisterSysenterCs;
496 aenmNames[70] = WHvX64RegisterSysenterEip;
497 aenmNames[71] = WHvX64RegisterSysenterEsp;
498 aenmNames[72] = WHvX64RegisterStar;
499 aenmNames[73] = WHvX64RegisterLstar;
500 aenmNames[74] = WHvX64RegisterCstar;
501 aenmNames[75] = WHvX64RegisterSfmask;
502
503 /* event injection */
504 aenmNames[76] = WHvRegisterPendingInterruption;
505 aenmNames[77] = WHvRegisterInterruptState;
506 aenmNames[78] = WHvRegisterInterruptState;
507 aenmNames[79] = WHvRegisterPendingEvent0;
508 aenmNames[80] = WHvRegisterPendingEvent1;
509 unsigned const cRegs = 81;
510
511 /*
512 * Get the registers.
513 */
514 WHV_REGISTER_VALUE aValues[cRegs];
515 RT_ZERO(aValues);
516 Assert(RT_ELEMENTS(aValues) >= cRegs);
517 Assert(RT_ELEMENTS(aenmNames) >= cRegs);
518#ifdef NEM_WIN_INTERCEPT_NT_IO_CTLS
519 Log12(("Calling WHvGetVirtualProcessorRegisters(%p, %u, %p, %u, %p)\n",
520 pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, cRegs, aValues));
521#endif
522 HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, cRegs, aValues);
523 if (SUCCEEDED(hrc))
524 {
525 /* GPRs */
526 Assert(aenmNames[0] == WHvX64RegisterRax);
527 Assert(aenmNames[15] == WHvX64RegisterR15);
528 pCtx->rax = aValues[0].Reg64;
529 pCtx->rcx = aValues[1].Reg64;
530 pCtx->rdx = aValues[2].Reg64;
531 pCtx->rbx = aValues[3].Reg64;
532 pCtx->rsp = aValues[4].Reg64;
533 pCtx->rbp = aValues[5].Reg64;
534 pCtx->rsi = aValues[6].Reg64;
535 pCtx->rdi = aValues[7].Reg64;
536 pCtx->r8 = aValues[8].Reg64;
537 pCtx->r9 = aValues[9].Reg64;
538 pCtx->r10 = aValues[10].Reg64;
539 pCtx->r11 = aValues[11].Reg64;
540 pCtx->r12 = aValues[12].Reg64;
541 pCtx->r13 = aValues[13].Reg64;
542 pCtx->r14 = aValues[14].Reg64;
543 pCtx->r15 = aValues[15].Reg64;
544
545 /* RIP & Flags */
546 Assert(aenmNames[16] == WHvX64RegisterRip);
547 pCtx->rip = aValues[16].Reg64;
548 pCtx->rflags.u = aValues[17].Reg64;
549
550 /* Segments */
551#define COPY_BACK_SEG(a_idx, a_enmName, a_SReg) \
552 do { \
553 Assert(aenmNames[a_idx] == a_enmName); \
554 (a_SReg).u64Base = aValues[a_idx].Segment.Base; \
555 (a_SReg).u32Limit = aValues[a_idx].Segment.Limit; \
556 (a_SReg).ValidSel = (a_SReg).Sel = aValues[a_idx].Segment.Selector; \
557 (a_SReg).Attr.u = aValues[a_idx].Segment.Attributes; \
558 (a_SReg).fFlags = CPUMSELREG_FLAGS_VALID; \
559 } while (0)
560 COPY_BACK_SEG(18, WHvX64RegisterEs, pCtx->es);
561 COPY_BACK_SEG(19, WHvX64RegisterCs, pCtx->cs);
562 COPY_BACK_SEG(20, WHvX64RegisterSs, pCtx->ss);
563 COPY_BACK_SEG(21, WHvX64RegisterDs, pCtx->ds);
564 COPY_BACK_SEG(22, WHvX64RegisterFs, pCtx->fs);
565 COPY_BACK_SEG(23, WHvX64RegisterGs, pCtx->gs);
566 COPY_BACK_SEG(24, WHvX64RegisterLdtr, pCtx->ldtr);
567 COPY_BACK_SEG(25, WHvX64RegisterTr, pCtx->tr);
568
569 /* Descriptor tables. */
570 Assert(aenmNames[26] == WHvX64RegisterIdtr);
571 pCtx->idtr.cbIdt = aValues[26].Table.Limit;
572 pCtx->idtr.pIdt = aValues[26].Table.Base;
573 Assert(aenmNames[27] == WHvX64RegisterGdtr);
574 pCtx->gdtr.cbGdt = aValues[27].Table.Limit;
575 pCtx->gdtr.pGdt = aValues[27].Table.Base;
576
577 /* Control registers. */
578 Assert(aenmNames[28] == WHvX64RegisterCr0);
579 bool fMaybeChangedMode = false;
580 bool fFlushTlb = false;
581 bool fFlushGlobalTlb = false;
582 if (pCtx->cr0 != aValues[28].Reg64)
583 {
584 CPUMSetGuestCR0(pVCpu, aValues[28].Reg64);
585 fMaybeChangedMode = true;
586 fFlushTlb = fFlushGlobalTlb = true; /// @todo fix this
587 }
588 Assert(aenmNames[29] == WHvX64RegisterCr2);
589 pCtx->cr2 = aValues[29].Reg64;
590 if (pCtx->cr3 != aValues[30].Reg64)
591 {
592 CPUMSetGuestCR3(pVCpu, aValues[30].Reg64);
593 fFlushTlb = true;
594 }
595 if (pCtx->cr4 != aValues[31].Reg64)
596 {
597 CPUMSetGuestCR4(pVCpu, aValues[31].Reg64);
598 fMaybeChangedMode = true;
599 fFlushTlb = fFlushGlobalTlb = true; /// @todo fix this
600 }
601 APICSetTpr(pVCpu, (uint8_t)aValues[32].Reg64 << 4);
602
603 /* Debug registers. */
604 Assert(aenmNames[33] == WHvX64RegisterDr0);
605 /** @todo fixme */
606 if (pCtx->dr[0] != aValues[33].Reg64)
607 CPUMSetGuestDR0(pVCpu, aValues[33].Reg64);
608 if (pCtx->dr[1] != aValues[34].Reg64)
609 CPUMSetGuestDR1(pVCpu, aValues[34].Reg64);
610 if (pCtx->dr[2] != aValues[35].Reg64)
611 CPUMSetGuestDR2(pVCpu, aValues[35].Reg64);
612 if (pCtx->dr[3] != aValues[36].Reg64)
613 CPUMSetGuestDR3(pVCpu, aValues[36].Reg64);
614 Assert(aenmNames[37] == WHvX64RegisterDr6);
615 Assert(aenmNames[38] == WHvX64RegisterDr7);
616 if (pCtx->dr[6] != aValues[37].Reg64)
617 CPUMSetGuestDR6(pVCpu, aValues[37].Reg64);
618 if (pCtx->dr[7] != aValues[38].Reg64)
619 CPUMSetGuestDR6(pVCpu, aValues[38].Reg64);
620
621 /* Vector state. */
622 Assert(aenmNames[39] == WHvX64RegisterXmm0);
623 Assert(aenmNames[54] == WHvX64RegisterXmm15);
624 pCtx->pXStateR3->x87.aXMM[0].uXmm.s.Lo = aValues[39].Reg128.Low64;
625 pCtx->pXStateR3->x87.aXMM[0].uXmm.s.Hi = aValues[39].Reg128.High64;
626 pCtx->pXStateR3->x87.aXMM[1].uXmm.s.Lo = aValues[40].Reg128.Low64;
627 pCtx->pXStateR3->x87.aXMM[1].uXmm.s.Hi = aValues[40].Reg128.High64;
628 pCtx->pXStateR3->x87.aXMM[2].uXmm.s.Lo = aValues[41].Reg128.Low64;
629 pCtx->pXStateR3->x87.aXMM[2].uXmm.s.Hi = aValues[41].Reg128.High64;
630 pCtx->pXStateR3->x87.aXMM[3].uXmm.s.Lo = aValues[42].Reg128.Low64;
631 pCtx->pXStateR3->x87.aXMM[3].uXmm.s.Hi = aValues[42].Reg128.High64;
632 pCtx->pXStateR3->x87.aXMM[4].uXmm.s.Lo = aValues[43].Reg128.Low64;
633 pCtx->pXStateR3->x87.aXMM[4].uXmm.s.Hi = aValues[43].Reg128.High64;
634 pCtx->pXStateR3->x87.aXMM[5].uXmm.s.Lo = aValues[44].Reg128.Low64;
635 pCtx->pXStateR3->x87.aXMM[5].uXmm.s.Hi = aValues[44].Reg128.High64;
636 pCtx->pXStateR3->x87.aXMM[6].uXmm.s.Lo = aValues[45].Reg128.Low64;
637 pCtx->pXStateR3->x87.aXMM[6].uXmm.s.Hi = aValues[45].Reg128.High64;
638 pCtx->pXStateR3->x87.aXMM[7].uXmm.s.Lo = aValues[46].Reg128.Low64;
639 pCtx->pXStateR3->x87.aXMM[7].uXmm.s.Hi = aValues[46].Reg128.High64;
640 pCtx->pXStateR3->x87.aXMM[8].uXmm.s.Lo = aValues[47].Reg128.Low64;
641 pCtx->pXStateR3->x87.aXMM[8].uXmm.s.Hi = aValues[47].Reg128.High64;
642 pCtx->pXStateR3->x87.aXMM[9].uXmm.s.Lo = aValues[48].Reg128.Low64;
643 pCtx->pXStateR3->x87.aXMM[9].uXmm.s.Hi = aValues[48].Reg128.High64;
644 pCtx->pXStateR3->x87.aXMM[10].uXmm.s.Lo = aValues[49].Reg128.Low64;
645 pCtx->pXStateR3->x87.aXMM[10].uXmm.s.Hi = aValues[49].Reg128.High64;
646 pCtx->pXStateR3->x87.aXMM[11].uXmm.s.Lo = aValues[50].Reg128.Low64;
647 pCtx->pXStateR3->x87.aXMM[11].uXmm.s.Hi = aValues[50].Reg128.High64;
648 pCtx->pXStateR3->x87.aXMM[12].uXmm.s.Lo = aValues[51].Reg128.Low64;
649 pCtx->pXStateR3->x87.aXMM[12].uXmm.s.Hi = aValues[51].Reg128.High64;
650 pCtx->pXStateR3->x87.aXMM[13].uXmm.s.Lo = aValues[52].Reg128.Low64;
651 pCtx->pXStateR3->x87.aXMM[13].uXmm.s.Hi = aValues[52].Reg128.High64;
652 pCtx->pXStateR3->x87.aXMM[14].uXmm.s.Lo = aValues[53].Reg128.Low64;
653 pCtx->pXStateR3->x87.aXMM[14].uXmm.s.Hi = aValues[53].Reg128.High64;
654 pCtx->pXStateR3->x87.aXMM[15].uXmm.s.Lo = aValues[54].Reg128.Low64;
655 pCtx->pXStateR3->x87.aXMM[15].uXmm.s.Hi = aValues[54].Reg128.High64;
656
657 /* Floating point state. */
658 Assert(aenmNames[55] == WHvX64RegisterFpMmx0);
659 Assert(aenmNames[62] == WHvX64RegisterFpMmx7);
660 pCtx->pXStateR3->x87.aRegs[0].au64[0] = aValues[55].Fp.AsUINT128.Low64;
661 pCtx->pXStateR3->x87.aRegs[0].au64[1] = aValues[55].Fp.AsUINT128.High64;
662 pCtx->pXStateR3->x87.aRegs[1].au64[0] = aValues[56].Fp.AsUINT128.Low64;
663 pCtx->pXStateR3->x87.aRegs[1].au64[1] = aValues[56].Fp.AsUINT128.High64;
664 pCtx->pXStateR3->x87.aRegs[2].au64[0] = aValues[57].Fp.AsUINT128.Low64;
665 pCtx->pXStateR3->x87.aRegs[2].au64[1] = aValues[57].Fp.AsUINT128.High64;
666 pCtx->pXStateR3->x87.aRegs[3].au64[0] = aValues[58].Fp.AsUINT128.Low64;
667 pCtx->pXStateR3->x87.aRegs[3].au64[1] = aValues[58].Fp.AsUINT128.High64;
668 pCtx->pXStateR3->x87.aRegs[4].au64[0] = aValues[59].Fp.AsUINT128.Low64;
669 pCtx->pXStateR3->x87.aRegs[4].au64[1] = aValues[59].Fp.AsUINT128.High64;
670 pCtx->pXStateR3->x87.aRegs[5].au64[0] = aValues[60].Fp.AsUINT128.Low64;
671 pCtx->pXStateR3->x87.aRegs[5].au64[1] = aValues[60].Fp.AsUINT128.High64;
672 pCtx->pXStateR3->x87.aRegs[6].au64[0] = aValues[61].Fp.AsUINT128.Low64;
673 pCtx->pXStateR3->x87.aRegs[6].au64[1] = aValues[61].Fp.AsUINT128.High64;
674 pCtx->pXStateR3->x87.aRegs[7].au64[0] = aValues[62].Fp.AsUINT128.Low64;
675 pCtx->pXStateR3->x87.aRegs[7].au64[1] = aValues[62].Fp.AsUINT128.High64;
676
677 Assert(aenmNames[63] == WHvX64RegisterFpControlStatus);
678 pCtx->pXStateR3->x87.FCW = aValues[63].FpControlStatus.FpControl;
679 pCtx->pXStateR3->x87.FSW = aValues[63].FpControlStatus.FpStatus;
680 pCtx->pXStateR3->x87.FTW = aValues[63].FpControlStatus.FpTag
681 /*| (aValues[63].FpControlStatus.Reserved << 8)*/;
682 pCtx->pXStateR3->x87.FOP = aValues[63].FpControlStatus.LastFpOp;
683 pCtx->pXStateR3->x87.FPUIP = (uint32_t)aValues[63].FpControlStatus.LastFpRip;
684 pCtx->pXStateR3->x87.CS = (uint16_t)(aValues[63].FpControlStatus.LastFpRip >> 32);
685 pCtx->pXStateR3->x87.Rsrvd1 = (uint16_t)(aValues[63].FpControlStatus.LastFpRip >> 48);
686
687 Assert(aenmNames[64] == WHvX64RegisterXmmControlStatus);
688 pCtx->pXStateR3->x87.FPUDP = (uint32_t)aValues[64].XmmControlStatus.LastFpRdp;
689 pCtx->pXStateR3->x87.DS = (uint16_t)(aValues[64].XmmControlStatus.LastFpRdp >> 32);
690 pCtx->pXStateR3->x87.Rsrvd2 = (uint16_t)(aValues[64].XmmControlStatus.LastFpRdp >> 48);
691 pCtx->pXStateR3->x87.MXCSR = aValues[64].XmmControlStatus.XmmStatusControl;
692 pCtx->pXStateR3->x87.MXCSR_MASK = aValues[64].XmmControlStatus.XmmStatusControlMask; /** @todo ??? (Isn't this an output field?) */
693
694 /* MSRs */
695 // WHvX64RegisterTsc - don't touch
696 Assert(aenmNames[65] == WHvX64RegisterEfer);
697 if (aValues[65].Reg64 != pCtx->msrEFER)
698 {
699 pCtx->msrEFER = aValues[65].Reg64;
700 fMaybeChangedMode = true;
701 }
702
703 Assert(aenmNames[66] == WHvX64RegisterKernelGsBase);
704 pCtx->msrKERNELGSBASE = aValues[66].Reg64;
705
706 Assert(aenmNames[67] == WHvX64RegisterApicBase);
707 if (aValues[67].Reg64 != APICGetBaseMsrNoCheck(pVCpu))
708 {
709 VBOXSTRICTRC rc2 = APICSetBaseMsr(pVCpu, aValues[67].Reg64);
710 Assert(rc2 == VINF_SUCCESS); NOREF(rc2);
711 }
712
713 Assert(aenmNames[68] == WHvX64RegisterPat);
714 pCtx->msrPAT = aValues[68].Reg64;
715 /// @todo WHvX64RegisterSysenterCs
716 /// @todo WHvX64RegisterSysenterEip
717 /// @todo WHvX64RegisterSysenterEsp
718 Assert(aenmNames[72] == WHvX64RegisterStar);
719 pCtx->msrSTAR = aValues[72].Reg64;
720 Assert(aenmNames[73] == WHvX64RegisterLstar);
721 pCtx->msrLSTAR = aValues[73].Reg64;
722 Assert(aenmNames[74] == WHvX64RegisterCstar);
723 pCtx->msrCSTAR = aValues[74].Reg64;
724 Assert(aenmNames[75] == WHvX64RegisterSfmask);
725 pCtx->msrSFMASK = aValues[75].Reg64;
726
727 /// @todo WHvRegisterPendingInterruption
728 Assert(aenmNames[76] == WHvRegisterPendingInterruption);
729 WHV_X64_PENDING_INTERRUPTION_REGISTER const * pPendingInt = (WHV_X64_PENDING_INTERRUPTION_REGISTER const *)&aValues[76];
730 if (pPendingInt->InterruptionPending)
731 {
732 Log7(("PendingInterruption: type=%u vector=%#x errcd=%RTbool/%#x instr-len=%u nested=%u\n",
733 pPendingInt->InterruptionType, pPendingInt->InterruptionVector, pPendingInt->DeliverErrorCode,
734 pPendingInt->ErrorCode, pPendingInt->InstructionLength, pPendingInt->NestedEvent));
735 AssertMsg((pPendingInt->AsUINT64 & UINT64_C(0xfc00)) == 0, ("%#RX64\n", pPendingInt->AsUINT64));
736 }
737
738 /// @todo WHvRegisterInterruptState
739 /// @todo WHvRegisterPendingEvent0
740 /// @todo WHvRegisterPendingEvent1
741
742
743 if (fMaybeChangedMode)
744 {
745 int rc = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
746 AssertRC(rc);
747 }
748 if (fFlushTlb)
749 {
750 int rc = PGMFlushTLB(pVCpu, pCtx->cr3, fFlushGlobalTlb);
751 AssertRC(rc);
752 }
753
754 return VINF_SUCCESS;
755 }
756
757 AssertLogRelMsgFailed(("WHvGetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
758 pVM->nem.s.hPartition, pVCpu->idCpu, cRegs,
759 hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
760 return VERR_INTERNAL_ERROR;
761#endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
762}
763
764
765#ifdef LOG_ENABLED
766/**
767 * Get the virtual processor running status.
768 */
DECLINLINE(VID_PROCESSOR_STATUS) nemHCWinCpuGetRunningStatus(PVMCPU pVCpu)
{
# ifdef IN_RING0
    /* The ring-3 VID device handle (pVMR3->nem.s.hPartitionDevice) cannot be
       used from ring-0, so just report 'undefined' here. */
    NOREF(pVCpu);
    return VidProcessorStatusUndefined;
# else
    /* Save and restore the IPRT last-error variables: this is a logging-only
       query helper and must not clobber the caller's NT status/error state. */
    RTERRVARS Saved;
    RTErrVarsSave(&Saved);

    /*
     * This API is disabled in release builds, it seems.  On build 17101 it requires
     * the following patch to be enabled (windbg): eb vid+12180 0f 84 98 00 00 00
     */
    VID_PROCESSOR_STATUS enmCpuStatus = VidProcessorStatusUndefined;
    NTSTATUS rcNt = g_pfnVidGetVirtualProcessorRunningStatus(pVCpu->pVMR3->nem.s.hPartitionDevice, pVCpu->idCpu, &enmCpuStatus);
    AssertRC(rcNt);

    RTErrVarsRestore(&Saved);
    return enmCpuStatus;
# endif
}
790#endif
791
792
793#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
794
795# ifdef IN_RING3 /* hopefully not needed in ring-0, as we'd need KTHREADs and KeAlertThread. */
796/**
797 * Our own WHvCancelRunVirtualProcessor that can later be moved to ring-0.
798 *
799 * This is an experiment only.
800 *
801 * @returns VBox status code.
802 * @param pVM The cross context VM structure.
803 * @param pVCpu The cross context virtual CPU structure of the
804 * calling EMT.
805 */
NEM_TMPL_STATIC int nemHCWinCancelRunVirtualProcessor(PVM pVM, PVMCPU pVCpu)
{
    /*
     * Work the state.
     *
     * From the looks of things, we should let the EMT call VidStopVirtualProcessor.
     * So, we just need to modify the state and kick the EMT if it's waiting on
     * messages.  For the latter we use QueueUserAPC / KeAlertThread.
     *
     * Lock-free loop: read the VMCPU state and try to CMPXCHG it to the
     * canceled state, retrying (with a pause) on contention.
     */
    for (;;)
    {
        VMCPUSTATE enmState = VMCPU_GET_STATE(pVCpu);
        switch (enmState)
        {
            /* EMT is executing in NEM but not blocked in the message wait:
               flipping the state is enough, it will notice on its own. */
            case VMCPUSTATE_STARTED_EXEC_NEM:
                if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED, VMCPUSTATE_STARTED_EXEC_NEM))
                {
                    Log8(("nemHCWinCancelRunVirtualProcessor: Switched %u to canceled state\n", pVCpu->idCpu));
                    return VINF_SUCCESS;
                }
                break;

            /* EMT is blocked waiting for a message: flip the state and alert
               the thread so the alertable wait in VID.SYS returns. */
            case VMCPUSTATE_STARTED_EXEC_NEM_WAIT:
                if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED, VMCPUSTATE_STARTED_EXEC_NEM_WAIT))
                {
# ifdef IN_RING0
                    /* Note: ring-0 alerting is not implemented yet ('??' placeholder);
                       this function is only compiled under IN_RING3 (see the guard above). */
                    NTSTATUS rcNt = KeAlertThread(??);
# else
                    NTSTATUS rcNt = NtAlertThread(pVCpu->nem.s.hNativeThreadHandle);
# endif
                    Log8(("nemHCWinCancelRunVirtualProcessor: Alerted %u: %#x\n", pVCpu->idCpu, rcNt));
                    Assert(rcNt == STATUS_SUCCESS);
                    if (NT_SUCCESS(rcNt))
                        return VINF_SUCCESS;
                    AssertLogRelMsgFailedReturn(("NtAlertThread failed: %#x\n", rcNt), RTErrConvertFromNtStatus(rcNt));
                }
                break;

            /* Any other state: nothing to cancel. */
            default:
                return VINF_SUCCESS;
        }

        ASMNopPause();
        RT_NOREF(pVM);
    }
}
852# endif /* IN_RING3 */
853
854
855/**
856 * Fills in WHV_VP_EXIT_CONTEXT from HV_X64_INTERCEPT_MESSAGE_HEADER.
857 */
858DECLINLINE(void) nemHCWinConvertX64MsgHdrToVpExitCtx(HV_X64_INTERCEPT_MESSAGE_HEADER const *pHdr, WHV_VP_EXIT_CONTEXT *pCtx)
859{
860 pCtx->ExecutionState.AsUINT16 = pHdr->ExecutionState.AsUINT16;
861 pCtx->InstructionLength = pHdr->InstructionLength;
862 pCtx->Cs.Base = pHdr->CsSegment.Base;
863 pCtx->Cs.Limit = pHdr->CsSegment.Limit;
864 pCtx->Cs.Selector = pHdr->CsSegment.Selector;
865 pCtx->Cs.Attributes = pHdr->CsSegment.Attributes;
866 pCtx->Rip = pHdr->Rip;
867 pCtx->Rflags = pHdr->Rflags;
868}
869
870
871/**
872 * Convert hyper-V exit message to the WinHvPlatform structures.
873 *
874 * @returns VBox status code
875 * @param pMsgHdr The message to convert.
876 * @param pExitCtx The output structure. Assumes zeroed.
877 */
878NEM_TMPL_STATIC int nemHCWinRunVirtualProcessorConvertPending(HV_MESSAGE_HEADER const *pMsgHdr, WHV_RUN_VP_EXIT_CONTEXT *pExitCtx)
879{
880 switch (pMsgHdr->MessageType)
881 {
882 case HvMessageTypeUnmappedGpa:
883 case HvMessageTypeGpaIntercept:
884 {
885 PCHV_X64_MEMORY_INTERCEPT_MESSAGE pMemMsg = (PCHV_X64_MEMORY_INTERCEPT_MESSAGE)(pMsgHdr + 1);
886 Assert(pMsgHdr->PayloadSize == RT_UOFFSETOF(HV_X64_MEMORY_INTERCEPT_MESSAGE, DsSegment));
887
888 pExitCtx->ExitReason = WHvRunVpExitReasonMemoryAccess;
889 nemHCWinConvertX64MsgHdrToVpExitCtx(&pMemMsg->Header, &pExitCtx->MemoryAccess.VpContext);
890 pExitCtx->MemoryAccess.InstructionByteCount = pMemMsg->InstructionByteCount;
891 ((uint64_t *)pExitCtx->MemoryAccess.InstructionBytes)[0] = ((uint64_t const *)pMemMsg->InstructionBytes)[0];
892 ((uint64_t *)pExitCtx->MemoryAccess.InstructionBytes)[1] = ((uint64_t const *)pMemMsg->InstructionBytes)[1];
893
894 pExitCtx->MemoryAccess.AccessInfo.AccessType = pMemMsg->Header.InterceptAccessType;
895 pExitCtx->MemoryAccess.AccessInfo.GpaUnmapped = pMsgHdr->MessageType == HvMessageTypeUnmappedGpa;
896 pExitCtx->MemoryAccess.AccessInfo.GvaValid = pMemMsg->MemoryAccessInfo.GvaValid;
897 pExitCtx->MemoryAccess.AccessInfo.Reserved = pMemMsg->MemoryAccessInfo.Reserved;
898 pExitCtx->MemoryAccess.Gpa = pMemMsg->GuestPhysicalAddress;
899 pExitCtx->MemoryAccess.Gva = pMemMsg->GuestVirtualAddress;
900 return VINF_SUCCESS;
901 }
902
903 case HvMessageTypeX64IoPortIntercept:
904 {
905 PCHV_X64_IO_PORT_INTERCEPT_MESSAGE pPioMsg= (PCHV_X64_IO_PORT_INTERCEPT_MESSAGE)(pMsgHdr + 1);
906 Assert(pMsgHdr->PayloadSize == sizeof(*pPioMsg));
907
908 pExitCtx->ExitReason = WHvRunVpExitReasonX64IoPortAccess;
909 nemHCWinConvertX64MsgHdrToVpExitCtx(&pPioMsg->Header, &pExitCtx->IoPortAccess.VpContext);
910 pExitCtx->IoPortAccess.InstructionByteCount = pPioMsg->InstructionByteCount;
911 ((uint64_t *)pExitCtx->IoPortAccess.InstructionBytes)[0] = ((uint64_t const *)pPioMsg->InstructionBytes)[0];
912 ((uint64_t *)pExitCtx->IoPortAccess.InstructionBytes)[1] = ((uint64_t const *)pPioMsg->InstructionBytes)[1];
913
914 pExitCtx->IoPortAccess.AccessInfo.IsWrite = pPioMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE;
915 pExitCtx->IoPortAccess.AccessInfo.AccessSize = pPioMsg->AccessInfo.AccessSize;
916 pExitCtx->IoPortAccess.AccessInfo.StringOp = pPioMsg->AccessInfo.StringOp;
917 pExitCtx->IoPortAccess.AccessInfo.RepPrefix = pPioMsg->AccessInfo.RepPrefix;
918 pExitCtx->IoPortAccess.AccessInfo.Reserved = pPioMsg->AccessInfo.Reserved;
919 pExitCtx->IoPortAccess.PortNumber = pPioMsg->PortNumber;
920 pExitCtx->IoPortAccess.Rax = pPioMsg->Rax;
921 pExitCtx->IoPortAccess.Rcx = pPioMsg->Rcx;
922 pExitCtx->IoPortAccess.Rsi = pPioMsg->Rsi;
923 pExitCtx->IoPortAccess.Rdi = pPioMsg->Rdi;
924 pExitCtx->IoPortAccess.Ds.Base = pPioMsg->DsSegment.Base;
925 pExitCtx->IoPortAccess.Ds.Limit = pPioMsg->DsSegment.Limit;
926 pExitCtx->IoPortAccess.Ds.Selector = pPioMsg->DsSegment.Selector;
927 pExitCtx->IoPortAccess.Ds.Attributes = pPioMsg->DsSegment.Attributes;
928 pExitCtx->IoPortAccess.Es.Base = pPioMsg->EsSegment.Base;
929 pExitCtx->IoPortAccess.Es.Limit = pPioMsg->EsSegment.Limit;
930 pExitCtx->IoPortAccess.Es.Selector = pPioMsg->EsSegment.Selector;
931 pExitCtx->IoPortAccess.Es.Attributes = pPioMsg->EsSegment.Attributes;
932 return VINF_SUCCESS;
933 }
934
935 case HvMessageTypeX64Halt:
936 {
937 PCHV_X64_HALT_MESSAGE pHaltMsg = (PCHV_X64_HALT_MESSAGE)(pMsgHdr + 1);
938 AssertMsg(pHaltMsg->u64Reserved == 0, ("HALT reserved: %#RX64\n", pHaltMsg->u64Reserved));
939 pExitCtx->ExitReason = WHvRunVpExitReasonX64Halt;
940 return VINF_SUCCESS;
941 }
942
943 case HvMessageTypeX64InterruptWindow:
944 AssertLogRelMsgFailedReturn(("Message type %#x not implemented!\n", pMsgHdr->MessageType), VERR_INTERNAL_ERROR_2);
945
946 case HvMessageTypeInvalidVpRegisterValue:
947 case HvMessageTypeUnrecoverableException:
948 case HvMessageTypeUnsupportedFeature:
949 case HvMessageTypeTlbPageSizeMismatch:
950 AssertLogRelMsgFailedReturn(("Message type %#x not implemented!\n", pMsgHdr->MessageType), VERR_INTERNAL_ERROR_2);
951
952 case HvMessageTypeX64MsrIntercept:
953 case HvMessageTypeX64CpuidIntercept:
954 case HvMessageTypeX64ExceptionIntercept:
955 case HvMessageTypeX64ApicEoi:
956 case HvMessageTypeX64LegacyFpError:
957 case HvMessageTypeX64RegisterIntercept:
958 case HvMessageTypeApicEoi:
959 case HvMessageTypeFerrAsserted:
960 case HvMessageTypeEventLogBufferComplete:
961 case HvMessageTimerExpired:
962 AssertLogRelMsgFailedReturn(("Unexpected message type #x!\n", pMsgHdr->MessageType), VERR_INTERNAL_ERROR_2);
963
964 default:
965 AssertLogRelMsgFailedReturn(("Unknown message type #x!\n", pMsgHdr->MessageType), VERR_INTERNAL_ERROR_2);
966 }
967}
968
969
970/**
971 * Our own WHvRunVirtualProcessor that can later be moved to ring-0.
972 *
973 * This is an experiment only.
974 *
975 * @returns VBox status code.
976 * @param pVM The cross context VM structure.
977 * @param pVCpu The cross context virtual CPU structure of the
978 * calling EMT.
979 * @param pExitCtx Where to return exit information.
980 * @param cbExitCtx Size of the exit information area.
981 */
NEM_TMPL_STATIC int nemHCWinRunVirtualProcessor(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT *pExitCtx, size_t cbExitCtx)
{
    RT_BZERO(pExitCtx, cbExitCtx);

    /*
     * Tell the CPU to execute stuff if we haven't got a pending message.
     *
     * The message slot mapping is shared with VID.SYS; bMsgState tracks where
     * we are in the start / message-pending / stop protocol for this vCPU.
     */
    VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader = (VID_MESSAGE_MAPPING_HEADER volatile *)pVCpu->nem.s.pvMsgSlotMapping;
    uint32_t fHandleAndGetFlags;
    if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
    {
        uint8_t const bMsgState = pVCpu->nem.s.bMsgState;
        if (bMsgState == NEM_WIN_MSG_STATE_PENDING_MSG)
        {
            /* A message from the previous run is still unhandled; ACK it and
               fetch the next one in a single call below. */
            Assert(pMappingHeader->enmVidMsgType == VidMessageHypervisorMessage);
            fHandleAndGetFlags = VID_MSHAGN_F_GET_NEXT_MESSAGE | VID_MSHAGN_F_HANDLE_MESSAGE;
            Log8(("nemHCWinRunVirtualProcessor: #1: msg pending, no need to start CPU (cpu state %u)\n", nemHCWinCpuGetRunningStatus(pVCpu) ));
        }
        else if (bMsgState != NEM_WIN_MSG_STATE_STARTED)
        {
            if (bMsgState == NEM_WIN_MSG_STATE_PENDING_STOP_AND_MSG)
            {
                Log8(("nemHCWinRunVirtualProcessor: #0: pending stop+message (cpu status %u)\n", nemHCWinCpuGetRunningStatus(pVCpu) ));
                /* ACK the pending message and get the stop message. */
                BOOL fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu,
                                                                 VID_MSHAGN_F_HANDLE_MESSAGE | VID_MSHAGN_F_GET_NEXT_MESSAGE, 5000);
                AssertLogRelMsg(fWait, ("dwErr=%u (%#x) rcNt=%#x\n", RTNtLastErrorValue(), RTNtLastErrorValue(), RTNtLastStatusValue()));

                /* ACK the stop message. */
                fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu,
                                                            VID_MSHAGN_F_HANDLE_MESSAGE, 5000);
                AssertLogRelMsg(fWait, ("dwErr=%u (%#x) rcNt=%#x\n", RTNtLastErrorValue(), RTNtLastErrorValue(), RTNtLastStatusValue()));

                pVCpu->nem.s.bMsgState = NEM_WIN_MSG_STATE_STOPPED;
            }

            /* The vCPU is stopped; (re)start it. */
            Log8(("nemHCWinRunVirtualProcessor: #1: starting CPU (cpu status %u)\n", nemHCWinCpuGetRunningStatus(pVCpu) ));
            if (g_pfnVidStartVirtualProcessor(pVM->nem.s.hPartitionDevice, pVCpu->idCpu))
                pVCpu->nem.s.bMsgState = NEM_WIN_MSG_STATE_STARTED;
            else
            {
                /* Roll back the VMCPU state before failing. */
                VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM);
                AssertLogRelMsgFailedReturn(("VidStartVirtualProcessor failed for CPU #%u: rcNt=%#x dwErr=%u\n",
                                             pVCpu->idCpu, RTNtLastStatusValue(), RTNtLastErrorValue()),
                                            VERR_INTERNAL_ERROR_3);
            }
            fHandleAndGetFlags = VID_MSHAGN_F_GET_NEXT_MESSAGE;
        }
        else
        {
            /* This shouldn't happen. */
            fHandleAndGetFlags = VID_MSHAGN_F_GET_NEXT_MESSAGE;
            Log8(("nemHCWinRunVirtualProcessor: #1: NO MSG PENDING! No need to start CPU (cpu state %u)\n", nemHCWinCpuGetRunningStatus(pVCpu) ));
        }
    }
    else
    {
        /* Somebody canceled us before we got going (state was no longer
           VMCPUSTATE_STARTED); report a 'canceled' exit. */
        Log8(("nemHCWinRunVirtualProcessor: #1: state=%u -> canceled (cpu status %u)\n",
              VMCPU_GET_STATE(pVCpu), nemHCWinCpuGetRunningStatus(pVCpu)));
        pExitCtx->ExitReason = WHvRunVpExitReasonCanceled;
        return VINF_SUCCESS;
    }

    /*
     * Wait for it to stop and give us a reason to work with.
     */
    uint32_t cMillies = 5000; // Starting low so we can experiment without getting stuck.
    for (;;)
    {
        if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_WAIT, VMCPUSTATE_STARTED_EXEC_NEM))
        {
            Log8(("nemHCWinRunVirtualProcessor: #2: Waiting %#x (cpu status %u)...\n",
                  fHandleAndGetFlags, nemHCWinCpuGetRunningStatus(pVCpu)));
            BOOL fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu,
                                                             fHandleAndGetFlags, cMillies);
            if (fWait)
            {
                /* Not sure yet, but we have to check whether there is anything pending
                   and retry if there isn't. */
                VID_MESSAGE_TYPE const enmVidMsgType = pMappingHeader->enmVidMsgType;
                if (enmVidMsgType == VidMessageHypervisorMessage)
                {
                    /* Got a hypervisor message: leave NEM-exec state and hand the
                       message payload (right after the mapping header) to the converter. */
                    if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_WAIT))
                        VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
                    Log8(("nemHCWinRunVirtualProcessor: #3: wait succeeded: %#x / %#x (cpu status %u)\n",
                          enmVidMsgType, ((HV_MESSAGE_HEADER const *)(pMappingHeader + 1))->MessageType,
                          nemHCWinCpuGetRunningStatus(pVCpu) ));
                    pVCpu->nem.s.bMsgState = NEM_WIN_MSG_STATE_PENDING_MSG;
                    return nemHCWinRunVirtualProcessorConvertPending((HV_MESSAGE_HEADER const *)(pMappingHeader + 1), pExitCtx);
                }

                /* This shouldn't happen, and I think its wrong. */
                VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
#ifdef DEBUG_bird
                __debugbreak();
#endif
                Log8(("nemHCWinRunVirtualProcessor: #3: wait succeeded, but nothing pending: %#x / %#x (cpu status %u)\n",
                      enmVidMsgType, ((HV_MESSAGE_HEADER const *)(pMappingHeader + 1))->MessageType, nemHCWinCpuGetRunningStatus(pVCpu) ));
                pVCpu->nem.s.bMsgState = NEM_WIN_MSG_STATE_STARTED;
                AssertLogRelMsgReturnStmt(enmVidMsgType == VidMessageStopRequestComplete,
                                          ("enmVidMsgType=%#x\n", enmVidMsgType),
                                          g_pfnVidStopVirtualProcessor(pVM->nem.s.hPartitionDevice, pVCpu->idCpu),
                                          VERR_INTERNAL_ERROR_3);
                fHandleAndGetFlags &= ~VID_MSHAGN_F_HANDLE_MESSAGE;
            }
            else
            {
                /* Wait failed/timed out; go back to the non-waiting exec state and retry. */
                VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);

                /* Note! VID.SYS merges STATUS_ALERTED and STATUS_USER_APC into STATUS_TIMEOUT. */
                DWORD const dwErr = RTNtLastErrorValue();
                AssertLogRelMsgReturnStmt(   dwErr == STATUS_TIMEOUT
                                          || dwErr == STATUS_ALERTED || dwErr == STATUS_USER_APC, /* just in case */
                                          ("dwErr=%u (%#x) (cpu status %u)\n", dwErr, dwErr, nemHCWinCpuGetRunningStatus(pVCpu)),
                                          g_pfnVidStopVirtualProcessor(pVM->nem.s.hPartitionDevice, pVCpu->idCpu),
                                          VERR_INTERNAL_ERROR_3);
                Log8(("nemHCWinRunVirtualProcessor: #3: wait timed out (cpu status %u)\n", nemHCWinCpuGetRunningStatus(pVCpu) ));
                pVCpu->nem.s.bMsgState = NEM_WIN_MSG_STATE_STARTED;
                fHandleAndGetFlags &= ~VID_MSHAGN_F_HANDLE_MESSAGE;
            }
        }
        else
        {
            /*
             * State changed and we need to return.
             *
             * We must ensure that the processor is not running while we
             * return, and that can be a bit complicated.
             */
            Log8(("nemHCWinRunVirtualProcessor: #4: state changed to %u (cpu status %u)\n",
                  VMCPU_GET_STATE(pVCpu), nemHCWinCpuGetRunningStatus(pVCpu) ));
            VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);

            /* If we haven't marked the previous message as handled, simply return
               without doing anything special. */
            if (fHandleAndGetFlags & VID_MSHAGN_F_HANDLE_MESSAGE)
            {
                Log8(("nemHCWinRunVirtualProcessor: #5: Didn't resume previous message.\n"));
                pVCpu->nem.s.bMsgState = NEM_WIN_MSG_STATE_PENDING_MSG;
                pExitCtx->ExitReason = WHvRunVpExitReasonCanceled;
                return VINF_SUCCESS;
            }

            /* The processor is running, so try stop it. */
            BOOL fStop = g_pfnVidStopVirtualProcessor(pVM->nem.s.hPartitionDevice, pVCpu->idCpu);
            if (fStop)
            {
                Log8(("nemHCWinRunVirtualProcessor: #5: Stopping CPU succeeded (cpu status %u)\n", nemHCWinCpuGetRunningStatus(pVCpu) ));
                pVCpu->nem.s.bMsgState = NEM_WIN_MSG_STATE_STOPPED;
                pExitCtx->ExitReason = WHvRunVpExitReasonCanceled;
                return VINF_SUCCESS;
            }

            /* Dang, the CPU stopped by itself with a message pending. */
            DWORD dwErr = RTNtLastErrorValue();
            Log8(("nemHCWinRunVirtualProcessor: #5: Stopping CPU failed (%u/%#x) - cpu status %u\n",
                  dwErr, dwErr, nemHCWinCpuGetRunningStatus(pVCpu) ));
            pExitCtx->ExitReason = WHvRunVpExitReasonCanceled;
            AssertLogRelMsgReturn(dwErr == ERROR_VID_STOP_PENDING, ("dwErr=%#u\n", dwErr), VERR_INTERNAL_ERROR_3);

            /* Get the pending message. */
            BOOL fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu,
                                                             VID_MSHAGN_F_GET_NEXT_MESSAGE, 5000);
            AssertLogRelMsgReturn(fWait, ("error=%#u\n", RTNtLastErrorValue()), VERR_INTERNAL_ERROR_3);

            VID_MESSAGE_TYPE const enmVidMsgType = pMappingHeader->enmVidMsgType;
            if (enmVidMsgType == VidMessageHypervisorMessage)
            {
                Log8(("nemHCWinRunVirtualProcessor: #6: wait succeeded: %#x / %#x (cpu status %u)\n", enmVidMsgType,
                      ((HV_MESSAGE_HEADER const *)(pMappingHeader + 1))->MessageType, nemHCWinCpuGetRunningStatus(pVCpu) ));
                pVCpu->nem.s.bMsgState = NEM_WIN_MSG_STATE_PENDING_STOP_AND_MSG;
                return nemHCWinRunVirtualProcessorConvertPending((HV_MESSAGE_HEADER const *)(pMappingHeader + 1), pExitCtx);
            }

            /* ACK the stop message, if that's what it is.  Don't think we'll ever get here. */
            Log8(("nemHCWinRunVirtualProcessor: #6b: wait succeeded: %#x / %#x (cpu status %u)\n", enmVidMsgType,
                  ((HV_MESSAGE_HEADER const *)(pMappingHeader + 1))->MessageType, nemHCWinCpuGetRunningStatus(pVCpu) ));
            AssertLogRelMsgReturn(enmVidMsgType == VidMessageStopRequestComplete, ("enmVidMsgType=%#x\n", enmVidMsgType),
                                  VERR_INTERNAL_ERROR_3);
            fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu,
                                                        VID_MSHAGN_F_HANDLE_MESSAGE, 5000);
            AssertLogRelMsgReturn(fWait, ("dwErr=%#u\n", RTNtLastErrorValue()), VERR_INTERNAL_ERROR_3);

            pVCpu->nem.s.bMsgState = NEM_WIN_MSG_STATE_STOPPED;
            pExitCtx->ExitReason = WHvRunVpExitReasonCanceled;
            return VINF_SUCCESS;
        }

        /** @todo check flags and stuff? */
    }
}
1173
1174#endif /* NEM_WIN_USE_OUR_OWN_RUN_API */
1175
1176#ifdef LOG_ENABLED
1177/**
1178 * Logs the current CPU state.
1179 */
NEM_TMPL_STATIC void nemHCWinLogState(PVM pVM, PVMCPU pVCpu)
{
    /* Only do the (expensive) formatting when level-3 logging is actually enabled. */
    if (LogIs3Enabled())
    {
# ifdef IN_RING3
        /* Format the complete guest register state via the DBGF register formatter. */
        char szRegs[4096];
        DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
                        "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
                        "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
                        "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
                        "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
                        "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
                        "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
                        "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
                        "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
                        "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
                        "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
                        "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
                        "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
                        "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
                        "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim}  idtr=%016VR{idtr_base}:%04VR{idtr_lim}  rflags=%08VR{rflags}\n"
                        "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
                        "tr  ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
                        "    sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
                        "        efer=%016VR{efer}\n"
                        "         pat=%016VR{pat}\n"
                        "     sf_mask=%016VR{sf_mask}\n"
                        "krnl_gs_base=%016VR{krnl_gs_base}\n"
                        "       lstar=%016VR{lstar}\n"
                        "        star=%016VR{star} cstar=%016VR{cstar}\n"
                        "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
                        );

        /* Disassemble the instruction at the current guest RIP as well. */
        char szInstr[256];
        DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
                           DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
                           szInstr, sizeof(szInstr), NULL);
        Log3(("%s%s\n", szRegs, szInstr));
# else
        /** @todo stat logging in ring-0 */
        RT_NOREF(pVM, pVCpu);
# endif
    }
}
1224#endif /* LOG_ENABLED */
1225
1226
1227/**
1228 * Advances the guest RIP and clear EFLAGS.RF.
1229 *
1230 * This may clear VMCPU_FF_INHIBIT_INTERRUPTS.
1231 *
1232 * @param pVCpu The cross context virtual CPU structure.
1233 * @param pCtx The CPU context to update.
1234 * @param pExitCtx The exit context.
1235 */
1236DECLINLINE(void) nemHCWinAdvanceGuestRipAndClearRF(PVMCPU pVCpu, PCPUMCTX pCtx, WHV_VP_EXIT_CONTEXT const *pExitCtx)
1237{
1238 /* Advance the RIP. */
1239 Assert(pExitCtx->InstructionLength > 0 && pExitCtx->InstructionLength < 16);
1240 pCtx->rip += pExitCtx->InstructionLength;
1241 pCtx->rflags.Bits.u1RF = 0;
1242
1243 /* Update interrupt inhibition. */
1244 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1245 { /* likely */ }
1246 else if (pCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
1247 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1248}
1249
1250
1251NEM_TMPL_STATIC VBOXSTRICTRC
1252nemHCWinHandleHalt(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1253{
1254 NOREF(pVM); NOREF(pVCpu); NOREF(pCtx);
1255 LogFlow(("nemHCWinHandleHalt\n"));
1256 return VINF_EM_HALT;
1257}
1258
1259
/**
 * @callback_method_impl{FNPGMPHYSNEMENUMCALLBACK,
 *      Unmaps a single guest page, used when unmapping everything after a
 *      failed single-page unmap.  Always returns VINF_SUCCESS so the
 *      enumeration continues. }
 */
NEM_TMPL_STATIC DECLCALLBACK(int)
nemHCWinUnmapOnePageCallback(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint8_t *pu2NemState, void *pvUser)
{
    RT_NOREF_PV(pvUser);
#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
    /* Hypercall path: unmap via ring-0 hypercall. */
    int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
#else
    /* WinHvPlatform path: unmap the single page via the platform API. */
    RT_NOREF_PV(pVCpu);
    HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
    if (SUCCEEDED(hrc))
#endif
    {
        Log5(("NEM GPA unmap all: %RGp (cMappedPages=%u)\n", GCPhys, pVM->nem.s.cMappedPages - 1));
        *pu2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
    }
    else
    {
        /* On failure we don't know the actual mapping state any longer. */
#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
        LogRel(("nemR3WinUnmapOnePageCallback: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));
#else
        LogRel(("nemR3WinUnmapOnePageCallback: GCPhys=%RGp %s hrc=%Rhrc (%#x) Last=%#x/%u (cMappedPages=%u)\n",
                GCPhys, g_apszPageStates[*pu2NemState], hrc, hrc, RTNtLastStatusValue(),
                RTNtLastErrorValue(), pVM->nem.s.cMappedPages));
#endif
        *pu2NemState = NEM_WIN_PAGE_STATE_NOT_SET;
    }
    /* Keep the mapped-page statistics roughly in sync (guard against underflow). */
    if (pVM->nem.s.cMappedPages > 0)
        ASMAtomicDecU32(&pVM->nem.s.cMappedPages);
    return VINF_SUCCESS;
}
1292
1293
1294/**
1295 * State to pass between nemHCWinHandleMemoryAccess / nemR3WinWHvHandleMemoryAccess
1296 * and nemHCWinHandleMemoryAccessPageCheckerCallback.
1297 */
1298typedef struct NEMHCWINHMACPCCSTATE
1299{
1300 /** Input: Write access. */
1301 bool fWriteAccess;
1302 /** Output: Set if we did something. */
1303 bool fDidSomething;
1304 /** Output: Set it we should resume. */
1305 bool fCanResume;
1306} NEMHCWINHMACPCCSTATE;
1307
1308/**
1309 * @callback_method_impl{FNPGMPHYSNEMCHECKPAGE,
1310 * Worker for nemR3WinHandleMemoryAccess; pvUser points to a
1311 * NEMHCWINHMACPCCSTATE structure. }
1312 */
1313NEM_TMPL_STATIC DECLCALLBACK(int)
1314nemHCWinHandleMemoryAccessPageCheckerCallback(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
1315{
1316 NEMHCWINHMACPCCSTATE *pState = (NEMHCWINHMACPCCSTATE *)pvUser;
1317 pState->fDidSomething = false;
1318 pState->fCanResume = false;
1319
1320 /* If A20 is disabled, we may need to make another query on the masked
1321 page to get the correct protection information. */
1322 uint8_t u2State = pInfo->u2NemState;
1323 RTGCPHYS GCPhysSrc;
1324 if ( pVM->nem.s.fA20Enabled
1325 || !NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
1326 GCPhysSrc = GCPhys;
1327 else
1328 {
1329 GCPhysSrc = GCPhys & ~(RTGCPHYS)RT_BIT_32(20);
1330 PGMPHYSNEMPAGEINFO Info2;
1331 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhysSrc, pState->fWriteAccess, &Info2, NULL, NULL);
1332 AssertRCReturn(rc, rc);
1333
1334 *pInfo = Info2;
1335 pInfo->u2NemState = u2State;
1336 }
1337
1338 /*
1339 * Consolidate current page state with actual page protection and access type.
1340 * We don't really consider downgrades here, as they shouldn't happen.
1341 */
1342#ifndef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1343 /** @todo Someone at microsoft please explain:
1344 * I'm not sure WTF was going on, but I ended up in a loop if I remapped a
1345 * readonly page as writable (unmap, then map again). Specifically, this was an
1346 * issue with the big VRAM mapping at 0xe0000000 when booing DSL 4.4.1. So, in
1347 * a hope to work around that we no longer pre-map anything, just unmap stuff
1348 * and do it lazily here. And here we will first unmap, restart, and then remap
1349 * with new protection or backing.
1350 */
1351#endif
1352 int rc;
1353 switch (u2State)
1354 {
1355 case NEM_WIN_PAGE_STATE_UNMAPPED:
1356 case NEM_WIN_PAGE_STATE_NOT_SET:
1357 if (pInfo->fNemProt == NEM_PAGE_PROT_NONE)
1358 {
1359 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #1\n", GCPhys));
1360 return VINF_SUCCESS;
1361 }
1362
1363 /* Don't bother remapping it if it's a write request to a non-writable page. */
1364 if ( pState->fWriteAccess
1365 && !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE))
1366 {
1367 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #1w\n", GCPhys));
1368 return VINF_SUCCESS;
1369 }
1370
1371 /* Map the page. */
1372 rc = nemHCNativeSetPhysPage(pVM,
1373 pVCpu,
1374 GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
1375 GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
1376 pInfo->fNemProt,
1377 &u2State,
1378 true /*fBackingState*/);
1379 pInfo->u2NemState = u2State;
1380 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - synced => %s + %Rrc\n",
1381 GCPhys, g_apszPageStates[u2State], rc));
1382 pState->fDidSomething = true;
1383 pState->fCanResume = true;
1384 return rc;
1385
1386 case NEM_WIN_PAGE_STATE_READABLE:
1387 if ( !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1388 && (pInfo->fNemProt & (NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE)))
1389 {
1390 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #2\n", GCPhys));
1391 return VINF_SUCCESS;
1392 }
1393
1394#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1395 /* Upgrade page to writable. */
1396/** @todo test this*/
1397 if ( (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1398 && pState->fWriteAccess)
1399 {
1400 rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhys,
1401 HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE
1402 | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
1403 AssertRC(rc);
1404 if (RT_SUCCESS(rc))
1405 {
1406 pInfo->u2NemState = NEM_WIN_PAGE_STATE_WRITABLE;
1407 pState->fDidSomething = true;
1408 pState->fCanResume = true;
1409 Log5(("NEM GPA write-upgrade/exit: %RGp (was %s, cMappedPages=%u)\n",
1410 GCPhys, g_apszPageStates[u2State], pVM->nem.s.cMappedPages));
1411 }
1412 }
1413 else
1414 {
1415 /* Need to emulate the acces. */
1416 AssertBreak(pInfo->fNemProt != NEM_PAGE_PROT_NONE); /* There should be no downgrades. */
1417 rc = VINF_SUCCESS;
1418 }
1419 return rc;
1420#else
1421 break;
1422#endif
1423
1424 case NEM_WIN_PAGE_STATE_WRITABLE:
1425 if (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1426 {
1427 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #3\n", GCPhys));
1428 return VINF_SUCCESS;
1429 }
1430#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1431 AssertFailed(); /* There should be no downgrades. */
1432#endif
1433 break;
1434
1435 default:
1436 AssertLogRelMsgFailedReturn(("u2State=%#x\n", u2State), VERR_INTERNAL_ERROR_3);
1437 }
1438
1439 /*
1440 * Unmap and restart the instruction.
1441 * If this fails, which it does every so often, just unmap everything for now.
1442 */
1443#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1444 rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys);
1445 AssertRC(rc);
1446 if (RT_SUCCESS(rc))
1447#else
1448 /** @todo figure out whether we mess up the state or if it's WHv. */
1449 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
1450 if (SUCCEEDED(hrc))
1451#endif
1452 {
1453 pState->fDidSomething = true;
1454 pState->fCanResume = true;
1455 pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
1456 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
1457 Log5(("NEM GPA unmapped/exit: %RGp (was %s, cMappedPages=%u)\n", GCPhys, g_apszPageStates[u2State], cMappedPages));
1458 return VINF_SUCCESS;
1459 }
1460#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1461 LogRel(("nemHCWinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhys, rc));
1462 return rc;
1463#else
1464 LogRel(("nemHCWinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp %s hrc=%Rhrc (%#x) Last=%#x/%u (cMappedPages=%u)\n",
1465 GCPhys, g_apszPageStates[u2State], hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue(),
1466 pVM->nem.s.cMappedPages));
1467
1468 PGMPhysNemEnumPagesByState(pVM, pVCpu, NEM_WIN_PAGE_STATE_READABLE, nemR3WinUnmapOnePageCallback, NULL);
1469 Log(("nemHCWinHandleMemoryAccessPageCheckerCallback: Unmapped all (cMappedPages=%u)\n", pVM->nem.s.cMappedPages));
1470
1471 pState->fDidSomething = true;
1472 pState->fCanResume = true;
1473 pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
1474 return VINF_SUCCESS;
1475#endif
1476}
1477
1478
#if 0 /* later */
/**
 * The run loop for the WinHvPlatform / Hyper-V backend.
 *
 * Note! This code is currently disabled (#if 0) pending refactoring.
 *
 * @returns Strict VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinRunGC(PVM pVM, PVMCPU pVCpu)
{
#ifdef LOG_ENABLED
    /* Dump the incoming CPU state when verbose (level 3) logging is on. */
    if (LogIs3Enabled())
    {
        Log3(("nemR3NativeRunGC: Entering #%u\n", pVCpu->idCpu));
        nemR3WinLogState(pVM, pVCpu);
    }
#endif

    /*
     * The run loop.
     *
     * Current approach to state updating to use the sledgehammer and sync
     * everything every time. This will be optimized later.
     */
    const bool   fSingleStepping = false; /** @todo get this from somewhere. */
    VBOXSTRICTRC rcStrict        = VINF_SUCCESS;
    for (unsigned iLoop = 0;;iLoop++)
    {
        /*
         * Copy the state over to Hyper-V before executing.
         */
        PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
        int rc2 = nemHCWinCopyStateToHyperV(pVM, pVCpu, pCtx);
        AssertRCBreakStmt(rc2, rcStrict = rc2);

        /*
         * Run a bit, provided no force flags demand immediate return to ring-3/EM.
         */
        WHV_RUN_VP_EXIT_CONTEXT ExitReason;
        RT_ZERO(ExitReason);
        if (   !VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
            && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
        {
#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
            /* Note: reusing the outer rc2 here; a fresh 'int rc2' would shadow it. */
            rc2 = nemR3WinRunVirtualProcessor(pVM, pVCpu, &ExitReason, sizeof(ExitReason));
            AssertRCBreakStmt(rc2, rcStrict = rc2);
#else
            Log8(("Calling WHvRunVirtualProcessor\n"));
            VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED);
            HRESULT hrc = WHvRunVirtualProcessor(pVM->nem.s.hPartition, pVCpu->idCpu, &ExitReason, sizeof(ExitReason));
            VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM);
            AssertLogRelMsgBreakStmt(SUCCEEDED(hrc),
                                     ("WHvRunVirtualProcessor(%p, %u,,) -> %Rhrc (Last=%#x/%u)\n", pVM->nem.s.hPartition, pVCpu->idCpu,
                                      hrc, RTNtLastStatusValue(), RTNtLastErrorValue()),
                                     rcStrict = VERR_INTERNAL_ERROR);
            Log2(("WHvRunVirtualProcessor -> %#x; exit code %#x (%d) (cpu status %u)\n",
                  hrc, ExitReason.ExitReason, ExitReason.ExitReason, nemR3WinCpuGetRunningStatus(pVCpu) ));
#endif
        }
        else
        {
            LogFlow(("nemR3NativeRunGC: returning: pending FF (pre exec)\n"));
            break;
        }

        /*
         * Copy back the state.
         */
        rc2 = nemR3WinCopyStateFromHyperV(pVM, pVCpu, pCtx);
        AssertRCBreakStmt(rc2, rcStrict = rc2);

#ifdef LOG_ENABLED
        /*
         * Do some logging.
         */
        if (LogIs2Enabled())
            nemR3WinLogExitReason(&ExitReason);
        if (LogIs3Enabled())
            nemR3WinLogState(pVM, pVCpu);
#endif

#ifdef VBOX_STRICT
        /* Assert that the VpContext field makes sense.  (The VpContext member is
           at the same offset for all the exit types checked here, so IoPortAccess
           is used to access it regardless of the actual exit reason.) */
        switch (ExitReason.ExitReason)
        {
            case WHvRunVpExitReasonMemoryAccess:
            case WHvRunVpExitReasonX64IoPortAccess:
            case WHvRunVpExitReasonX64MsrAccess:
            case WHvRunVpExitReasonX64Cpuid:
            case WHvRunVpExitReasonException:
            case WHvRunVpExitReasonUnrecoverableException:
                Assert(   ExitReason.IoPortAccess.VpContext.InstructionLength > 0
                       || (   ExitReason.ExitReason == WHvRunVpExitReasonMemoryAccess
                           && ExitReason.MemoryAccess.AccessInfo.AccessType == WHvMemoryAccessExecute));
                Assert(ExitReason.IoPortAccess.VpContext.InstructionLength < 16);
                Assert(ExitReason.IoPortAccess.VpContext.ExecutionState.Cpl == CPUMGetGuestCPL(pVCpu));
                Assert(ExitReason.IoPortAccess.VpContext.ExecutionState.Cr0Pe == RT_BOOL(pCtx->cr0 & X86_CR0_PE));
                Assert(ExitReason.IoPortAccess.VpContext.ExecutionState.Cr0Am == RT_BOOL(pCtx->cr0 & X86_CR0_AM));
                Assert(ExitReason.IoPortAccess.VpContext.ExecutionState.EferLma == RT_BOOL(pCtx->msrEFER & MSR_K6_EFER_LMA));
                Assert(ExitReason.IoPortAccess.VpContext.ExecutionState.DebugActive == RT_BOOL(pCtx->dr[7] & X86_DR7_ENABLED_MASK));
                Assert(ExitReason.IoPortAccess.VpContext.ExecutionState.Reserved0 == 0);
                Assert(ExitReason.IoPortAccess.VpContext.ExecutionState.Reserved1 == 0);
                Assert(ExitReason.IoPortAccess.VpContext.Rip == pCtx->rip);
                Assert(ExitReason.IoPortAccess.VpContext.Rflags == pCtx->rflags.u);
                Assert(   ExitReason.IoPortAccess.VpContext.Cs.Base     == pCtx->cs.u64Base
                       && ExitReason.IoPortAccess.VpContext.Cs.Limit    == pCtx->cs.u32Limit
                       && ExitReason.IoPortAccess.VpContext.Cs.Selector == pCtx->cs.Sel);
                break;
            default: break; /* shut up compiler. */
        }
#endif

        /*
         * Deal with the exit.
         */
        switch (ExitReason.ExitReason)
        {
            /* Frequent exits: */
            case WHvRunVpExitReasonCanceled:
            case WHvRunVpExitReasonAlerted:
                rcStrict = VINF_SUCCESS;
                break;

            case WHvRunVpExitReasonX64Halt:
                rcStrict = nemR3WinHandleHalt(pVM, pVCpu, pCtx);
                break;

            case WHvRunVpExitReasonMemoryAccess:
                rcStrict = nemR3WinHandleMemoryAccess(pVM, pVCpu, pCtx, &ExitReason.MemoryAccess);
                break;

            case WHvRunVpExitReasonX64IoPortAccess:
                rcStrict = nemR3WinHandleIoPortAccess(pVM, pVCpu, pCtx, &ExitReason.IoPortAccess);
                break;

            case WHvRunVpExitReasonX64InterruptWindow:
                rcStrict = nemR3WinHandleInterruptWindow(pVM, pVCpu, pCtx, &ExitReason);
                break;

            case WHvRunVpExitReasonX64MsrAccess: /* needs configuring */
                rcStrict = nemR3WinHandleMsrAccess(pVM, pVCpu, pCtx, &ExitReason);
                break;

            case WHvRunVpExitReasonX64Cpuid: /* needs configuring */
                rcStrict = nemR3WinHandleCpuId(pVM, pVCpu, pCtx, &ExitReason);
                break;

            case WHvRunVpExitReasonException: /* needs configuring */
                rcStrict = nemR3WinHandleException(pVM, pVCpu, pCtx, &ExitReason);
                break;

            /* Unlikely exits: */
            case WHvRunVpExitReasonUnsupportedFeature:
                rcStrict = nemR3WinHandleUD(pVM, pVCpu, pCtx, &ExitReason);
                break;

            case WHvRunVpExitReasonUnrecoverableException:
                rcStrict = nemR3WinHandleTripleFault(pVM, pVCpu, pCtx, &ExitReason);
                break;

            case WHvRunVpExitReasonInvalidVpRegisterValue:
                rcStrict = nemR3WinHandleInvalidState(pVM, pVCpu, pCtx, &ExitReason);
                break;

            /* Undesired exits: */
            case WHvRunVpExitReasonNone:
            default:
                AssertLogRelMsgFailed(("Unknown ExitReason: %#x\n", ExitReason.ExitReason));
                rcStrict = VERR_INTERNAL_ERROR_3;
                break;
        }
        if (rcStrict != VINF_SUCCESS)
        {
            LogFlow(("nemR3NativeRunGC: returning: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
            break;
        }

#ifndef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
        /* Hack alert! Keep the number of mapped pages below 4000 by unmapping
           everything once the threshold is hit (resynced lazily afterwards). */
        uint32_t const cMappedPages = pVM->nem.s.cMappedPages;
        if (cMappedPages < 4000)
        { /* likely */ }
        else
        {
            PGMPhysNemEnumPagesByState(pVM, pVCpu, NEM_WIN_PAGE_STATE_READABLE, nemR3WinUnmapOnePageCallback, NULL);
            Log(("nemR3NativeRunGC: Unmapped all; cMappedPages=%u -> %u\n", cMappedPages, pVM->nem.s.cMappedPages));
        }
#endif

        /* If any FF is pending, return to the EM loops.  That's okay for the
           current sledgehammer approach. */
        if (   VM_FF_IS_PENDING(   pVM,   !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK    : VM_FF_HP_R0_PRE_HM_STEP_MASK)
            || VMCPU_FF_IS_PENDING(pVCpu, !fSingleStepping ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
        {
            LogFlow(("nemR3NativeRunGC: returning: pending FF (%#x / %#x)\n", pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions));
            break;
        }
    }

    return rcStrict;
}
#endif /* later */
1674
1675
1676#endif /* IN_RING0 */
1677
1678
1679/**
1680 * @callback_method_impl{FNPGMPHYSNEMCHECKPAGE}
1681 */
NEM_TMPL_STATIC DECLCALLBACK(int) nemHCWinUnsetForA20CheckerCallback(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys,
                                                                     PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
{
    /* We'll just unmap the memory. */
    /* Only pages currently mapped (readable/writable) need any work; unmapped
       and not-set states are left as-is. */
    if (pInfo->u2NemState > NEM_WIN_PAGE_STATE_UNMAPPED)
    {
#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
        /* Hypercall path: unmap the single page via the Hyper-V hypercall interface. */
        int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys);
        AssertRC(rc);
        if (RT_SUCCESS(rc))
#else
        /* Ring-3 WinHvPlatform path: unmap one page (X86_PAGE_SIZE) from the partition. */
        HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
        if (SUCCEEDED(hrc))
#endif
        {
            /* Success: drop the mapped-page count and record the new state for PGM. */
            uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
            Log5(("NEM GPA unmapped/A20: %RGp (was %s, cMappedPages=%u)\n", GCPhys, g_apszPageStates[pInfo->u2NemState], cMappedPages));
            pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
        }
        else
        {
#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
            LogRel(("nemHCWinUnsetForA20CheckerCallback/unmap: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));
            return rc;
#else
            LogRel(("nemHCWinUnsetForA20CheckerCallback/unmap: GCPhys=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
                    GCPhys, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
            return VERR_INTERNAL_ERROR_2;
#endif
        }
    }
    RT_NOREF(pVCpu, pvUser);
    return VINF_SUCCESS;
}
1716
1717
1718/**
1719 * Unmaps a page from Hyper-V for the purpose of emulating A20 gate behavior.
1720 *
1721 * @returns The PGMPhysNemQueryPageInfo result.
1722 * @param pVM The cross context VM structure.
1723 * @param pVCpu The cross context virtual CPU structure.
1724 * @param GCPhys The page to unmap.
1725 */
1726NEM_TMPL_STATIC int nemHCWinUnmapPageForA20Gate(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
1727{
1728 PGMPHYSNEMPAGEINFO Info;
1729 return PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhys, false /*fMakeWritable*/, &Info,
1730 nemHCWinUnsetForA20CheckerCallback, NULL);
1731}
1732
1733
1734void nemHCNativeNotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
1735{
1736 Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
1737 NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb);
1738}
1739
1740
1741void nemHCNativeNotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
1742 int fRestoreAsRAM, bool fRestoreAsRAM2)
1743{
1744 Log5(("nemHCNativeNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d fRestoreAsRAM=%d fRestoreAsRAM2=%d\n",
1745 GCPhys, cb, enmKind, fRestoreAsRAM, fRestoreAsRAM2));
1746 NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb); NOREF(fRestoreAsRAM); NOREF(fRestoreAsRAM2);
1747}
1748
1749
1750void nemHCNativeNotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
1751 RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
1752{
1753 Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
1754 GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
1755 NOREF(pVM); NOREF(enmKind); NOREF(GCPhysOld); NOREF(GCPhysNew); NOREF(cb); NOREF(fRestoreAsRAM);
1756}
1757
1758
1759/**
1760 * Worker that maps pages into Hyper-V.
1761 *
1762 * This is used by the PGM physical page notifications as well as the memory
1763 * access VMEXIT handlers.
1764 *
1765 * @returns VBox status code.
1766 * @param pVM The cross context VM structure.
1767 * @param pVCpu The cross context virtual CPU structure of the
1768 * calling EMT.
1769 * @param GCPhysSrc The source page address.
1770 * @param GCPhysDst The hyper-V destination page. This may differ from
1771 * GCPhysSrc when A20 is disabled.
1772 * @param fPageProt NEM_PAGE_PROT_XXX.
1773 * @param pu2State Our page state (input/output).
1774 * @param fBackingChanged Set if the page backing is being changed.
1775 * @thread EMT(pVCpu)
1776 */
1777NEM_TMPL_STATIC int nemHCNativeSetPhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
1778 uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged)
1779{
1780#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1781 /*
1782 * When using the hypercalls instead of the ring-3 APIs, we don't need to
1783 * unmap memory before modifying it. We still want to track the state though,
1784 * since unmap will fail when called an unmapped page and we don't want to redo
1785 * upgrades/downgrades.
1786 */
1787 uint8_t const u2OldState = *pu2State;
1788 int rc;
1789 if (fPageProt == NEM_PAGE_PROT_NONE)
1790 {
1791 if (u2OldState > NEM_WIN_PAGE_STATE_UNMAPPED)
1792 {
1793 rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);
1794 if (RT_SUCCESS(rc))
1795 {
1796 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
1797 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
1798 Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
1799 }
1800 else
1801 AssertLogRelMsgFailed(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
1802 }
1803 else
1804 rc = VINF_SUCCESS;
1805 }
1806 else if (fPageProt & NEM_PAGE_PROT_WRITE)
1807 {
1808 if (u2OldState != NEM_WIN_PAGE_STATE_WRITABLE || fBackingChanged)
1809 {
1810 rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
1811 HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE
1812 | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
1813 if (RT_SUCCESS(rc))
1814 {
1815 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
1816 uint32_t cMappedPages = u2OldState <= NEM_WIN_PAGE_STATE_UNMAPPED
1817 ? ASMAtomicIncU32(&pVM->nem.s.cMappedPages) : pVM->nem.s.cMappedPages;
1818 Log5(("NEM GPA writable/set: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
1819 NOREF(cMappedPages);
1820 }
1821 else
1822 AssertLogRelMsgFailed(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
1823 }
1824 else
1825 rc = VINF_SUCCESS;
1826 }
1827 else
1828 {
1829 if (u2OldState != NEM_WIN_PAGE_STATE_READABLE || fBackingChanged)
1830 {
1831 rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
1832 HV_MAP_GPA_READABLE | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
1833 if (RT_SUCCESS(rc))
1834 {
1835 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
1836 uint32_t cMappedPages = u2OldState <= NEM_WIN_PAGE_STATE_UNMAPPED
1837 ? ASMAtomicIncU32(&pVM->nem.s.cMappedPages) : pVM->nem.s.cMappedPages;
1838 Log5(("NEM GPA read+exec/set: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
1839 NOREF(cMappedPages);
1840 }
1841 else
1842 AssertLogRelMsgFailed(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
1843 }
1844 else
1845 rc = VINF_SUCCESS;
1846 }
1847
1848 return VINF_SUCCESS;
1849
1850#else
1851 /*
1852 * Looks like we need to unmap a page before we can change the backing
1853 * or even modify the protection. This is going to be *REALLY* efficient.
1854 * PGM lends us two bits to keep track of the state here.
1855 */
1856 uint8_t const u2OldState = *pu2State;
1857 uint8_t const u2NewState = fPageProt & NEM_PAGE_PROT_WRITE ? NEM_WIN_PAGE_STATE_WRITABLE
1858 : fPageProt & NEM_PAGE_PROT_READ ? NEM_WIN_PAGE_STATE_READABLE : NEM_WIN_PAGE_STATE_UNMAPPED;
1859 if ( fBackingChanged
1860 || u2NewState != u2OldState)
1861 {
1862 if (u2OldState > NEM_WIN_PAGE_STATE_UNMAPPED)
1863 {
1864# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1865 int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);
1866 AssertRC(rc);
1867 if (RT_SUCCESS(rc))
1868 {
1869 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
1870 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
1871 if (u2NewState == NEM_WIN_PAGE_STATE_UNMAPPED)
1872 {
1873 Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n",
1874 GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
1875 return VINF_SUCCESS;
1876 }
1877 }
1878 else
1879 {
1880 LogRel(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
1881 return rc;
1882 }
1883# else
1884 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst, X86_PAGE_SIZE);
1885 if (SUCCEEDED(hrc))
1886 {
1887 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
1888 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
1889 if (u2NewState == NEM_WIN_PAGE_STATE_UNMAPPED)
1890 {
1891 Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n",
1892 GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
1893 return VINF_SUCCESS;
1894 }
1895 }
1896 else
1897 {
1898 LogRel(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
1899 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
1900 return VERR_NEM_INIT_FAILED;
1901 }
1902# endif
1903 }
1904 }
1905
1906 /*
1907 * Writeable mapping?
1908 */
1909 if (fPageProt & NEM_PAGE_PROT_WRITE)
1910 {
1911# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1912 int rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
1913 HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE
1914 | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
1915 AssertRC(rc);
1916 if (RT_SUCCESS(rc))
1917 {
1918 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
1919 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
1920 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
1921 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
1922 return VINF_SUCCESS;
1923 }
1924 LogRel(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
1925 return rc;
1926# else
1927 void *pvPage;
1928 int rc = nemR3NativeGCPhys2R3PtrWriteable(pVM, GCPhysSrc, &pvPage);
1929 if (RT_SUCCESS(rc))
1930 {
1931 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvPage, GCPhysDst, X86_PAGE_SIZE,
1932 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute | WHvMapGpaRangeFlagWrite);
1933 if (SUCCEEDED(hrc))
1934 {
1935 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
1936 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
1937 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
1938 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
1939 return VINF_SUCCESS;
1940 }
1941 LogRel(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
1942 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
1943 return VERR_NEM_INIT_FAILED;
1944 }
1945 LogRel(("nemHCNativeSetPhysPage/writable: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
1946 return rc;
1947# endif
1948 }
1949
1950 if (fPageProt & NEM_PAGE_PROT_READ)
1951 {
1952# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1953 int rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
1954 HV_MAP_GPA_READABLE | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
1955 AssertRC(rc);
1956 if (RT_SUCCESS(rc))
1957 {
1958 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
1959 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
1960 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
1961 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
1962 return VINF_SUCCESS;
1963 }
1964 LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
1965 return rc;
1966# else
1967 const void *pvPage;
1968 int rc = nemR3NativeGCPhys2R3PtrReadOnly(pVM, GCPhysSrc, &pvPage);
1969 if (RT_SUCCESS(rc))
1970 {
1971 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, (void *)pvPage, GCPhysDst, X86_PAGE_SIZE,
1972 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
1973 if (SUCCEEDED(hrc))
1974 {
1975 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
1976 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
1977 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
1978 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
1979 return VINF_SUCCESS;
1980 }
1981 LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
1982 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
1983 return VERR_NEM_INIT_FAILED;
1984 }
1985 LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
1986 return rc;
1987# endif
1988 }
1989
1990 /* We already unmapped it above. */
1991 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
1992 return VINF_SUCCESS;
1993#endif /* !NEM_WIN_USE_HYPERCALLS_FOR_PAGES */
1994}
1995
1996
/**
 * Unmaps a single page from Hyper-V without remapping it, updating the tracked
 * page state (*pu2State) accordingly.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhysDst   The guest physical address of the page to unmap.
 * @param   pu2State    Our page state tracking variable (input/output).
 */
NEM_TMPL_STATIC int nemHCJustUnmapPageFromHyperV(PVM pVM, RTGCPHYS GCPhysDst, uint8_t *pu2State)
{
    /* Nothing to do when the page is already unmapped (or not-set). */
    if (*pu2State <= NEM_WIN_PAGE_STATE_UNMAPPED)
    {
        Log5(("nemHCJustUnmapPageFromHyperV: %RGp == unmapped\n", GCPhysDst));
        *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
        return VINF_SUCCESS;
    }

#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING0)
    /* Hypercall / ring-0 path: unmap via the Hyper-V hypercall interface. */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
    {
        uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
        Log5(("NEM GPA unmapped/just: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[*pu2State], cMappedPages));
        *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
        return VINF_SUCCESS;
    }
    LogRel(("nemHCJustUnmapPageFromHyperV/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
    return rc;
#else
    /* Ring-3 WinHvPlatform path: unmap one page-aligned page from the partition. */
    HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, X86_PAGE_SIZE);
    if (SUCCEEDED(hrc))
    {
        uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
        *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
        Log5(("nemHCJustUnmapPageFromHyperV: %RGp => unmapped (total %u)\n", GCPhysDst, cMappedPages));
        return VINF_SUCCESS;
    }
    LogRel(("nemHCJustUnmapPageFromHyperV(%RGp): failed! hrc=%Rhrc (%#x) Last=%#x/%u\n",
            GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
    return VERR_INTERNAL_ERROR_3;
#endif
}
2033
2034
/**
 * Called by PGM when a physical page has been allocated.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The guest physical address of the page.
 * @param   HCPhys      The host physical address (unused here).
 * @param   fPageProt   NEM_PAGE_PROT_XXX.
 * @param   enmType     The page type (unused here).
 * @param   pu2State    Our page state tracking variable (input/output).
 */
int nemHCNativeNotifyPhysPageAllocated(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
                                       PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhys, fPageProt, enmType, *pu2State));
    RT_NOREF_PV(HCPhys); RT_NOREF_PV(enmType);

    int rc;
#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING0)
    PVMCPU pVCpu = VMMGetCpu(pVM);
    /* Simple case: A20 enabled or the page is outside the A20 wrap-around area. */
    if (   pVM->nem.s.fA20Enabled
        || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
        rc = nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
    else
    {
        /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
        rc = nemHCWinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
        if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys) && RT_SUCCESS(rc))
            rc = nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);

    }
#else
    /* Ring-3 WinHvPlatform path: just unmap and let access exits remap lazily. */
    RT_NOREF_PV(fPageProt);
    if (   pVM->nem.s.fA20Enabled
        || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
        rc = nemR3JustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
    else if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
        rc = nemR3JustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
    else
        rc = VINF_SUCCESS; /* ignore since we've got the alias page at this address. */
#endif
    return rc;
}
2068
2069
/**
 * Called by PGM when the protection of a physical page has changed (backing
 * unchanged).
 *
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The guest physical address of the page.
 * @param   HCPhys      The host physical address (unused here).
 * @param   fPageProt   NEM_PAGE_PROT_XXX.
 * @param   enmType     The page type (unused here).
 * @param   pu2State    Our page state tracking variable (input/output).
 */
void nemHCNativeNotifyPhysPageProtChanged(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
                                          PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("nemHCNativeNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhys, fPageProt, enmType, *pu2State));
    RT_NOREF_PV(HCPhys); RT_NOREF_PV(enmType);

#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING0)
    PVMCPU pVCpu = VMMGetCpu(pVM);
    /* Simple case: A20 enabled or the page is outside the A20 wrap-around area.
       Note fBackingChanged=false: only the protection is updated. */
    if (   pVM->nem.s.fA20Enabled
        || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
        nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, false /*fBackingChanged*/);
    else
    {
        /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
        nemHCWinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
        if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
            nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, false /*fBackingChanged*/);
    }
#else
    /* Ring-3 WinHvPlatform path: just unmap and let access exits remap lazily. */
    RT_NOREF_PV(fPageProt);
    if (   pVM->nem.s.fA20Enabled
        || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
        nemR3JustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
    else if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
        nemR3JustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
    /* else: ignore  since we've got the alias page at this address. */
#endif
}
2099
2100
/**
 * Called by PGM when the backing of a physical page has changed (e.g. the zero
 * page was replaced or vice versa).
 *
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The guest physical address of the page.
 * @param   HCPhysPrev  The previous host physical address (unused here).
 * @param   HCPhysNew   The new host physical address (unused here).
 * @param   fPageProt   NEM_PAGE_PROT_XXX.
 * @param   enmType     The page type (unused here).
 * @param   pu2State    Our page state tracking variable (input/output).
 */
void nemHCNativeNotifyPhysPageChanged(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
                                      uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("nemHCNativeNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhysPrev, HCPhysNew, fPageProt, enmType, *pu2State));
    RT_NOREF_PV(HCPhysPrev); RT_NOREF_PV(HCPhysNew); RT_NOREF_PV(enmType);

#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING0)
    PVMCPU pVCpu = VMMGetCpu(pVM);
    /* Simple case: A20 enabled or the page is outside the A20 wrap-around area.
       Note fBackingChanged=true: the page must be remapped to the new backing. */
    if (   pVM->nem.s.fA20Enabled
        || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
        nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
    else
    {
        /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
        nemHCWinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
        if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
            nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
    }
#else
    /* Ring-3 WinHvPlatform path: just unmap and let access exits remap lazily. */
    RT_NOREF_PV(fPageProt);
    if (   pVM->nem.s.fA20Enabled
        || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
        nemR3JustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
    else if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
        nemR3JustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
    /* else: ignore since we've got the alias page at this address. */
#endif
}
2130
注意: 瀏覽 TracBrowser 來幫助您使用儲存庫瀏覽器

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette