VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/NEMR0Native-win.cpp@80334

Last change on this file since 80334 was 80334, checked in by vboxsync, 6 years ago

VMM: Eliminating the VBOX_BUGREF_9217 preprocessor macro. bugref:9217

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 113.8 KB
 
1/* $Id: NEMR0Native-win.cpp 80334 2019-08-17 00:43:24Z vboxsync $ */
2/** @file
3 * NEM - Native execution manager, native ring-0 Windows backend.
4 */
5
6/*
7 * Copyright (C) 2018-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_NEM
23#define VMCPU_INCL_CPUM_GST_CTX
24#include <iprt/nt/nt.h>
25#include <iprt/nt/hyperv.h>
26#include <iprt/nt/vid.h>
27#include <winerror.h>
28
29#include <VBox/vmm/nem.h>
30#include <VBox/vmm/iem.h>
31#include <VBox/vmm/em.h>
32#include <VBox/vmm/apic.h>
33#include <VBox/vmm/pdm.h>
34#include <VBox/vmm/dbgftrace.h>
35#include "NEMInternal.h"
36#include <VBox/vmm/gvm.h>
37#include <VBox/vmm/vmcc.h>
38#include <VBox/vmm/gvmm.h>
39#include <VBox/param.h>
40
41#include <iprt/dbg.h>
42#include <iprt/memobj.h>
43#include <iprt/string.h>
44#include <iprt/time.h>
45
46
47/* Assert compile context sanity. */
48#ifndef RT_OS_WINDOWS
49# error "Windows only file!"
50#endif
51#ifndef RT_ARCH_AMD64
52# error "AMD64 only file!"
53#endif
54
55
56/*********************************************************************************************************************************
57* Internal Functions *
58*********************************************************************************************************************************/
59typedef uint32_t DWORD; /* for winerror.h constants */
60
61
62/*********************************************************************************************************************************
63* Global Variables *
64*********************************************************************************************************************************/
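/**
 * ntoskrnl.exe!HvlInvokeHypercall
 *
 * Resolved from the NT kernel by NEMR0InitVM; judging from how it is used
 * below, it issues a Hyper-V hypercall given the packed call info and the
 * host physical addresses of the input and output pages.
 */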
65static uint64_t (*g_pfnHvlInvokeHypercall)(uint64_t uCallInfo, uint64_t HCPhysInput, uint64_t HCPhysOutput);
66
67/**
68 * WinHvr.sys!WinHvDepositMemory
69 *
70 * This API will try to allocate cPages on IdealNode and deposit them to the
71 * hypervisor for use with the given partition. The memory will be freed when
72 * VID.SYS calls WinHvWithdrawAllMemory when the partition is cleaned up.
73 *
74 * Apparently node numbers above 64 have a different meaning.
75 */
76static NTSTATUS (*g_pfnWinHvDepositMemory)(uintptr_t idPartition, size_t cPages, uintptr_t IdealNode, size_t *pcActuallyAdded);
77
78
79/*********************************************************************************************************************************
80* Internal Functions *
81*********************************************************************************************************************************/
82NEM_TMPL_STATIC int nemR0WinMapPages(PGVM pGVM, PVMCC pVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
83 uint32_t cPages, uint32_t fFlags);
84NEM_TMPL_STATIC int nemR0WinUnmapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys, uint32_t cPages);
85#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
86NEM_TMPL_STATIC int nemR0WinExportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx);
87NEM_TMPL_STATIC int nemR0WinImportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat, bool fCanUpdateCr3);
88NEM_TMPL_STATIC int nemR0WinQueryCpuTick(PGVM pGVM, PGVMCPU pGVCpu, uint64_t *pcTicks, uint32_t *pcAux);
89NEM_TMPL_STATIC int nemR0WinResumeCpuTickOnAll(PGVM pGVM, PGVMCPU pGVCpu, uint64_t uPausedTscValue);
90#endif
91DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, PVMCPUCC pVCpu, uint32_t uFunction, void *pvInput, uint32_t cbInput,
92 void *pvOutput, uint32_t cbOutput);
93
94
95/*
96 * Instantiate the code we share with ring-3.
97 */
98#ifdef NEM_WIN_WITH_RING0_RUNLOOP
99# define NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
100#else
101# undef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
102#endif
103#include "../VMMAll/NEMAllNativeTemplate-win.cpp.h"
104
105
106
107/**
108 * Worker for NEMR0InitVM that allocates a hypercall page.
109 *
110 * @returns VBox status code.
111 * @param pHypercallData The hypercall data page to initialize.
112 */
113static int nemR0InitHypercallData(PNEMR0HYPERCALLDATA pHypercallData)
114{
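    /* Allocate a single non-executable page of ring-0 memory; we need both
       its host physical address (handed to the hypervisor as the hypercall
       input/output page) and its ring-0 mapping (for composing the input). */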
115 int rc = RTR0MemObjAllocPage(&pHypercallData->hMemObj, PAGE_SIZE, false /*fExecutable*/);
116 if (RT_SUCCESS(rc))
117 {
118 pHypercallData->HCPhysPage = RTR0MemObjGetPagePhysAddr(pHypercallData->hMemObj, 0 /*iPage*/);
119 AssertStmt(pHypercallData->HCPhysPage != NIL_RTHCPHYS, rc = VERR_INTERNAL_ERROR_3);
120 pHypercallData->pbPage = (uint8_t *)RTR0MemObjAddress(pHypercallData->hMemObj);
121 AssertStmt(pHypercallData->pbPage, rc = VERR_INTERNAL_ERROR_3);
122 if (RT_SUCCESS(rc))
123 return VINF_SUCCESS;
124
125 /* bail out */
126 RTR0MemObjFree(pHypercallData->hMemObj, true /*fFreeMappings*/);
127 }
128 pHypercallData->hMemObj = NIL_RTR0MEMOBJ;
129 pHypercallData->HCPhysPage = NIL_RTHCPHYS;
130 pHypercallData->pbPage = NULL;
131 return rc;
132}
133
134/**
135 * Worker for NEMR0CleanupVM and NEMR0InitVM that cleans up a hypercall page.
136 *
137 * @param pHypercallData The hypercall data page to uninitialize.
138 */
139static void nemR0DeleteHypercallData(PNEMR0HYPERCALLDATA pHypercallData)
140{
141 /* Check pbPage here since it's NULL when not initialized, whereas the hMemObj
142 can be either NIL_RTR0MEMOBJ or 0 (they aren't necessarily the same value). */
143 if (pHypercallData->pbPage != NULL)
144 {
145 RTR0MemObjFree(pHypercallData->hMemObj, true /*fFreeMappings*/);
146 pHypercallData->pbPage = NULL;
147 }
148 pHypercallData->hMemObj = NIL_RTR0MEMOBJ;
149 pHypercallData->HCPhysPage = NIL_RTHCPHYS;
150}
151
152
153/**
154 * Called by NEMR3Init to make sure we've got what we need.
155 *
156 * @returns VBox status code.
157 * @param pGVM The ring-0 VM handle.
158 * @param pVM The cross context VM handle.
159 * @thread EMT(0)
160 */
161VMMR0_INT_DECL(int) NEMR0InitVM(PGVM pGVM, PVMCC pVM)
162{
163 AssertCompile(sizeof(pGVM->nemr0.s) <= sizeof(pGVM->nemr0.padding));
164 AssertCompile(sizeof(pGVM->aCpus[0].nemr0.s) <= sizeof(pGVM->aCpus[0].nemr0.padding));
165
166 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, 0);
167 AssertRCReturn(rc, rc);
168
169 /*
170 * We want to perform hypercalls here. The NT kernel started to expose a very low
171 * level interface for this somewhere between builds 14271 and 16299. Since
172 * we need build 17134 to get anywhere at all, the exact build is not relevant here.
173 *
174 * We also need to deposit memory to the hypervisor for use with the partition (page
175 * mapping structures, stuff).
176 */
177 RTDBGKRNLINFO hKrnlInfo;
178 rc = RTR0DbgKrnlInfoOpen(&hKrnlInfo, 0);
179 if (RT_SUCCESS(rc))
180 {
181 rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, NULL, "HvlInvokeHypercall", (void **)&g_pfnHvlInvokeHypercall);
182 if (RT_SUCCESS(rc))
183 rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, "winhvr.sys", "WinHvDepositMemory", (void **)&g_pfnWinHvDepositMemory);
184 RTR0DbgKrnlInfoRelease(hKrnlInfo);
185 if (RT_SUCCESS(rc))
186 {
187 /*
188 * Allocate a page for non-EMT threads to use for hypercalls (update
189 * statistics and such) and a critical section protecting it.
190 */
191 rc = RTCritSectInit(&pGVM->nemr0.s.HypercallDataCritSect);
192 if (RT_SUCCESS(rc))
193 {
194 rc = nemR0InitHypercallData(&pGVM->nemr0.s.HypercallData);
195 if (RT_SUCCESS(rc))
196 {
197 /*
198 * Allocate a page for each VCPU to place hypercall data on.
199 */
200 for (VMCPUID i = 0; i < pGVM->cCpus; i++)
201 {
202 rc = nemR0InitHypercallData(&pGVM->aCpus[i].nemr0.s.HypercallData);
203 if (RT_FAILURE(rc))
204 {
205 while (i-- > 0)
206 nemR0DeleteHypercallData(&pGVM->aCpus[i].nemr0.s.HypercallData);
207 break;
208 }
209 }
210 if (RT_SUCCESS(rc))
211 {
212 /*
213 * So far, so good.
214 */
215 return rc;
216 }
217
218 /*
219 * Bail out.
220 */
221 nemR0DeleteHypercallData(&pGVM->nemr0.s.HypercallData);
222 }
223 RTCritSectDelete(&pGVM->nemr0.s.HypercallDataCritSect);
224 }
225 }
226 else
227 rc = VERR_NEM_MISSING_KERNEL_API;
228 }
229
230 RT_NOREF(pVM);
231 return rc;
232}
233
234
235/**
236 * Perform an I/O control operation on the partition handle (VID.SYS).
237 *
238 * @returns NT status code.
239 * @param pGVM The ring-0 VM structure.
240 * @param pVCpu The cross context CPU structure of the calling EMT.
241 * @param uFunction The function to perform.
242 * @param pvInput The input buffer. This must point within the VM
243 * structure so we can easily convert to a ring-3
244 * pointer if necessary.
245 * @param cbInput The size of the input. @a pvInput must be NULL when
246 * zero.
247 * @param pvOutput The output buffer. This must also point within the
248 * VM structure for ring-3 pointer magic.
249 * @param cbOutput The size of the output. @a pvOutput must be NULL
250 * when zero.
251 * @thread EMT(pVCpu)
252 */
253DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, PVMCPUCC pVCpu, uint32_t uFunction, void *pvInput, uint32_t cbInput,
254 void *pvOutput, uint32_t cbOutput)
255{
256#ifdef RT_STRICT
257 /*
258 * Input and output parameters are part of the VM CPU structure.
259 */
260 VMCPU_ASSERT_EMT(pVCpu);
261 if (pvInput)
262 AssertReturn(((uintptr_t)pvInput + cbInput) - (uintptr_t)pVCpu <= sizeof(*pVCpu), VERR_INVALID_PARAMETER);
263 if (pvOutput)
264 AssertReturn(((uintptr_t)pvOutput + cbOutput) - (uintptr_t)pVCpu <= sizeof(*pVCpu), VERR_INVALID_PARAMETER);
265#endif
266
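    /* SUPR0IoCtlPerform needs both ring-0 and ring-3 addresses for the
       buffers; the ring-3 addresses are derived by adding the per-VCPU
       conversion delta set up in NEMR0InitVMPart2. */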
267 int32_t rcNt = STATUS_UNSUCCESSFUL;
268 int rc = SUPR0IoCtlPerform(pGVM->nemr0.s.pIoCtlCtx, uFunction,
269 pvInput,
270 pvInput ? (uintptr_t)pvInput + pVCpu->nemr0.s.offRing3ConversionDelta : NIL_RTR3PTR,
271 cbInput,
272 pvOutput,
273 pvOutput ? (uintptr_t)pvOutput + pVCpu->nemr0.s.offRing3ConversionDelta : NIL_RTR3PTR,
274 cbOutput,
275 &rcNt);
276 if (RT_SUCCESS(rc) || !NT_SUCCESS((NTSTATUS)rcNt))
277 return (NTSTATUS)rcNt;
278 return STATUS_UNSUCCESSFUL;
279}
280
281
282/**
283 * 2nd part of the initialization, after we've got a partition handle.
284 *
285 * @returns VBox status code.
286 * @param pGVM The ring-0 VM handle.
287 * @param pVM The cross context VM handle.
288 * @thread EMT(0)
289 */
290VMMR0_INT_DECL(int) NEMR0InitVMPart2(PGVM pGVM, PVMCC pVM)
291{
292 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, 0);
293 AssertRCReturn(rc, rc);
294 SUPR0Printf("NEMR0InitVMPart2\n"); LogRel(("2: NEMR0InitVMPart2\n"));
295 Assert(pGVM->nemr0.s.fMayUseRing0Runloop == false);
296
297 /*
298 * Copy and validate the I/O control information from ring-3.
299 */
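    /* The function codes and buffer sizes below are supplied by ring-3, so
       ring-0 only accepts them after checking that they look sane and are
       distinct from one another. */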
300 NEMWINIOCTL Copy = pVM->nem.s.IoCtlGetHvPartitionId;
301 AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED);
302 AssertLogRelReturn(Copy.cbInput == 0, VERR_NEM_INIT_FAILED);
303 AssertLogRelReturn(Copy.cbOutput == sizeof(HV_PARTITION_ID), VERR_NEM_INIT_FAILED);
304 pGVM->nemr0.s.IoCtlGetHvPartitionId = Copy;
305
306 pGVM->nemr0.s.fMayUseRing0Runloop = pVM->nem.s.fUseRing0Runloop;
307
308 Copy = pVM->nem.s.IoCtlStartVirtualProcessor;
309 AssertLogRelStmt(Copy.uFunction != 0, rc = VERR_NEM_INIT_FAILED);
310 AssertLogRelStmt(Copy.cbInput == sizeof(HV_VP_INDEX), rc = VERR_NEM_INIT_FAILED);
311 AssertLogRelStmt(Copy.cbOutput == 0, rc = VERR_NEM_INIT_FAILED);
312 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlGetHvPartitionId.uFunction, rc = VERR_NEM_INIT_FAILED);
313 if (RT_SUCCESS(rc))
314 pGVM->nemr0.s.IoCtlStartVirtualProcessor = Copy;
315
316 Copy = pVM->nem.s.IoCtlStopVirtualProcessor;
317 AssertLogRelStmt(Copy.uFunction != 0, rc = VERR_NEM_INIT_FAILED);
318 AssertLogRelStmt(Copy.cbInput == sizeof(HV_VP_INDEX), rc = VERR_NEM_INIT_FAILED);
319 AssertLogRelStmt(Copy.cbOutput == 0, rc = VERR_NEM_INIT_FAILED);
320 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlGetHvPartitionId.uFunction, rc = VERR_NEM_INIT_FAILED);
321 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlStartVirtualProcessor.uFunction, rc = VERR_NEM_INIT_FAILED);
322 if (RT_SUCCESS(rc))
323 pGVM->nemr0.s.IoCtlStopVirtualProcessor = Copy;
324
325 Copy = pVM->nem.s.IoCtlMessageSlotHandleAndGetNext;
326 AssertLogRelStmt(Copy.uFunction != 0, rc = VERR_NEM_INIT_FAILED);
327 AssertLogRelStmt( Copy.cbInput == sizeof(VID_IOCTL_INPUT_MESSAGE_SLOT_HANDLE_AND_GET_NEXT)
328 || Copy.cbInput == RT_OFFSETOF(VID_IOCTL_INPUT_MESSAGE_SLOT_HANDLE_AND_GET_NEXT, cMillies),
329 rc = VERR_NEM_INIT_FAILED);
330 AssertLogRelStmt(Copy.cbOutput == 0, rc = VERR_NEM_INIT_FAILED);
331 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlGetHvPartitionId.uFunction, rc = VERR_NEM_INIT_FAILED);
332 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlStartVirtualProcessor.uFunction, rc = VERR_NEM_INIT_FAILED);
333 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlStopVirtualProcessor.uFunction, rc = VERR_NEM_INIT_FAILED);
334 if (RT_SUCCESS(rc))
335 pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext = Copy;
336
337 if ( RT_SUCCESS(rc)
338 || !pVM->nem.s.fUseRing0Runloop)
339 {
340 /*
341 * Setup of an I/O control context for the partition handle for later use.
342 */
343 rc = SUPR0IoCtlSetupForHandle(pGVM->pSession, pVM->nem.s.hPartitionDevice, 0, &pGVM->nemr0.s.pIoCtlCtx);
344 AssertLogRelRCReturn(rc, rc);
345 for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
346 {
347 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
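            /* Delta between the ring-3 and ring-0 mappings of this VCPU
               structure; nemR0NtPerformIoControl uses it to turn ring-0
               buffer pointers into their ring-3 equivalents. */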
348 pGVCpu->nemr0.s.offRing3ConversionDelta = (uintptr_t)pGVM->aCpus[idCpu].pVCpuR3 - (uintptr_t)pGVCpu;
349 }
350
351 /*
352 * Get the partition ID.
353 */
354 PVMCPUCC pVCpu0 = &pGVM->aCpus[0];
355 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pVCpu0, pGVM->nemr0.s.IoCtlGetHvPartitionId.uFunction, NULL, 0,
356 &pVCpu0->nem.s.uIoCtlBuf.idPartition, sizeof(pVCpu0->nem.s.uIoCtlBuf.idPartition));
357 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("IoCtlGetHvPartitionId failed: %#x\n", rcNt), VERR_NEM_INIT_FAILED);
358 pGVM->nemr0.s.idHvPartition = pVCpu0->nem.s.uIoCtlBuf.idPartition;
359 AssertLogRelMsgReturn(pGVM->nemr0.s.idHvPartition == pVM->nem.s.idHvPartition,
360 ("idHvPartition mismatch: r0=%#RX64, r3=%#RX64\n", pGVM->nemr0.s.idHvPartition, pVM->nem.s.idHvPartition),
361 VERR_NEM_INIT_FAILED);
362 }
363
364 return rc;
365}
366
367
368/**
369 * Cleanup the NEM parts of the VM in ring-0.
370 *
371 * This is always called and must deal with the state regardless of whether
372 * NEMR0InitVM() was called or not. So, take care here.
373 *
374 * @param pGVM The ring-0 VM handle.
375 */
376VMMR0_INT_DECL(void) NEMR0CleanupVM(PGVM pGVM)
377{
378 pGVM->nemr0.s.idHvPartition = HV_PARTITION_ID_INVALID;
379
380 /* Clean up I/O control context. */
381 if (pGVM->nemr0.s.pIoCtlCtx)
382 {
383 int rc = SUPR0IoCtlCleanup(pGVM->nemr0.s.pIoCtlCtx);
384 AssertRC(rc);
385 pGVM->nemr0.s.pIoCtlCtx = NULL;
386 }
387
388 /* Free the hypercall pages. */
389 VMCPUID i = pGVM->cCpus;
390 while (i-- > 0)
391 nemR0DeleteHypercallData(&pGVM->aCpus[i].nemr0.s.HypercallData);
392
393 /* The non-EMT one too. */
394 if (RTCritSectIsInitialized(&pGVM->nemr0.s.HypercallDataCritSect))
395 RTCritSectDelete(&pGVM->nemr0.s.HypercallDataCritSect);
396 nemR0DeleteHypercallData(&pGVM->nemr0.s.HypercallData);
397}
398
399
400#if 0 /* for debugging GPA unmapping. */
401static int nemR3WinDummyReadGpa(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys)
402{
403 PHV_INPUT_READ_GPA pIn = (PHV_INPUT_READ_GPA)pGVCpu->nemr0.s.pbHypercallData;
404 PHV_OUTPUT_READ_GPA pOut = (PHV_OUTPUT_READ_GPA)(pIn + 1);
405 pIn->PartitionId = pGVM->nemr0.s.idHvPartition;
406 pIn->VpIndex = pGVCpu->idCpu;
407 pIn->ByteCount = 0x10;
408 pIn->BaseGpa = GCPhys;
409 pIn->ControlFlags.AsUINT64 = 0;
410 pIn->ControlFlags.CacheType = HvCacheTypeX64WriteCombining;
411 memset(pOut, 0xfe, sizeof(*pOut));
412 uint64_t volatile uResult = g_pfnHvlInvokeHypercall(HvCallReadGpa, pGVCpu->nemr0.s.HCPhysHypercallData,
413 pGVCpu->nemr0.s.HCPhysHypercallData + sizeof(*pIn));
414 LogRel(("nemR3WinDummyReadGpa: %RGp -> %#RX64; code=%u rsvd=%u abData=%.16Rhxs\n",
415 GCPhys, uResult, pOut->AccessResult.ResultCode, pOut->AccessResult.Reserved, pOut->Data));
416 __debugbreak();
417
418 return uResult != 0 ? VERR_READ_ERROR : VINF_SUCCESS;
419}
420#endif
421
422
423/**
424 * Worker for NEMR0MapPages and others.
425 */
426NEM_TMPL_STATIC int nemR0WinMapPages(PGVM pGVM, PVMCC pVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
427 uint32_t cPages, uint32_t fFlags)
428{
429 /*
430 * Validate.
431 */
432 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
433
434 AssertReturn(cPages > 0, VERR_OUT_OF_RANGE);
435 AssertReturn(cPages <= NEM_MAX_MAP_PAGES, VERR_OUT_OF_RANGE);
436 AssertReturn(!(fFlags & ~(HV_MAP_GPA_MAYBE_ACCESS_MASK & ~HV_MAP_GPA_DUNNO_ACCESS)), VERR_INVALID_FLAGS);
437 AssertMsgReturn(!(GCPhysDst & X86_PAGE_OFFSET_MASK), ("GCPhysDst=%RGp\n", GCPhysDst), VERR_OUT_OF_RANGE);
438 AssertReturn(GCPhysDst < _1E, VERR_OUT_OF_RANGE);
439 if (GCPhysSrc != GCPhysDst)
440 {
441 AssertMsgReturn(!(GCPhysSrc & X86_PAGE_OFFSET_MASK), ("GCPhysSrc=%RGp\n", GCPhysSrc), VERR_OUT_OF_RANGE);
442 AssertReturn(GCPhysSrc < _1E, VERR_OUT_OF_RANGE);
443 }
444
445 /*
446 * Compose and make the hypercall.
447 * Ring-3 is not allowed to fill in the host physical addresses of the call.
448 */
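    /* Retry loop: if the hypervisor runs out of backing memory for the
       partition, we deposit more pages (see below) and try again. */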
449 for (uint32_t iTries = 0;; iTries++)
450 {
451 HV_INPUT_MAP_GPA_PAGES *pMapPages = (HV_INPUT_MAP_GPA_PAGES *)pGVCpu->nemr0.s.HypercallData.pbPage;
452 AssertPtrReturn(pMapPages, VERR_INTERNAL_ERROR_3);
453 pMapPages->TargetPartitionId = pGVM->nemr0.s.idHvPartition;
454 pMapPages->TargetGpaBase = GCPhysDst >> X86_PAGE_SHIFT;
455 pMapPages->MapFlags = fFlags;
456 pMapPages->u32ExplicitPadding = 0;
457 for (uint32_t iPage = 0; iPage < cPages; iPage++, GCPhysSrc += X86_PAGE_SIZE)
458 {
459 RTHCPHYS HCPhys = NIL_RTGCPHYS;
460 int rc = PGMPhysGCPhys2HCPhys(pVM, GCPhysSrc, &HCPhys);
461 AssertRCReturn(rc, rc);
462 pMapPages->PageList[iPage] = HCPhys >> X86_PAGE_SHIFT;
463 }
464
465 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallMapGpaPages | ((uint64_t)cPages << 32),
466 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0);
467 Log6(("NEMR0MapPages: %RGp/%RGp L %u prot %#x -> %#RX64\n",
468 GCPhysDst, GCPhysSrc - cPages * X86_PAGE_SIZE, cPages, fFlags, uResult));
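        /* For a rep hypercall the completed element count is returned in the
           upper half of the result value, so complete success shows up as
           cPages << 32 with a zero status code in the low bits. */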
469 if (uResult == ((uint64_t)cPages << 32))
470 return VINF_SUCCESS;
471
472 /*
473 * If the partition is out of memory, try donate another 512 pages to
474 * it (2MB). VID.SYS does multiples of 512 pages, nothing smaller.
475 */
476 if ( uResult != HV_STATUS_INSUFFICIENT_MEMORY
477 || iTries > 16
478 || g_pfnWinHvDepositMemory == NULL)
479 {
480 LogRel(("g_pfnHvlInvokeHypercall/MapGpaPages -> %#RX64\n", uResult));
481 return VERR_NEM_MAP_PAGES_FAILED;
482 }
483
484 size_t cPagesAdded = 0;
485 NTSTATUS rcNt = g_pfnWinHvDepositMemory(pGVM->nemr0.s.idHvPartition, 512, 0, &cPagesAdded);
486 if (!cPagesAdded)
487 {
488 LogRel(("g_pfnWinHvDepositMemory -> %#x / %#RX64\n", rcNt, uResult));
489 return VERR_NEM_MAP_PAGES_FAILED;
490 }
491 }
492}
493
494
495/**
496 * Maps pages into the guest physical address space.
497 *
498 * Generally the caller will be under the PGM lock already, so no extra effort
499 * is needed to make sure all changes happen under it.
500 *
501 * @returns VBox status code.
502 * @param pGVM The ring-0 VM handle.
503 * @param pVM The cross context VM handle.
504 * @param idCpu The calling EMT. Necessary for getting the
505 * hypercall page and arguments.
506 * @thread EMT(idCpu)
507 */
508VMMR0_INT_DECL(int) NEMR0MapPages(PGVM pGVM, PVMCC pVM, VMCPUID idCpu)
509{
510 /*
511 * Unpack the call.
512 */
513 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
514 if (RT_SUCCESS(rc))
515 {
516 PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
517 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
518
519 RTGCPHYS const GCPhysSrc = pVCpu->nem.s.Hypercall.MapPages.GCPhysSrc;
520 RTGCPHYS const GCPhysDst = pVCpu->nem.s.Hypercall.MapPages.GCPhysDst;
521 uint32_t const cPages = pVCpu->nem.s.Hypercall.MapPages.cPages;
522 HV_MAP_GPA_FLAGS const fFlags = pVCpu->nem.s.Hypercall.MapPages.fFlags;
523
524 /*
525 * Do the work.
526 */
527 rc = nemR0WinMapPages(pGVM, pVM, pGVCpu, GCPhysSrc, GCPhysDst, cPages, fFlags);
528 }
529 return rc;
530}
531
532
533/**
534 * Worker for NEMR0UnmapPages and others.
535 */
536NEM_TMPL_STATIC int nemR0WinUnmapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys, uint32_t cPages)
537{
538 /*
539 * Validate input.
540 */
541 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
542
543 AssertReturn(cPages > 0, VERR_OUT_OF_RANGE);
544 AssertReturn(cPages <= NEM_MAX_UNMAP_PAGES, VERR_OUT_OF_RANGE);
545 AssertMsgReturn(!(GCPhys & X86_PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_OUT_OF_RANGE);
546 AssertReturn(GCPhys < _1E, VERR_OUT_OF_RANGE);
547
548 /*
549 * Compose and make the hypercall.
550 */
551 HV_INPUT_UNMAP_GPA_PAGES *pUnmapPages = (HV_INPUT_UNMAP_GPA_PAGES *)pGVCpu->nemr0.s.HypercallData.pbPage;
552 AssertPtrReturn(pUnmapPages, VERR_INTERNAL_ERROR_3);
553 pUnmapPages->TargetPartitionId = pGVM->nemr0.s.idHvPartition;
554 pUnmapPages->TargetGpaBase = GCPhys >> X86_PAGE_SHIFT;
555 pUnmapPages->fFlags = 0;
556
557 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallUnmapGpaPages | ((uint64_t)cPages << 32),
558 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0);
559 Log6(("NEMR0UnmapPages: %RGp L %u -> %#RX64\n", GCPhys, cPages, uResult));
560 if (uResult == ((uint64_t)cPages << 32))
561 {
562#if 1 /* Do we need to do this? Hopefully not... */
563 uint64_t volatile uR = g_pfnHvlInvokeHypercall(HvCallUncommitGpaPages | ((uint64_t)cPages << 32),
564 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0);
565 AssertMsg(uR == ((uint64_t)cPages << 32), ("uR=%#RX64\n", uR)); NOREF(uR);
566#endif
567 return VINF_SUCCESS;
568 }
569
570 LogRel(("g_pfnHvlInvokeHypercall/UnmapGpaPages -> %#RX64\n", uResult));
571 return VERR_NEM_UNMAP_PAGES_FAILED;
572}
573
574
575/**
576 * Unmaps pages from the guest physical address space.
577 *
578 * Generally the caller will be under the PGM lock already, so no extra effort
580 * is needed to make sure all changes happen under it.
580 *
581 * @returns VBox status code.
582 * @param pGVM The ring-0 VM handle.
583 * @param pVM The cross context VM handle.
584 * @param idCpu The calling EMT. Necessary for getting the
585 * hypercall page and arguments.
586 * @thread EMT(idCpu)
587 */
588VMMR0_INT_DECL(int) NEMR0UnmapPages(PGVM pGVM, PVMCC pVM, VMCPUID idCpu)
589{
590 /*
591 * Unpack the call.
592 */
593 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
594 if (RT_SUCCESS(rc))
595 {
596 PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
597 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
598
599 RTGCPHYS const GCPhys = pVCpu->nem.s.Hypercall.UnmapPages.GCPhys;
600 uint32_t const cPages = pVCpu->nem.s.Hypercall.UnmapPages.cPages;
601
602 /*
603 * Do the work.
604 */
605 rc = nemR0WinUnmapPages(pGVM, pGVCpu, GCPhys, cPages);
606 }
607 return rc;
608}
609
610
611#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
612/**
613 * Worker for NEMR0ExportState.
614 *
615 * Intention is to use it internally later.
616 *
617 * @returns VBox status code.
618 * @param pGVM The ring-0 VM handle.
619 * @param pGVCpu The ring-0 VCPU handle.
620 * @param pCtx The CPU context structure to export from.
621 */
622NEM_TMPL_STATIC int nemR0WinExportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx)
623{
624 PVMCPUCC pVCpu = pGVCpu;
625 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
626 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
627 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
628
629 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
630 pInput->VpIndex = pGVCpu->idCpu;
631 pInput->RsvdZ = 0;
632
633 uint64_t const fWhat = ~pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK);
634 if ( !fWhat
635 && pVCpu->nem.s.fCurrentInterruptWindows == pVCpu->nem.s.fDesiredInterruptWindows)
636 return VINF_SUCCESS;
637 uintptr_t iReg = 0;
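    /* Build a variable length array of (register name, value) elements on
       the hypercall page; iReg counts the elements for the rep hypercall
       issued at the bottom of this function. */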
638
639 /* GPRs */
640 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
641 {
642 if (fWhat & CPUMCTX_EXTRN_RAX)
643 {
644 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
645 pInput->Elements[iReg].Name = HvX64RegisterRax;
646 pInput->Elements[iReg].Value.Reg64 = pCtx->rax;
647 iReg++;
648 }
649 if (fWhat & CPUMCTX_EXTRN_RCX)
650 {
651 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
652 pInput->Elements[iReg].Name = HvX64RegisterRcx;
653 pInput->Elements[iReg].Value.Reg64 = pCtx->rcx;
654 iReg++;
655 }
656 if (fWhat & CPUMCTX_EXTRN_RDX)
657 {
658 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
659 pInput->Elements[iReg].Name = HvX64RegisterRdx;
660 pInput->Elements[iReg].Value.Reg64 = pCtx->rdx;
661 iReg++;
662 }
663 if (fWhat & CPUMCTX_EXTRN_RBX)
664 {
665 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
666 pInput->Elements[iReg].Name = HvX64RegisterRbx;
667 pInput->Elements[iReg].Value.Reg64 = pCtx->rbx;
668 iReg++;
669 }
670 if (fWhat & CPUMCTX_EXTRN_RSP)
671 {
672 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
673 pInput->Elements[iReg].Name = HvX64RegisterRsp;
674 pInput->Elements[iReg].Value.Reg64 = pCtx->rsp;
675 iReg++;
676 }
677 if (fWhat & CPUMCTX_EXTRN_RBP)
678 {
679 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
680 pInput->Elements[iReg].Name = HvX64RegisterRbp;
681 pInput->Elements[iReg].Value.Reg64 = pCtx->rbp;
682 iReg++;
683 }
684 if (fWhat & CPUMCTX_EXTRN_RSI)
685 {
686 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
687 pInput->Elements[iReg].Name = HvX64RegisterRsi;
688 pInput->Elements[iReg].Value.Reg64 = pCtx->rsi;
689 iReg++;
690 }
691 if (fWhat & CPUMCTX_EXTRN_RDI)
692 {
693 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
694 pInput->Elements[iReg].Name = HvX64RegisterRdi;
695 pInput->Elements[iReg].Value.Reg64 = pCtx->rdi;
696 iReg++;
697 }
698 if (fWhat & CPUMCTX_EXTRN_R8_R15)
699 {
700 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
701 pInput->Elements[iReg].Name = HvX64RegisterR8;
702 pInput->Elements[iReg].Value.Reg64 = pCtx->r8;
703 iReg++;
704 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
705 pInput->Elements[iReg].Name = HvX64RegisterR9;
706 pInput->Elements[iReg].Value.Reg64 = pCtx->r9;
707 iReg++;
708 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
709 pInput->Elements[iReg].Name = HvX64RegisterR10;
710 pInput->Elements[iReg].Value.Reg64 = pCtx->r10;
711 iReg++;
712 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
713 pInput->Elements[iReg].Name = HvX64RegisterR11;
714 pInput->Elements[iReg].Value.Reg64 = pCtx->r11;
715 iReg++;
716 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
717 pInput->Elements[iReg].Name = HvX64RegisterR12;
718 pInput->Elements[iReg].Value.Reg64 = pCtx->r12;
719 iReg++;
720 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
721 pInput->Elements[iReg].Name = HvX64RegisterR13;
722 pInput->Elements[iReg].Value.Reg64 = pCtx->r13;
723 iReg++;
724 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
725 pInput->Elements[iReg].Name = HvX64RegisterR14;
726 pInput->Elements[iReg].Value.Reg64 = pCtx->r14;
727 iReg++;
728 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
729 pInput->Elements[iReg].Name = HvX64RegisterR15;
730 pInput->Elements[iReg].Value.Reg64 = pCtx->r15;
731 iReg++;
732 }
733 }
734
735 /* RIP & Flags */
736 if (fWhat & CPUMCTX_EXTRN_RIP)
737 {
738 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
739 pInput->Elements[iReg].Name = HvX64RegisterRip;
740 pInput->Elements[iReg].Value.Reg64 = pCtx->rip;
741 iReg++;
742 }
743 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
744 {
745 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
746 pInput->Elements[iReg].Name = HvX64RegisterRflags;
747 pInput->Elements[iReg].Value.Reg64 = pCtx->rflags.u;
748 iReg++;
749 }
750
751 /* Segments */
752# define COPY_OUT_SEG(a_idx, a_enmName, a_SReg) \
753 do { \
754 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[a_idx]); \
755 pInput->Elements[a_idx].Name = a_enmName; \
756 pInput->Elements[a_idx].Value.Segment.Base = (a_SReg).u64Base; \
757 pInput->Elements[a_idx].Value.Segment.Limit = (a_SReg).u32Limit; \
758 pInput->Elements[a_idx].Value.Segment.Selector = (a_SReg).Sel; \
759 pInput->Elements[a_idx].Value.Segment.Attributes = (a_SReg).Attr.u; \
760 } while (0)
761 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
762 {
763 if (fWhat & CPUMCTX_EXTRN_CS)
764 {
765 COPY_OUT_SEG(iReg, HvX64RegisterCs, pCtx->cs);
766 iReg++;
767 }
768 if (fWhat & CPUMCTX_EXTRN_ES)
769 {
770 COPY_OUT_SEG(iReg, HvX64RegisterEs, pCtx->es);
771 iReg++;
772 }
773 if (fWhat & CPUMCTX_EXTRN_SS)
774 {
775 COPY_OUT_SEG(iReg, HvX64RegisterSs, pCtx->ss);
776 iReg++;
777 }
778 if (fWhat & CPUMCTX_EXTRN_DS)
779 {
780 COPY_OUT_SEG(iReg, HvX64RegisterDs, pCtx->ds);
781 iReg++;
782 }
783 if (fWhat & CPUMCTX_EXTRN_FS)
784 {
785 COPY_OUT_SEG(iReg, HvX64RegisterFs, pCtx->fs);
786 iReg++;
787 }
788 if (fWhat & CPUMCTX_EXTRN_GS)
789 {
790 COPY_OUT_SEG(iReg, HvX64RegisterGs, pCtx->gs);
791 iReg++;
792 }
793 }
794
795 /* Descriptor tables & task segment. */
796 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
797 {
798 if (fWhat & CPUMCTX_EXTRN_LDTR)
799 {
800 COPY_OUT_SEG(iReg, HvX64RegisterLdtr, pCtx->ldtr);
801 iReg++;
802 }
803 if (fWhat & CPUMCTX_EXTRN_TR)
804 {
805 COPY_OUT_SEG(iReg, HvX64RegisterTr, pCtx->tr);
806 iReg++;
807 }
808
809 if (fWhat & CPUMCTX_EXTRN_IDTR)
810 {
811 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
812 pInput->Elements[iReg].Value.Table.Pad[0] = 0;
813 pInput->Elements[iReg].Value.Table.Pad[1] = 0;
814 pInput->Elements[iReg].Value.Table.Pad[2] = 0;
815 pInput->Elements[iReg].Name = HvX64RegisterIdtr;
816 pInput->Elements[iReg].Value.Table.Limit = pCtx->idtr.cbIdt;
817 pInput->Elements[iReg].Value.Table.Base = pCtx->idtr.pIdt;
818 iReg++;
819 }
820 if (fWhat & CPUMCTX_EXTRN_GDTR)
821 {
822 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
823 pInput->Elements[iReg].Value.Table.Pad[0] = 0;
824 pInput->Elements[iReg].Value.Table.Pad[1] = 0;
825 pInput->Elements[iReg].Value.Table.Pad[2] = 0;
826 pInput->Elements[iReg].Name = HvX64RegisterGdtr;
827 pInput->Elements[iReg].Value.Table.Limit = pCtx->gdtr.cbGdt;
828 pInput->Elements[iReg].Value.Table.Base = pCtx->gdtr.pGdt;
829 iReg++;
830 }
831 }
832
833 /* Control registers. */
834 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
835 {
836 if (fWhat & CPUMCTX_EXTRN_CR0)
837 {
838 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
839 pInput->Elements[iReg].Name = HvX64RegisterCr0;
840 pInput->Elements[iReg].Value.Reg64 = pCtx->cr0;
841 iReg++;
842 }
843 if (fWhat & CPUMCTX_EXTRN_CR2)
844 {
845 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
846 pInput->Elements[iReg].Name = HvX64RegisterCr2;
847 pInput->Elements[iReg].Value.Reg64 = pCtx->cr2;
848 iReg++;
849 }
850 if (fWhat & CPUMCTX_EXTRN_CR3)
851 {
852 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
853 pInput->Elements[iReg].Name = HvX64RegisterCr3;
854 pInput->Elements[iReg].Value.Reg64 = pCtx->cr3;
855 iReg++;
856 }
857 if (fWhat & CPUMCTX_EXTRN_CR4)
858 {
859 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
860 pInput->Elements[iReg].Name = HvX64RegisterCr4;
861 pInput->Elements[iReg].Value.Reg64 = pCtx->cr4;
862 iReg++;
863 }
864 }
865 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
866 {
867 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
868 pInput->Elements[iReg].Name = HvX64RegisterCr8;
869 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestCR8(pVCpu);
870 iReg++;
871 }
872
873 /** @todo does HvX64RegisterXfem mean XCR0? What about the related MSR. */
874
875 /* Debug registers. */
876/** @todo fixme. Figure out what the hyper-v version of KVM_SET_GUEST_DEBUG would be. */
877 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
878 {
879 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
880 pInput->Elements[iReg].Name = HvX64RegisterDr0;
881 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR0(pVCpu);
882 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[0];
883 iReg++;
884 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
885 pInput->Elements[iReg].Name = HvX64RegisterDr1;
886 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR1(pVCpu);
887 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[1];
888 iReg++;
889 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
890 pInput->Elements[iReg].Name = HvX64RegisterDr2;
891 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR2(pVCpu);
892 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[2];
893 iReg++;
894 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
895 pInput->Elements[iReg].Name = HvX64RegisterDr3;
896 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR3(pVCpu);
897 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[3];
898 iReg++;
899 }
900 if (fWhat & CPUMCTX_EXTRN_DR6)
901 {
902 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
903 pInput->Elements[iReg].Name = HvX64RegisterDr6;
904 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR6(pVCpu);
905 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[6];
906 iReg++;
907 }
908 if (fWhat & CPUMCTX_EXTRN_DR7)
909 {
910 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
911 pInput->Elements[iReg].Name = HvX64RegisterDr7;
912 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR7(pVCpu);
913 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[7];
914 iReg++;
915 }
916
917 /* Floating point state. */
918 if (fWhat & CPUMCTX_EXTRN_X87)
919 {
920 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
921 pInput->Elements[iReg].Name = HvX64RegisterFpMmx0;
922 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[0].au64[0];
923 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[0].au64[1];
924 iReg++;
925 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
926 pInput->Elements[iReg].Name = HvX64RegisterFpMmx1;
927 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[1].au64[0];
928 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[1].au64[1];
929 iReg++;
930 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
931 pInput->Elements[iReg].Name = HvX64RegisterFpMmx2;
932 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[2].au64[0];
933 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[2].au64[1];
934 iReg++;
935 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
936 pInput->Elements[iReg].Name = HvX64RegisterFpMmx3;
937 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[3].au64[0];
938 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[3].au64[1];
939 iReg++;
940 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
941 pInput->Elements[iReg].Name = HvX64RegisterFpMmx4;
942 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[4].au64[0];
943 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[4].au64[1];
944 iReg++;
945 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
946 pInput->Elements[iReg].Name = HvX64RegisterFpMmx5;
947 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[5].au64[0];
948 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[5].au64[1];
949 iReg++;
950 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
951 pInput->Elements[iReg].Name = HvX64RegisterFpMmx6;
952 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[6].au64[0];
953 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[6].au64[1];
954 iReg++;
955 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
956 pInput->Elements[iReg].Name = HvX64RegisterFpMmx7;
957 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[7].au64[0];
958 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[7].au64[1];
959 iReg++;
960
961 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
962 pInput->Elements[iReg].Name = HvX64RegisterFpControlStatus;
963 pInput->Elements[iReg].Value.FpControlStatus.FpControl = pCtx->pXStateR0->x87.FCW;
964 pInput->Elements[iReg].Value.FpControlStatus.FpStatus = pCtx->pXStateR0->x87.FSW;
965 pInput->Elements[iReg].Value.FpControlStatus.FpTag = pCtx->pXStateR0->x87.FTW;
966 pInput->Elements[iReg].Value.FpControlStatus.Reserved = pCtx->pXStateR0->x87.FTW >> 8;
967 pInput->Elements[iReg].Value.FpControlStatus.LastFpOp = pCtx->pXStateR0->x87.FOP;
968 pInput->Elements[iReg].Value.FpControlStatus.LastFpRip = (pCtx->pXStateR0->x87.FPUIP)
969 | ((uint64_t)pCtx->pXStateR0->x87.CS << 32)
970 | ((uint64_t)pCtx->pXStateR0->x87.Rsrvd1 << 48);
971 iReg++;
972/** @todo we've got trouble if we try to write just SSE w/o X87. */
973 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
974 pInput->Elements[iReg].Name = HvX64RegisterXmmControlStatus;
975 pInput->Elements[iReg].Value.XmmControlStatus.LastFpRdp = (pCtx->pXStateR0->x87.FPUDP)
976 | ((uint64_t)pCtx->pXStateR0->x87.DS << 32)
977 | ((uint64_t)pCtx->pXStateR0->x87.Rsrvd2 << 48);
978 pInput->Elements[iReg].Value.XmmControlStatus.XmmStatusControl = pCtx->pXStateR0->x87.MXCSR;
979 pInput->Elements[iReg].Value.XmmControlStatus.XmmStatusControlMask = pCtx->pXStateR0->x87.MXCSR_MASK; /** @todo ??? (Isn't this an output field?) */
980 iReg++;
981 }
982
983 /* Vector state. */
984 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
985 {
986 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
987 pInput->Elements[iReg].Name = HvX64RegisterXmm0;
988 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Lo;
989 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Hi;
990 iReg++;
991 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
992 pInput->Elements[iReg].Name = HvX64RegisterXmm1;
993 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Lo;
994 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Hi;
995 iReg++;
996 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
997 pInput->Elements[iReg].Name = HvX64RegisterXmm2;
998 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Lo;
999 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Hi;
1000 iReg++;
1001 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1002 pInput->Elements[iReg].Name = HvX64RegisterXmm3;
1003 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Lo;
1004 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Hi;
1005 iReg++;
1006 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1007 pInput->Elements[iReg].Name = HvX64RegisterXmm4;
1008 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Lo;
1009 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Hi;
1010 iReg++;
1011 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1012 pInput->Elements[iReg].Name = HvX64RegisterXmm5;
1013 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Lo;
1014 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Hi;
1015 iReg++;
1016 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1017 pInput->Elements[iReg].Name = HvX64RegisterXmm6;
1018 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Lo;
1019 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Hi;
1020 iReg++;
1021 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1022 pInput->Elements[iReg].Name = HvX64RegisterXmm7;
1023 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Lo;
1024 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Hi;
1025 iReg++;
1026 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1027 pInput->Elements[iReg].Name = HvX64RegisterXmm8;
1028 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Lo;
1029 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Hi;
1030 iReg++;
1031 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1032 pInput->Elements[iReg].Name = HvX64RegisterXmm9;
1033 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Lo;
1034 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Hi;
1035 iReg++;
1036 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1037 pInput->Elements[iReg].Name = HvX64RegisterXmm10;
1038 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Lo;
1039 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Hi;
1040 iReg++;
1041 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1042 pInput->Elements[iReg].Name = HvX64RegisterXmm11;
1043 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Lo;
1044 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Hi;
1045 iReg++;
1046 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1047 pInput->Elements[iReg].Name = HvX64RegisterXmm12;
1048 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Lo;
1049 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Hi;
1050 iReg++;
1051 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1052 pInput->Elements[iReg].Name = HvX64RegisterXmm13;
1053 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Lo;
1054 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Hi;
1055 iReg++;
1056 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1057 pInput->Elements[iReg].Name = HvX64RegisterXmm14;
1058 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Lo;
1059 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Hi;
1060 iReg++;
1061 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1062 pInput->Elements[iReg].Name = HvX64RegisterXmm15;
1063 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Lo;
1064 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Hi;
1065 iReg++;
1066 }
1067
1068 /* MSRs */
1069 // HvX64RegisterTsc - don't touch
1070 if (fWhat & CPUMCTX_EXTRN_EFER)
1071 {
1072 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1073 pInput->Elements[iReg].Name = HvX64RegisterEfer;
1074 pInput->Elements[iReg].Value.Reg64 = pCtx->msrEFER;
1075 iReg++;
1076 }
1077 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1078 {
1079 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1080 pInput->Elements[iReg].Name = HvX64RegisterKernelGsBase;
1081 pInput->Elements[iReg].Value.Reg64 = pCtx->msrKERNELGSBASE;
1082 iReg++;
1083 }
1084 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1085 {
1086 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1087 pInput->Elements[iReg].Name = HvX64RegisterSysenterCs;
1088 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.cs;
1089 iReg++;
1090 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1091 pInput->Elements[iReg].Name = HvX64RegisterSysenterEip;
1092 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.eip;
1093 iReg++;
1094 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1095 pInput->Elements[iReg].Name = HvX64RegisterSysenterEsp;
1096 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.esp;
1097 iReg++;
1098 }
1099 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1100 {
1101 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1102 pInput->Elements[iReg].Name = HvX64RegisterStar;
1103 pInput->Elements[iReg].Value.Reg64 = pCtx->msrSTAR;
1104 iReg++;
1105 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1106 pInput->Elements[iReg].Name = HvX64RegisterLstar;
1107 pInput->Elements[iReg].Value.Reg64 = pCtx->msrLSTAR;
1108 iReg++;
1109 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1110 pInput->Elements[iReg].Name = HvX64RegisterCstar;
1111 pInput->Elements[iReg].Value.Reg64 = pCtx->msrCSTAR;
1112 iReg++;
1113 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1114 pInput->Elements[iReg].Name = HvX64RegisterSfmask;
1115 pInput->Elements[iReg].Value.Reg64 = pCtx->msrSFMASK;
1116 iReg++;
1117 }
1118 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1119 {
1120 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1121 pInput->Elements[iReg].Name = HvX64RegisterApicBase;
1122 pInput->Elements[iReg].Value.Reg64 = APICGetBaseMsrNoCheck(pVCpu);
1123 iReg++;
1124 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1125 pInput->Elements[iReg].Name = HvX64RegisterPat;
1126 pInput->Elements[iReg].Value.Reg64 = pCtx->msrPAT;
1127 iReg++;
1128# if 0 /** @todo HvX64RegisterMtrrCap is read only? Seems it's not even readable. */
1129 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1130 pInput->Elements[iReg].Name = HvX64RegisterMtrrCap;
1131 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestIa32MtrrCap(pVCpu);
1132 iReg++;
1133# endif
1134
1135 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
1136
1137 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1138 pInput->Elements[iReg].Name = HvX64RegisterMtrrDefType;
1139 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrDefType;
1140 iReg++;
1141
1142 /** @todo we don't keep state for HvX64RegisterMtrrPhysBaseX and HvX64RegisterMtrrPhysMaskX */
1143
1144 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1145 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix64k00000;
1146 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix64K_00000;
1147 iReg++;
1148 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1149 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix16k80000;
1150 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix16K_80000;
1151 iReg++;
1152 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1153 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix16kA0000;
1154 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix16K_A0000;
1155 iReg++;
1156 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1157 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kC0000;
1158 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_C0000;
1159 iReg++;
1160 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1161 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kC8000;
1162 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_C8000;
1163 iReg++;
1164 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1165 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kD0000;
1166 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_D0000;
1167 iReg++;
1168 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1169 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kD8000;
1170 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_D8000;
1171 iReg++;
1172 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1173 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kE0000;
1174 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_E0000;
1175 iReg++;
1176 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1177 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kE8000;
1178 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_E8000;
1179 iReg++;
1180 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1181 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kF0000;
1182 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_F0000;
1183 iReg++;
1184 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1185 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kF8000;
1186 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_F8000;
1187 iReg++;
1188 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1189 pInput->Elements[iReg].Name = HvX64RegisterTscAux;
1190 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.TscAux;
1191 iReg++;
1192
1193# if 0 /** @todo Why can't we write these on Intel systems? Not that we really care... */
1194 const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM->pVM);
1195 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
1196 {
1197 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1198 pInput->Elements[iReg].Name = HvX64RegisterIa32MiscEnable;
1199 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MiscEnable;
1200 iReg++;
1201 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1202 pInput->Elements[iReg].Name = HvX64RegisterIa32FeatureControl;
1203 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestIa32FeatureControl(pVCpu);
1204 iReg++;
1205 }
1206# endif
1207 }
1208
1209 /* event injection (clear it). */
1210 if (fWhat & CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT)
1211 {
1212 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1213 pInput->Elements[iReg].Name = HvRegisterPendingInterruption;
1214 pInput->Elements[iReg].Value.Reg64 = 0;
1215 iReg++;
1216 }
1217
1218 /* Interruptibility state. This can get a little complicated since we get
1219 half of the state via HV_X64_VP_EXECUTION_STATE. */
1220 if ( (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
1221 == (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI) )
1222 {
1223 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1224 pInput->Elements[iReg].Name = HvRegisterInterruptState;
1225 pInput->Elements[iReg].Value.Reg64 = 0;
1226 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1227 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
1228 pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1;
1229 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
1230 pInput->Elements[iReg].Value.InterruptState.NmiMasked = 1;
1231 iReg++;
1232 }
1233 else if (fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT)
1234 {
1235 if ( pVCpu->nem.s.fLastInterruptShadow
1236 || ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1237 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip))
1238 {
1239 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1240 pInput->Elements[iReg].Name = HvRegisterInterruptState;
1241 pInput->Elements[iReg].Value.Reg64 = 0;
1242 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1243 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
1244 pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1;
1245 /** @todo Retrieve NMI state, currently assuming it's zero. (yes this may happen on I/O) */
1246 //if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
1247 // pInput->Elements[iReg].Value.InterruptState.NmiMasked = 1;
1248 iReg++;
1249 }
1250 }
1251 else
1252 Assert(!(fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI));
1253
1254 /* Interrupt windows. Always set if active as Hyper-V seems to be forgetful. */
1255 uint8_t const fDesiredIntWin = pVCpu->nem.s.fDesiredInterruptWindows;
1256 if ( fDesiredIntWin
1257 || pVCpu->nem.s.fCurrentInterruptWindows != fDesiredIntWin)
1258 {
1259 pVCpu->nem.s.fCurrentInterruptWindows = pVCpu->nem.s.fDesiredInterruptWindows;
1260 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1261 pInput->Elements[iReg].Name = HvX64RegisterDeliverabilityNotifications;
1262 pInput->Elements[iReg].Value.DeliverabilityNotifications.AsUINT64 = fDesiredIntWin;
1263 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.NmiNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_NMI));
1264 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.InterruptNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_REGULAR));
1265 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.InterruptPriority == (fDesiredIntWin & NEM_WIN_INTW_F_PRIO_MASK) >> NEM_WIN_INTW_F_PRIO_SHIFT);
1266 iReg++;
1267 }
1268
1269 /// @todo HvRegisterPendingEvent0
1270 /// @todo HvRegisterPendingEvent1
1271
1272 /*
1273 * Set the registers.
1274 */
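    /* A guess at where the 127 figure comes from: with a 16 byte fixed
       header and 32 bytes per element, a 4KB page holds at most 127
       HV_REGISTER_ASSOC entries. */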
1275 Assert((uintptr_t)&pInput->Elements[iReg] - (uintptr_t)pGVCpu->nemr0.s.HypercallData.pbPage < PAGE_SIZE); /* max is 127 */
1276
1277 /*
1278 * Make the hypercall.
1279 */
1280 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, iReg),
1281 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0 /*GCPhysOutput*/);
1282 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(iReg),
1283 ("uResult=%RX64 iRegs=%#x\n", uResult, iReg),
1284 VERR_NEM_SET_REGISTERS_FAILED);
1285 //LogFlow(("nemR0WinExportState: uResult=%#RX64 iReg=%zu fWhat=%#018RX64 fExtrn=%#018RX64 -> %#018RX64\n", uResult, iReg, fWhat, pCtx->fExtrn,
1286 // pCtx->fExtrn | CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM ));
1287 pCtx->fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM;
1288 return VINF_SUCCESS;
1289}
1290#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
1291
1292
1293/**
1294 * Export the state to the native API (out of CPUMCTX).
1295 *
1296 * @returns VBox status code
1297 * @param pGVM The ring-0 VM handle.
1298 * @param pVM The cross context VM handle.
1299 * @param idCpu The calling EMT. Necessary for getting the
1300 * hypercall page and arguments.
1301 */
1302VMMR0_INT_DECL(int) NEMR0ExportState(PGVM pGVM, PVMCC pVM, VMCPUID idCpu)
1303{
1304#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
1305 /*
1306 * Validate the call.
1307 */
1308 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
1309 if (RT_SUCCESS(rc))
1310 {
1311 PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
1312 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1313 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
1314
1315 /*
1316 * Call worker.
1317 */
1318 rc = nemR0WinExportState(pGVM, pGVCpu, &pVCpu->cpum.GstCtx);
1319 }
1320 return rc;
1321#else
1322 RT_NOREF(pGVM, pVM, idCpu);
1323 return VERR_NOT_IMPLEMENTED;
1324#endif
1325}
1326
1327
1328#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
1329/**
1330 * Worker for NEMR0ImportState.
1331 *
1332 * Intention is to use it internally later.
1333 *
1334 * @returns VBox status code.
1335 * @param pGVM The ring-0 VM handle.
1336 * @param pGVCpu The ring-0 VCPU handle.
1337 * @param pCtx The CPU context structure to import into.
1338 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
1339 * @param fCanUpdateCr3 Whether it's safe to update CR3 or not.
1340 */
1341NEM_TMPL_STATIC int nemR0WinImportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat, bool fCanUpdateCr3)
1342{
1343 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
1344 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
1345 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
1346 Assert(pCtx == &pGVCpu->cpum.GstCtx);
1347
1348 fWhat &= pCtx->fExtrn;
1349
1350 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
1351 pInput->VpIndex = pGVCpu->idCpu;
1352 pInput->fFlags = 0;
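    /* The GET variant only supplies register names; the hypervisor returns
       the corresponding values through the hypercall output. */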
1353
1354 /* GPRs */
1355 uintptr_t iReg = 0;
1356 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1357 {
1358 if (fWhat & CPUMCTX_EXTRN_RAX)
1359 pInput->Names[iReg++] = HvX64RegisterRax;
1360 if (fWhat & CPUMCTX_EXTRN_RCX)
1361 pInput->Names[iReg++] = HvX64RegisterRcx;
1362 if (fWhat & CPUMCTX_EXTRN_RDX)
1363 pInput->Names[iReg++] = HvX64RegisterRdx;
1364 if (fWhat & CPUMCTX_EXTRN_RBX)
1365 pInput->Names[iReg++] = HvX64RegisterRbx;
1366 if (fWhat & CPUMCTX_EXTRN_RSP)
1367 pInput->Names[iReg++] = HvX64RegisterRsp;
1368 if (fWhat & CPUMCTX_EXTRN_RBP)
1369 pInput->Names[iReg++] = HvX64RegisterRbp;
1370 if (fWhat & CPUMCTX_EXTRN_RSI)
1371 pInput->Names[iReg++] = HvX64RegisterRsi;
1372 if (fWhat & CPUMCTX_EXTRN_RDI)
1373 pInput->Names[iReg++] = HvX64RegisterRdi;
1374 if (fWhat & CPUMCTX_EXTRN_R8_R15)
1375 {
1376 pInput->Names[iReg++] = HvX64RegisterR8;
1377 pInput->Names[iReg++] = HvX64RegisterR9;
1378 pInput->Names[iReg++] = HvX64RegisterR10;
1379 pInput->Names[iReg++] = HvX64RegisterR11;
1380 pInput->Names[iReg++] = HvX64RegisterR12;
1381 pInput->Names[iReg++] = HvX64RegisterR13;
1382 pInput->Names[iReg++] = HvX64RegisterR14;
1383 pInput->Names[iReg++] = HvX64RegisterR15;
1384 }
1385 }
1386
1387 /* RIP & Flags */
1388 if (fWhat & CPUMCTX_EXTRN_RIP)
1389 pInput->Names[iReg++] = HvX64RegisterRip;
1390 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
1391 pInput->Names[iReg++] = HvX64RegisterRflags;
1392
1393 /* Segments */
1394 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
1395 {
1396 if (fWhat & CPUMCTX_EXTRN_CS)
1397 pInput->Names[iReg++] = HvX64RegisterCs;
1398 if (fWhat & CPUMCTX_EXTRN_ES)
1399 pInput->Names[iReg++] = HvX64RegisterEs;
1400 if (fWhat & CPUMCTX_EXTRN_SS)
1401 pInput->Names[iReg++] = HvX64RegisterSs;
1402 if (fWhat & CPUMCTX_EXTRN_DS)
1403 pInput->Names[iReg++] = HvX64RegisterDs;
1404 if (fWhat & CPUMCTX_EXTRN_FS)
1405 pInput->Names[iReg++] = HvX64RegisterFs;
1406 if (fWhat & CPUMCTX_EXTRN_GS)
1407 pInput->Names[iReg++] = HvX64RegisterGs;
1408 }
1409
1410 /* Descriptor tables and the task segment. */
1411 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
1412 {
1413 if (fWhat & CPUMCTX_EXTRN_LDTR)
1414 pInput->Names[iReg++] = HvX64RegisterLdtr;
1415 if (fWhat & CPUMCTX_EXTRN_TR)
1416 pInput->Names[iReg++] = HvX64RegisterTr;
1417 if (fWhat & CPUMCTX_EXTRN_IDTR)
1418 pInput->Names[iReg++] = HvX64RegisterIdtr;
1419 if (fWhat & CPUMCTX_EXTRN_GDTR)
1420 pInput->Names[iReg++] = HvX64RegisterGdtr;
1421 }
1422
1423 /* Control registers. */
1424 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
1425 {
1426 if (fWhat & CPUMCTX_EXTRN_CR0)
1427 pInput->Names[iReg++] = HvX64RegisterCr0;
1428 if (fWhat & CPUMCTX_EXTRN_CR2)
1429 pInput->Names[iReg++] = HvX64RegisterCr2;
1430 if (fWhat & CPUMCTX_EXTRN_CR3)
1431 pInput->Names[iReg++] = HvX64RegisterCr3;
1432 if (fWhat & CPUMCTX_EXTRN_CR4)
1433 pInput->Names[iReg++] = HvX64RegisterCr4;
1434 }
1435 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
1436 pInput->Names[iReg++] = HvX64RegisterCr8;
1437
1438 /* Debug registers. */
1439 if (fWhat & CPUMCTX_EXTRN_DR7)
1440 pInput->Names[iReg++] = HvX64RegisterDr7;
1441 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
1442 {
1443 if (!(fWhat & CPUMCTX_EXTRN_DR7) && (pCtx->fExtrn & CPUMCTX_EXTRN_DR7))
1444 {
1445 fWhat |= CPUMCTX_EXTRN_DR7;
1446 pInput->Names[iReg++] = HvX64RegisterDr7;
1447 }
1448 pInput->Names[iReg++] = HvX64RegisterDr0;
1449 pInput->Names[iReg++] = HvX64RegisterDr1;
1450 pInput->Names[iReg++] = HvX64RegisterDr2;
1451 pInput->Names[iReg++] = HvX64RegisterDr3;
1452 }
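    /* Note: DR7 is fetched alongside DR0-DR3 here so the copy-back code below never has to
       touch DR0-DR3 while DR7 is still external; see the 'Hack alert' in the DR7 handling
       further down. */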
1453 if (fWhat & CPUMCTX_EXTRN_DR6)
1454 pInput->Names[iReg++] = HvX64RegisterDr6;
1455
1456 /* Floating point state. */
1457 if (fWhat & CPUMCTX_EXTRN_X87)
1458 {
1459 pInput->Names[iReg++] = HvX64RegisterFpMmx0;
1460 pInput->Names[iReg++] = HvX64RegisterFpMmx1;
1461 pInput->Names[iReg++] = HvX64RegisterFpMmx2;
1462 pInput->Names[iReg++] = HvX64RegisterFpMmx3;
1463 pInput->Names[iReg++] = HvX64RegisterFpMmx4;
1464 pInput->Names[iReg++] = HvX64RegisterFpMmx5;
1465 pInput->Names[iReg++] = HvX64RegisterFpMmx6;
1466 pInput->Names[iReg++] = HvX64RegisterFpMmx7;
1467 pInput->Names[iReg++] = HvX64RegisterFpControlStatus;
1468 }
1469 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
1470 pInput->Names[iReg++] = HvX64RegisterXmmControlStatus;
1471
1472 /* Vector state. */
1473 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
1474 {
1475 pInput->Names[iReg++] = HvX64RegisterXmm0;
1476 pInput->Names[iReg++] = HvX64RegisterXmm1;
1477 pInput->Names[iReg++] = HvX64RegisterXmm2;
1478 pInput->Names[iReg++] = HvX64RegisterXmm3;
1479 pInput->Names[iReg++] = HvX64RegisterXmm4;
1480 pInput->Names[iReg++] = HvX64RegisterXmm5;
1481 pInput->Names[iReg++] = HvX64RegisterXmm6;
1482 pInput->Names[iReg++] = HvX64RegisterXmm7;
1483 pInput->Names[iReg++] = HvX64RegisterXmm8;
1484 pInput->Names[iReg++] = HvX64RegisterXmm9;
1485 pInput->Names[iReg++] = HvX64RegisterXmm10;
1486 pInput->Names[iReg++] = HvX64RegisterXmm11;
1487 pInput->Names[iReg++] = HvX64RegisterXmm12;
1488 pInput->Names[iReg++] = HvX64RegisterXmm13;
1489 pInput->Names[iReg++] = HvX64RegisterXmm14;
1490 pInput->Names[iReg++] = HvX64RegisterXmm15;
1491 }
1492
1493 /* MSRs */
1494 // HvX64RegisterTsc - don't touch
1495 if (fWhat & CPUMCTX_EXTRN_EFER)
1496 pInput->Names[iReg++] = HvX64RegisterEfer;
1497 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1498 pInput->Names[iReg++] = HvX64RegisterKernelGsBase;
1499 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1500 {
1501 pInput->Names[iReg++] = HvX64RegisterSysenterCs;
1502 pInput->Names[iReg++] = HvX64RegisterSysenterEip;
1503 pInput->Names[iReg++] = HvX64RegisterSysenterEsp;
1504 }
1505 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1506 {
1507 pInput->Names[iReg++] = HvX64RegisterStar;
1508 pInput->Names[iReg++] = HvX64RegisterLstar;
1509 pInput->Names[iReg++] = HvX64RegisterCstar;
1510 pInput->Names[iReg++] = HvX64RegisterSfmask;
1511 }
1512
1513# ifdef LOG_ENABLED
1514 const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM);
1515# endif
1516 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1517 {
1518 pInput->Names[iReg++] = HvX64RegisterApicBase; /// @todo APIC BASE
1519 pInput->Names[iReg++] = HvX64RegisterPat;
1520# if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
1521 pInput->Names[iReg++] = HvX64RegisterMtrrCap;
1522# endif
1523 pInput->Names[iReg++] = HvX64RegisterMtrrDefType;
1524 pInput->Names[iReg++] = HvX64RegisterMtrrFix64k00000;
1525 pInput->Names[iReg++] = HvX64RegisterMtrrFix16k80000;
1526 pInput->Names[iReg++] = HvX64RegisterMtrrFix16kA0000;
1527 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kC0000;
1528 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kC8000;
1529 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kD0000;
1530 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kD8000;
1531 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kE0000;
1532 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kE8000;
1533 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kF0000;
1534 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kF8000;
1535 pInput->Names[iReg++] = HvX64RegisterTscAux;
1536# if 0 /** @todo why can't we read HvX64RegisterIa32MiscEnable? */
1537 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
1538 pInput->Names[iReg++] = HvX64RegisterIa32MiscEnable;
1539# endif
1540# ifdef LOG_ENABLED
1541 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
1542 pInput->Names[iReg++] = HvX64RegisterIa32FeatureControl;
1543# endif
1544 }
1545
1546 /* Interruptibility. */
1547 if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
1548 {
1549 pInput->Names[iReg++] = HvRegisterInterruptState;
1550 pInput->Names[iReg++] = HvX64RegisterRip;
1551 }
1552
1553 /* event injection */
1554 pInput->Names[iReg++] = HvRegisterPendingInterruption;
1555 pInput->Names[iReg++] = HvRegisterPendingEvent0;
1556 pInput->Names[iReg++] = HvRegisterPendingEvent1;
1557 size_t const cRegs = iReg;
1558 size_t const cbInput = RT_ALIGN_Z(RT_UOFFSETOF_DYN(HV_INPUT_GET_VP_REGISTERS, Names[cRegs]), 32);
1559
1560 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
1561 Assert((uintptr_t)&paValues[cRegs] - (uintptr_t)pGVCpu->nemr0.s.HypercallData.pbPage < PAGE_SIZE); /* (max is around 168 registers) */
1562 RT_BZERO(paValues, cRegs * sizeof(paValues[0]));
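    /* The register-name array and the value array share the single preallocated hypercall page:
       the names start at offset 0 and the values at the 32-byte aligned cbInput offset, hence
       the one-page assertion above. */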
1563
1564 /*
1565 * Make the hypercall.
1566 */
1567 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, cRegs),
1568 pGVCpu->nemr0.s.HypercallData.HCPhysPage,
1569 pGVCpu->nemr0.s.HypercallData.HCPhysPage + cbInput);
1570 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(cRegs),
1571 ("uResult=%RX64 cRegs=%#x\n", uResult, cRegs),
1572 VERR_NEM_GET_REGISTERS_FAILED);
1573 //LogFlow(("nemR0WinImportState: uResult=%#RX64 iReg=%zu fWhat=%#018RX64 fExtr=%#018RX64\n", uResult, cRegs, fWhat, pCtx->fExtrn));
1574
1575 /*
1576 * Copy information to the CPUM context.
1577 */
1578 PVMCPUCC pVCpu = pGVCpu;
1579 iReg = 0;
1580
1581 /* GPRs */
1582 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1583 {
1584 if (fWhat & CPUMCTX_EXTRN_RAX)
1585 {
1586 Assert(pInput->Names[iReg] == HvX64RegisterRax);
1587 pCtx->rax = paValues[iReg++].Reg64;
1588 }
1589 if (fWhat & CPUMCTX_EXTRN_RCX)
1590 {
1591 Assert(pInput->Names[iReg] == HvX64RegisterRcx);
1592 pCtx->rcx = paValues[iReg++].Reg64;
1593 }
1594 if (fWhat & CPUMCTX_EXTRN_RDX)
1595 {
1596 Assert(pInput->Names[iReg] == HvX64RegisterRdx);
1597 pCtx->rdx = paValues[iReg++].Reg64;
1598 }
1599 if (fWhat & CPUMCTX_EXTRN_RBX)
1600 {
1601 Assert(pInput->Names[iReg] == HvX64RegisterRbx);
1602 pCtx->rbx = paValues[iReg++].Reg64;
1603 }
1604 if (fWhat & CPUMCTX_EXTRN_RSP)
1605 {
1606 Assert(pInput->Names[iReg] == HvX64RegisterRsp);
1607 pCtx->rsp = paValues[iReg++].Reg64;
1608 }
1609 if (fWhat & CPUMCTX_EXTRN_RBP)
1610 {
1611 Assert(pInput->Names[iReg] == HvX64RegisterRbp);
1612 pCtx->rbp = paValues[iReg++].Reg64;
1613 }
1614 if (fWhat & CPUMCTX_EXTRN_RSI)
1615 {
1616 Assert(pInput->Names[iReg] == HvX64RegisterRsi);
1617 pCtx->rsi = paValues[iReg++].Reg64;
1618 }
1619 if (fWhat & CPUMCTX_EXTRN_RDI)
1620 {
1621 Assert(pInput->Names[iReg] == HvX64RegisterRdi);
1622 pCtx->rdi = paValues[iReg++].Reg64;
1623 }
1624 if (fWhat & CPUMCTX_EXTRN_R8_R15)
1625 {
1626 Assert(pInput->Names[iReg] == HvX64RegisterR8);
1627 Assert(pInput->Names[iReg + 7] == HvX64RegisterR15);
1628 pCtx->r8 = paValues[iReg++].Reg64;
1629 pCtx->r9 = paValues[iReg++].Reg64;
1630 pCtx->r10 = paValues[iReg++].Reg64;
1631 pCtx->r11 = paValues[iReg++].Reg64;
1632 pCtx->r12 = paValues[iReg++].Reg64;
1633 pCtx->r13 = paValues[iReg++].Reg64;
1634 pCtx->r14 = paValues[iReg++].Reg64;
1635 pCtx->r15 = paValues[iReg++].Reg64;
1636 }
1637 }
1638
1639 /* RIP & Flags */
1640 if (fWhat & CPUMCTX_EXTRN_RIP)
1641 {
1642 Assert(pInput->Names[iReg] == HvX64RegisterRip);
1643 pCtx->rip = paValues[iReg++].Reg64;
1644 }
1645 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
1646 {
1647 Assert(pInput->Names[iReg] == HvX64RegisterRflags);
1648 pCtx->rflags.u = paValues[iReg++].Reg64;
1649 }
1650
1651 /* Segments */
1652# define COPY_BACK_SEG(a_idx, a_enmName, a_SReg) \
1653 do { \
1654 Assert(pInput->Names[a_idx] == a_enmName); \
1655 (a_SReg).u64Base = paValues[a_idx].Segment.Base; \
1656 (a_SReg).u32Limit = paValues[a_idx].Segment.Limit; \
1657 (a_SReg).ValidSel = (a_SReg).Sel = paValues[a_idx].Segment.Selector; \
1658 (a_SReg).Attr.u = paValues[a_idx].Segment.Attributes; \
1659 (a_SReg).fFlags = CPUMSELREG_FLAGS_VALID; \
1660 } while (0)
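/* COPY_BACK_SEG() copies a complete hidden segment register (base, limit, selector and
   attributes) out of the Hyper-V segment value format and marks it valid for CPUM. */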
1661 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
1662 {
1663 if (fWhat & CPUMCTX_EXTRN_CS)
1664 {
1665 COPY_BACK_SEG(iReg, HvX64RegisterCs, pCtx->cs);
1666 iReg++;
1667 }
1668 if (fWhat & CPUMCTX_EXTRN_ES)
1669 {
1670 COPY_BACK_SEG(iReg, HvX64RegisterEs, pCtx->es);
1671 iReg++;
1672 }
1673 if (fWhat & CPUMCTX_EXTRN_SS)
1674 {
1675 COPY_BACK_SEG(iReg, HvX64RegisterSs, pCtx->ss);
1676 iReg++;
1677 }
1678 if (fWhat & CPUMCTX_EXTRN_DS)
1679 {
1680 COPY_BACK_SEG(iReg, HvX64RegisterDs, pCtx->ds);
1681 iReg++;
1682 }
1683 if (fWhat & CPUMCTX_EXTRN_FS)
1684 {
1685 COPY_BACK_SEG(iReg, HvX64RegisterFs, pCtx->fs);
1686 iReg++;
1687 }
1688 if (fWhat & CPUMCTX_EXTRN_GS)
1689 {
1690 COPY_BACK_SEG(iReg, HvX64RegisterGs, pCtx->gs);
1691 iReg++;
1692 }
1693 }
1694 /* Descriptor tables and the task segment. */
1695 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
1696 {
1697 if (fWhat & CPUMCTX_EXTRN_LDTR)
1698 {
1699 COPY_BACK_SEG(iReg, HvX64RegisterLdtr, pCtx->ldtr);
1700 iReg++;
1701 }
1702 if (fWhat & CPUMCTX_EXTRN_TR)
1703 {
1704 /* AMD-V likes loading TR in the AVAIL state, whereas Intel insists on BUSY. So, to
1705 avoid triggering sanity assertions around the code, always fix this up. */
1706 COPY_BACK_SEG(iReg, HvX64RegisterTr, pCtx->tr);
1707 switch (pCtx->tr.Attr.n.u4Type)
1708 {
1709 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1710 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1711 break;
1712 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
1713 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
1714 break;
1715 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
1716 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;
1717 break;
1718 }
1719 iReg++;
1720 }
1721 if (fWhat & CPUMCTX_EXTRN_IDTR)
1722 {
1723 Assert(pInput->Names[iReg] == HvX64RegisterIdtr);
1724 pCtx->idtr.cbIdt = paValues[iReg].Table.Limit;
1725 pCtx->idtr.pIdt = paValues[iReg].Table.Base;
1726 iReg++;
1727 }
1728 if (fWhat & CPUMCTX_EXTRN_GDTR)
1729 {
1730 Assert(pInput->Names[iReg] == HvX64RegisterGdtr);
1731 pCtx->gdtr.cbGdt = paValues[iReg].Table.Limit;
1732 pCtx->gdtr.pGdt = paValues[iReg].Table.Base;
1733 iReg++;
1734 }
1735 }
1736
1737 /* Control registers. */
1738 bool fMaybeChangedMode = false;
1739 bool fUpdateCr3 = false;
1740 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
1741 {
1742 if (fWhat & CPUMCTX_EXTRN_CR0)
1743 {
1744 Assert(pInput->Names[iReg] == HvX64RegisterCr0);
1745 if (pCtx->cr0 != paValues[iReg].Reg64)
1746 {
1747 CPUMSetGuestCR0(pVCpu, paValues[iReg].Reg64);
1748 fMaybeChangedMode = true;
1749 }
1750 iReg++;
1751 }
1752 if (fWhat & CPUMCTX_EXTRN_CR2)
1753 {
1754 Assert(pInput->Names[iReg] == HvX64RegisterCr2);
1755 pCtx->cr2 = paValues[iReg].Reg64;
1756 iReg++;
1757 }
1758 if (fWhat & CPUMCTX_EXTRN_CR3)
1759 {
1760 Assert(pInput->Names[iReg] == HvX64RegisterCr3);
1761 if (pCtx->cr3 != paValues[iReg].Reg64)
1762 {
1763 CPUMSetGuestCR3(pVCpu, paValues[iReg].Reg64);
1764 fUpdateCr3 = true;
1765 }
1766 iReg++;
1767 }
1768 if (fWhat & CPUMCTX_EXTRN_CR4)
1769 {
1770 Assert(pInput->Names[iReg] == HvX64RegisterCr4);
1771 if (pCtx->cr4 != paValues[iReg].Reg64)
1772 {
1773 CPUMSetGuestCR4(pVCpu, paValues[iReg].Reg64);
1774 fMaybeChangedMode = true;
1775 }
1776 iReg++;
1777 }
1778 }
1779 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
1780 {
1781 Assert(pInput->Names[iReg] == HvX64RegisterCr8);
1782 APICSetTpr(pVCpu, (uint8_t)paValues[iReg].Reg64 << 4);
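        /* CR8 only exposes the priority class, i.e. TPR bits 7:4, which is why the value read
           back is shifted left by four before being handed to the virtual APIC. */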
1783 iReg++;
1784 }
1785
1786 /* Debug registers. */
1787 if (fWhat & CPUMCTX_EXTRN_DR7)
1788 {
1789 Assert(pInput->Names[iReg] == HvX64RegisterDr7);
1790 if (pCtx->dr[7] != paValues[iReg].Reg64)
1791 CPUMSetGuestDR7(pVCpu, paValues[iReg].Reg64);
1792 pCtx->fExtrn &= ~CPUMCTX_EXTRN_DR7; /* Hack alert! Avoids asserting when processing CPUMCTX_EXTRN_DR0_DR3. */
1793 iReg++;
1794 }
1795 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
1796 {
1797 Assert(pInput->Names[iReg] == HvX64RegisterDr0);
1798 Assert(pInput->Names[iReg+3] == HvX64RegisterDr3);
1799 if (pCtx->dr[0] != paValues[iReg].Reg64)
1800 CPUMSetGuestDR0(pVCpu, paValues[iReg].Reg64);
1801 iReg++;
1802 if (pCtx->dr[1] != paValues[iReg].Reg64)
1803 CPUMSetGuestDR1(pVCpu, paValues[iReg].Reg64);
1804 iReg++;
1805 if (pCtx->dr[2] != paValues[iReg].Reg64)
1806 CPUMSetGuestDR2(pVCpu, paValues[iReg].Reg64);
1807 iReg++;
1808 if (pCtx->dr[3] != paValues[iReg].Reg64)
1809 CPUMSetGuestDR3(pVCpu, paValues[iReg].Reg64);
1810 iReg++;
1811 }
1812 if (fWhat & CPUMCTX_EXTRN_DR6)
1813 {
1814 Assert(pInput->Names[iReg] == HvX64RegisterDr6);
1815 if (pCtx->dr[6] != paValues[iReg].Reg64)
1816 CPUMSetGuestDR6(pVCpu, paValues[iReg].Reg64);
1817 iReg++;
1818 }
1819
1820 /* Floating point state. */
1821 if (fWhat & CPUMCTX_EXTRN_X87)
1822 {
1823 Assert(pInput->Names[iReg] == HvX64RegisterFpMmx0);
1824 Assert(pInput->Names[iReg + 7] == HvX64RegisterFpMmx7);
1825 pCtx->pXStateR0->x87.aRegs[0].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1826 pCtx->pXStateR0->x87.aRegs[0].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1827 iReg++;
1828 pCtx->pXStateR0->x87.aRegs[1].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1829 pCtx->pXStateR0->x87.aRegs[1].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1830 iReg++;
1831 pCtx->pXStateR0->x87.aRegs[2].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1832 pCtx->pXStateR0->x87.aRegs[2].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1833 iReg++;
1834 pCtx->pXStateR0->x87.aRegs[3].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1835 pCtx->pXStateR0->x87.aRegs[3].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1836 iReg++;
1837 pCtx->pXStateR0->x87.aRegs[4].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1838 pCtx->pXStateR0->x87.aRegs[4].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1839 iReg++;
1840 pCtx->pXStateR0->x87.aRegs[5].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1841 pCtx->pXStateR0->x87.aRegs[5].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1842 iReg++;
1843 pCtx->pXStateR0->x87.aRegs[6].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1844 pCtx->pXStateR0->x87.aRegs[6].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1845 iReg++;
1846 pCtx->pXStateR0->x87.aRegs[7].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1847 pCtx->pXStateR0->x87.aRegs[7].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1848 iReg++;
1849
1850 Assert(pInput->Names[iReg] == HvX64RegisterFpControlStatus);
1851 pCtx->pXStateR0->x87.FCW = paValues[iReg].FpControlStatus.FpControl;
1852 pCtx->pXStateR0->x87.FSW = paValues[iReg].FpControlStatus.FpStatus;
1853 pCtx->pXStateR0->x87.FTW = paValues[iReg].FpControlStatus.FpTag
1854 /*| (paValues[iReg].FpControlStatus.Reserved << 8)*/;
1855 pCtx->pXStateR0->x87.FOP = paValues[iReg].FpControlStatus.LastFpOp;
1856 pCtx->pXStateR0->x87.FPUIP = (uint32_t)paValues[iReg].FpControlStatus.LastFpRip;
1857 pCtx->pXStateR0->x87.CS = (uint16_t)(paValues[iReg].FpControlStatus.LastFpRip >> 32);
1858 pCtx->pXStateR0->x87.Rsrvd1 = (uint16_t)(paValues[iReg].FpControlStatus.LastFpRip >> 48);
1859 iReg++;
1860 }
1861
1862 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
1863 {
1864 Assert(pInput->Names[iReg] == HvX64RegisterXmmControlStatus);
1865 if (fWhat & CPUMCTX_EXTRN_X87)
1866 {
1867 pCtx->pXStateR0->x87.FPUDP = (uint32_t)paValues[iReg].XmmControlStatus.LastFpRdp;
1868 pCtx->pXStateR0->x87.DS = (uint16_t)(paValues[iReg].XmmControlStatus.LastFpRdp >> 32);
1869 pCtx->pXStateR0->x87.Rsrvd2 = (uint16_t)(paValues[iReg].XmmControlStatus.LastFpRdp >> 48);
1870 }
1871 pCtx->pXStateR0->x87.MXCSR = paValues[iReg].XmmControlStatus.XmmStatusControl;
1872 pCtx->pXStateR0->x87.MXCSR_MASK = paValues[iReg].XmmControlStatus.XmmStatusControlMask; /** @todo ??? (Isn't this an output field?) */
1873 iReg++;
1874 }
1875
1876 /* Vector state. */
1877 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
1878 {
1879 Assert(pInput->Names[iReg] == HvX64RegisterXmm0);
1880 Assert(pInput->Names[iReg+15] == HvX64RegisterXmm15);
1881 pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1882 pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1883 iReg++;
1884 pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1885 pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1886 iReg++;
1887 pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1888 pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1889 iReg++;
1890 pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1891 pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1892 iReg++;
1893 pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1894 pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1895 iReg++;
1896 pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1897 pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1898 iReg++;
1899 pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1900 pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1901 iReg++;
1902 pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1903 pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1904 iReg++;
1905 pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1906 pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1907 iReg++;
1908 pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1909 pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1910 iReg++;
1911 pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1912 pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1913 iReg++;
1914 pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1915 pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1916 iReg++;
1917 pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1918 pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1919 iReg++;
1920 pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1921 pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1922 iReg++;
1923 pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1924 pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1925 iReg++;
1926 pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1927 pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1928 iReg++;
1929 }
1930
1931
1932 /* MSRs */
1933 // HvX64RegisterTsc - don't touch
1934 if (fWhat & CPUMCTX_EXTRN_EFER)
1935 {
1936 Assert(pInput->Names[iReg] == HvX64RegisterEfer);
1937 if (paValues[iReg].Reg64 != pCtx->msrEFER)
1938 {
1939 Log7(("NEM/%u: MSR EFER changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrEFER, paValues[iReg].Reg64));
1940 if ((paValues[iReg].Reg64 ^ pCtx->msrEFER) & MSR_K6_EFER_NXE)
1941 PGMNotifyNxeChanged(pVCpu, RT_BOOL(paValues[iReg].Reg64 & MSR_K6_EFER_NXE));
1942 pCtx->msrEFER = paValues[iReg].Reg64;
1943 fMaybeChangedMode = true;
1944 }
1945 iReg++;
1946 }
1947 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1948 {
1949 Assert(pInput->Names[iReg] == HvX64RegisterKernelGsBase);
1950 if (pCtx->msrKERNELGSBASE != paValues[iReg].Reg64)
1951 Log7(("NEM/%u: MSR KERNELGSBASE changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrKERNELGSBASE, paValues[iReg].Reg64));
1952 pCtx->msrKERNELGSBASE = paValues[iReg].Reg64;
1953 iReg++;
1954 }
1955 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1956 {
1957 Assert(pInput->Names[iReg] == HvX64RegisterSysenterCs);
1958 if (pCtx->SysEnter.cs != paValues[iReg].Reg64)
1959 Log7(("NEM/%u: MSR SYSENTER.CS changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->SysEnter.cs, paValues[iReg].Reg64));
1960 pCtx->SysEnter.cs = paValues[iReg].Reg64;
1961 iReg++;
1962
1963 Assert(pInput->Names[iReg] == HvX64RegisterSysenterEip);
1964 if (pCtx->SysEnter.eip != paValues[iReg].Reg64)
1965 Log7(("NEM/%u: MSR SYSENTER.EIP changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->SysEnter.eip, paValues[iReg].Reg64));
1966 pCtx->SysEnter.eip = paValues[iReg].Reg64;
1967 iReg++;
1968
1969 Assert(pInput->Names[iReg] == HvX64RegisterSysenterEsp);
1970 if (pCtx->SysEnter.esp != paValues[iReg].Reg64)
1971 Log7(("NEM/%u: MSR SYSENTER.ESP changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->SysEnter.esp, paValues[iReg].Reg64));
1972 pCtx->SysEnter.esp = paValues[iReg].Reg64;
1973 iReg++;
1974 }
1975 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1976 {
1977 Assert(pInput->Names[iReg] == HvX64RegisterStar);
1978 if (pCtx->msrSTAR != paValues[iReg].Reg64)
1979 Log7(("NEM/%u: MSR STAR changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrSTAR, paValues[iReg].Reg64));
1980 pCtx->msrSTAR = paValues[iReg].Reg64;
1981 iReg++;
1982
1983 Assert(pInput->Names[iReg] == HvX64RegisterLstar);
1984 if (pCtx->msrLSTAR != paValues[iReg].Reg64)
1985 Log7(("NEM/%u: MSR LSTAR changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrLSTAR, paValues[iReg].Reg64));
1986 pCtx->msrLSTAR = paValues[iReg].Reg64;
1987 iReg++;
1988
1989 Assert(pInput->Names[iReg] == HvX64RegisterCstar);
1990 if (pCtx->msrCSTAR != paValues[iReg].Reg64)
1991 Log7(("NEM/%u: MSR CSTAR changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrCSTAR, paValues[iReg].Reg64));
1992 pCtx->msrCSTAR = paValues[iReg].Reg64;
1993 iReg++;
1994
1995 Assert(pInput->Names[iReg] == HvX64RegisterSfmask);
1996 if (pCtx->msrSFMASK != paValues[iReg].Reg64)
1997 Log7(("NEM/%u: MSR SFMASK changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrSFMASK, paValues[iReg].Reg64));
1998 pCtx->msrSFMASK = paValues[iReg].Reg64;
1999 iReg++;
2000 }
2001 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
2002 {
2003 Assert(pInput->Names[iReg] == HvX64RegisterApicBase);
2004 const uint64_t uOldBase = APICGetBaseMsrNoCheck(pVCpu);
2005 if (paValues[iReg].Reg64 != uOldBase)
2006 {
2007 Log7(("NEM/%u: MSR APICBase changed %RX64 -> %RX64 (%RX64)\n",
2008 pVCpu->idCpu, uOldBase, paValues[iReg].Reg64, paValues[iReg].Reg64 ^ uOldBase));
2009 int rc2 = APICSetBaseMsr(pVCpu, paValues[iReg].Reg64);
2010 AssertLogRelMsg(rc2 == VINF_SUCCESS, ("rc2=%Rrc [%#RX64]\n", rc2, paValues[iReg].Reg64));
2011 }
2012 iReg++;
2013
2014 Assert(pInput->Names[iReg] == HvX64RegisterPat);
2015 if (pCtx->msrPAT != paValues[iReg].Reg64)
2016 Log7(("NEM/%u: MSR PAT changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrPAT, paValues[iReg].Reg64));
2017 pCtx->msrPAT = paValues[iReg].Reg64;
2018 iReg++;
2019
2020# if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
2021 Assert(pInput->Names[iReg] == HvX64RegisterMtrrCap);
2022 if (paValues[iReg].Reg64 != CPUMGetGuestIa32MtrrCap(pVCpu))
2023 Log7(("NEM/%u: MSR MTRR_CAP changed %RX64 -> %RX64 (!!)\n", pVCpu->idCpu, CPUMGetGuestIa32MtrrCap(pVCpu), paValues[iReg].Reg64));
2024 iReg++;
2025# endif
2026
2027 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
2028 Assert(pInput->Names[iReg] == HvX64RegisterMtrrDefType);
2029 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrDefType )
2030 Log7(("NEM/%u: MSR MTRR_DEF_TYPE changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrDefType, paValues[iReg].Reg64));
2031 pCtxMsrs->msr.MtrrDefType = paValues[iReg].Reg64;
2032 iReg++;
2033
2034 /** @todo we don't keep state for HvX64RegisterMtrrPhysBaseX and HvX64RegisterMtrrPhysMaskX */
2035
2036 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix64k00000);
2037 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix64K_00000 )
2038 Log7(("NEM/%u: MSR MTRR_FIX64K_00000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix64K_00000, paValues[iReg].Reg64));
2039 pCtxMsrs->msr.MtrrFix64K_00000 = paValues[iReg].Reg64;
2040 iReg++;
2041
2042 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix16k80000);
2043 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix16K_80000 )
2044 Log7(("NEM/%u: MSR MTRR_FIX16K_80000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_80000, paValues[iReg].Reg64));
2045 pCtxMsrs->msr.MtrrFix16K_80000 = paValues[iReg].Reg64;
2046 iReg++;
2047
2048 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix16kA0000);
2049 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix16K_A0000 )
2050 Log7(("NEM/%u: MSR MTRR_FIX16K_A0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_A0000, paValues[iReg].Reg64));
2051 pCtxMsrs->msr.MtrrFix16K_A0000 = paValues[iReg].Reg64;
2052 iReg++;
2053
2054 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kC0000);
2055 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_C0000 )
2056 Log7(("NEM/%u: MSR MTRR_FIX4K_C0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C0000, paValues[iReg].Reg64));
2057 pCtxMsrs->msr.MtrrFix4K_C0000 = paValues[iReg].Reg64;
2058 iReg++;
2059
2060 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kC8000);
2061 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_C8000 )
2062 Log7(("NEM/%u: MSR MTRR_FIX4K_C8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C8000, paValues[iReg].Reg64));
2063 pCtxMsrs->msr.MtrrFix4K_C8000 = paValues[iReg].Reg64;
2064 iReg++;
2065
2066 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kD0000);
2067 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_D0000 )
2068 Log7(("NEM/%u: MSR MTRR_FIX4K_D0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D0000, paValues[iReg].Reg64));
2069 pCtxMsrs->msr.MtrrFix4K_D0000 = paValues[iReg].Reg64;
2070 iReg++;
2071
2072 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kD8000);
2073 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_D8000 )
2074 Log7(("NEM/%u: MSR MTRR_FIX4K_D8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D8000, paValues[iReg].Reg64));
2075 pCtxMsrs->msr.MtrrFix4K_D8000 = paValues[iReg].Reg64;
2076 iReg++;
2077
2078 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kE0000);
2079 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_E0000 )
2080 Log7(("NEM/%u: MSR MTRR_FIX4K_E0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E0000, paValues[iReg].Reg64));
2081 pCtxMsrs->msr.MtrrFix4K_E0000 = paValues[iReg].Reg64;
2082 iReg++;
2083
2084 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kE8000);
2085 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_E8000 )
2086 Log7(("NEM/%u: MSR MTRR_FIX4K_E8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E8000, paValues[iReg].Reg64));
2087 pCtxMsrs->msr.MtrrFix4K_E8000 = paValues[iReg].Reg64;
2088 iReg++;
2089
2090 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kF0000);
2091 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_F0000 )
2092 Log7(("NEM/%u: MSR MTRR_FIX4K_F0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F0000, paValues[iReg].Reg64));
2093 pCtxMsrs->msr.MtrrFix4K_F0000 = paValues[iReg].Reg64;
2094 iReg++;
2095
2096 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kF8000);
2097 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_F8000 )
2098 Log7(("NEM/%u: MSR MTRR_FIX4K_F8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F8000, paValues[iReg].Reg64));
2099 pCtxMsrs->msr.MtrrFix4K_F8000 = paValues[iReg].Reg64;
2100 iReg++;
2101
2102 Assert(pInput->Names[iReg] == HvX64RegisterTscAux);
2103 if (paValues[iReg].Reg64 != pCtxMsrs->msr.TscAux )
2104 Log7(("NEM/%u: MSR TSC_AUX changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.TscAux, paValues[iReg].Reg64));
2105 pCtxMsrs->msr.TscAux = paValues[iReg].Reg64;
2106 iReg++;
2107
2108# if 0 /** @todo why can't we even read HvX64RegisterIa32MiscEnable? */
2109 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
2110 {
2111 Assert(pInput->Names[iReg] == HvX64RegisterIa32MiscEnable);
2112 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MiscEnable)
2113 Log7(("NEM/%u: MSR MISC_ENABLE changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MiscEnable, paValues[iReg].Reg64));
2114 pCtxMsrs->msr.MiscEnable = paValues[iReg].Reg64;
2115 iReg++;
2116 }
2117# endif
2118# ifdef LOG_ENABLED
2119 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
2120 {
2121 Assert(pInput->Names[iReg] == HvX64RegisterIa32FeatureControl);
2122 if (paValues[iReg].Reg64 != pCtx->hwvirt.vmx.Msrs.u64FeatCtrl)
2123 Log7(("NEM/%u: MSR FEATURE_CONTROL changed %RX64 -> %RX64 (!!)\n", pVCpu->idCpu, pCtx->hwvirt.vmx.Msrs.u64FeatCtrl, paValues[iReg].Reg64));
2124 iReg++;
2125 }
2126# endif
2127 }
2128
2129 /* Interruptibility. */
2130 if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
2131 {
2132 Assert(pInput->Names[iReg] == HvRegisterInterruptState);
2133 Assert(pInput->Names[iReg + 1] == HvX64RegisterRip);
2134
2135 if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
2136 {
2137 pVCpu->nem.s.fLastInterruptShadow = paValues[iReg].InterruptState.InterruptShadow;
2138 if (paValues[iReg].InterruptState.InterruptShadow)
2139 EMSetInhibitInterruptsPC(pVCpu, paValues[iReg + 1].Reg64);
2140 else
2141 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2142 }
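        /* The RIP requested right after HvRegisterInterruptState is what the interrupt shadow
           gets anchored to: EMSetInhibitInterruptsPC() records it so the inhibition can be
           dropped once execution moves past that instruction. */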
2143
2144 if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
2145 {
2146 if (paValues[iReg].InterruptState.NmiMasked)
2147 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
2148 else
2149 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
2150 }
2151
2152 fWhat |= CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI;
2153 iReg += 2;
2154 }
2155
2156 /* Event injection. */
2157 /// @todo HvRegisterPendingInterruption
2158 Assert(pInput->Names[iReg] == HvRegisterPendingInterruption);
2159 if (paValues[iReg].PendingInterruption.InterruptionPending)
2160 {
2161 Log7(("PendingInterruption: type=%u vector=%#x errcd=%RTbool/%#x instr-len=%u nested=%u\n",
2162 paValues[iReg].PendingInterruption.InterruptionType, paValues[iReg].PendingInterruption.InterruptionVector,
2163 paValues[iReg].PendingInterruption.DeliverErrorCode, paValues[iReg].PendingInterruption.ErrorCode,
2164 paValues[iReg].PendingInterruption.InstructionLength, paValues[iReg].PendingInterruption.NestedEvent));
2165 AssertMsg((paValues[iReg].PendingInterruption.AsUINT64 & UINT64_C(0xfc00)) == 0,
2166 ("%#RX64\n", paValues[iReg].PendingInterruption.AsUINT64));
2167 }
2168
2169 /// @todo HvRegisterPendingEvent0
2170 /// @todo HvRegisterPendingEvent1
2171
2172 /* Almost done, just update extrn flags and maybe change PGM mode. */
2173 pCtx->fExtrn &= ~fWhat;
2174 if (!(pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT))))
2175 pCtx->fExtrn = 0;
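    /* Once only NEM-internal bookkeeping (the event-inject bit) would remain external, the
       whole mask is dropped so CPUM sees a fully imported context. */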
2176
2177 /* Typical. */
2178 if (!fMaybeChangedMode && !fUpdateCr3)
2179 return VINF_SUCCESS;
2180
2181 /*
2182 * Slow.
2183 */
2184 int rc = VINF_SUCCESS;
2185 if (fMaybeChangedMode)
2186 {
2187 rc = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
2188 AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_1);
2189 }
2190
2191 if (fUpdateCr3)
2192 {
2193 if (fCanUpdateCr3)
2194 {
2195 LogFlow(("nemR0WinImportState: -> PGMUpdateCR3!\n"));
2196 rc = PGMUpdateCR3(pVCpu, pCtx->cr3);
2197 AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_2);
2198 }
2199 else
2200 {
2201 LogFlow(("nemR0WinImportState: -> VERR_NEM_FLUSH_TLB!\n"));
2202 rc = VERR_NEM_FLUSH_TLB; /* Calling PGMFlushTLB w/o long jump setup doesn't work, ring-3 does it. */
2203 }
2204 }
2205
2206 return rc;
2207}
2208#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
2209
2210
2211/**
2212 * Import the state from the native API (back to CPUMCTX).
2213 *
2214 * @returns VBox status code
2215 * @param pGVM The ring-0 VM handle.
2216 * @param pVM The cross context VM handle.
2217 * @param idCpu The calling EMT. Necessary for getting the
2218 * hypercall page and arguments.
2219 * @param fWhat What to import, CPUMCTX_EXTRN_XXX. Set
2220 * CPUMCTX_EXTRN_ALL for everything.
2221 */
2222VMMR0_INT_DECL(int) NEMR0ImportState(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, uint64_t fWhat)
2223{
2224#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2225 /*
2226 * Validate the call.
2227 */
2228 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
2229 if (RT_SUCCESS(rc))
2230 {
2231 PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
2232 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2233 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2234
2235 /*
2236 * Call worker.
2237 */
2238 rc = nemR0WinImportState(pGVM, pGVCpu, &pVCpu->cpum.GstCtx, fWhat, false /*fCanUpdateCr3*/);
2239 }
2240 return rc;
2241#else
2242 RT_NOREF(pGVM, pVM, idCpu, fWhat);
2243 return VERR_NOT_IMPLEMENTED;
2244#endif
2245}
2246
2247
2248#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2249/**
2250 * Worker for NEMR0QueryCpuTick and the ring-0 NEMHCQueryCpuTick.
2251 *
2252 * @returns VBox status code.
2253 * @param pGVM The ring-0 VM handle.
2254 * @param pGVCpu The ring-0 VCPU handle.
2255 * @param pcTicks Where to return the current CPU tick count.
2256 * @param pcAux Where to return the hyper-V TSC_AUX value. Optional.
2257 */
2258NEM_TMPL_STATIC int nemR0WinQueryCpuTick(PGVM pGVM, PGVMCPU pGVCpu, uint64_t *pcTicks, uint32_t *pcAux)
2259{
2260 /*
2261 * Hypercall parameters.
2262 */
2263 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
2264 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2265 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2266
2267 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
2268 pInput->VpIndex = pGVCpu->idCpu;
2269 pInput->fFlags = 0;
2270 pInput->Names[0] = HvX64RegisterTsc;
2271 pInput->Names[1] = HvX64RegisterTscAux;
2272
2273 size_t const cbInput = RT_ALIGN_Z(RT_UOFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[2]), 32);
2274 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
2275 RT_BZERO(paValues, sizeof(paValues[0]) * 2);
2276
2277 /*
2278 * Make the hypercall.
2279 */
2280 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, 2),
2281 pGVCpu->nemr0.s.HypercallData.HCPhysPage,
2282 pGVCpu->nemr0.s.HypercallData.HCPhysPage + cbInput);
2283 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(2), ("uResult=%RX64 cRegs=%#x\n", uResult, 2),
2284 VERR_NEM_GET_REGISTERS_FAILED);
2285
2286 /*
2287 * Get results.
2288 */
2289 *pcTicks = paValues[0].Reg64;
2290 if (pcAux)
2291 *pcAux = paValues[1].Reg32; /* HvX64RegisterTscAux is the second register requested above. */
2292 return VINF_SUCCESS;
2293}
2294#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
2295
2296
2297/**
2298 * Queries the TSC and TSC_AUX values, putting the results in the calling EMT's nem.s.Hypercall.QueryCpuTick fields.
2299 *
2300 * @returns VBox status code
2301 * @param pGVM The ring-0 VM handle.
2302 * @param pVM The cross context VM handle.
2303 * @param idCpu The calling EMT. Necessary for getting the
2304 * hypercall page and arguments.
2305 */
2306VMMR0_INT_DECL(int) NEMR0QueryCpuTick(PGVM pGVM, PVMCC pVM, VMCPUID idCpu)
2307{
2308#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2309 /*
2310 * Validate the call.
2311 */
2312 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
2313 if (RT_SUCCESS(rc))
2314 {
2315 PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
2316 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2317 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2318
2319 /*
2320 * Call worker.
2321 */
2322 pVCpu->nem.s.Hypercall.QueryCpuTick.cTicks = 0;
2323 pVCpu->nem.s.Hypercall.QueryCpuTick.uAux = 0;
2324 rc = nemR0WinQueryCpuTick(pGVM, pGVCpu, &pVCpu->nem.s.Hypercall.QueryCpuTick.cTicks,
2325 &pVCpu->nem.s.Hypercall.QueryCpuTick.uAux);
2326 }
2327 return rc;
2328#else
2329 RT_NOREF(pGVM, pVM, idCpu);
2330 return VERR_NOT_IMPLEMENTED;
2331#endif
2332}
2333
2334
2335#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2336/**
2337 * Worker for NEMR0ResumeCpuTickOnAll and the ring-0 NEMHCResumeCpuTickOnAll.
2338 *
2339 * @returns VBox status code.
2340 * @param pGVM The ring-0 VM handle.
2341 * @param pGVCpu The ring-0 VCPU handle.
2342 * @param uPausedTscValue The TSC value at the time of pausing.
2343 */
2344NEM_TMPL_STATIC int nemR0WinResumeCpuTickOnAll(PGVM pGVM, PGVMCPU pGVCpu, uint64_t uPausedTscValue)
2345{
2346 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2347
2348 /*
2349 * Set up the hypercall parameters.
2350 */
2351 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
2352 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2353
2354 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
2355 pInput->VpIndex = 0;
2356 pInput->RsvdZ = 0;
2357 pInput->Elements[0].Name = HvX64RegisterTsc;
2358 pInput->Elements[0].Pad0 = 0;
2359 pInput->Elements[0].Pad1 = 0;
2360 pInput->Elements[0].Value.Reg128.High64 = 0;
2361 pInput->Elements[0].Value.Reg64 = uPausedTscValue;
2362
2363 /*
2364 * Disable interrupts and do the first virtual CPU.
2365 */
2366 RTCCINTREG const fSavedFlags = ASMIntDisableFlags();
2367 uint64_t const uFirstTsc = ASMReadTSC();
2368 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
2369 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0 /* no output */);
2370 AssertLogRelMsgReturnStmt(uResult == HV_MAKE_CALL_REP_RET(1), ("uResult=%RX64 uTsc=%#RX64\n", uResult, uPausedTscValue),
2371 ASMSetFlags(fSavedFlags), VERR_NEM_SET_TSC);
2372
2373 /*
2374 * Do the secondary processors, adjusting for elapsed TSC and keeping fingers crossed
2375 * that we don't introduce too much drift here.
2376 */
2377 for (VMCPUID iCpu = 1; iCpu < pGVM->cCpus; iCpu++)
2378 {
2379 Assert(pInput->PartitionId == pGVM->nemr0.s.idHvPartition);
2380 Assert(pInput->RsvdZ == 0);
2381 Assert(pInput->Elements[0].Name == HvX64RegisterTsc);
2382 Assert(pInput->Elements[0].Pad0 == 0);
2383 Assert(pInput->Elements[0].Pad1 == 0);
2384 Assert(pInput->Elements[0].Value.Reg128.High64 == 0);
2385
2386 pInput->VpIndex = iCpu;
2387 const uint64_t offDelta = (ASMReadTSC() - uFirstTsc);
2388 pInput->Elements[0].Value.Reg64 = uPausedTscValue + offDelta;
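        /* The host TSC delta since the first write approximates how long the preceding
           hypercalls took, so each later vCPU resumes slightly ahead of uPausedTscValue
           rather than behind it. */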
2389
2390 uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
2391 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0 /* no output */);
2392 AssertLogRelMsgReturnStmt(uResult == HV_MAKE_CALL_REP_RET(1),
2393 ("uResult=%RX64 uTsc=%#RX64 + %#RX64\n", uResult, uPausedTscValue, offDelta),
2394 ASMSetFlags(fSavedFlags), VERR_NEM_SET_TSC);
2395 }
2396
2397 /*
2398 * Done.
2399 */
2400 ASMSetFlags(fSavedFlags);
2401 return VINF_SUCCESS;
2402}
2403#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
2404
2405
2406/**
2407 * Sets the TSC register to @a uPausedTscValue on all CPUs.
2408 *
2409 * @returns VBox status code
2410 * @param pGVM The ring-0 VM handle.
2411 * @param pVM The cross context VM handle.
2412 * @param idCpu The calling EMT. Necessary for getting the
2413 * hypercall page and arguments.
2414 * @param uPausedTscValue The TSC value at the time of pausing.
2415 */
2416VMMR0_INT_DECL(int) NEMR0ResumeCpuTickOnAll(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, uint64_t uPausedTscValue)
2417{
2418#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2419 /*
2420 * Validate the call.
2421 */
2422 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
2423 if (RT_SUCCESS(rc))
2424 {
2425 PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
2426 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2427 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2428
2429 /*
2430 * Call worker.
2431 */
2432 pVCpu->nem.s.Hypercall.QueryCpuTick.cTicks = 0;
2433 pVCpu->nem.s.Hypercall.QueryCpuTick.uAux = 0;
2434 rc = nemR0WinResumeCpuTickOnAll(pGVM, pGVCpu, uPausedTscValue);
2435 }
2436 return rc;
2437#else
2438 RT_NOREF(pGVM, pVM, idCpu, uPausedTscValue);
2439 return VERR_NOT_IMPLEMENTED;
2440#endif
2441}
2442
2443
2444VMMR0_INT_DECL(VBOXSTRICTRC) NEMR0RunGuestCode(PGVM pGVM, VMCPUID idCpu)
2445{
2446#ifdef NEM_WIN_WITH_RING0_RUNLOOP
2447 if (pGVM->nemr0.s.fMayUseRing0Runloop)
2448 return nemHCWinRunGC(pGVM, &pGVM->aCpus[idCpu], pGVM, &pGVM->aCpus[idCpu]);
2449 return VERR_NEM_RING3_ONLY;
2450#else
2451 RT_NOREF(pGVM, idCpu);
2452 return VERR_NOT_IMPLEMENTED;
2453#endif
2454}
2455
2456
2457/**
2458 * Updates statistics in the VM structure.
2459 *
2460 * @returns VBox status code.
2461 * @param pGVM The ring-0 VM handle.
2462 * @param pVM The cross context VM handle.
2463 * @param idCpu The calling EMT, or NIL. Necessary for getting the hypercall
2464 * page and arguments.
2465 */
2466VMMR0_INT_DECL(int) NEMR0UpdateStatistics(PGVM pGVM, PVMCC pVM, VMCPUID idCpu)
2467{
2468 /*
2469 * Validate the call.
2470 */
2471 int rc;
2472 if (idCpu == NIL_VMCPUID)
2473 rc = GVMMR0ValidateGVMandVM(pGVM, pVM);
2474 else
2475 rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
2476 if (RT_SUCCESS(rc))
2477 {
2478 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2479
2480 PNEMR0HYPERCALLDATA pHypercallData = idCpu != NIL_VMCPUID
2481 ? &pGVM->aCpus[idCpu].nemr0.s.HypercallData
2482 : &pGVM->nemr0.s.HypercallData;
2483 if ( RT_VALID_PTR(pHypercallData->pbPage)
2484 && pHypercallData->HCPhysPage != NIL_RTHCPHYS)
2485 {
2486 if (idCpu == NIL_VMCPUID)
2487 rc = RTCritSectEnter(&pGVM->nemr0.s.HypercallDataCritSect);
2488 if (RT_SUCCESS(rc))
2489 {
2490 /*
2491 * Query the memory statistics for the partition.
2492 */
2493 HV_INPUT_GET_MEMORY_BALANCE *pInput = (HV_INPUT_GET_MEMORY_BALANCE *)pHypercallData->pbPage;
2494 pInput->TargetPartitionId = pGVM->nemr0.s.idHvPartition;
2495 pInput->ProximityDomainInfo.Flags.ProximityPreferred = 0;
2496 pInput->ProximityDomainInfo.Flags.ProxyimityInfoValid = 0;
2497 pInput->ProximityDomainInfo.Flags.Reserved = 0;
2498 pInput->ProximityDomainInfo.Id = 0;
2499
2500 HV_OUTPUT_GET_MEMORY_BALANCE *pOutput = (HV_OUTPUT_GET_MEMORY_BALANCE *)(pInput + 1);
2501 RT_ZERO(*pOutput);
2502
2503 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallGetMemoryBalance,
2504 pHypercallData->HCPhysPage,
2505 pHypercallData->HCPhysPage + sizeof(*pInput));
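                /* HvCallGetMemoryBalance is a simple (non-repeating) hypercall, so success is a
                   plain HV_STATUS_SUCCESS rather than the HV_MAKE_CALL_REP_RET() style value
                   checked for the repeating calls above. */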
2506 if (uResult == HV_STATUS_SUCCESS)
2507 {
2508 pVM->nem.s.R0Stats.cPagesAvailable = pOutput->PagesAvailable;
2509 pVM->nem.s.R0Stats.cPagesInUse = pOutput->PagesInUse;
2510 rc = VINF_SUCCESS;
2511 }
2512 else
2513 {
2514 LogRel(("HvCallGetMemoryBalance -> %#RX64 (%#RX64 %#RX64)!!\n",
2515 uResult, pOutput->PagesAvailable, pOutput->PagesInUse));
2516 rc = VERR_NEM_IPE_0;
2517 }
2518
2519 if (idCpu == NIL_VMCPUID)
2520 RTCritSectLeave(&pGVM->nemr0.s.HypercallDataCritSect);
2521 }
2522 }
2523 else
2524 rc = VERR_WRONG_ORDER;
2525 }
2526 return rc;
2527}
2528
2529
2530#if 1 && defined(DEBUG_bird)
2531/**
2532 * Debug only interface for poking around and exploring Hyper-V stuff.
2533 *
2534 * @param pGVM The ring-0 VM handle.
2535 * @param pVM The cross context VM handle.
2536 * @param idCpu The calling EMT.
2537 * @param u64Arg What to query. 0 == registers.
2538 */
2539VMMR0_INT_DECL(int) NEMR0DoExperiment(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, uint64_t u64Arg)
2540{
2541 /*
2542 * Resolve CPU structures.
2543 */
2544 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
2545 if (RT_SUCCESS(rc))
2546 {
2547 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2548
2549 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2550 PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
2551 if (u64Arg == 0)
2552 {
2553 /*
2554 * Query register.
2555 */
2556 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
2557 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2558
2559 size_t const cbInput = RT_ALIGN_Z(RT_UOFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[1]), 32);
2560 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
2561 RT_BZERO(paValues, sizeof(paValues[0]) * 1);
2562
2563 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
2564 pInput->VpIndex = pGVCpu->idCpu;
2565 pInput->fFlags = 0;
2566 pInput->Names[0] = (HV_REGISTER_NAME)pVCpu->nem.s.Hypercall.Experiment.uItem;
2567
2568 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, 1),
2569 pGVCpu->nemr0.s.HypercallData.HCPhysPage,
2570 pGVCpu->nemr0.s.HypercallData.HCPhysPage + cbInput);
2571 pVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_MAKE_CALL_REP_RET(1);
2572 pVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;
2573 pVCpu->nem.s.Hypercall.Experiment.uLoValue = paValues[0].Reg128.Low64;
2574 pVCpu->nem.s.Hypercall.Experiment.uHiValue = paValues[0].Reg128.High64;
2575 rc = VINF_SUCCESS;
2576 }
2577 else if (u64Arg == 1)
2578 {
2579 /*
2580 * Query partition property.
2581 */
2582 HV_INPUT_GET_PARTITION_PROPERTY *pInput = (HV_INPUT_GET_PARTITION_PROPERTY *)pGVCpu->nemr0.s.HypercallData.pbPage;
2583 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2584
2585 size_t const cbInput = RT_ALIGN_Z(sizeof(*pInput), 32);
2586 HV_OUTPUT_GET_PARTITION_PROPERTY *pOutput = (HV_OUTPUT_GET_PARTITION_PROPERTY *)((uint8_t *)pInput + cbInput);
2587 pOutput->PropertyValue = 0;
2588
2589 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
2590 pInput->PropertyCode = (HV_PARTITION_PROPERTY_CODE)pVCpu->nem.s.Hypercall.Experiment.uItem;
2591 pInput->uPadding = 0;
2592
2593 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallGetPartitionProperty,
2594 pGVCpu->nemr0.s.HypercallData.HCPhysPage,
2595 pGVCpu->nemr0.s.HypercallData.HCPhysPage + cbInput);
2596 pVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_STATUS_SUCCESS;
2597 pVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;
2598 pVCpu->nem.s.Hypercall.Experiment.uLoValue = pOutput->PropertyValue;
2599 pVCpu->nem.s.Hypercall.Experiment.uHiValue = 0;
2600 rc = VINF_SUCCESS;
2601 }
2602 else if (u64Arg == 2)
2603 {
2604 /*
2605 * Set register.
2606 */
2607 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
2608 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2609 RT_BZERO(pInput, RT_UOFFSETOF(HV_INPUT_SET_VP_REGISTERS, Elements[1]));
2610
2611 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
2612 pInput->VpIndex = pGVCpu->idCpu;
2613 pInput->RsvdZ = 0;
2614 pInput->Elements[0].Name = (HV_REGISTER_NAME)pVCpu->nem.s.Hypercall.Experiment.uItem;
2615 pInput->Elements[0].Value.Reg128.High64 = pVCpu->nem.s.Hypercall.Experiment.uHiValue;
2616 pInput->Elements[0].Value.Reg128.Low64 = pVCpu->nem.s.Hypercall.Experiment.uLoValue;
2617
2618 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
2619 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0);
2620 pVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_MAKE_CALL_REP_RET(1);
2621 pVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;
2622 rc = VINF_SUCCESS;
2623 }
2624 else
2625 rc = VERR_INVALID_FUNCTION;
2626 }
2627 return rc;
2628}
2629#endif /* DEBUG_bird */
2630