VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/NEMR0Native-win.cpp@80274

Last change on this file since 80274 was 80274, checked in by vboxsync, 6 years ago

VMM: Refactoring VMMR0/* and VMMRZ/* to use VMCC & VMMCPUCC. bugref:9217

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 115.3 KB
 
1/* $Id: NEMR0Native-win.cpp 80274 2019-08-14 14:34:38Z vboxsync $ */
2/** @file
3 * NEM - Native execution manager, native ring-0 Windows backend.
4 */
5
6/*
7 * Copyright (C) 2018-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_NEM
23#define VMCPU_INCL_CPUM_GST_CTX
24#include <iprt/nt/nt.h>
25#include <iprt/nt/hyperv.h>
26#include <iprt/nt/vid.h>
27#include <winerror.h>
28
29#include <VBox/vmm/nem.h>
30#include <VBox/vmm/iem.h>
31#include <VBox/vmm/em.h>
32#include <VBox/vmm/apic.h>
33#include <VBox/vmm/pdm.h>
34#include <VBox/vmm/dbgftrace.h>
35#include "NEMInternal.h"
36#include <VBox/vmm/gvm.h>
37#include <VBox/vmm/vmcc.h>
38#include <VBox/vmm/gvmm.h>
39#include <VBox/param.h>
40
41#include <iprt/dbg.h>
42#include <iprt/memobj.h>
43#include <iprt/string.h>
44#include <iprt/time.h>
45
46
47/* Assert compile context sanity. */
48#ifndef RT_OS_WINDOWS
49# error "Windows only file!"
50#endif
51#ifndef RT_ARCH_AMD64
52# error "AMD64 only file!"
53#endif
54
55
56/*********************************************************************************************************************************
57* Internal Functions *
58*********************************************************************************************************************************/
59typedef uint32_t DWORD; /* for winerror.h constants */
60
61
62/*********************************************************************************************************************************
63* Global Variables *
64*********************************************************************************************************************************/
65static uint64_t (*g_pfnHvlInvokeHypercall)(uint64_t uCallInfo, uint64_t HCPhysInput, uint64_t HCPhysOutput);
66
67/**
68 * WinHvr.sys!WinHvDepositMemory
69 *
70 * This API will try to allocate cPages on IdealNode and deposit them to the
71 * hypervisor for use with the given partition. The memory will be freed when
72 * VID.SYS calls WinHvWithdrawAllMemory as the partition is cleaned up.
73 *
74 * Apparently node numbers above 64 have a different meaning.
75 */
76static NTSTATUS (*g_pfnWinHvDepositMemory)(uintptr_t idPartition, size_t cPages, uintptr_t IdealNode, size_t *pcActuallyAdded);
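/* Note: This pointer is resolved from winhvr.sys in NEMR0InitVM below and is later
   used by nemR0WinMapPages to top up the partition's deposited memory in 512 page
   (2MB) chunks when HvCallMapGpaPages returns HV_STATUS_INSUFFICIENT_MEMORY. */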
77
78
79/*********************************************************************************************************************************
80* Internal Functions *
81*********************************************************************************************************************************/
82NEM_TMPL_STATIC int nemR0WinMapPages(PGVM pGVM, PVMCC pVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
83 uint32_t cPages, uint32_t fFlags);
84NEM_TMPL_STATIC int nemR0WinUnmapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys, uint32_t cPages);
85#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
86NEM_TMPL_STATIC int nemR0WinExportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx);
87NEM_TMPL_STATIC int nemR0WinImportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat, bool fCanUpdateCr3);
88NEM_TMPL_STATIC int nemR0WinQueryCpuTick(PGVM pGVM, PGVMCPU pGVCpu, uint64_t *pcTicks, uint32_t *pcAux);
89NEM_TMPL_STATIC int nemR0WinResumeCpuTickOnAll(PGVM pGVM, PGVMCPU pGVCpu, uint64_t uPausedTscValue);
90#endif
91DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, PVMCPUCC pVCpu, uint32_t uFunction, void *pvInput, uint32_t cbInput,
92 void *pvOutput, uint32_t cbOutput);
93
94
95/*
96 * Instantiate the code we share with ring-0.
97 */
98#ifdef NEM_WIN_WITH_RING0_RUNLOOP
99# define NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
100#else
101# undef NEM_WIN_TEMPLATE_MODE_OWN_RUN_API
102#endif
103#include "../VMMAll/NEMAllNativeTemplate-win.cpp.h"
104
105
106
107/**
108 * Worker for NEMR0InitVM that allocates a hypercall page.
109 *
110 * @returns VBox status code.
111 * @param pHypercallData The hypercall data page to initialize.
112 */
113static int nemR0InitHypercallData(PNEMR0HYPERCALLDATA pHypercallData)
114{
115 int rc = RTR0MemObjAllocPage(&pHypercallData->hMemObj, PAGE_SIZE, false /*fExecutable*/);
116 if (RT_SUCCESS(rc))
117 {
118 pHypercallData->HCPhysPage = RTR0MemObjGetPagePhysAddr(pHypercallData->hMemObj, 0 /*iPage*/);
119 AssertStmt(pHypercallData->HCPhysPage != NIL_RTHCPHYS, rc = VERR_INTERNAL_ERROR_3);
120 pHypercallData->pbPage = (uint8_t *)RTR0MemObjAddress(pHypercallData->hMemObj);
121 AssertStmt(pHypercallData->pbPage, rc = VERR_INTERNAL_ERROR_3);
122 if (RT_SUCCESS(rc))
123 return VINF_SUCCESS;
124
125 /* bail out */
126 RTR0MemObjFree(pHypercallData->hMemObj, true /*fFreeMappings*/);
127 }
128 pHypercallData->hMemObj = NIL_RTR0MEMOBJ;
129 pHypercallData->HCPhysPage = NIL_RTHCPHYS;
130 pHypercallData->pbPage = NULL;
131 return rc;
132}
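/* Note: The page allocated above serves as the hypercall input/output block: HCPhysPage
   is the host physical address handed to g_pfnHvlInvokeHypercall, while pbPage is the
   ring-0 mapping used to fill in the call arguments. */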
133
134/**
135 * Worker for NEMR0CleanupVM and NEMR0InitVM that cleans up a hypercall page.
136 *
137 * @param pHypercallData The hypercall data page to uninitialize.
138 */
139static void nemR0DeleteHypercallData(PNEMR0HYPERCALLDATA pHypercallData)
140{
141 /* Check pbPage here since it's NULL when not allocated, whereas hMemObj can be
142 either NIL_RTR0MEMOBJ or 0 (they aren't necessarily the same). */
143 if (pHypercallData->pbPage != NULL)
144 {
145 RTR0MemObjFree(pHypercallData->hMemObj, true /*fFreeMappings*/);
146 pHypercallData->pbPage = NULL;
147 }
148 pHypercallData->hMemObj = NIL_RTR0MEMOBJ;
149 pHypercallData->HCPhysPage = NIL_RTHCPHYS;
150}
151
152
153/**
154 * Called by NEMR3Init to make sure we've got what we need.
155 *
156 * @returns VBox status code.
157 * @param pGVM The ring-0 VM handle.
158 * @param pVM The cross context VM handle.
159 * @thread EMT(0)
160 */
161VMMR0_INT_DECL(int) NEMR0InitVM(PGVM pGVM, PVMCC pVM)
162{
163 AssertCompile(sizeof(pGVM->nemr0.s) <= sizeof(pGVM->nemr0.padding));
164 AssertCompile(sizeof(pGVM->aCpus[0].nemr0.s) <= sizeof(pGVM->aCpus[0].nemr0.padding));
165
166 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, 0);
167 AssertRCReturn(rc, rc);
168
169 /*
170 * We want to perform hypercalls here. The NT kernel started to expose a very low
171 * level interface for doing this somewhere between builds 14271 and 16299. Since
172 * we need build 17134 to get anywhere at all, the exact build is not relevant here.
173 *
174 * We also need to deposit memory to the hypervisor for use with the partition
175 * (page mapping structures and such).
176 */
177 RTDBGKRNLINFO hKrnlInfo;
178 rc = RTR0DbgKrnlInfoOpen(&hKrnlInfo, 0);
179 if (RT_SUCCESS(rc))
180 {
181 rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, NULL, "HvlInvokeHypercall", (void **)&g_pfnHvlInvokeHypercall);
182 if (RT_SUCCESS(rc))
183 rc = RTR0DbgKrnlInfoQuerySymbol(hKrnlInfo, "winhvr.sys", "WinHvDepositMemory", (void **)&g_pfnWinHvDepositMemory);
184 RTR0DbgKrnlInfoRelease(hKrnlInfo);
185 if (RT_SUCCESS(rc))
186 {
187 /*
188 * Allocate a page for non-EMT threads to use for hypercalls (update
189 * statistics and such) and a critical section protecting it.
190 */
191 rc = RTCritSectInit(&pGVM->nemr0.s.HypercallDataCritSect);
192 if (RT_SUCCESS(rc))
193 {
194 rc = nemR0InitHypercallData(&pGVM->nemr0.s.HypercallData);
195 if (RT_SUCCESS(rc))
196 {
197 /*
198 * Allocate a page for each VCPU to place hypercall data on.
199 */
200 for (VMCPUID i = 0; i < pGVM->cCpus; i++)
201 {
202 rc = nemR0InitHypercallData(&pGVM->aCpus[i].nemr0.s.HypercallData);
203 if (RT_FAILURE(rc))
204 {
205 while (i-- > 0)
206 nemR0DeleteHypercallData(&pGVM->aCpus[i].nemr0.s.HypercallData);
207 break;
208 }
209 }
210 if (RT_SUCCESS(rc))
211 {
212 /*
213 * So far, so good.
214 */
215 return rc;
216 }
217
218 /*
219 * Bail out.
220 */
221 nemR0DeleteHypercallData(&pGVM->nemr0.s.HypercallData);
222 }
223 RTCritSectDelete(&pGVM->nemr0.s.HypercallDataCritSect);
224 }
225 }
226 else
227 rc = VERR_NEM_MISSING_KERNEL_API;
228 }
229
230 RT_NOREF(pVM);
231 return rc;
232}
233
234
235/**
236 * Perform an I/O control operation on the partition handle (VID.SYS).
237 *
238 * @returns NT status code.
239 * @param pGVM The ring-0 VM structure.
240 * @param pVCpu The cross context CPU structure of the calling EMT.
241 * @param uFunction The function to perform.
242 * @param pvInput The input buffer. This must point within the VM
243 * structure so we can easily convert to a ring-3
244 * pointer if necessary.
245 * @param cbInput The size of the input. @a pvInput must be NULL when
246 * zero.
247 * @param pvOutput The output buffer. This must also point within the
248 * VM structure for ring-3 pointer magic.
249 * @param cbOutput The size of the output. @a pvOutput must be NULL
250 * when zero.
251 * @thread EMT(pVCpu)
252 */
253DECLINLINE(NTSTATUS) nemR0NtPerformIoControl(PGVM pGVM, PVMCPUCC pVCpu, uint32_t uFunction, void *pvInput, uint32_t cbInput,
254 void *pvOutput, uint32_t cbOutput)
255{
256#ifdef RT_STRICT
257 /*
258 * Input and output parameters are part of the VM CPU structure.
259 */
260 VMCPU_ASSERT_EMT(pVCpu);
261# ifdef VBOX_BUGREF_9217
262 if (pvInput)
263 AssertReturn(((uintptr_t)pvInput + cbInput) - (uintptr_t)pVCpu <= sizeof(*pVCpu), VERR_INVALID_PARAMETER);
264 if (pvOutput)
265 AssertReturn(((uintptr_t)pvOutput + cbOutput) - (uintptr_t)pVCpu <= sizeof(*pVCpu), VERR_INVALID_PARAMETER);
266# else
267 PVMCC pVM = pGVM->pVM;
268 size_t const cbVM = RT_UOFFSETOF_DYN(VM, aCpus[pGVM->cCpus]);
269 if (pvInput)
270 AssertReturn(((uintptr_t)pvInput + cbInput) - (uintptr_t)pVM <= cbVM, VERR_INVALID_PARAMETER);
271 if (pvOutput)
272 AssertReturn(((uintptr_t)pvOutput + cbOutput) - (uintptr_t)pVM <= cbVM, VERR_INVALID_PARAMETER);
273# endif
274#endif
275
276 int32_t rcNt = STATUS_UNSUCCESSFUL;
277 int rc = SUPR0IoCtlPerform(pGVM->nemr0.s.pIoCtlCtx, uFunction,
278 pvInput,
279#ifdef VBOX_BUGREF_9217
280 pvInput ? (uintptr_t)pvInput + pVCpu->nemr0.s.offRing3ConversionDelta : NIL_RTR3PTR,
281#else
282 pvInput ? (uintptr_t)pvInput + pGVM->nemr0.s.offRing3ConversionDelta : NIL_RTR3PTR,
283#endif
284 cbInput,
285 pvOutput,
286#ifdef VBOX_BUGREF_9217
287 pvOutput ? (uintptr_t)pvOutput + pVCpu->nemr0.s.offRing3ConversionDelta : NIL_RTR3PTR,
288#else
289 pvOutput ? (uintptr_t)pvOutput + pGVM->nemr0.s.offRing3ConversionDelta : NIL_RTR3PTR,
290#endif
291 cbOutput,
292 &rcNt);
293 if (RT_SUCCESS(rc) || !NT_SUCCESS((NTSTATUS)rcNt))
294 return (NTSTATUS)rcNt;
295#ifndef VBOX_BUGREF_9217
296 RT_NOREF(pVCpu);
297#endif
298 return STATUS_UNSUCCESSFUL;
299}
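/* Note: SUPR0IoCtlPerform above is given both the ring-0 address of each buffer and its
   ring-3 alias, the latter obtained by adding offRing3ConversionDelta. That delta is
   computed in NEMR0InitVMPart2 as the distance between the ring-3 and ring-0 mappings of
   the (VM)CPU structure, which is why the buffers must lie within that structure. */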
300
301
302/**
303 * 2nd part of the initialization, after we've got a partition handle.
304 *
305 * @returns VBox status code.
306 * @param pGVM The ring-0 VM handle.
307 * @param pVM The cross context VM handle.
308 * @thread EMT(0)
309 */
310VMMR0_INT_DECL(int) NEMR0InitVMPart2(PGVM pGVM, PVMCC pVM)
311{
312 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, 0);
313 AssertRCReturn(rc, rc);
314 SUPR0Printf("NEMR0InitVMPart2\n"); LogRel(("2: NEMR0InitVMPart2\n"));
315 Assert(pGVM->nemr0.s.fMayUseRing0Runloop == false);
316
317 /*
318 * Copy and validate the I/O control information from ring-3.
319 */
320 NEMWINIOCTL Copy = pVM->nem.s.IoCtlGetHvPartitionId;
321 AssertLogRelReturn(Copy.uFunction != 0, VERR_NEM_INIT_FAILED);
322 AssertLogRelReturn(Copy.cbInput == 0, VERR_NEM_INIT_FAILED);
323 AssertLogRelReturn(Copy.cbOutput == sizeof(HV_PARTITION_ID), VERR_NEM_INIT_FAILED);
324 pGVM->nemr0.s.IoCtlGetHvPartitionId = Copy;
325
326 pGVM->nemr0.s.fMayUseRing0Runloop = pVM->nem.s.fUseRing0Runloop;
327
328 Copy = pVM->nem.s.IoCtlStartVirtualProcessor;
329 AssertLogRelStmt(Copy.uFunction != 0, rc = VERR_NEM_INIT_FAILED);
330 AssertLogRelStmt(Copy.cbInput == sizeof(HV_VP_INDEX), rc = VERR_NEM_INIT_FAILED);
331 AssertLogRelStmt(Copy.cbOutput == 0, rc = VERR_NEM_INIT_FAILED);
332 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlGetHvPartitionId.uFunction, rc = VERR_NEM_INIT_FAILED);
333 if (RT_SUCCESS(rc))
334 pGVM->nemr0.s.IoCtlStartVirtualProcessor = Copy;
335
336 Copy = pVM->nem.s.IoCtlStopVirtualProcessor;
337 AssertLogRelStmt(Copy.uFunction != 0, rc = VERR_NEM_INIT_FAILED);
338 AssertLogRelStmt(Copy.cbInput == sizeof(HV_VP_INDEX), rc = VERR_NEM_INIT_FAILED);
339 AssertLogRelStmt(Copy.cbOutput == 0, rc = VERR_NEM_INIT_FAILED);
340 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlGetHvPartitionId.uFunction, rc = VERR_NEM_INIT_FAILED);
341 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlStartVirtualProcessor.uFunction, rc = VERR_NEM_INIT_FAILED);
342 if (RT_SUCCESS(rc))
343 pGVM->nemr0.s.IoCtlStopVirtualProcessor = Copy;
344
345 Copy = pVM->nem.s.IoCtlMessageSlotHandleAndGetNext;
346 AssertLogRelStmt(Copy.uFunction != 0, rc = VERR_NEM_INIT_FAILED);
347 AssertLogRelStmt( Copy.cbInput == sizeof(VID_IOCTL_INPUT_MESSAGE_SLOT_HANDLE_AND_GET_NEXT)
348 || Copy.cbInput == RT_OFFSETOF(VID_IOCTL_INPUT_MESSAGE_SLOT_HANDLE_AND_GET_NEXT, cMillies),
349 rc = VERR_NEM_INIT_FAILED);
350 AssertLogRelStmt(Copy.cbOutput == 0, rc = VERR_NEM_INIT_FAILED);
351 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlGetHvPartitionId.uFunction, rc = VERR_NEM_INIT_FAILED);
352 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlStartVirtualProcessor.uFunction, rc = VERR_NEM_INIT_FAILED);
353 AssertLogRelStmt(Copy.uFunction != pGVM->nemr0.s.IoCtlStopVirtualProcessor.uFunction, rc = VERR_NEM_INIT_FAILED);
354 if (RT_SUCCESS(rc))
355 pGVM->nemr0.s.IoCtlMessageSlotHandleAndGetNext = Copy;
356
357 if ( RT_SUCCESS(rc)
358 || !pVM->nem.s.fUseRing0Runloop)
359 {
360 /*
361 * Setup of an I/O control context for the partition handle for later use.
362 */
363 rc = SUPR0IoCtlSetupForHandle(pGVM->pSession, pVM->nem.s.hPartitionDevice, 0, &pGVM->nemr0.s.pIoCtlCtx);
364 AssertLogRelRCReturn(rc, rc);
365#ifdef VBOX_BUGREF_9217
366 for (VMCPUID idCpu = 0; idCpu < pGVM->cCpus; idCpu++)
367 {
368 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
369 pGVCpu->nemr0.s.offRing3ConversionDelta = (uintptr_t)pGVM->aCpus[idCpu].pVCpuR3 - (uintptr_t)pGVCpu;
370 }
371#else
372 pGVM->nemr0.s.offRing3ConversionDelta = (uintptr_t)pVM->pVMR3 - (uintptr_t)pGVM->pVM;
373#endif
374
375 /*
376 * Get the partition ID.
377 */
378#ifdef VBOX_BUGREF_9217
379 PVMCPUCC pVCpu0 = &pGVM->aCpus[0];
380#else
381 PVMCPUCC pVCpu0 = &pGVM->pVM->aCpus[0];
382#endif
383 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pVCpu0, pGVM->nemr0.s.IoCtlGetHvPartitionId.uFunction, NULL, 0,
384 &pVCpu0->nem.s.uIoCtlBuf.idPartition, sizeof(pVCpu0->nem.s.uIoCtlBuf.idPartition));
385 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("IoCtlGetHvPartitionId failed: %#x\n", rcNt), VERR_NEM_INIT_FAILED);
386 pGVM->nemr0.s.idHvPartition = pVCpu0->nem.s.uIoCtlBuf.idPartition;
387 AssertLogRelMsgReturn(pGVM->nemr0.s.idHvPartition == pVM->nem.s.idHvPartition,
388 ("idHvPartition mismatch: r0=%#RX64, r3=%#RX64\n", pGVM->nemr0.s.idHvPartition, pVM->nem.s.idHvPartition),
389 VERR_NEM_INIT_FAILED);
390 }
391
392 return rc;
393}
394
395
396/**
397 * Cleanup the NEM parts of the VM in ring-0.
398 *
399 * This is always called and must deal with the state regardless of whether
400 * NEMR0InitVM() was called or not. So, take care here.
401 *
402 * @param pGVM The ring-0 VM handle.
403 */
404VMMR0_INT_DECL(void) NEMR0CleanupVM(PGVM pGVM)
405{
406 pGVM->nemr0.s.idHvPartition = HV_PARTITION_ID_INVALID;
407
408 /* Clean up I/O control context. */
409 if (pGVM->nemr0.s.pIoCtlCtx)
410 {
411 int rc = SUPR0IoCtlCleanup(pGVM->nemr0.s.pIoCtlCtx);
412 AssertRC(rc);
413 pGVM->nemr0.s.pIoCtlCtx = NULL;
414 }
415
416 /* Free the hypercall pages. */
417 VMCPUID i = pGVM->cCpus;
418 while (i-- > 0)
419 nemR0DeleteHypercallData(&pGVM->aCpus[i].nemr0.s.HypercallData);
420
421 /* The non-EMT one too. */
422 if (RTCritSectIsInitialized(&pGVM->nemr0.s.HypercallDataCritSect))
423 RTCritSectDelete(&pGVM->nemr0.s.HypercallDataCritSect);
424 nemR0DeleteHypercallData(&pGVM->nemr0.s.HypercallData);
425}
426
427
428#if 0 /* for debugging GPA unmapping. */
429static int nemR3WinDummyReadGpa(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys)
430{
431 PHV_INPUT_READ_GPA pIn = (PHV_INPUT_READ_GPA)pGVCpu->nemr0.s.pbHypercallData;
432 PHV_OUTPUT_READ_GPA pOut = (PHV_OUTPUT_READ_GPA)(pIn + 1);
433 pIn->PartitionId = pGVM->nemr0.s.idHvPartition;
434 pIn->VpIndex = pGVCpu->idCpu;
435 pIn->ByteCount = 0x10;
436 pIn->BaseGpa = GCPhys;
437 pIn->ControlFlags.AsUINT64 = 0;
438 pIn->ControlFlags.CacheType = HvCacheTypeX64WriteCombining;
439 memset(pOut, 0xfe, sizeof(*pOut));
440 uint64_t volatile uResult = g_pfnHvlInvokeHypercall(HvCallReadGpa, pGVCpu->nemr0.s.HCPhysHypercallData,
441 pGVCpu->nemr0.s.HCPhysHypercallData + sizeof(*pIn));
442 LogRel(("nemR3WinDummyReadGpa: %RGp -> %#RX64; code=%u rsvd=%u abData=%.16Rhxs\n",
443 GCPhys, uResult, pOut->AccessResult.ResultCode, pOut->AccessResult.Reserved, pOut->Data));
444 __debugbreak();
445
446 return uResult != 0 ? VERR_READ_ERROR : VINF_SUCCESS;
447}
448#endif
449
450
451/**
452 * Worker for NEMR0MapPages and others.
453 */
454NEM_TMPL_STATIC int nemR0WinMapPages(PGVM pGVM, PVMCC pVM, PGVMCPU pGVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
455 uint32_t cPages, uint32_t fFlags)
456{
457 /*
458 * Validate.
459 */
460 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
461
462 AssertReturn(cPages > 0, VERR_OUT_OF_RANGE);
463 AssertReturn(cPages <= NEM_MAX_MAP_PAGES, VERR_OUT_OF_RANGE);
464 AssertReturn(!(fFlags & ~(HV_MAP_GPA_MAYBE_ACCESS_MASK & ~HV_MAP_GPA_DUNNO_ACCESS)), VERR_INVALID_FLAGS);
465 AssertMsgReturn(!(GCPhysDst & X86_PAGE_OFFSET_MASK), ("GCPhysDst=%RGp\n", GCPhysDst), VERR_OUT_OF_RANGE);
466 AssertReturn(GCPhysDst < _1E, VERR_OUT_OF_RANGE);
467 if (GCPhysSrc != GCPhysDst)
468 {
469 AssertMsgReturn(!(GCPhysSrc & X86_PAGE_OFFSET_MASK), ("GCPhysSrc=%RGp\n", GCPhysSrc), VERR_OUT_OF_RANGE);
470 AssertReturn(GCPhysSrc < _1E, VERR_OUT_OF_RANGE);
471 }
472
473 /*
474 * Compose and make the hypercall.
475 * Ring-3 is not allowed to fill in the host physical addresses of the call.
476 */
477 for (uint32_t iTries = 0;; iTries++)
478 {
479 HV_INPUT_MAP_GPA_PAGES *pMapPages = (HV_INPUT_MAP_GPA_PAGES *)pGVCpu->nemr0.s.HypercallData.pbPage;
480 AssertPtrReturn(pMapPages, VERR_INTERNAL_ERROR_3);
481 pMapPages->TargetPartitionId = pGVM->nemr0.s.idHvPartition;
482 pMapPages->TargetGpaBase = GCPhysDst >> X86_PAGE_SHIFT;
483 pMapPages->MapFlags = fFlags;
484 pMapPages->u32ExplicitPadding = 0;
485 for (uint32_t iPage = 0; iPage < cPages; iPage++, GCPhysSrc += X86_PAGE_SIZE)
486 {
487 RTHCPHYS HCPhys = NIL_RTGCPHYS;
488 int rc = PGMPhysGCPhys2HCPhys(pVM, GCPhysSrc, &HCPhys);
489 AssertRCReturn(rc, rc);
490 pMapPages->PageList[iPage] = HCPhys >> X86_PAGE_SHIFT;
491 }
492
493 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallMapGpaPages | ((uint64_t)cPages << 32),
494 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0);
495 Log6(("NEMR0MapPages: %RGp/%RGp L %u prot %#x -> %#RX64\n",
496 GCPhysDst, GCPhysSrc - cPages * X86_PAGE_SIZE, cPages, fFlags, uResult));
497 if (uResult == ((uint64_t)cPages << 32))
498 return VINF_SUCCESS;
499
500 /*
501 * If the partition is out of memory, try to donate another 512 pages to
502 * it (2MB). VID.SYS does multiples of 512 pages, nothing smaller.
503 */
504 if ( uResult != HV_STATUS_INSUFFICIENT_MEMORY
505 || iTries > 16
506 || g_pfnWinHvDepositMemory == NULL)
507 {
508 LogRel(("g_pfnHvlInvokeHypercall/MapGpaPages -> %#RX64\n", uResult));
509 return VERR_NEM_MAP_PAGES_FAILED;
510 }
511
512 size_t cPagesAdded = 0;
513 NTSTATUS rcNt = g_pfnWinHvDepositMemory(pGVM->nemr0.s.idHvPartition, 512, 0, &cPagesAdded);
514 if (!cPagesAdded)
515 {
516 LogRel(("g_pfnWinHvDepositMemory -> %#x / %#RX64\n", rcNt, uResult));
517 return VERR_NEM_MAP_PAGES_FAILED;
518 }
519 }
520}
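/* Note: The "| ((uint64_t)cPages << 32)" above places the repetition count in the upper
   bits of the hypercall call info, and a fully successful rep hypercall reports the same
   count of completed elements back in the upper half of the result; that is what the
   "uResult == ((uint64_t)cPages << 32)" check relies on. */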
521
522
523/**
524 * Maps pages into the guest physical address space.
525 *
526 * Generally the caller will be under the PGM lock already, so no extra effort
527 * is needed to make sure all changes happen under it.
528 *
529 * @returns VBox status code.
530 * @param pGVM The ring-0 VM handle.
531 * @param pVM The cross context VM handle.
532 * @param idCpu The calling EMT. Necessary for getting the
533 * hypercall page and arguments.
534 * @thread EMT(idCpu)
535 */
536VMMR0_INT_DECL(int) NEMR0MapPages(PGVM pGVM, PVMCC pVM, VMCPUID idCpu)
537{
538 /*
539 * Unpack the call.
540 */
541 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
542 if (RT_SUCCESS(rc))
543 {
544 PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
545 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
546
547 RTGCPHYS const GCPhysSrc = pVCpu->nem.s.Hypercall.MapPages.GCPhysSrc;
548 RTGCPHYS const GCPhysDst = pVCpu->nem.s.Hypercall.MapPages.GCPhysDst;
549 uint32_t const cPages = pVCpu->nem.s.Hypercall.MapPages.cPages;
550 HV_MAP_GPA_FLAGS const fFlags = pVCpu->nem.s.Hypercall.MapPages.fFlags;
551
552 /*
553 * Do the work.
554 */
555 rc = nemR0WinMapPages(pGVM, pVM, pGVCpu, GCPhysSrc, GCPhysDst, cPages, fFlags);
556 }
557 return rc;
558}
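/* Note: The map parameters (GCPhysSrc, GCPhysDst, cPages, fFlags) are not ring-0 call
   arguments; ring-3 is presumably expected to stash them in pVCpu->nem.s.Hypercall.MapPages
   before invoking this entry point, which is why idCpu is needed to locate both the
   argument block and the per-VCPU hypercall page. */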
559
560
561/**
562 * Worker for NEMR0UnmapPages and others.
563 */
564NEM_TMPL_STATIC int nemR0WinUnmapPages(PGVM pGVM, PGVMCPU pGVCpu, RTGCPHYS GCPhys, uint32_t cPages)
565{
566 /*
567 * Validate input.
568 */
569 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
570
571 AssertReturn(cPages > 0, VERR_OUT_OF_RANGE);
572 AssertReturn(cPages <= NEM_MAX_UNMAP_PAGES, VERR_OUT_OF_RANGE);
573 AssertMsgReturn(!(GCPhys & X86_PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_OUT_OF_RANGE);
574 AssertReturn(GCPhys < _1E, VERR_OUT_OF_RANGE);
575
576 /*
577 * Compose and make the hypercall.
578 */
579 HV_INPUT_UNMAP_GPA_PAGES *pUnmapPages = (HV_INPUT_UNMAP_GPA_PAGES *)pGVCpu->nemr0.s.HypercallData.pbPage;
580 AssertPtrReturn(pUnmapPages, VERR_INTERNAL_ERROR_3);
581 pUnmapPages->TargetPartitionId = pGVM->nemr0.s.idHvPartition;
582 pUnmapPages->TargetGpaBase = GCPhys >> X86_PAGE_SHIFT;
583 pUnmapPages->fFlags = 0;
584
585 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallUnmapGpaPages | ((uint64_t)cPages << 32),
586 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0);
587 Log6(("NEMR0UnmapPages: %RGp L %u -> %#RX64\n", GCPhys, cPages, uResult));
588 if (uResult == ((uint64_t)cPages << 32))
589 {
590#if 1 /* Do we need to do this? Hopefully not... */
591 uint64_t volatile uR = g_pfnHvlInvokeHypercall(HvCallUncommitGpaPages | ((uint64_t)cPages << 32),
592 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0);
593 AssertMsg(uR == ((uint64_t)cPages << 32), ("uR=%#RX64\n", uR)); NOREF(uR);
594#endif
595 return VINF_SUCCESS;
596 }
597
598 LogRel(("g_pfnHvlInvokeHypercall/UnmapGpaPages -> %#RX64\n", uResult));
599 return VERR_NEM_UNMAP_PAGES_FAILED;
600}
601
602
603/**
604 * Unmaps pages from the guest physical address space.
605 *
606 * Generally the caller will be under the PGM lock already, so no extra effort
607 * is needed to make sure all changes happen under it.
608 *
609 * @returns VBox status code.
610 * @param pGVM The ring-0 VM handle.
611 * @param pVM The cross context VM handle.
612 * @param idCpu The calling EMT. Necessary for getting the
613 * hypercall page and arguments.
614 * @thread EMT(idCpu)
615 */
616VMMR0_INT_DECL(int) NEMR0UnmapPages(PGVM pGVM, PVMCC pVM, VMCPUID idCpu)
617{
618 /*
619 * Unpack the call.
620 */
621 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
622 if (RT_SUCCESS(rc))
623 {
624 PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
625 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
626
627 RTGCPHYS const GCPhys = pVCpu->nem.s.Hypercall.UnmapPages.GCPhys;
628 uint32_t const cPages = pVCpu->nem.s.Hypercall.UnmapPages.cPages;
629
630 /*
631 * Do the work.
632 */
633 rc = nemR0WinUnmapPages(pGVM, pGVCpu, GCPhys, cPages);
634 }
635 return rc;
636}
637
638
639#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
640/**
641 * Worker for NEMR0ExportState.
642 *
643 * Intention is to use it internally later.
644 *
645 * @returns VBox status code.
646 * @param pGVM The ring-0 VM handle.
647 * @param pGVCpu The ring-0 VCPU handle.
648 * @param pCtx The CPU context structure to export from.
649 */
650NEM_TMPL_STATIC int nemR0WinExportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx)
651{
652#ifdef VBOX_BUGREF_9217
653 PVMCPUCC pVCpu = pGVCpu;
654#else
655 PVMCPUCC pVCpu = &pGVM->pVM->aCpus[pGVCpu->idCpu];
656#endif
657 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
658 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
659 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
660
661 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
662 pInput->VpIndex = pGVCpu->idCpu;
663 pInput->RsvdZ = 0;
664
665 uint64_t const fWhat = ~pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK);
666 if ( !fWhat
667 && pVCpu->nem.s.fCurrentInterruptWindows == pVCpu->nem.s.fDesiredInterruptWindows)
668 return VINF_SUCCESS;
669 uintptr_t iReg = 0;
670
671 /* GPRs */
672 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
673 {
674 if (fWhat & CPUMCTX_EXTRN_RAX)
675 {
676 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
677 pInput->Elements[iReg].Name = HvX64RegisterRax;
678 pInput->Elements[iReg].Value.Reg64 = pCtx->rax;
679 iReg++;
680 }
681 if (fWhat & CPUMCTX_EXTRN_RCX)
682 {
683 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
684 pInput->Elements[iReg].Name = HvX64RegisterRcx;
685 pInput->Elements[iReg].Value.Reg64 = pCtx->rcx;
686 iReg++;
687 }
688 if (fWhat & CPUMCTX_EXTRN_RDX)
689 {
690 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
691 pInput->Elements[iReg].Name = HvX64RegisterRdx;
692 pInput->Elements[iReg].Value.Reg64 = pCtx->rdx;
693 iReg++;
694 }
695 if (fWhat & CPUMCTX_EXTRN_RBX)
696 {
697 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
698 pInput->Elements[iReg].Name = HvX64RegisterRbx;
699 pInput->Elements[iReg].Value.Reg64 = pCtx->rbx;
700 iReg++;
701 }
702 if (fWhat & CPUMCTX_EXTRN_RSP)
703 {
704 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
705 pInput->Elements[iReg].Name = HvX64RegisterRsp;
706 pInput->Elements[iReg].Value.Reg64 = pCtx->rsp;
707 iReg++;
708 }
709 if (fWhat & CPUMCTX_EXTRN_RBP)
710 {
711 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
712 pInput->Elements[iReg].Name = HvX64RegisterRbp;
713 pInput->Elements[iReg].Value.Reg64 = pCtx->rbp;
714 iReg++;
715 }
716 if (fWhat & CPUMCTX_EXTRN_RSI)
717 {
718 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
719 pInput->Elements[iReg].Name = HvX64RegisterRsi;
720 pInput->Elements[iReg].Value.Reg64 = pCtx->rsi;
721 iReg++;
722 }
723 if (fWhat & CPUMCTX_EXTRN_RDI)
724 {
725 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
726 pInput->Elements[iReg].Name = HvX64RegisterRdi;
727 pInput->Elements[iReg].Value.Reg64 = pCtx->rdi;
728 iReg++;
729 }
730 if (fWhat & CPUMCTX_EXTRN_R8_R15)
731 {
732 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
733 pInput->Elements[iReg].Name = HvX64RegisterR8;
734 pInput->Elements[iReg].Value.Reg64 = pCtx->r8;
735 iReg++;
736 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
737 pInput->Elements[iReg].Name = HvX64RegisterR9;
738 pInput->Elements[iReg].Value.Reg64 = pCtx->r9;
739 iReg++;
740 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
741 pInput->Elements[iReg].Name = HvX64RegisterR10;
742 pInput->Elements[iReg].Value.Reg64 = pCtx->r10;
743 iReg++;
744 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
745 pInput->Elements[iReg].Name = HvX64RegisterR11;
746 pInput->Elements[iReg].Value.Reg64 = pCtx->r11;
747 iReg++;
748 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
749 pInput->Elements[iReg].Name = HvX64RegisterR12;
750 pInput->Elements[iReg].Value.Reg64 = pCtx->r12;
751 iReg++;
752 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
753 pInput->Elements[iReg].Name = HvX64RegisterR13;
754 pInput->Elements[iReg].Value.Reg64 = pCtx->r13;
755 iReg++;
756 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
757 pInput->Elements[iReg].Name = HvX64RegisterR14;
758 pInput->Elements[iReg].Value.Reg64 = pCtx->r14;
759 iReg++;
760 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
761 pInput->Elements[iReg].Name = HvX64RegisterR15;
762 pInput->Elements[iReg].Value.Reg64 = pCtx->r15;
763 iReg++;
764 }
765 }
766
767 /* RIP & Flags */
768 if (fWhat & CPUMCTX_EXTRN_RIP)
769 {
770 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
771 pInput->Elements[iReg].Name = HvX64RegisterRip;
772 pInput->Elements[iReg].Value.Reg64 = pCtx->rip;
773 iReg++;
774 }
775 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
776 {
777 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
778 pInput->Elements[iReg].Name = HvX64RegisterRflags;
779 pInput->Elements[iReg].Value.Reg64 = pCtx->rflags.u;
780 iReg++;
781 }
782
783 /* Segments */
784# define COPY_OUT_SEG(a_idx, a_enmName, a_SReg) \
785 do { \
786 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[a_idx]); \
787 pInput->Elements[a_idx].Name = a_enmName; \
788 pInput->Elements[a_idx].Value.Segment.Base = (a_SReg).u64Base; \
789 pInput->Elements[a_idx].Value.Segment.Limit = (a_SReg).u32Limit; \
790 pInput->Elements[a_idx].Value.Segment.Selector = (a_SReg).Sel; \
791 pInput->Elements[a_idx].Value.Segment.Attributes = (a_SReg).Attr.u; \
792 } while (0)
793 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
794 {
795 if (fWhat & CPUMCTX_EXTRN_CS)
796 {
797 COPY_OUT_SEG(iReg, HvX64RegisterCs, pCtx->cs);
798 iReg++;
799 }
800 if (fWhat & CPUMCTX_EXTRN_ES)
801 {
802 COPY_OUT_SEG(iReg, HvX64RegisterEs, pCtx->es);
803 iReg++;
804 }
805 if (fWhat & CPUMCTX_EXTRN_SS)
806 {
807 COPY_OUT_SEG(iReg, HvX64RegisterSs, pCtx->ss);
808 iReg++;
809 }
810 if (fWhat & CPUMCTX_EXTRN_DS)
811 {
812 COPY_OUT_SEG(iReg, HvX64RegisterDs, pCtx->ds);
813 iReg++;
814 }
815 if (fWhat & CPUMCTX_EXTRN_FS)
816 {
817 COPY_OUT_SEG(iReg, HvX64RegisterFs, pCtx->fs);
818 iReg++;
819 }
820 if (fWhat & CPUMCTX_EXTRN_GS)
821 {
822 COPY_OUT_SEG(iReg, HvX64RegisterGs, pCtx->gs);
823 iReg++;
824 }
825 }
826
827 /* Descriptor tables & task segment. */
828 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
829 {
830 if (fWhat & CPUMCTX_EXTRN_LDTR)
831 {
832 COPY_OUT_SEG(iReg, HvX64RegisterLdtr, pCtx->ldtr);
833 iReg++;
834 }
835 if (fWhat & CPUMCTX_EXTRN_TR)
836 {
837 COPY_OUT_SEG(iReg, HvX64RegisterTr, pCtx->tr);
838 iReg++;
839 }
840
841 if (fWhat & CPUMCTX_EXTRN_IDTR)
842 {
843 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
844 pInput->Elements[iReg].Value.Table.Pad[0] = 0;
845 pInput->Elements[iReg].Value.Table.Pad[1] = 0;
846 pInput->Elements[iReg].Value.Table.Pad[2] = 0;
847 pInput->Elements[iReg].Name = HvX64RegisterIdtr;
848 pInput->Elements[iReg].Value.Table.Limit = pCtx->idtr.cbIdt;
849 pInput->Elements[iReg].Value.Table.Base = pCtx->idtr.pIdt;
850 iReg++;
851 }
852 if (fWhat & CPUMCTX_EXTRN_GDTR)
853 {
854 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
855 pInput->Elements[iReg].Value.Table.Pad[0] = 0;
856 pInput->Elements[iReg].Value.Table.Pad[1] = 0;
857 pInput->Elements[iReg].Value.Table.Pad[2] = 0;
858 pInput->Elements[iReg].Name = HvX64RegisterGdtr;
859 pInput->Elements[iReg].Value.Table.Limit = pCtx->gdtr.cbGdt;
860 pInput->Elements[iReg].Value.Table.Base = pCtx->gdtr.pGdt;
861 iReg++;
862 }
863 }
864
865 /* Control registers. */
866 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
867 {
868 if (fWhat & CPUMCTX_EXTRN_CR0)
869 {
870 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
871 pInput->Elements[iReg].Name = HvX64RegisterCr0;
872 pInput->Elements[iReg].Value.Reg64 = pCtx->cr0;
873 iReg++;
874 }
875 if (fWhat & CPUMCTX_EXTRN_CR2)
876 {
877 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
878 pInput->Elements[iReg].Name = HvX64RegisterCr2;
879 pInput->Elements[iReg].Value.Reg64 = pCtx->cr2;
880 iReg++;
881 }
882 if (fWhat & CPUMCTX_EXTRN_CR3)
883 {
884 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
885 pInput->Elements[iReg].Name = HvX64RegisterCr3;
886 pInput->Elements[iReg].Value.Reg64 = pCtx->cr3;
887 iReg++;
888 }
889 if (fWhat & CPUMCTX_EXTRN_CR4)
890 {
891 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
892 pInput->Elements[iReg].Name = HvX64RegisterCr4;
893 pInput->Elements[iReg].Value.Reg64 = pCtx->cr4;
894 iReg++;
895 }
896 }
897 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
898 {
899 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
900 pInput->Elements[iReg].Name = HvX64RegisterCr8;
901 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestCR8(pVCpu);
902 iReg++;
903 }
904
905 /** @todo does HvX64RegisterXfem mean XCR0? What about the related MSR. */
906
907 /* Debug registers. */
908/** @todo fixme. Figure out what the hyper-v version of KVM_SET_GUEST_DEBUG would be. */
909 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
910 {
911 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
912 pInput->Elements[iReg].Name = HvX64RegisterDr0;
913 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR0(pVCpu);
914 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[0];
915 iReg++;
916 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
917 pInput->Elements[iReg].Name = HvX64RegisterDr1;
918 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR1(pVCpu);
919 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[1];
920 iReg++;
921 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
922 pInput->Elements[iReg].Name = HvX64RegisterDr2;
923 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR2(pVCpu);
924 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[2];
925 iReg++;
926 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
927 pInput->Elements[iReg].Name = HvX64RegisterDr3;
928 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR3(pVCpu);
929 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[3];
930 iReg++;
931 }
932 if (fWhat & CPUMCTX_EXTRN_DR6)
933 {
934 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
935 pInput->Elements[iReg].Name = HvX64RegisterDr6;
936 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR6(pVCpu);
937 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[6];
938 iReg++;
939 }
940 if (fWhat & CPUMCTX_EXTRN_DR7)
941 {
942 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
943 pInput->Elements[iReg].Name = HvX64RegisterDr7;
944 //pInput->Elements[iReg].Value.Reg64 = CPUMGetHyperDR7(pVCpu);
945 pInput->Elements[iReg].Value.Reg64 = pCtx->dr[7];
946 iReg++;
947 }
948
949 /* Floating point state. */
950 if (fWhat & CPUMCTX_EXTRN_X87)
951 {
952 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
953 pInput->Elements[iReg].Name = HvX64RegisterFpMmx0;
954 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[0].au64[0];
955 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[0].au64[1];
956 iReg++;
957 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
958 pInput->Elements[iReg].Name = HvX64RegisterFpMmx1;
959 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[1].au64[0];
960 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[1].au64[1];
961 iReg++;
962 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
963 pInput->Elements[iReg].Name = HvX64RegisterFpMmx2;
964 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[2].au64[0];
965 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[2].au64[1];
966 iReg++;
967 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
968 pInput->Elements[iReg].Name = HvX64RegisterFpMmx3;
969 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[3].au64[0];
970 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[3].au64[1];
971 iReg++;
972 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
973 pInput->Elements[iReg].Name = HvX64RegisterFpMmx4;
974 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[4].au64[0];
975 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[4].au64[1];
976 iReg++;
977 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
978 pInput->Elements[iReg].Name = HvX64RegisterFpMmx5;
979 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[5].au64[0];
980 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[5].au64[1];
981 iReg++;
982 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
983 pInput->Elements[iReg].Name = HvX64RegisterFpMmx6;
984 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[6].au64[0];
985 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[6].au64[1];
986 iReg++;
987 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
988 pInput->Elements[iReg].Name = HvX64RegisterFpMmx7;
989 pInput->Elements[iReg].Value.Fp.AsUINT128.Low64 = pCtx->pXStateR0->x87.aRegs[7].au64[0];
990 pInput->Elements[iReg].Value.Fp.AsUINT128.High64 = pCtx->pXStateR0->x87.aRegs[7].au64[1];
991 iReg++;
992
993 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
994 pInput->Elements[iReg].Name = HvX64RegisterFpControlStatus;
995 pInput->Elements[iReg].Value.FpControlStatus.FpControl = pCtx->pXStateR0->x87.FCW;
996 pInput->Elements[iReg].Value.FpControlStatus.FpStatus = pCtx->pXStateR0->x87.FSW;
997 pInput->Elements[iReg].Value.FpControlStatus.FpTag = pCtx->pXStateR0->x87.FTW;
998 pInput->Elements[iReg].Value.FpControlStatus.Reserved = pCtx->pXStateR0->x87.FTW >> 8;
999 pInput->Elements[iReg].Value.FpControlStatus.LastFpOp = pCtx->pXStateR0->x87.FOP;
1000 pInput->Elements[iReg].Value.FpControlStatus.LastFpRip = (pCtx->pXStateR0->x87.FPUIP)
1001 | ((uint64_t)pCtx->pXStateR0->x87.CS << 32)
1002 | ((uint64_t)pCtx->pXStateR0->x87.Rsrvd1 << 48);
1003 iReg++;
1004/** @todo we've got trouble if we try to write just SSE w/o X87. */
1005 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1006 pInput->Elements[iReg].Name = HvX64RegisterXmmControlStatus;
1007 pInput->Elements[iReg].Value.XmmControlStatus.LastFpRdp = (pCtx->pXStateR0->x87.FPUDP)
1008 | ((uint64_t)pCtx->pXStateR0->x87.DS << 32)
1009 | ((uint64_t)pCtx->pXStateR0->x87.Rsrvd2 << 48);
1010 pInput->Elements[iReg].Value.XmmControlStatus.XmmStatusControl = pCtx->pXStateR0->x87.MXCSR;
1011 pInput->Elements[iReg].Value.XmmControlStatus.XmmStatusControlMask = pCtx->pXStateR0->x87.MXCSR_MASK; /** @todo ??? (Isn't this an output field?) */
1012 iReg++;
1013 }
1014
1015 /* Vector state. */
1016 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
1017 {
1018 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1019 pInput->Elements[iReg].Name = HvX64RegisterXmm0;
1020 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Lo;
1021 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Hi;
1022 iReg++;
1023 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1024 pInput->Elements[iReg].Name = HvX64RegisterXmm1;
1025 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Lo;
1026 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Hi;
1027 iReg++;
1028 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1029 pInput->Elements[iReg].Name = HvX64RegisterXmm2;
1030 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Lo;
1031 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Hi;
1032 iReg++;
1033 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1034 pInput->Elements[iReg].Name = HvX64RegisterXmm3;
1035 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Lo;
1036 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Hi;
1037 iReg++;
1038 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1039 pInput->Elements[iReg].Name = HvX64RegisterXmm4;
1040 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Lo;
1041 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Hi;
1042 iReg++;
1043 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1044 pInput->Elements[iReg].Name = HvX64RegisterXmm5;
1045 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Lo;
1046 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Hi;
1047 iReg++;
1048 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1049 pInput->Elements[iReg].Name = HvX64RegisterXmm6;
1050 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Lo;
1051 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Hi;
1052 iReg++;
1053 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1054 pInput->Elements[iReg].Name = HvX64RegisterXmm7;
1055 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Lo;
1056 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Hi;
1057 iReg++;
1058 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1059 pInput->Elements[iReg].Name = HvX64RegisterXmm8;
1060 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Lo;
1061 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Hi;
1062 iReg++;
1063 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1064 pInput->Elements[iReg].Name = HvX64RegisterXmm9;
1065 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Lo;
1066 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Hi;
1067 iReg++;
1068 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1069 pInput->Elements[iReg].Name = HvX64RegisterXmm10;
1070 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Lo;
1071 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Hi;
1072 iReg++;
1073 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1074 pInput->Elements[iReg].Name = HvX64RegisterXmm11;
1075 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Lo;
1076 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Hi;
1077 iReg++;
1078 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1079 pInput->Elements[iReg].Name = HvX64RegisterXmm12;
1080 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Lo;
1081 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Hi;
1082 iReg++;
1083 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1084 pInput->Elements[iReg].Name = HvX64RegisterXmm13;
1085 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Lo;
1086 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Hi;
1087 iReg++;
1088 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1089 pInput->Elements[iReg].Name = HvX64RegisterXmm14;
1090 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Lo;
1091 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Hi;
1092 iReg++;
1093 HV_REGISTER_ASSOC_ZERO_PADDING(&pInput->Elements[iReg]);
1094 pInput->Elements[iReg].Name = HvX64RegisterXmm15;
1095 pInput->Elements[iReg].Value.Reg128.Low64 = pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Lo;
1096 pInput->Elements[iReg].Value.Reg128.High64 = pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Hi;
1097 iReg++;
1098 }
1099
1100 /* MSRs */
1101 // HvX64RegisterTsc - don't touch
1102 if (fWhat & CPUMCTX_EXTRN_EFER)
1103 {
1104 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1105 pInput->Elements[iReg].Name = HvX64RegisterEfer;
1106 pInput->Elements[iReg].Value.Reg64 = pCtx->msrEFER;
1107 iReg++;
1108 }
1109 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1110 {
1111 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1112 pInput->Elements[iReg].Name = HvX64RegisterKernelGsBase;
1113 pInput->Elements[iReg].Value.Reg64 = pCtx->msrKERNELGSBASE;
1114 iReg++;
1115 }
1116 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1117 {
1118 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1119 pInput->Elements[iReg].Name = HvX64RegisterSysenterCs;
1120 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.cs;
1121 iReg++;
1122 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1123 pInput->Elements[iReg].Name = HvX64RegisterSysenterEip;
1124 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.eip;
1125 iReg++;
1126 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1127 pInput->Elements[iReg].Name = HvX64RegisterSysenterEsp;
1128 pInput->Elements[iReg].Value.Reg64 = pCtx->SysEnter.esp;
1129 iReg++;
1130 }
1131 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1132 {
1133 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1134 pInput->Elements[iReg].Name = HvX64RegisterStar;
1135 pInput->Elements[iReg].Value.Reg64 = pCtx->msrSTAR;
1136 iReg++;
1137 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1138 pInput->Elements[iReg].Name = HvX64RegisterLstar;
1139 pInput->Elements[iReg].Value.Reg64 = pCtx->msrLSTAR;
1140 iReg++;
1141 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1142 pInput->Elements[iReg].Name = HvX64RegisterCstar;
1143 pInput->Elements[iReg].Value.Reg64 = pCtx->msrCSTAR;
1144 iReg++;
1145 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1146 pInput->Elements[iReg].Name = HvX64RegisterSfmask;
1147 pInput->Elements[iReg].Value.Reg64 = pCtx->msrSFMASK;
1148 iReg++;
1149 }
1150 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1151 {
1152 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1153 pInput->Elements[iReg].Name = HvX64RegisterApicBase;
1154 pInput->Elements[iReg].Value.Reg64 = APICGetBaseMsrNoCheck(pVCpu);
1155 iReg++;
1156 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1157 pInput->Elements[iReg].Name = HvX64RegisterPat;
1158 pInput->Elements[iReg].Value.Reg64 = pCtx->msrPAT;
1159 iReg++;
1160# if 0 /** @todo HvX64RegisterMtrrCap is read only? Seems it's not even readable. */
1161 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1162 pInput->Elements[iReg].Name = HvX64RegisterMtrrCap;
1163 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestIa32MtrrCap(pVCpu);
1164 iReg++;
1165# endif
1166
1167 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
1168
1169 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1170 pInput->Elements[iReg].Name = HvX64RegisterMtrrDefType;
1171 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrDefType;
1172 iReg++;
1173
1174 /** @todo we don't keep state for HvX64RegisterMtrrPhysBaseX and HvX64RegisterMtrrPhysMaskX */
1175
1176 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1177 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix64k00000;
1178 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix64K_00000;
1179 iReg++;
1180 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1181 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix16k80000;
1182 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix16K_80000;
1183 iReg++;
1184 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1185 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix16kA0000;
1186 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix16K_A0000;
1187 iReg++;
1188 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1189 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kC0000;
1190 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_C0000;
1191 iReg++;
1192 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1193 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kC8000;
1194 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_C8000;
1195 iReg++;
1196 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1197 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kD0000;
1198 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_D0000;
1199 iReg++;
1200 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1201 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kD8000;
1202 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_D8000;
1203 iReg++;
1204 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1205 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kE0000;
1206 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_E0000;
1207 iReg++;
1208 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1209 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kE8000;
1210 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_E8000;
1211 iReg++;
1212 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1213 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kF0000;
1214 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_F0000;
1215 iReg++;
1216 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1217 pInput->Elements[iReg].Name = HvX64RegisterMtrrFix4kF8000;
1218 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MtrrFix4K_F8000;
1219 iReg++;
1220 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1221 pInput->Elements[iReg].Name = HvX64RegisterTscAux;
1222 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.TscAux;
1223 iReg++;
1224
1225# if 0 /** @todo Why can't we write these on Intel systems? Not that we really care... */
1226 const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM->pVM);
1227 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
1228 {
1229 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1230 pInput->Elements[iReg].Name = HvX64RegisterIa32MiscEnable;
1231 pInput->Elements[iReg].Value.Reg64 = pCtxMsrs->msr.MiscEnable;
1232 iReg++;
1233 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1234 pInput->Elements[iReg].Name = HvX64RegisterIa32FeatureControl;
1235 pInput->Elements[iReg].Value.Reg64 = CPUMGetGuestIa32FeatureControl(pVCpu);
1236 iReg++;
1237 }
1238# endif
1239 }
1240
1241 /* event injection (clear it). */
1242 if (fWhat & CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT)
1243 {
1244 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1245 pInput->Elements[iReg].Name = HvRegisterPendingInterruption;
1246 pInput->Elements[iReg].Value.Reg64 = 0;
1247 iReg++;
1248 }
1249
1250 /* Interruptibility state. This can get a little complicated since we get
1251 half of the state via HV_X64_VP_EXECUTION_STATE. */
1252 if ( (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
1253 == (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI) )
1254 {
1255 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1256 pInput->Elements[iReg].Name = HvRegisterInterruptState;
1257 pInput->Elements[iReg].Value.Reg64 = 0;
1258 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1259 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
1260 pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1;
1261 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
1262 pInput->Elements[iReg].Value.InterruptState.NmiMasked = 1;
1263 iReg++;
1264 }
1265 else if (fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT)
1266 {
1267 if ( pVCpu->nem.s.fLastInterruptShadow
1268 || ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1269 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip))
1270 {
1271 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1272 pInput->Elements[iReg].Name = HvRegisterInterruptState;
1273 pInput->Elements[iReg].Value.Reg64 = 0;
1274 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1275 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
1276 pInput->Elements[iReg].Value.InterruptState.InterruptShadow = 1;
1277 /** @todo Retrieve NMI state, currently assuming it's zero. (yes this may happen on I/O) */
1278 //if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
1279 // pInput->Elements[iReg].Value.InterruptState.NmiMasked = 1;
1280 iReg++;
1281 }
1282 }
1283 else
1284 Assert(!(fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI));
1285
1286 /* Interrupt windows. Always set if active as Hyper-V seems to be forgetful. */
1287 uint8_t const fDesiredIntWin = pVCpu->nem.s.fDesiredInterruptWindows;
1288 if ( fDesiredIntWin
1289 || pVCpu->nem.s.fCurrentInterruptWindows != fDesiredIntWin)
1290 {
1291 pVCpu->nem.s.fCurrentInterruptWindows = pVCpu->nem.s.fDesiredInterruptWindows;
1292 HV_REGISTER_ASSOC_ZERO_PADDING_AND_HI64(&pInput->Elements[iReg]);
1293 pInput->Elements[iReg].Name = HvX64RegisterDeliverabilityNotifications;
1294 pInput->Elements[iReg].Value.DeliverabilityNotifications.AsUINT64 = fDesiredIntWin;
1295 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.NmiNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_NMI));
1296 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.InterruptNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_REGULAR));
1297 Assert(pInput->Elements[iReg].Value.DeliverabilityNotifications.InterruptPriority == (fDesiredIntWin & NEM_WIN_INTW_F_PRIO_MASK) >> NEM_WIN_INTW_F_PRIO_SHIFT);
1298 iReg++;
1299 }
1300
1301 /// @todo HvRegisterPendingEvent0
1302 /// @todo HvRegisterPendingEvent1
1303
1304 /*
1305 * Set the registers.
1306 */
1307 Assert((uintptr_t)&pInput->Elements[iReg] - (uintptr_t)pGVCpu->nemr0.s.HypercallData.pbPage < PAGE_SIZE); /* max is 127 */
1308
1309 /*
1310 * Make the hypercall.
1311 */
1312 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, iReg),
1313 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0 /*GCPhysOutput*/);
1314 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(iReg),
1315 ("uResult=%RX64 iRegs=%#x\n", uResult, iReg),
1316 VERR_NEM_SET_REGISTERS_FAILED);
1317 //LogFlow(("nemR0WinExportState: uResult=%#RX64 iReg=%zu fWhat=%#018RX64 fExtrn=%#018RX64 -> %#018RX64\n", uResult, iReg, fWhat, pCtx->fExtrn,
1318 // pCtx->fExtrn | CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM ));
1319 pCtx->fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM;
1320 return VINF_SUCCESS;
1321}
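/* Note: Setting CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK above follows the usual
   fExtrn convention: a set bit means that piece of guest state now lives with the native
   API (Hyper-V) and must be imported again before VMM code may rely on the CPUMCTX copy. */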
1322#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
1323
1324
1325/**
1326 * Export the state to the native API (out of CPUMCTX).
1327 *
1328 * @returns VBox status code
1329 * @param pGVM The ring-0 VM handle.
1330 * @param pVM The cross context VM handle.
1331 * @param idCpu The calling EMT. Necessary for getting the
1332 * hypercall page and arguments.
1333 */
1334VMMR0_INT_DECL(int) NEMR0ExportState(PGVM pGVM, PVMCC pVM, VMCPUID idCpu)
1335{
1336#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
1337 /*
1338 * Validate the call.
1339 */
1340 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
1341 if (RT_SUCCESS(rc))
1342 {
1343 PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
1344 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
1345 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
1346
1347 /*
1348 * Call worker.
1349 */
1350 rc = nemR0WinExportState(pGVM, pGVCpu, &pVCpu->cpum.GstCtx);
1351 }
1352 return rc;
1353#else
1354 RT_NOREF(pGVM, pVM, idCpu);
1355 return VERR_NOT_IMPLEMENTED;
1356#endif
1357}
1358
1359
1360#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
1361/**
1362 * Worker for NEMR0ImportState.
1363 *
1364 * Intention is to use it internally later.
1365 *
1366 * @returns VBox status code.
1367 * @param pGVM The ring-0 VM handle.
1368 * @param pGVCpu The ring-0 VCPU handle.
1369 * @param pCtx The CPU context structure to import into.
1370 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
1371 * @param fCanUpdateCr3 Whether it's safe to update CR3 or not.
1372 */
1373NEM_TMPL_STATIC int nemR0WinImportState(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat, bool fCanUpdateCr3)
1374{
1375 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
1376 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
1377 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
1378#ifdef VBOX_BUGREF_9217
1379 Assert(pCtx == &pGVCpu->cpum.GstCtx);
1380#else
1381 Assert(pCtx == &pGVCpu->pVCpu->cpum.GstCtx);
1382#endif
1383
1384 fWhat &= pCtx->fExtrn;
1385
1386 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
1387 pInput->VpIndex = pGVCpu->idCpu;
1388 pInput->fFlags = 0;
1389
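    /*
     * Gather the names of the registers to fetch.  The same CPUMCTX_EXTRN_XXX
     * tests are replayed in the same order further down when copying the
     * returned values into the CPUM context; the Assert(pInput->Names[iReg] == ...)
     * checks there depend on the two passes staying in sync.
     */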
1390 /* GPRs */
1391 uintptr_t iReg = 0;
1392 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1393 {
1394 if (fWhat & CPUMCTX_EXTRN_RAX)
1395 pInput->Names[iReg++] = HvX64RegisterRax;
1396 if (fWhat & CPUMCTX_EXTRN_RCX)
1397 pInput->Names[iReg++] = HvX64RegisterRcx;
1398 if (fWhat & CPUMCTX_EXTRN_RDX)
1399 pInput->Names[iReg++] = HvX64RegisterRdx;
1400 if (fWhat & CPUMCTX_EXTRN_RBX)
1401 pInput->Names[iReg++] = HvX64RegisterRbx;
1402 if (fWhat & CPUMCTX_EXTRN_RSP)
1403 pInput->Names[iReg++] = HvX64RegisterRsp;
1404 if (fWhat & CPUMCTX_EXTRN_RBP)
1405 pInput->Names[iReg++] = HvX64RegisterRbp;
1406 if (fWhat & CPUMCTX_EXTRN_RSI)
1407 pInput->Names[iReg++] = HvX64RegisterRsi;
1408 if (fWhat & CPUMCTX_EXTRN_RDI)
1409 pInput->Names[iReg++] = HvX64RegisterRdi;
1410 if (fWhat & CPUMCTX_EXTRN_R8_R15)
1411 {
1412 pInput->Names[iReg++] = HvX64RegisterR8;
1413 pInput->Names[iReg++] = HvX64RegisterR9;
1414 pInput->Names[iReg++] = HvX64RegisterR10;
1415 pInput->Names[iReg++] = HvX64RegisterR11;
1416 pInput->Names[iReg++] = HvX64RegisterR12;
1417 pInput->Names[iReg++] = HvX64RegisterR13;
1418 pInput->Names[iReg++] = HvX64RegisterR14;
1419 pInput->Names[iReg++] = HvX64RegisterR15;
1420 }
1421 }
1422
1423 /* RIP & Flags */
1424 if (fWhat & CPUMCTX_EXTRN_RIP)
1425 pInput->Names[iReg++] = HvX64RegisterRip;
1426 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
1427 pInput->Names[iReg++] = HvX64RegisterRflags;
1428
1429 /* Segments */
1430 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
1431 {
1432 if (fWhat & CPUMCTX_EXTRN_CS)
1433 pInput->Names[iReg++] = HvX64RegisterCs;
1434 if (fWhat & CPUMCTX_EXTRN_ES)
1435 pInput->Names[iReg++] = HvX64RegisterEs;
1436 if (fWhat & CPUMCTX_EXTRN_SS)
1437 pInput->Names[iReg++] = HvX64RegisterSs;
1438 if (fWhat & CPUMCTX_EXTRN_DS)
1439 pInput->Names[iReg++] = HvX64RegisterDs;
1440 if (fWhat & CPUMCTX_EXTRN_FS)
1441 pInput->Names[iReg++] = HvX64RegisterFs;
1442 if (fWhat & CPUMCTX_EXTRN_GS)
1443 pInput->Names[iReg++] = HvX64RegisterGs;
1444 }
1445
1446 /* Descriptor tables and the task segment. */
1447 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
1448 {
1449 if (fWhat & CPUMCTX_EXTRN_LDTR)
1450 pInput->Names[iReg++] = HvX64RegisterLdtr;
1451 if (fWhat & CPUMCTX_EXTRN_TR)
1452 pInput->Names[iReg++] = HvX64RegisterTr;
1453 if (fWhat & CPUMCTX_EXTRN_IDTR)
1454 pInput->Names[iReg++] = HvX64RegisterIdtr;
1455 if (fWhat & CPUMCTX_EXTRN_GDTR)
1456 pInput->Names[iReg++] = HvX64RegisterGdtr;
1457 }
1458
1459 /* Control registers. */
1460 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
1461 {
1462 if (fWhat & CPUMCTX_EXTRN_CR0)
1463 pInput->Names[iReg++] = HvX64RegisterCr0;
1464 if (fWhat & CPUMCTX_EXTRN_CR2)
1465 pInput->Names[iReg++] = HvX64RegisterCr2;
1466 if (fWhat & CPUMCTX_EXTRN_CR3)
1467 pInput->Names[iReg++] = HvX64RegisterCr3;
1468 if (fWhat & CPUMCTX_EXTRN_CR4)
1469 pInput->Names[iReg++] = HvX64RegisterCr4;
1470 }
1471 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
1472 pInput->Names[iReg++] = HvX64RegisterCr8;
1473
1474 /* Debug registers. */
1475 if (fWhat & CPUMCTX_EXTRN_DR7)
1476 pInput->Names[iReg++] = HvX64RegisterDr7;
1477 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
1478 {
1479 if (!(fWhat & CPUMCTX_EXTRN_DR7) && (pCtx->fExtrn & CPUMCTX_EXTRN_DR7))
1480 {
1481 fWhat |= CPUMCTX_EXTRN_DR7;
1482 pInput->Names[iReg++] = HvX64RegisterDr7;
1483 }
1484 pInput->Names[iReg++] = HvX64RegisterDr0;
1485 pInput->Names[iReg++] = HvX64RegisterDr1;
1486 pInput->Names[iReg++] = HvX64RegisterDr2;
1487 pInput->Names[iReg++] = HvX64RegisterDr3;
1488 }
1489 if (fWhat & CPUMCTX_EXTRN_DR6)
1490 pInput->Names[iReg++] = HvX64RegisterDr6;
1491
1492 /* Floating point state. */
1493 if (fWhat & CPUMCTX_EXTRN_X87)
1494 {
1495 pInput->Names[iReg++] = HvX64RegisterFpMmx0;
1496 pInput->Names[iReg++] = HvX64RegisterFpMmx1;
1497 pInput->Names[iReg++] = HvX64RegisterFpMmx2;
1498 pInput->Names[iReg++] = HvX64RegisterFpMmx3;
1499 pInput->Names[iReg++] = HvX64RegisterFpMmx4;
1500 pInput->Names[iReg++] = HvX64RegisterFpMmx5;
1501 pInput->Names[iReg++] = HvX64RegisterFpMmx6;
1502 pInput->Names[iReg++] = HvX64RegisterFpMmx7;
1503 pInput->Names[iReg++] = HvX64RegisterFpControlStatus;
1504 }
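    /* Note: HvX64RegisterXmmControlStatus carries both x87 state (FPUDP/DS) and
       MXCSR, so it is fetched whenever either the x87 or the SSE state is wanted. */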
1505 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
1506 pInput->Names[iReg++] = HvX64RegisterXmmControlStatus;
1507
1508 /* Vector state. */
1509 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
1510 {
1511 pInput->Names[iReg++] = HvX64RegisterXmm0;
1512 pInput->Names[iReg++] = HvX64RegisterXmm1;
1513 pInput->Names[iReg++] = HvX64RegisterXmm2;
1514 pInput->Names[iReg++] = HvX64RegisterXmm3;
1515 pInput->Names[iReg++] = HvX64RegisterXmm4;
1516 pInput->Names[iReg++] = HvX64RegisterXmm5;
1517 pInput->Names[iReg++] = HvX64RegisterXmm6;
1518 pInput->Names[iReg++] = HvX64RegisterXmm7;
1519 pInput->Names[iReg++] = HvX64RegisterXmm8;
1520 pInput->Names[iReg++] = HvX64RegisterXmm9;
1521 pInput->Names[iReg++] = HvX64RegisterXmm10;
1522 pInput->Names[iReg++] = HvX64RegisterXmm11;
1523 pInput->Names[iReg++] = HvX64RegisterXmm12;
1524 pInput->Names[iReg++] = HvX64RegisterXmm13;
1525 pInput->Names[iReg++] = HvX64RegisterXmm14;
1526 pInput->Names[iReg++] = HvX64RegisterXmm15;
1527 }
1528
1529 /* MSRs */
1530 // HvX64RegisterTsc - don't touch
1531 if (fWhat & CPUMCTX_EXTRN_EFER)
1532 pInput->Names[iReg++] = HvX64RegisterEfer;
1533 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1534 pInput->Names[iReg++] = HvX64RegisterKernelGsBase;
1535 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1536 {
1537 pInput->Names[iReg++] = HvX64RegisterSysenterCs;
1538 pInput->Names[iReg++] = HvX64RegisterSysenterEip;
1539 pInput->Names[iReg++] = HvX64RegisterSysenterEsp;
1540 }
1541 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1542 {
1543 pInput->Names[iReg++] = HvX64RegisterStar;
1544 pInput->Names[iReg++] = HvX64RegisterLstar;
1545 pInput->Names[iReg++] = HvX64RegisterCstar;
1546 pInput->Names[iReg++] = HvX64RegisterSfmask;
1547 }
1548
1549# ifdef LOG_ENABLED
1550# ifdef VBOX_BUGREF_9217
1551 const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM);
1552# else
1553 const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM->pVM);
1554# endif
1555# endif
1556 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1557 {
1558 pInput->Names[iReg++] = HvX64RegisterApicBase; /// @todo APIC BASE
1559 pInput->Names[iReg++] = HvX64RegisterPat;
1560# if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
1561 pInput->Names[iReg++] = HvX64RegisterMtrrCap;
1562# endif
1563 pInput->Names[iReg++] = HvX64RegisterMtrrDefType;
1564 pInput->Names[iReg++] = HvX64RegisterMtrrFix64k00000;
1565 pInput->Names[iReg++] = HvX64RegisterMtrrFix16k80000;
1566 pInput->Names[iReg++] = HvX64RegisterMtrrFix16kA0000;
1567 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kC0000;
1568 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kC8000;
1569 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kD0000;
1570 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kD8000;
1571 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kE0000;
1572 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kE8000;
1573 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kF0000;
1574 pInput->Names[iReg++] = HvX64RegisterMtrrFix4kF8000;
1575 pInput->Names[iReg++] = HvX64RegisterTscAux;
1576# if 0 /** @todo why can't we read HvX64RegisterIa32MiscEnable? */
1577 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
1578 pInput->Names[iReg++] = HvX64RegisterIa32MiscEnable;
1579# endif
1580# ifdef LOG_ENABLED
1581 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
1582 pInput->Names[iReg++] = HvX64RegisterIa32FeatureControl;
1583# endif
1584 }
1585
1586 /* Interruptibility. */
1587 if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
1588 {
1589 pInput->Names[iReg++] = HvRegisterInterruptState;
1590 pInput->Names[iReg++] = HvX64RegisterRip;
1591 }
1592
1593 /* event injection */
1594 pInput->Names[iReg++] = HvRegisterPendingInterruption;
1595 pInput->Names[iReg++] = HvRegisterPendingEvent0;
1596 pInput->Names[iReg++] = HvRegisterPendingEvent1;
1597 size_t const cRegs = iReg;
1598 size_t const cbInput = RT_ALIGN_Z(RT_UOFFSETOF_DYN(HV_INPUT_GET_VP_REGISTERS, Names[cRegs]), 32);
1599
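    /*
     * Input and output share the single hypercall page: the Names[] input ends
     * at cbInput (rounded up to 32 bytes) and the value array the hypervisor
     * fills in starts right after it, hence HCPhysPage + cbInput below.
     */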
1600 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
1601 Assert((uintptr_t)&paValues[cRegs] - (uintptr_t)pGVCpu->nemr0.s.HypercallData.pbPage < PAGE_SIZE); /* (max is around 168 registers) */
1602 RT_BZERO(paValues, cRegs * sizeof(paValues[0]));
1603
1604 /*
1605 * Make the hypercall.
1606 */
1607 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, cRegs),
1608 pGVCpu->nemr0.s.HypercallData.HCPhysPage,
1609 pGVCpu->nemr0.s.HypercallData.HCPhysPage + cbInput);
1610 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(cRegs),
1611 ("uResult=%RX64 cRegs=%#x\n", uResult, cRegs),
1612 VERR_NEM_GET_REGISTERS_FAILED);
1613 //LogFlow(("nemR0WinImportState: uResult=%#RX64 iReg=%zu fWhat=%#018RX64 fExtr=%#018RX64\n", uResult, cRegs, fWhat, pCtx->fExtrn));
1614
1615 /*
1616 * Copy information to the CPUM context.
1617 */
1618# ifdef VBOX_BUGREF_9217
1619 PVMCPUCC pVCpu = pGVCpu;
1620# else
1621 PVMCPUCC pVCpu = &pGVM->pVM->aCpus[pGVCpu->idCpu];
1622# endif
1623 iReg = 0;
1624
1625 /* GPRs */
1626 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
1627 {
1628 if (fWhat & CPUMCTX_EXTRN_RAX)
1629 {
1630 Assert(pInput->Names[iReg] == HvX64RegisterRax);
1631 pCtx->rax = paValues[iReg++].Reg64;
1632 }
1633 if (fWhat & CPUMCTX_EXTRN_RCX)
1634 {
1635 Assert(pInput->Names[iReg] == HvX64RegisterRcx);
1636 pCtx->rcx = paValues[iReg++].Reg64;
1637 }
1638 if (fWhat & CPUMCTX_EXTRN_RDX)
1639 {
1640 Assert(pInput->Names[iReg] == HvX64RegisterRdx);
1641 pCtx->rdx = paValues[iReg++].Reg64;
1642 }
1643 if (fWhat & CPUMCTX_EXTRN_RBX)
1644 {
1645 Assert(pInput->Names[iReg] == HvX64RegisterRbx);
1646 pCtx->rbx = paValues[iReg++].Reg64;
1647 }
1648 if (fWhat & CPUMCTX_EXTRN_RSP)
1649 {
1650 Assert(pInput->Names[iReg] == HvX64RegisterRsp);
1651 pCtx->rsp = paValues[iReg++].Reg64;
1652 }
1653 if (fWhat & CPUMCTX_EXTRN_RBP)
1654 {
1655 Assert(pInput->Names[iReg] == HvX64RegisterRbp);
1656 pCtx->rbp = paValues[iReg++].Reg64;
1657 }
1658 if (fWhat & CPUMCTX_EXTRN_RSI)
1659 {
1660 Assert(pInput->Names[iReg] == HvX64RegisterRsi);
1661 pCtx->rsi = paValues[iReg++].Reg64;
1662 }
1663 if (fWhat & CPUMCTX_EXTRN_RDI)
1664 {
1665 Assert(pInput->Names[iReg] == HvX64RegisterRdi);
1666 pCtx->rdi = paValues[iReg++].Reg64;
1667 }
1668 if (fWhat & CPUMCTX_EXTRN_R8_R15)
1669 {
1670 Assert(pInput->Names[iReg] == HvX64RegisterR8);
1671 Assert(pInput->Names[iReg + 7] == HvX64RegisterR15);
1672 pCtx->r8 = paValues[iReg++].Reg64;
1673 pCtx->r9 = paValues[iReg++].Reg64;
1674 pCtx->r10 = paValues[iReg++].Reg64;
1675 pCtx->r11 = paValues[iReg++].Reg64;
1676 pCtx->r12 = paValues[iReg++].Reg64;
1677 pCtx->r13 = paValues[iReg++].Reg64;
1678 pCtx->r14 = paValues[iReg++].Reg64;
1679 pCtx->r15 = paValues[iReg++].Reg64;
1680 }
1681 }
1682
1683 /* RIP & Flags */
1684 if (fWhat & CPUMCTX_EXTRN_RIP)
1685 {
1686 Assert(pInput->Names[iReg] == HvX64RegisterRip);
1687 pCtx->rip = paValues[iReg++].Reg64;
1688 }
1689 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
1690 {
1691 Assert(pInput->Names[iReg] == HvX64RegisterRflags);
1692 pCtx->rflags.u = paValues[iReg++].Reg64;
1693 }
1694
1695 /* Segments */
1696# define COPY_BACK_SEG(a_idx, a_enmName, a_SReg) \
1697 do { \
1698 Assert(pInput->Names[a_idx] == a_enmName); \
1699 (a_SReg).u64Base = paValues[a_idx].Segment.Base; \
1700 (a_SReg).u32Limit = paValues[a_idx].Segment.Limit; \
1701 (a_SReg).ValidSel = (a_SReg).Sel = paValues[a_idx].Segment.Selector; \
1702 (a_SReg).Attr.u = paValues[a_idx].Segment.Attributes; \
1703 (a_SReg).fFlags = CPUMSELREG_FLAGS_VALID; \
1704 } while (0)
1705 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
1706 {
1707 if (fWhat & CPUMCTX_EXTRN_CS)
1708 {
1709 COPY_BACK_SEG(iReg, HvX64RegisterCs, pCtx->cs);
1710 iReg++;
1711 }
1712 if (fWhat & CPUMCTX_EXTRN_ES)
1713 {
1714 COPY_BACK_SEG(iReg, HvX64RegisterEs, pCtx->es);
1715 iReg++;
1716 }
1717 if (fWhat & CPUMCTX_EXTRN_SS)
1718 {
1719 COPY_BACK_SEG(iReg, HvX64RegisterSs, pCtx->ss);
1720 iReg++;
1721 }
1722 if (fWhat & CPUMCTX_EXTRN_DS)
1723 {
1724 COPY_BACK_SEG(iReg, HvX64RegisterDs, pCtx->ds);
1725 iReg++;
1726 }
1727 if (fWhat & CPUMCTX_EXTRN_FS)
1728 {
1729 COPY_BACK_SEG(iReg, HvX64RegisterFs, pCtx->fs);
1730 iReg++;
1731 }
1732 if (fWhat & CPUMCTX_EXTRN_GS)
1733 {
1734 COPY_BACK_SEG(iReg, HvX64RegisterGs, pCtx->gs);
1735 iReg++;
1736 }
1737 }
1738 /* Descriptor tables and the task segment. */
1739 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
1740 {
1741 if (fWhat & CPUMCTX_EXTRN_LDTR)
1742 {
1743 COPY_BACK_SEG(iReg, HvX64RegisterLdtr, pCtx->ldtr);
1744 iReg++;
1745 }
1746 if (fWhat & CPUMCTX_EXTRN_TR)
1747 {
1748            /* AMD-V likes loading TR in the AVAIL state, whereas Intel insists on BUSY.  So,
1749               to avoid triggering sanity assertions around the code, always fix this up. */
1750 COPY_BACK_SEG(iReg, HvX64RegisterTr, pCtx->tr);
1751 switch (pCtx->tr.Attr.n.u4Type)
1752 {
1753 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1754 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1755 break;
1756 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
1757 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
1758 break;
1759 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
1760 pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;
1761 break;
1762 }
1763 iReg++;
1764 }
1765 if (fWhat & CPUMCTX_EXTRN_IDTR)
1766 {
1767 Assert(pInput->Names[iReg] == HvX64RegisterIdtr);
1768 pCtx->idtr.cbIdt = paValues[iReg].Table.Limit;
1769 pCtx->idtr.pIdt = paValues[iReg].Table.Base;
1770 iReg++;
1771 }
1772 if (fWhat & CPUMCTX_EXTRN_GDTR)
1773 {
1774 Assert(pInput->Names[iReg] == HvX64RegisterGdtr);
1775 pCtx->gdtr.cbGdt = paValues[iReg].Table.Limit;
1776 pCtx->gdtr.pGdt = paValues[iReg].Table.Base;
1777 iReg++;
1778 }
1779 }
1780
1781 /* Control registers. */
1782 bool fMaybeChangedMode = false;
1783 bool fUpdateCr3 = false;
1784 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
1785 {
1786 if (fWhat & CPUMCTX_EXTRN_CR0)
1787 {
1788 Assert(pInput->Names[iReg] == HvX64RegisterCr0);
1789 if (pCtx->cr0 != paValues[iReg].Reg64)
1790 {
1791 CPUMSetGuestCR0(pVCpu, paValues[iReg].Reg64);
1792 fMaybeChangedMode = true;
1793 }
1794 iReg++;
1795 }
1796 if (fWhat & CPUMCTX_EXTRN_CR2)
1797 {
1798 Assert(pInput->Names[iReg] == HvX64RegisterCr2);
1799 pCtx->cr2 = paValues[iReg].Reg64;
1800 iReg++;
1801 }
1802 if (fWhat & CPUMCTX_EXTRN_CR3)
1803 {
1804 Assert(pInput->Names[iReg] == HvX64RegisterCr3);
1805 if (pCtx->cr3 != paValues[iReg].Reg64)
1806 {
1807 CPUMSetGuestCR3(pVCpu, paValues[iReg].Reg64);
1808 fUpdateCr3 = true;
1809 }
1810 iReg++;
1811 }
1812 if (fWhat & CPUMCTX_EXTRN_CR4)
1813 {
1814 Assert(pInput->Names[iReg] == HvX64RegisterCr4);
1815 if (pCtx->cr4 != paValues[iReg].Reg64)
1816 {
1817 CPUMSetGuestCR4(pVCpu, paValues[iReg].Reg64);
1818 fMaybeChangedMode = true;
1819 }
1820 iReg++;
1821 }
1822 }
1823 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
1824 {
1825 Assert(pInput->Names[iReg] == HvX64RegisterCr8);
1826 APICSetTpr(pVCpu, (uint8_t)paValues[iReg].Reg64 << 4);
1827 iReg++;
1828 }
1829
1830 /* Debug registers. */
1831 if (fWhat & CPUMCTX_EXTRN_DR7)
1832 {
1833 Assert(pInput->Names[iReg] == HvX64RegisterDr7);
1834 if (pCtx->dr[7] != paValues[iReg].Reg64)
1835 CPUMSetGuestDR7(pVCpu, paValues[iReg].Reg64);
1836 pCtx->fExtrn &= ~CPUMCTX_EXTRN_DR7; /* Hack alert! Avoids asserting when processing CPUMCTX_EXTRN_DR0_DR3. */
1837 iReg++;
1838 }
1839 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
1840 {
1841 Assert(pInput->Names[iReg] == HvX64RegisterDr0);
1842 Assert(pInput->Names[iReg+3] == HvX64RegisterDr3);
1843 if (pCtx->dr[0] != paValues[iReg].Reg64)
1844 CPUMSetGuestDR0(pVCpu, paValues[iReg].Reg64);
1845 iReg++;
1846 if (pCtx->dr[1] != paValues[iReg].Reg64)
1847 CPUMSetGuestDR1(pVCpu, paValues[iReg].Reg64);
1848 iReg++;
1849 if (pCtx->dr[2] != paValues[iReg].Reg64)
1850 CPUMSetGuestDR2(pVCpu, paValues[iReg].Reg64);
1851 iReg++;
1852 if (pCtx->dr[3] != paValues[iReg].Reg64)
1853 CPUMSetGuestDR3(pVCpu, paValues[iReg].Reg64);
1854 iReg++;
1855 }
1856 if (fWhat & CPUMCTX_EXTRN_DR6)
1857 {
1858 Assert(pInput->Names[iReg] == HvX64RegisterDr6);
1859 if (pCtx->dr[6] != paValues[iReg].Reg64)
1860 CPUMSetGuestDR6(pVCpu, paValues[iReg].Reg64);
1861 iReg++;
1862 }
1863
1864 /* Floating point state. */
1865 if (fWhat & CPUMCTX_EXTRN_X87)
1866 {
1867 Assert(pInput->Names[iReg] == HvX64RegisterFpMmx0);
1868 Assert(pInput->Names[iReg + 7] == HvX64RegisterFpMmx7);
1869 pCtx->pXStateR0->x87.aRegs[0].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1870 pCtx->pXStateR0->x87.aRegs[0].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1871 iReg++;
1872 pCtx->pXStateR0->x87.aRegs[1].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1873 pCtx->pXStateR0->x87.aRegs[1].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1874 iReg++;
1875 pCtx->pXStateR0->x87.aRegs[2].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1876 pCtx->pXStateR0->x87.aRegs[2].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1877 iReg++;
1878 pCtx->pXStateR0->x87.aRegs[3].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1879 pCtx->pXStateR0->x87.aRegs[3].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1880 iReg++;
1881 pCtx->pXStateR0->x87.aRegs[4].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1882 pCtx->pXStateR0->x87.aRegs[4].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1883 iReg++;
1884 pCtx->pXStateR0->x87.aRegs[5].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1885 pCtx->pXStateR0->x87.aRegs[5].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1886 iReg++;
1887 pCtx->pXStateR0->x87.aRegs[6].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1888 pCtx->pXStateR0->x87.aRegs[6].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1889 iReg++;
1890 pCtx->pXStateR0->x87.aRegs[7].au64[0] = paValues[iReg].Fp.AsUINT128.Low64;
1891 pCtx->pXStateR0->x87.aRegs[7].au64[1] = paValues[iReg].Fp.AsUINT128.High64;
1892 iReg++;
1893
1894 Assert(pInput->Names[iReg] == HvX64RegisterFpControlStatus);
1895 pCtx->pXStateR0->x87.FCW = paValues[iReg].FpControlStatus.FpControl;
1896 pCtx->pXStateR0->x87.FSW = paValues[iReg].FpControlStatus.FpStatus;
1897 pCtx->pXStateR0->x87.FTW = paValues[iReg].FpControlStatus.FpTag
1898 /*| (paValues[iReg].FpControlStatus.Reserved << 8)*/;
1899 pCtx->pXStateR0->x87.FOP = paValues[iReg].FpControlStatus.LastFpOp;
1900 pCtx->pXStateR0->x87.FPUIP = (uint32_t)paValues[iReg].FpControlStatus.LastFpRip;
1901 pCtx->pXStateR0->x87.CS = (uint16_t)(paValues[iReg].FpControlStatus.LastFpRip >> 32);
1902 pCtx->pXStateR0->x87.Rsrvd1 = (uint16_t)(paValues[iReg].FpControlStatus.LastFpRip >> 48);
1903 iReg++;
1904 }
1905
1906 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
1907 {
1908 Assert(pInput->Names[iReg] == HvX64RegisterXmmControlStatus);
1909 if (fWhat & CPUMCTX_EXTRN_X87)
1910 {
1911 pCtx->pXStateR0->x87.FPUDP = (uint32_t)paValues[iReg].XmmControlStatus.LastFpRdp;
1912 pCtx->pXStateR0->x87.DS = (uint16_t)(paValues[iReg].XmmControlStatus.LastFpRdp >> 32);
1913 pCtx->pXStateR0->x87.Rsrvd2 = (uint16_t)(paValues[iReg].XmmControlStatus.LastFpRdp >> 48);
1914 }
1915 pCtx->pXStateR0->x87.MXCSR = paValues[iReg].XmmControlStatus.XmmStatusControl;
1916 pCtx->pXStateR0->x87.MXCSR_MASK = paValues[iReg].XmmControlStatus.XmmStatusControlMask; /** @todo ??? (Isn't this an output field?) */
1917 iReg++;
1918 }
1919
1920 /* Vector state. */
1921 if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
1922 {
1923 Assert(pInput->Names[iReg] == HvX64RegisterXmm0);
1924 Assert(pInput->Names[iReg+15] == HvX64RegisterXmm15);
1925 pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1926 pCtx->pXStateR0->x87.aXMM[0].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1927 iReg++;
1928 pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1929 pCtx->pXStateR0->x87.aXMM[1].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1930 iReg++;
1931 pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1932 pCtx->pXStateR0->x87.aXMM[2].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1933 iReg++;
1934 pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1935 pCtx->pXStateR0->x87.aXMM[3].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1936 iReg++;
1937 pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1938 pCtx->pXStateR0->x87.aXMM[4].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1939 iReg++;
1940 pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1941 pCtx->pXStateR0->x87.aXMM[5].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1942 iReg++;
1943 pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1944 pCtx->pXStateR0->x87.aXMM[6].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1945 iReg++;
1946 pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1947 pCtx->pXStateR0->x87.aXMM[7].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1948 iReg++;
1949 pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1950 pCtx->pXStateR0->x87.aXMM[8].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1951 iReg++;
1952 pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1953 pCtx->pXStateR0->x87.aXMM[9].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1954 iReg++;
1955 pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1956 pCtx->pXStateR0->x87.aXMM[10].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1957 iReg++;
1958 pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1959 pCtx->pXStateR0->x87.aXMM[11].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1960 iReg++;
1961 pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1962 pCtx->pXStateR0->x87.aXMM[12].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1963 iReg++;
1964 pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1965 pCtx->pXStateR0->x87.aXMM[13].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1966 iReg++;
1967 pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1968 pCtx->pXStateR0->x87.aXMM[14].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1969 iReg++;
1970 pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Lo = paValues[iReg].Reg128.Low64;
1971 pCtx->pXStateR0->x87.aXMM[15].uXmm.s.Hi = paValues[iReg].Reg128.High64;
1972 iReg++;
1973 }
1974
1975
1976 /* MSRs */
1977 // HvX64RegisterTsc - don't touch
1978 if (fWhat & CPUMCTX_EXTRN_EFER)
1979 {
1980 Assert(pInput->Names[iReg] == HvX64RegisterEfer);
1981 if (paValues[iReg].Reg64 != pCtx->msrEFER)
1982 {
1983 Log7(("NEM/%u: MSR EFER changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrEFER, paValues[iReg].Reg64));
1984 if ((paValues[iReg].Reg64 ^ pCtx->msrEFER) & MSR_K6_EFER_NXE)
1985 PGMNotifyNxeChanged(pVCpu, RT_BOOL(paValues[iReg].Reg64 & MSR_K6_EFER_NXE));
1986 pCtx->msrEFER = paValues[iReg].Reg64;
1987 fMaybeChangedMode = true;
1988 }
1989 iReg++;
1990 }
1991 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1992 {
1993 Assert(pInput->Names[iReg] == HvX64RegisterKernelGsBase);
1994 if (pCtx->msrKERNELGSBASE != paValues[iReg].Reg64)
1995 Log7(("NEM/%u: MSR KERNELGSBASE changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrKERNELGSBASE, paValues[iReg].Reg64));
1996 pCtx->msrKERNELGSBASE = paValues[iReg].Reg64;
1997 iReg++;
1998 }
1999 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
2000 {
2001 Assert(pInput->Names[iReg] == HvX64RegisterSysenterCs);
2002 if (pCtx->SysEnter.cs != paValues[iReg].Reg64)
2003 Log7(("NEM/%u: MSR SYSENTER.CS changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->SysEnter.cs, paValues[iReg].Reg64));
2004 pCtx->SysEnter.cs = paValues[iReg].Reg64;
2005 iReg++;
2006
2007 Assert(pInput->Names[iReg] == HvX64RegisterSysenterEip);
2008 if (pCtx->SysEnter.eip != paValues[iReg].Reg64)
2009 Log7(("NEM/%u: MSR SYSENTER.EIP changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->SysEnter.eip, paValues[iReg].Reg64));
2010 pCtx->SysEnter.eip = paValues[iReg].Reg64;
2011 iReg++;
2012
2013 Assert(pInput->Names[iReg] == HvX64RegisterSysenterEsp);
2014 if (pCtx->SysEnter.esp != paValues[iReg].Reg64)
2015 Log7(("NEM/%u: MSR SYSENTER.ESP changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->SysEnter.esp, paValues[iReg].Reg64));
2016 pCtx->SysEnter.esp = paValues[iReg].Reg64;
2017 iReg++;
2018 }
2019 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
2020 {
2021 Assert(pInput->Names[iReg] == HvX64RegisterStar);
2022 if (pCtx->msrSTAR != paValues[iReg].Reg64)
2023 Log7(("NEM/%u: MSR STAR changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrSTAR, paValues[iReg].Reg64));
2024 pCtx->msrSTAR = paValues[iReg].Reg64;
2025 iReg++;
2026
2027 Assert(pInput->Names[iReg] == HvX64RegisterLstar);
2028 if (pCtx->msrLSTAR != paValues[iReg].Reg64)
2029 Log7(("NEM/%u: MSR LSTAR changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrLSTAR, paValues[iReg].Reg64));
2030 pCtx->msrLSTAR = paValues[iReg].Reg64;
2031 iReg++;
2032
2033 Assert(pInput->Names[iReg] == HvX64RegisterCstar);
2034 if (pCtx->msrCSTAR != paValues[iReg].Reg64)
2035 Log7(("NEM/%u: MSR CSTAR changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrCSTAR, paValues[iReg].Reg64));
2036 pCtx->msrCSTAR = paValues[iReg].Reg64;
2037 iReg++;
2038
2039 Assert(pInput->Names[iReg] == HvX64RegisterSfmask);
2040 if (pCtx->msrSFMASK != paValues[iReg].Reg64)
2041 Log7(("NEM/%u: MSR SFMASK changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrSFMASK, paValues[iReg].Reg64));
2042 pCtx->msrSFMASK = paValues[iReg].Reg64;
2043 iReg++;
2044 }
2045 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
2046 {
2047 Assert(pInput->Names[iReg] == HvX64RegisterApicBase);
2048 const uint64_t uOldBase = APICGetBaseMsrNoCheck(pVCpu);
2049 if (paValues[iReg].Reg64 != uOldBase)
2050 {
2051 Log7(("NEM/%u: MSR APICBase changed %RX64 -> %RX64 (%RX64)\n",
2052 pVCpu->idCpu, uOldBase, paValues[iReg].Reg64, paValues[iReg].Reg64 ^ uOldBase));
2053 int rc2 = APICSetBaseMsr(pVCpu, paValues[iReg].Reg64);
2054 AssertLogRelMsg(rc2 == VINF_SUCCESS, ("rc2=%Rrc [%#RX64]\n", rc2, paValues[iReg].Reg64));
2055 }
2056 iReg++;
2057
2058 Assert(pInput->Names[iReg] == HvX64RegisterPat);
2059 if (pCtx->msrPAT != paValues[iReg].Reg64)
2060 Log7(("NEM/%u: MSR PAT changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrPAT, paValues[iReg].Reg64));
2061 pCtx->msrPAT = paValues[iReg].Reg64;
2062 iReg++;
2063
2064# if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
2065 Assert(pInput->Names[iReg] == HvX64RegisterMtrrCap);
2066 if (paValues[iReg].Reg64 != CPUMGetGuestIa32MtrrCap(pVCpu))
2067 Log7(("NEM/%u: MSR MTRR_CAP changed %RX64 -> %RX64 (!!)\n", pVCpu->idCpu, CPUMGetGuestIa32MtrrCap(pVCpu), paValues[iReg].Reg64));
2068 iReg++;
2069# endif
2070
2071 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
2072 Assert(pInput->Names[iReg] == HvX64RegisterMtrrDefType);
2073 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrDefType )
2074 Log7(("NEM/%u: MSR MTRR_DEF_TYPE changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrDefType, paValues[iReg].Reg64));
2075 pCtxMsrs->msr.MtrrDefType = paValues[iReg].Reg64;
2076 iReg++;
2077
2078        /** @todo we don't keep state for HvX64RegisterMtrrPhysBaseX and HvX64RegisterMtrrPhysMaskX */
2079
2080 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix64k00000);
2081 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix64K_00000 )
2082            Log7(("NEM/%u: MSR MTRR_FIX64K_00000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix64K_00000, paValues[iReg].Reg64));
2083 pCtxMsrs->msr.MtrrFix64K_00000 = paValues[iReg].Reg64;
2084 iReg++;
2085
2086 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix16k80000);
2087 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix16K_80000 )
2088 Log7(("NEM/%u: MSR MTRR_FIX16K_80000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_80000, paValues[iReg].Reg64));
2089 pCtxMsrs->msr.MtrrFix16K_80000 = paValues[iReg].Reg64;
2090 iReg++;
2091
2092 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix16kA0000);
2093 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix16K_A0000 )
2094 Log7(("NEM/%u: MSR MTRR_FIX16K_A0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix16K_A0000, paValues[iReg].Reg64));
2095 pCtxMsrs->msr.MtrrFix16K_A0000 = paValues[iReg].Reg64;
2096 iReg++;
2097
2098 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kC0000);
2099 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_C0000 )
2100            Log7(("NEM/%u: MSR MTRR_FIX4K_C0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C0000, paValues[iReg].Reg64));
2101 pCtxMsrs->msr.MtrrFix4K_C0000 = paValues[iReg].Reg64;
2102 iReg++;
2103
2104 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kC8000);
2105 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_C8000 )
2106            Log7(("NEM/%u: MSR MTRR_FIX4K_C8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_C8000, paValues[iReg].Reg64));
2107 pCtxMsrs->msr.MtrrFix4K_C8000 = paValues[iReg].Reg64;
2108 iReg++;
2109
2110 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kD0000);
2111 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_D0000 )
2112            Log7(("NEM/%u: MSR MTRR_FIX4K_D0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D0000, paValues[iReg].Reg64));
2113 pCtxMsrs->msr.MtrrFix4K_D0000 = paValues[iReg].Reg64;
2114 iReg++;
2115
2116 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kD8000);
2117 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_D8000 )
2118            Log7(("NEM/%u: MSR MTRR_FIX4K_D8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_D8000, paValues[iReg].Reg64));
2119 pCtxMsrs->msr.MtrrFix4K_D8000 = paValues[iReg].Reg64;
2120 iReg++;
2121
2122 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kE0000);
2123 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_E0000 )
2124            Log7(("NEM/%u: MSR MTRR_FIX4K_E0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E0000, paValues[iReg].Reg64));
2125 pCtxMsrs->msr.MtrrFix4K_E0000 = paValues[iReg].Reg64;
2126 iReg++;
2127
2128 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kE8000);
2129 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_E8000 )
2130            Log7(("NEM/%u: MSR MTRR_FIX4K_E8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_E8000, paValues[iReg].Reg64));
2131 pCtxMsrs->msr.MtrrFix4K_E8000 = paValues[iReg].Reg64;
2132 iReg++;
2133
2134 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kF0000);
2135 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_F0000 )
2136            Log7(("NEM/%u: MSR MTRR_FIX4K_F0000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F0000, paValues[iReg].Reg64));
2137 pCtxMsrs->msr.MtrrFix4K_F0000 = paValues[iReg].Reg64;
2138 iReg++;
2139
2140 Assert(pInput->Names[iReg] == HvX64RegisterMtrrFix4kF8000);
2141 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MtrrFix4K_F8000 )
2142            Log7(("NEM/%u: MSR MTRR_FIX4K_F8000 changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MtrrFix4K_F8000, paValues[iReg].Reg64));
2143 pCtxMsrs->msr.MtrrFix4K_F8000 = paValues[iReg].Reg64;
2144 iReg++;
2145
2146 Assert(pInput->Names[iReg] == HvX64RegisterTscAux);
2147 if (paValues[iReg].Reg64 != pCtxMsrs->msr.TscAux )
2148 Log7(("NEM/%u: MSR TSC_AUX changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.TscAux, paValues[iReg].Reg64));
2149 pCtxMsrs->msr.TscAux = paValues[iReg].Reg64;
2150 iReg++;
2151
2152# if 0 /** @todo why can't we even read HvX64RegisterIa32MiscEnable? */
2153 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
2154 {
2155 Assert(pInput->Names[iReg] == HvX64RegisterIa32MiscEnable);
2156 if (paValues[iReg].Reg64 != pCtxMsrs->msr.MiscEnable)
2157 Log7(("NEM/%u: MSR MISC_ENABLE changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtxMsrs->msr.MiscEnable, paValues[iReg].Reg64));
2158 pCtxMsrs->msr.MiscEnable = paValues[iReg].Reg64;
2159 iReg++;
2160 }
2161# endif
2162# ifdef LOG_ENABLED
2163 if (enmCpuVendor != CPUMCPUVENDOR_AMD)
2164 {
2165 Assert(pInput->Names[iReg] == HvX64RegisterIa32FeatureControl);
2166 if (paValues[iReg].Reg64 != pCtx->hwvirt.vmx.Msrs.u64FeatCtrl)
2167 Log7(("NEM/%u: MSR FEATURE_CONTROL changed %RX64 -> %RX64 (!!)\n", pVCpu->idCpu, pCtx->hwvirt.vmx.Msrs.u64FeatCtrl, paValues[iReg].Reg64));
2168 iReg++;
2169 }
2170# endif
2171 }
2172
2173 /* Interruptibility. */
2174 if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
2175 {
2176 Assert(pInput->Names[iReg] == HvRegisterInterruptState);
2177 Assert(pInput->Names[iReg + 1] == HvX64RegisterRip);
2178
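        /* HvX64RegisterRip was fetched together with the interrupt state because
           EMSetInhibitInterruptsPC needs the address the interrupt shadow applies to. */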
2179 if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
2180 {
2181 pVCpu->nem.s.fLastInterruptShadow = paValues[iReg].InterruptState.InterruptShadow;
2182 if (paValues[iReg].InterruptState.InterruptShadow)
2183 EMSetInhibitInterruptsPC(pVCpu, paValues[iReg + 1].Reg64);
2184 else
2185 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2186 }
2187
2188 if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
2189 {
2190 if (paValues[iReg].InterruptState.NmiMasked)
2191 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
2192 else
2193 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
2194 }
2195
2196 fWhat |= CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI;
2197 iReg += 2;
2198 }
2199
2200 /* Event injection. */
2201 /// @todo HvRegisterPendingInterruption
2202 Assert(pInput->Names[iReg] == HvRegisterPendingInterruption);
2203 if (paValues[iReg].PendingInterruption.InterruptionPending)
2204 {
2205 Log7(("PendingInterruption: type=%u vector=%#x errcd=%RTbool/%#x instr-len=%u nested=%u\n",
2206 paValues[iReg].PendingInterruption.InterruptionType, paValues[iReg].PendingInterruption.InterruptionVector,
2207 paValues[iReg].PendingInterruption.DeliverErrorCode, paValues[iReg].PendingInterruption.ErrorCode,
2208 paValues[iReg].PendingInterruption.InstructionLength, paValues[iReg].PendingInterruption.NestedEvent));
2209 AssertMsg((paValues[iReg].PendingInterruption.AsUINT64 & UINT64_C(0xfc00)) == 0,
2210 ("%#RX64\n", paValues[iReg].PendingInterruption.AsUINT64));
2211 }
2212
2213 /// @todo HvRegisterPendingEvent0
2214 /// @todo HvRegisterPendingEvent1
2215
2216 /* Almost done, just update extrn flags and maybe change PGM mode. */
2217 pCtx->fExtrn &= ~fWhat;
2218 if (!(pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT))))
2219 pCtx->fExtrn = 0;
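    /* Note: fWhat was masked by pCtx->fExtrn on entry, so clearing exactly those
       bits marks the just imported registers as up to date; once none of the
       CPUMCTX_EXTRN_ALL or NEM-specific bits (bar the event-injection one)
       remain, the whole mask collapses to zero. */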
2220
2221 /* Typical. */
2222 if (!fMaybeChangedMode && !fUpdateCr3)
2223 return VINF_SUCCESS;
2224
2225 /*
2226 * Slow.
2227 */
2228 int rc = VINF_SUCCESS;
2229 if (fMaybeChangedMode)
2230 {
2231 rc = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
2232 AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_1);
2233 }
2234
2235 if (fUpdateCr3)
2236 {
2237 if (fCanUpdateCr3)
2238 {
2239 LogFlow(("nemR0WinImportState: -> PGMUpdateCR3!\n"));
2240 rc = PGMUpdateCR3(pVCpu, pCtx->cr3);
2241 AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_2);
2242 }
2243 else
2244 {
2245 LogFlow(("nemR0WinImportState: -> VERR_NEM_FLUSH_TLB!\n"));
2246 rc = VERR_NEM_FLUSH_TLB; /* Calling PGMFlushTLB w/o long jump setup doesn't work, ring-3 does it. */
2247 }
2248 }
2249
2250 return rc;
2251}
2252#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
2253
2254
2255/**
2256 * Import the state from the native API (back to CPUMCTX).
2257 *
2258 * @returns VBox status code
2259 * @param pGVM The ring-0 VM handle.
2260 * @param pVM The cross context VM handle.
2261 * @param idCpu The calling EMT. Necessary for getting the
2262 * hypercall page and arguments.
2263 * @param fWhat What to import, CPUMCTX_EXTRN_XXX. Set
2264 *                      CPUMCTX_EXTRN_ALL for everything.
2265 */
2266VMMR0_INT_DECL(int) NEMR0ImportState(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, uint64_t fWhat)
2267{
2268#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2269 /*
2270 * Validate the call.
2271 */
2272 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
2273 if (RT_SUCCESS(rc))
2274 {
2275 PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
2276 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2277 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2278
2279 /*
2280 * Call worker.
2281 */
2282 rc = nemR0WinImportState(pGVM, pGVCpu, &pVCpu->cpum.GstCtx, fWhat, false /*fCanUpdateCr3*/);
2283 }
2284 return rc;
2285#else
2286 RT_NOREF(pGVM, pVM, idCpu, fWhat);
2287 return VERR_NOT_IMPLEMENTED;
2288#endif
2289}
2290
2291
2292#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2293/**
2294 * Worker for NEMR0QueryCpuTick and the ring-0 NEMHCQueryCpuTick.
2295 *
2296 * @returns VBox status code.
2297 * @param pGVM The ring-0 VM handle.
2298 * @param pGVCpu The ring-0 VCPU handle.
2299 * @param pcTicks Where to return the current CPU tick count.
2300 * @param pcAux Where to return the hyper-V TSC_AUX value. Optional.
2301 */
2302NEM_TMPL_STATIC int nemR0WinQueryCpuTick(PGVM pGVM, PGVMCPU pGVCpu, uint64_t *pcTicks, uint32_t *pcAux)
2303{
2304 /*
2305 * Hypercall parameters.
2306 */
2307 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
2308 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2309 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2310
2311 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
2312 pInput->VpIndex = pGVCpu->idCpu;
2313 pInput->fFlags = 0;
2314 pInput->Names[0] = HvX64RegisterTsc;
2315 pInput->Names[1] = HvX64RegisterTscAux;
2316
2317 size_t const cbInput = RT_ALIGN_Z(RT_UOFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[2]), 32);
2318 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
2319 RT_BZERO(paValues, sizeof(paValues[0]) * 2);
2320
2321 /*
2322 * Make the hypercall.
2323 */
2324 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, 2),
2325 pGVCpu->nemr0.s.HypercallData.HCPhysPage,
2326 pGVCpu->nemr0.s.HypercallData.HCPhysPage + cbInput);
2327 AssertLogRelMsgReturn(uResult == HV_MAKE_CALL_REP_RET(2), ("uResult=%RX64 cRegs=%#x\n", uResult, 2),
2328 VERR_NEM_GET_REGISTERS_FAILED);
2329
2330 /*
2331 * Get results.
2332 */
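    /* The values come back in the order the names were submitted:
       paValues[0] is HvX64RegisterTsc, paValues[1] is HvX64RegisterTscAux. */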
2333 *pcTicks = paValues[0].Reg64;
2334 if (pcAux)
2335        *pcAux = paValues[1].Reg32;
2336 return VINF_SUCCESS;
2337}
2338#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
2339
2340
2341/**
2342 * Queries the TSC and TSC_AUX values, putting the results in pVCpu->nem.s.Hypercall.QueryCpuTick.
2343 *
2344 * @returns VBox status code
2345 * @param pGVM The ring-0 VM handle.
2346 * @param pVM The cross context VM handle.
2347 * @param idCpu The calling EMT. Necessary for getting the
2348 * hypercall page and arguments.
2349 */
2350VMMR0_INT_DECL(int) NEMR0QueryCpuTick(PGVM pGVM, PVMCC pVM, VMCPUID idCpu)
2351{
2352#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2353 /*
2354 * Validate the call.
2355 */
2356 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
2357 if (RT_SUCCESS(rc))
2358 {
2359 PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
2360 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2361 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2362
2363 /*
2364 * Call worker.
2365 */
2366 pVCpu->nem.s.Hypercall.QueryCpuTick.cTicks = 0;
2367 pVCpu->nem.s.Hypercall.QueryCpuTick.uAux = 0;
2368 rc = nemR0WinQueryCpuTick(pGVM, pGVCpu, &pVCpu->nem.s.Hypercall.QueryCpuTick.cTicks,
2369 &pVCpu->nem.s.Hypercall.QueryCpuTick.uAux);
2370 }
2371 return rc;
2372#else
2373 RT_NOREF(pGVM, pVM, idCpu);
2374 return VERR_NOT_IMPLEMENTED;
2375#endif
2376}
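/*
 * A minimal illustrative sketch (hypothetical helper, not used by the code
 * above) of how a ring-0 caller running on the EMT in question could use
 * NEMR0QueryCpuTick and pick the results out of the per-VCPU hypercall area.
 */
static int nemR0WinSketchReadGuestTsc(PGVM pGVM, PVMCC pVM, PVMCPUCC pVCpu, uint64_t *puTsc, uint32_t *puAux)
{
    /* Must be called on the EMT for pVCpu; NEMR0QueryCpuTick validates that. */
    int rc = NEMR0QueryCpuTick(pGVM, pVM, pVCpu->idCpu);
    if (RT_SUCCESS(rc))
    {
        *puTsc = pVCpu->nem.s.Hypercall.QueryCpuTick.cTicks;
        *puAux = pVCpu->nem.s.Hypercall.QueryCpuTick.uAux;
    }
    return rc;
}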
2377
2378
2379#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2380/**
2381 * Worker for NEMR0ResumeCpuTickOnAll and the ring-0 NEMHCResumeCpuTickOnAll.
2382 *
2383 * @returns VBox status code.
2384 * @param pGVM The ring-0 VM handle.
2385 * @param pGVCpu The ring-0 VCPU handle.
2386 * @param uPausedTscValue The TSC value at the time of pausing.
2387 */
2388NEM_TMPL_STATIC int nemR0WinResumeCpuTickOnAll(PGVM pGVM, PGVMCPU pGVCpu, uint64_t uPausedTscValue)
2389{
2390 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2391
2392 /*
2393 * Set up the hypercall parameters.
2394 */
2395 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
2396 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2397
2398 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
2399 pInput->VpIndex = 0;
2400 pInput->RsvdZ = 0;
2401 pInput->Elements[0].Name = HvX64RegisterTsc;
2402 pInput->Elements[0].Pad0 = 0;
2403 pInput->Elements[0].Pad1 = 0;
2404 pInput->Elements[0].Value.Reg128.High64 = 0;
2405 pInput->Elements[0].Value.Reg64 = uPausedTscValue;
2406
2407 /*
2408 * Disable interrupts and do the first virtual CPU.
2409 */
2410 RTCCINTREG const fSavedFlags = ASMIntDisableFlags();
2411 uint64_t const uFirstTsc = ASMReadTSC();
2412 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
2413 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0 /* no output */);
2414 AssertLogRelMsgReturnStmt(uResult == HV_MAKE_CALL_REP_RET(1), ("uResult=%RX64 uTsc=%#RX64\n", uResult, uPausedTscValue),
2415 ASMSetFlags(fSavedFlags), VERR_NEM_SET_TSC);
2416
2417 /*
2418     * Do the secondary processors, adjusting for elapsed TSC and keeping fingers crossed
2419 * that we don't introduce too much drift here.
2420 */
2421 for (VMCPUID iCpu = 1; iCpu < pGVM->cCpus; iCpu++)
2422 {
2423 Assert(pInput->PartitionId == pGVM->nemr0.s.idHvPartition);
2424 Assert(pInput->RsvdZ == 0);
2425 Assert(pInput->Elements[0].Name == HvX64RegisterTsc);
2426 Assert(pInput->Elements[0].Pad0 == 0);
2427 Assert(pInput->Elements[0].Pad1 == 0);
2428 Assert(pInput->Elements[0].Value.Reg128.High64 == 0);
2429
2430 pInput->VpIndex = iCpu;
2431 const uint64_t offDelta = (ASMReadTSC() - uFirstTsc);
2432 pInput->Elements[0].Value.Reg64 = uPausedTscValue + offDelta;
2433
2434 uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
2435 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0 /* no output */);
2436 AssertLogRelMsgReturnStmt(uResult == HV_MAKE_CALL_REP_RET(1),
2437 ("uResult=%RX64 uTsc=%#RX64 + %#RX64\n", uResult, uPausedTscValue, offDelta),
2438 ASMSetFlags(fSavedFlags), VERR_NEM_SET_TSC);
2439 }
2440
2441 /*
2442 * Done.
2443 */
2444 ASMSetFlags(fSavedFlags);
2445 return VINF_SUCCESS;
2446}
2447#endif /* NEM_WIN_WITH_RING0_RUNLOOP || NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
2448
2449
2450/**
2451 * Sets the TSC register to @a uPausedTscValue on all CPUs.
2452 *
2453 * @returns VBox status code
2454 * @param pGVM The ring-0 VM handle.
2455 * @param pVM The cross context VM handle.
2456 * @param idCpu The calling EMT. Necessary for getting the
2457 * hypercall page and arguments.
2458 * @param uPausedTscValue The TSC value at the time of pausing.
2459 */
2460VMMR0_INT_DECL(int) NEMR0ResumeCpuTickOnAll(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, uint64_t uPausedTscValue)
2461{
2462#if defined(NEM_WIN_WITH_RING0_RUNLOOP) || defined(NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS)
2463 /*
2464 * Validate the call.
2465 */
2466 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
2467 if (RT_SUCCESS(rc))
2468 {
2469 PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
2470 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2471 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2472
2473 /*
2474 * Call worker.
2475 */
2476 pVCpu->nem.s.Hypercall.QueryCpuTick.cTicks = 0;
2477 pVCpu->nem.s.Hypercall.QueryCpuTick.uAux = 0;
2478 rc = nemR0WinResumeCpuTickOnAll(pGVM, pGVCpu, uPausedTscValue);
2479 }
2480 return rc;
2481#else
2482 RT_NOREF(pGVM, pVM, idCpu, uPausedTscValue);
2483 return VERR_NOT_IMPLEMENTED;
2484#endif
2485}
2486
2487
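/**
 * Runs guest code via the ring-0 run loop, when that is enabled and permitted.
 *
 * @returns Strict VBox status code.  VERR_NEM_RING3_ONLY if the ring-0 run
 *          loop may not be used, VERR_NOT_IMPLEMENTED if it isn't compiled in.
 * @param   pGVM        The ring-0 VM handle.
 * @param   idCpu       The calling EMT.
 */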
2488VMMR0_INT_DECL(VBOXSTRICTRC) NEMR0RunGuestCode(PGVM pGVM, VMCPUID idCpu)
2489{
2490#ifdef NEM_WIN_WITH_RING0_RUNLOOP
2491 if (pGVM->nemr0.s.fMayUseRing0Runloop)
2492# ifdef VBOX_BUGREF_9217
2493 return nemHCWinRunGC(pGVM, &pGVM->aCpus[idCpu], pGVM, &pGVM->aCpus[idCpu]);
2494# else
2495 {
2496 PVMCC pVM = pGVM->pVM;
2497 return nemHCWinRunGC(pVM, &pVM->aCpus[idCpu], pGVM, &pGVM->aCpus[idCpu]);
2498 }
2499# endif
2500 return VERR_NEM_RING3_ONLY;
2501#else
2502 RT_NOREF(pGVM, idCpu);
2503 return VERR_NOT_IMPLEMENTED;
2504#endif
2505}
2506
2507
2508/**
2509 * Updates statistics in the VM structure.
2510 *
2511 * @returns VBox status code.
2512 * @param pGVM The ring-0 VM handle.
2513 * @param pVM The cross context VM handle.
2514 * @param idCpu The calling EMT, or NIL. Necessary for getting the hypercall
2515 * page and arguments.
2516 */
2517VMMR0_INT_DECL(int) NEMR0UpdateStatistics(PGVM pGVM, PVMCC pVM, VMCPUID idCpu)
2518{
2519 /*
2520 * Validate the call.
2521 */
2522 int rc;
2523 if (idCpu == NIL_VMCPUID)
2524 rc = GVMMR0ValidateGVMandVM(pGVM, pVM);
2525 else
2526 rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
2527 if (RT_SUCCESS(rc))
2528 {
2529 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2530
2531 PNEMR0HYPERCALLDATA pHypercallData = idCpu != NIL_VMCPUID
2532 ? &pGVM->aCpus[idCpu].nemr0.s.HypercallData
2533 : &pGVM->nemr0.s.HypercallData;
2534 if ( RT_VALID_PTR(pHypercallData->pbPage)
2535 && pHypercallData->HCPhysPage != NIL_RTHCPHYS)
2536 {
2537 if (idCpu == NIL_VMCPUID)
2538 rc = RTCritSectEnter(&pGVM->nemr0.s.HypercallDataCritSect);
2539 if (RT_SUCCESS(rc))
2540 {
2541 /*
2542 * Query the memory statistics for the partition.
2543 */
2544 HV_INPUT_GET_MEMORY_BALANCE *pInput = (HV_INPUT_GET_MEMORY_BALANCE *)pHypercallData->pbPage;
2545 pInput->TargetPartitionId = pGVM->nemr0.s.idHvPartition;
2546 pInput->ProximityDomainInfo.Flags.ProximityPreferred = 0;
2547 pInput->ProximityDomainInfo.Flags.ProxyimityInfoValid = 0;
2548 pInput->ProximityDomainInfo.Flags.Reserved = 0;
2549 pInput->ProximityDomainInfo.Id = 0;
2550
2551 HV_OUTPUT_GET_MEMORY_BALANCE *pOutput = (HV_OUTPUT_GET_MEMORY_BALANCE *)(pInput + 1);
2552 RT_ZERO(*pOutput);
2553
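                /* As with the register hypercalls, input and output share the
                   hypercall page: the input block sits at the start and the
                   output block directly after it.  This is a simple (non-rep)
                   hypercall, so the status is checked against HV_STATUS_SUCCESS
                   rather than a rep return value. */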
2554 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallGetMemoryBalance,
2555 pHypercallData->HCPhysPage,
2556 pHypercallData->HCPhysPage + sizeof(*pInput));
2557 if (uResult == HV_STATUS_SUCCESS)
2558 {
2559 pVM->nem.s.R0Stats.cPagesAvailable = pOutput->PagesAvailable;
2560 pVM->nem.s.R0Stats.cPagesInUse = pOutput->PagesInUse;
2561 rc = VINF_SUCCESS;
2562 }
2563 else
2564 {
2565 LogRel(("HvCallGetMemoryBalance -> %#RX64 (%#RX64 %#RX64)!!\n",
2566 uResult, pOutput->PagesAvailable, pOutput->PagesInUse));
2567 rc = VERR_NEM_IPE_0;
2568 }
2569
2570 if (idCpu == NIL_VMCPUID)
2571 RTCritSectLeave(&pGVM->nemr0.s.HypercallDataCritSect);
2572 }
2573 }
2574 else
2575 rc = VERR_WRONG_ORDER;
2576 }
2577 return rc;
2578}
2579
2580
2581#if 1 && defined(DEBUG_bird)
2582/**
2583 * Debug only interface for poking around and exploring Hyper-V stuff.
2584 *
2585 * @param pGVM The ring-0 VM handle.
2586 * @param pVM The cross context VM handle.
2587 * @param idCpu The calling EMT.
2588 * @param   u64Arg      What to do: 0 == query VP register, 1 == query partition property, 2 == set VP register.
2589 */
2590VMMR0_INT_DECL(int) NEMR0DoExperiment(PGVM pGVM, PVMCC pVM, VMCPUID idCpu, uint64_t u64Arg)
2591{
2592 /*
2593 * Resolve CPU structures.
2594 */
2595 int rc = GVMMR0ValidateGVMandVMandEMT(pGVM, pVM, idCpu);
2596 if (RT_SUCCESS(rc))
2597 {
2598 AssertReturn(g_pfnHvlInvokeHypercall, VERR_NEM_MISSING_KERNEL_API);
2599
2600 PGVMCPU pGVCpu = &pGVM->aCpus[idCpu];
2601 PVMCPUCC pVCpu = VMCC_GET_CPU(pVM, idCpu);
2602 if (u64Arg == 0)
2603 {
2604 /*
2605 * Query register.
2606 */
2607 HV_INPUT_GET_VP_REGISTERS *pInput = (HV_INPUT_GET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
2608 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2609
2610 size_t const cbInput = RT_ALIGN_Z(RT_UOFFSETOF(HV_INPUT_GET_VP_REGISTERS, Names[1]), 32);
2611 HV_REGISTER_VALUE *paValues = (HV_REGISTER_VALUE *)((uint8_t *)pInput + cbInput);
2612 RT_BZERO(paValues, sizeof(paValues[0]) * 1);
2613
2614 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
2615 pInput->VpIndex = pGVCpu->idCpu;
2616 pInput->fFlags = 0;
2617 pInput->Names[0] = (HV_REGISTER_NAME)pVCpu->nem.s.Hypercall.Experiment.uItem;
2618
2619 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallGetVpRegisters, 1),
2620 pGVCpu->nemr0.s.HypercallData.HCPhysPage,
2621 pGVCpu->nemr0.s.HypercallData.HCPhysPage + cbInput);
2622 pVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_MAKE_CALL_REP_RET(1);
2623 pVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;
2624 pVCpu->nem.s.Hypercall.Experiment.uLoValue = paValues[0].Reg128.Low64;
2625 pVCpu->nem.s.Hypercall.Experiment.uHiValue = paValues[0].Reg128.High64;
2626 rc = VINF_SUCCESS;
2627 }
2628 else if (u64Arg == 1)
2629 {
2630 /*
2631 * Query partition property.
2632 */
2633 HV_INPUT_GET_PARTITION_PROPERTY *pInput = (HV_INPUT_GET_PARTITION_PROPERTY *)pGVCpu->nemr0.s.HypercallData.pbPage;
2634 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2635
2636 size_t const cbInput = RT_ALIGN_Z(sizeof(*pInput), 32);
2637 HV_OUTPUT_GET_PARTITION_PROPERTY *pOutput = (HV_OUTPUT_GET_PARTITION_PROPERTY *)((uint8_t *)pInput + cbInput);
2638 pOutput->PropertyValue = 0;
2639
2640 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
2641 pInput->PropertyCode = (HV_PARTITION_PROPERTY_CODE)pVCpu->nem.s.Hypercall.Experiment.uItem;
2642 pInput->uPadding = 0;
2643
2644 uint64_t uResult = g_pfnHvlInvokeHypercall(HvCallGetPartitionProperty,
2645 pGVCpu->nemr0.s.HypercallData.HCPhysPage,
2646 pGVCpu->nemr0.s.HypercallData.HCPhysPage + cbInput);
2647 pVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_STATUS_SUCCESS;
2648 pVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;
2649 pVCpu->nem.s.Hypercall.Experiment.uLoValue = pOutput->PropertyValue;
2650 pVCpu->nem.s.Hypercall.Experiment.uHiValue = 0;
2651 rc = VINF_SUCCESS;
2652 }
2653 else if (u64Arg == 2)
2654 {
2655 /*
2656 * Set register.
2657 */
2658 HV_INPUT_SET_VP_REGISTERS *pInput = (HV_INPUT_SET_VP_REGISTERS *)pGVCpu->nemr0.s.HypercallData.pbPage;
2659 AssertPtrReturn(pInput, VERR_INTERNAL_ERROR_3);
2660 RT_BZERO(pInput, RT_UOFFSETOF(HV_INPUT_SET_VP_REGISTERS, Elements[1]));
2661
2662 pInput->PartitionId = pGVM->nemr0.s.idHvPartition;
2663 pInput->VpIndex = pGVCpu->idCpu;
2664 pInput->RsvdZ = 0;
2665 pInput->Elements[0].Name = (HV_REGISTER_NAME)pVCpu->nem.s.Hypercall.Experiment.uItem;
2666 pInput->Elements[0].Value.Reg128.High64 = pVCpu->nem.s.Hypercall.Experiment.uHiValue;
2667 pInput->Elements[0].Value.Reg128.Low64 = pVCpu->nem.s.Hypercall.Experiment.uLoValue;
2668
2669 uint64_t uResult = g_pfnHvlInvokeHypercall(HV_MAKE_CALL_INFO(HvCallSetVpRegisters, 1),
2670 pGVCpu->nemr0.s.HypercallData.HCPhysPage, 0);
2671 pVCpu->nem.s.Hypercall.Experiment.fSuccess = uResult == HV_MAKE_CALL_REP_RET(1);
2672 pVCpu->nem.s.Hypercall.Experiment.uStatus = uResult;
2673 rc = VINF_SUCCESS;
2674 }
2675 else
2676 rc = VERR_INVALID_FUNCTION;
2677 }
2678 return rc;
2679}
2680#endif /* DEBUG_bird */
2681