VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/NEMR3Native-darwin.cpp @ 97228

Last change on this file since 97228 was 97226, checked in by vboxsync, 2 years ago

VMM/NEM-darwin: Access CPUMCTX::eflags via the 'u' member when possible in preparation for putting internal info in the reserved bits.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 159.5 KB
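
The change described in the commit message above moves reads and writes of the guest RFLAGS value to the union's integer member ('u') instead of assigning through the bit-field view, so that VirtualBox can later keep internal information in the architecturally reserved bits of the field. The following is a minimal, self-contained sketch of that access pattern; the union below is a simplified stand-in for illustration only and is not the actual CPUMCTX/RFLAGS definition used by the file.

#include <cstdint>
#include <cassert>

/* Simplified stand-in for an RFLAGS-style union (illustrative, not the real CPUMCTX field). */
union RFlagsSketch
{
    uint64_t u;                  /* Whole-register view; may also carry internal bits. */
    struct
    {
        uint32_t u1CF       : 1; /* Carry flag (bit 0). */
        uint32_t u1Reserved : 1; /* Bit 1, architecturally reads as 1. */
        uint32_t u1PF       : 1; /* Parity flag (bit 2). */
        uint32_t uRest      : 29;/* Remaining low bits, abbreviated for the sketch. */
    } Bits;
};

int main()
{
    RFlagsSketch Fl;
    Fl.u  = UINT64_C(0x202);      /* Import the value through the 'u' member... */
    Fl.u |= UINT64_C(1) << 63;    /* ...so an internal marker in a reserved bit survives round trips. */
    uint64_t const fGuestVisible = Fl.u & UINT64_C(0x3fffff); /* Mask to the architectural bits before exporting. */
    assert(fGuestVisible == UINT64_C(0x202));
    return 0;
}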
 
1/* $Id: NEMR3Native-darwin.cpp 97226 2022-10-18 22:57:26Z vboxsync $ */
2/** @file
3 * NEM - Native execution manager, native ring-3 macOS backend using Hypervisor.framework.
4 *
5 * Log group 2: Exit logging.
6 * Log group 3: Log context on exit.
7 * Log group 5: Ring-3 memory management
8 */
9
10/*
11 * Copyright (C) 2020-2022 Oracle and/or its affiliates.
12 *
13 * This file is part of VirtualBox base platform packages, as
14 * available from https://www.virtualbox.org.
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License
18 * as published by the Free Software Foundation, in version 3 of the
19 * License.
20 *
21 * This program is distributed in the hope that it will be useful, but
22 * WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
24 * General Public License for more details.
25 *
26 * You should have received a copy of the GNU General Public License
27 * along with this program; if not, see <https://www.gnu.org/licenses>.
28 *
29 * SPDX-License-Identifier: GPL-3.0-only
30 */
31
32
33/*********************************************************************************************************************************
34* Header Files *
35*********************************************************************************************************************************/
36#define LOG_GROUP LOG_GROUP_NEM
37#define VMCPU_INCL_CPUM_GST_CTX
38#include <VBox/vmm/nem.h>
39#include <VBox/vmm/iem.h>
40#include <VBox/vmm/em.h>
41#include <VBox/vmm/apic.h>
42#include <VBox/vmm/pdm.h>
43#include <VBox/vmm/hm.h>
44#include <VBox/vmm/hm_vmx.h>
45#include <VBox/vmm/dbgftrace.h>
46#include <VBox/vmm/gcm.h>
47#include "VMXInternal.h"
48#include "NEMInternal.h"
49#include <VBox/vmm/vmcc.h>
50#include "dtrace/VBoxVMM.h"
51
52#include <iprt/asm.h>
53#include <iprt/ldr.h>
54#include <iprt/mem.h>
55#include <iprt/path.h>
56#include <iprt/string.h>
57#include <iprt/system.h>
58#include <iprt/utf16.h>
59
60#include <mach/mach_time.h>
61#include <mach/kern_return.h>
62
63
64/*********************************************************************************************************************************
65* Defined Constants And Macros *
66*********************************************************************************************************************************/
67/* No nested hwvirt (for now). */
68#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
69# undef VBOX_WITH_NESTED_HWVIRT_VMX
70#endif
71
72
73/** @name HV return codes.
74 * @{ */
75/** Operation was successful. */
76#define HV_SUCCESS 0
77/** An error occurred during operation. */
78#define HV_ERROR 0xfae94001
79/** The operation could not be completed right now, try again. */
80#define HV_BUSY 0xfae94002
81/** One of the parameters passed was invalid. */
82#define HV_BAD_ARGUMENT 0xfae94003
83/** Not enough resources left to fulfill the operation. */
84#define HV_NO_RESOURCES 0xfae94005
85/** The device could not be found. */
86#define HV_NO_DEVICE 0xfae94006
87/** The operation is not supported on this platform with this configuration. */
88#define HV_UNSUPPORTED 0xfae94007
89/** @} */
90
91
92/** @name HV memory protection flags.
93 * @{ */
94/** Memory is readable. */
95#define HV_MEMORY_READ RT_BIT_64(0)
96/** Memory is writeable. */
97#define HV_MEMORY_WRITE RT_BIT_64(1)
98/** Memory is executable. */
99#define HV_MEMORY_EXEC RT_BIT_64(2)
100/** @} */
101
102
103/** @name HV shadow VMCS protection flags.
104 * @{ */
105/** Shadow VMCS field is not accessible. */
106#define HV_SHADOW_VMCS_NONE 0
107/** Shadow VMCS field is readable. */
108#define HV_SHADOW_VMCS_READ RT_BIT_64(0)
109/** Shadow VMCS field is writeable. */
110#define HV_SHADOW_VMCS_WRITE RT_BIT_64(1)
111/** @} */
112
113
114/** Default VM creation flags. */
115#define HV_VM_DEFAULT 0
116/** Default guest address space creation flags. */
117#define HV_VM_SPACE_DEFAULT 0
118/** Default vCPU creation flags. */
119#define HV_VCPU_DEFAULT 0
120
121#define HV_DEADLINE_FOREVER UINT64_MAX
122
123
124/*********************************************************************************************************************************
125* Structures and Typedefs *
126*********************************************************************************************************************************/
127
128/** HV return code type. */
129typedef uint32_t hv_return_t;
130/** HV capability bitmask. */
131typedef uint64_t hv_capability_t;
132/** Option bitmask type when creating a VM. */
133typedef uint64_t hv_vm_options_t;
134/** Option bitmask when creating a vCPU. */
135typedef uint64_t hv_vcpu_options_t;
136/** HV memory protection flags type. */
137typedef uint64_t hv_memory_flags_t;
138/** Shadow VMCS protection flags. */
139typedef uint64_t hv_shadow_flags_t;
140/** Guest physical address type. */
141typedef uint64_t hv_gpaddr_t;
142
143
144/**
145 * VMX Capability enumeration.
146 */
147typedef enum
148{
149 HV_VMX_CAP_PINBASED = 0,
150 HV_VMX_CAP_PROCBASED,
151 HV_VMX_CAP_PROCBASED2,
152 HV_VMX_CAP_ENTRY,
153 HV_VMX_CAP_EXIT,
154 HV_VMX_CAP_BASIC, /* Since 11.0 */
155 HV_VMX_CAP_TRUE_PINBASED, /* Since 11.0 */
156 HV_VMX_CAP_TRUE_PROCBASED, /* Since 11.0 */
157 HV_VMX_CAP_TRUE_ENTRY, /* Since 11.0 */
158 HV_VMX_CAP_TRUE_EXIT, /* Since 11.0 */
159 HV_VMX_CAP_MISC, /* Since 11.0 */
160 HV_VMX_CAP_CR0_FIXED0, /* Since 11.0 */
161 HV_VMX_CAP_CR0_FIXED1, /* Since 11.0 */
162 HV_VMX_CAP_CR4_FIXED0, /* Since 11.0 */
163 HV_VMX_CAP_CR4_FIXED1, /* Since 11.0 */
164 HV_VMX_CAP_VMCS_ENUM, /* Since 11.0 */
165 HV_VMX_CAP_EPT_VPID_CAP, /* Since 11.0 */
166 HV_VMX_CAP_PREEMPTION_TIMER = 32
167} hv_vmx_capability_t;
168
169
170/**
171 * HV x86 register enumeration.
172 */
173typedef enum
174{
175 HV_X86_RIP = 0,
176 HV_X86_RFLAGS,
177 HV_X86_RAX,
178 HV_X86_RCX,
179 HV_X86_RDX,
180 HV_X86_RBX,
181 HV_X86_RSI,
182 HV_X86_RDI,
183 HV_X86_RSP,
184 HV_X86_RBP,
185 HV_X86_R8,
186 HV_X86_R9,
187 HV_X86_R10,
188 HV_X86_R11,
189 HV_X86_R12,
190 HV_X86_R13,
191 HV_X86_R14,
192 HV_X86_R15,
193 HV_X86_CS,
194 HV_X86_SS,
195 HV_X86_DS,
196 HV_X86_ES,
197 HV_X86_FS,
198 HV_X86_GS,
199 HV_X86_IDT_BASE,
200 HV_X86_IDT_LIMIT,
201 HV_X86_GDT_BASE,
202 HV_X86_GDT_LIMIT,
203 HV_X86_LDTR,
204 HV_X86_LDT_BASE,
205 HV_X86_LDT_LIMIT,
206 HV_X86_LDT_AR,
207 HV_X86_TR,
208 HV_X86_TSS_BASE,
209 HV_X86_TSS_LIMIT,
210 HV_X86_TSS_AR,
211 HV_X86_CR0,
212 HV_X86_CR1,
213 HV_X86_CR2,
214 HV_X86_CR3,
215 HV_X86_CR4,
216 HV_X86_DR0,
217 HV_X86_DR1,
218 HV_X86_DR2,
219 HV_X86_DR3,
220 HV_X86_DR4,
221 HV_X86_DR5,
222 HV_X86_DR6,
223 HV_X86_DR7,
224 HV_X86_TPR,
225 HV_X86_XCR0,
226 HV_X86_REGISTERS_MAX
227} hv_x86_reg_t;
228
229
230/** MSR permission flags type. */
231typedef uint32_t hv_msr_flags_t;
232/** MSR can't be accessed. */
233#define HV_MSR_NONE 0
234/** MSR is readable by the guest. */
235#define HV_MSR_READ RT_BIT(0)
236/** MSR is writeable by the guest. */
237#define HV_MSR_WRITE RT_BIT(1)
238
239
240typedef hv_return_t FN_HV_CAPABILITY(hv_capability_t capability, uint64_t *value);
241typedef hv_return_t FN_HV_VM_CREATE(hv_vm_options_t flags);
242typedef hv_return_t FN_HV_VM_DESTROY(void);
243typedef hv_return_t FN_HV_VM_SPACE_CREATE(hv_vm_space_t *asid);
244typedef hv_return_t FN_HV_VM_SPACE_DESTROY(hv_vm_space_t asid);
245typedef hv_return_t FN_HV_VM_MAP(const void *uva, hv_gpaddr_t gpa, size_t size, hv_memory_flags_t flags);
246typedef hv_return_t FN_HV_VM_UNMAP(hv_gpaddr_t gpa, size_t size);
247typedef hv_return_t FN_HV_VM_PROTECT(hv_gpaddr_t gpa, size_t size, hv_memory_flags_t flags);
248typedef hv_return_t FN_HV_VM_MAP_SPACE(hv_vm_space_t asid, const void *uva, hv_gpaddr_t gpa, size_t size, hv_memory_flags_t flags);
249typedef hv_return_t FN_HV_VM_UNMAP_SPACE(hv_vm_space_t asid, hv_gpaddr_t gpa, size_t size);
250typedef hv_return_t FN_HV_VM_PROTECT_SPACE(hv_vm_space_t asid, hv_gpaddr_t gpa, size_t size, hv_memory_flags_t flags);
251typedef hv_return_t FN_HV_VM_SYNC_TSC(uint64_t tsc);
252
253typedef hv_return_t FN_HV_VCPU_CREATE(hv_vcpuid_t *vcpu, hv_vcpu_options_t flags);
254typedef hv_return_t FN_HV_VCPU_DESTROY(hv_vcpuid_t vcpu);
255typedef hv_return_t FN_HV_VCPU_SET_SPACE(hv_vcpuid_t vcpu, hv_vm_space_t asid);
256typedef hv_return_t FN_HV_VCPU_READ_REGISTER(hv_vcpuid_t vcpu, hv_x86_reg_t reg, uint64_t *value);
257typedef hv_return_t FN_HV_VCPU_WRITE_REGISTER(hv_vcpuid_t vcpu, hv_x86_reg_t reg, uint64_t value);
258typedef hv_return_t FN_HV_VCPU_READ_FPSTATE(hv_vcpuid_t vcpu, void *buffer, size_t size);
259typedef hv_return_t FN_HV_VCPU_WRITE_FPSTATE(hv_vcpuid_t vcpu, const void *buffer, size_t size);
260typedef hv_return_t FN_HV_VCPU_ENABLE_NATIVE_MSR(hv_vcpuid_t vcpu, uint32_t msr, bool enable);
261typedef hv_return_t FN_HV_VCPU_READ_MSR(hv_vcpuid_t vcpu, uint32_t msr, uint64_t *value);
262typedef hv_return_t FN_HV_VCPU_WRITE_MSR(hv_vcpuid_t vcpu, uint32_t msr, uint64_t value);
263typedef hv_return_t FN_HV_VCPU_FLUSH(hv_vcpuid_t vcpu);
264typedef hv_return_t FN_HV_VCPU_INVALIDATE_TLB(hv_vcpuid_t vcpu);
265typedef hv_return_t FN_HV_VCPU_RUN(hv_vcpuid_t vcpu);
266typedef hv_return_t FN_HV_VCPU_RUN_UNTIL(hv_vcpuid_t vcpu, uint64_t deadline);
267typedef hv_return_t FN_HV_VCPU_INTERRUPT(hv_vcpuid_t *vcpus, unsigned int vcpu_count);
268typedef hv_return_t FN_HV_VCPU_GET_EXEC_TIME(hv_vcpuid_t *vcpus, uint64_t *time);
269
270typedef hv_return_t FN_HV_VMX_VCPU_READ_VMCS(hv_vcpuid_t vcpu, uint32_t field, uint64_t *value);
271typedef hv_return_t FN_HV_VMX_VCPU_WRITE_VMCS(hv_vcpuid_t vcpu, uint32_t field, uint64_t value);
272
273typedef hv_return_t FN_HV_VMX_VCPU_READ_SHADOW_VMCS(hv_vcpuid_t vcpu, uint32_t field, uint64_t *value);
274typedef hv_return_t FN_HV_VMX_VCPU_WRITE_SHADOW_VMCS(hv_vcpuid_t vcpu, uint32_t field, uint64_t value);
275typedef hv_return_t FN_HV_VMX_VCPU_SET_SHADOW_ACCESS(hv_vcpuid_t vcpu, uint32_t field, hv_shadow_flags_t flags);
276
277typedef hv_return_t FN_HV_VMX_READ_CAPABILITY(hv_vmx_capability_t field, uint64_t *value);
278typedef hv_return_t FN_HV_VMX_VCPU_SET_APIC_ADDRESS(hv_vcpuid_t vcpu, hv_gpaddr_t gpa);
279
280/* Since 11.0 */
281typedef hv_return_t FN_HV_VMX_VCPU_GET_CAP_WRITE_VMCS(hv_vcpuid_t vcpu, uint32_t field, uint64_t *allowed_0, uint64_t *allowed_1);
282typedef hv_return_t FN_HV_VCPU_ENABLE_MANAGED_MSR(hv_vcpuid_t vcpu, uint32_t msr, bool enable);
283typedef hv_return_t FN_HV_VCPU_SET_MSR_ACCESS(hv_vcpuid_t vcpu, uint32_t msr, hv_msr_flags_t flags);
284
285
286/*********************************************************************************************************************************
287* Global Variables *
288*********************************************************************************************************************************/
289/** NEM_DARWIN_PAGE_STATE_XXX names. */
290NEM_TMPL_STATIC const char * const g_apszPageStates[4] = { "not-set", "unmapped", "readable", "writable" };
291/** MSRs. */
292static SUPHWVIRTMSRS g_HmMsrs;
293/** VMX: Set if swapping EFER is supported. */
294static bool g_fHmVmxSupportsVmcsEfer = false;
295/** @name APIs imported from Hypervisor.framework.
296 * @{ */
297static FN_HV_CAPABILITY *g_pfnHvCapability = NULL; /* Since 10.15 */
298static FN_HV_VM_CREATE *g_pfnHvVmCreate = NULL; /* Since 10.10 */
299static FN_HV_VM_DESTROY *g_pfnHvVmDestroy = NULL; /* Since 10.10 */
300static FN_HV_VM_SPACE_CREATE *g_pfnHvVmSpaceCreate = NULL; /* Since 10.15 */
301static FN_HV_VM_SPACE_DESTROY *g_pfnHvVmSpaceDestroy = NULL; /* Since 10.15 */
302static FN_HV_VM_MAP *g_pfnHvVmMap = NULL; /* Since 10.10 */
303static FN_HV_VM_UNMAP *g_pfnHvVmUnmap = NULL; /* Since 10.10 */
304static FN_HV_VM_PROTECT *g_pfnHvVmProtect = NULL; /* Since 10.10 */
305static FN_HV_VM_MAP_SPACE *g_pfnHvVmMapSpace = NULL; /* Since 10.15 */
306static FN_HV_VM_UNMAP_SPACE *g_pfnHvVmUnmapSpace = NULL; /* Since 10.15 */
307static FN_HV_VM_PROTECT_SPACE *g_pfnHvVmProtectSpace = NULL; /* Since 10.15 */
308static FN_HV_VM_SYNC_TSC *g_pfnHvVmSyncTsc = NULL; /* Since 10.10 */
309
310static FN_HV_VCPU_CREATE *g_pfnHvVCpuCreate = NULL; /* Since 10.10 */
311static FN_HV_VCPU_DESTROY *g_pfnHvVCpuDestroy = NULL; /* Since 10.10 */
312static FN_HV_VCPU_SET_SPACE *g_pfnHvVCpuSetSpace = NULL; /* Since 10.15 */
313static FN_HV_VCPU_READ_REGISTER *g_pfnHvVCpuReadRegister = NULL; /* Since 10.10 */
314static FN_HV_VCPU_WRITE_REGISTER *g_pfnHvVCpuWriteRegister = NULL; /* Since 10.10 */
315static FN_HV_VCPU_READ_FPSTATE *g_pfnHvVCpuReadFpState = NULL; /* Since 10.10 */
316static FN_HV_VCPU_WRITE_FPSTATE *g_pfnHvVCpuWriteFpState = NULL; /* Since 10.10 */
317static FN_HV_VCPU_ENABLE_NATIVE_MSR *g_pfnHvVCpuEnableNativeMsr = NULL; /* Since 10.10 */
318static FN_HV_VCPU_READ_MSR *g_pfnHvVCpuReadMsr = NULL; /* Since 10.10 */
319static FN_HV_VCPU_WRITE_MSR *g_pfnHvVCpuWriteMsr = NULL; /* Since 10.10 */
320static FN_HV_VCPU_FLUSH *g_pfnHvVCpuFlush = NULL; /* Since 10.10 */
321static FN_HV_VCPU_INVALIDATE_TLB *g_pfnHvVCpuInvalidateTlb = NULL; /* Since 10.10 */
322static FN_HV_VCPU_RUN *g_pfnHvVCpuRun = NULL; /* Since 10.10 */
323static FN_HV_VCPU_RUN_UNTIL *g_pfnHvVCpuRunUntil = NULL; /* Since 10.15 */
324static FN_HV_VCPU_INTERRUPT *g_pfnHvVCpuInterrupt = NULL; /* Since 10.10 */
325static FN_HV_VCPU_GET_EXEC_TIME *g_pfnHvVCpuGetExecTime = NULL; /* Since 10.10 */
326
327static FN_HV_VMX_READ_CAPABILITY *g_pfnHvVmxReadCapability = NULL; /* Since 10.10 */
328static FN_HV_VMX_VCPU_READ_VMCS *g_pfnHvVmxVCpuReadVmcs = NULL; /* Since 10.10 */
329static FN_HV_VMX_VCPU_WRITE_VMCS *g_pfnHvVmxVCpuWriteVmcs = NULL; /* Since 10.10 */
330static FN_HV_VMX_VCPU_READ_SHADOW_VMCS *g_pfnHvVmxVCpuReadShadowVmcs = NULL; /* Since 10.15 */
331static FN_HV_VMX_VCPU_WRITE_SHADOW_VMCS *g_pfnHvVmxVCpuWriteShadowVmcs = NULL; /* Since 10.15 */
332static FN_HV_VMX_VCPU_SET_SHADOW_ACCESS *g_pfnHvVmxVCpuSetShadowAccess = NULL; /* Since 10.15 */
333static FN_HV_VMX_VCPU_SET_APIC_ADDRESS *g_pfnHvVmxVCpuSetApicAddress = NULL; /* Since 10.10 */
334
335static FN_HV_VMX_VCPU_GET_CAP_WRITE_VMCS *g_pfnHvVmxVCpuGetCapWriteVmcs = NULL; /* Since 11.0 */
336static FN_HV_VCPU_ENABLE_MANAGED_MSR *g_pfnHvVCpuEnableManagedMsr = NULL; /* Since 11.0 */
337static FN_HV_VCPU_SET_MSR_ACCESS *g_pfnHvVCpuSetMsrAccess = NULL; /* Since 11.0 */
338/** @} */
339
340
341/**
342 * Import instructions.
343 */
344static const struct
345{
346 bool fOptional; /**< Set if import is optional. */
347 void **ppfn; /**< The function pointer variable. */
348 const char *pszName; /**< The function name. */
349} g_aImports[] =
350{
351#define NEM_DARWIN_IMPORT(a_fOptional, a_Pfn, a_Name) { (a_fOptional), (void **)&(a_Pfn), #a_Name }
352 NEM_DARWIN_IMPORT(true, g_pfnHvCapability, hv_capability),
353 NEM_DARWIN_IMPORT(false, g_pfnHvVmCreate, hv_vm_create),
354 NEM_DARWIN_IMPORT(false, g_pfnHvVmDestroy, hv_vm_destroy),
355 NEM_DARWIN_IMPORT(true, g_pfnHvVmSpaceCreate, hv_vm_space_create),
356 NEM_DARWIN_IMPORT(true, g_pfnHvVmSpaceDestroy, hv_vm_space_destroy),
357 NEM_DARWIN_IMPORT(false, g_pfnHvVmMap, hv_vm_map),
358 NEM_DARWIN_IMPORT(false, g_pfnHvVmUnmap, hv_vm_unmap),
359 NEM_DARWIN_IMPORT(false, g_pfnHvVmProtect, hv_vm_protect),
360 NEM_DARWIN_IMPORT(true, g_pfnHvVmMapSpace, hv_vm_map_space),
361 NEM_DARWIN_IMPORT(true, g_pfnHvVmUnmapSpace, hv_vm_unmap_space),
362 NEM_DARWIN_IMPORT(true, g_pfnHvVmProtectSpace, hv_vm_protect_space),
363 NEM_DARWIN_IMPORT(false, g_pfnHvVmSyncTsc, hv_vm_sync_tsc),
364
365 NEM_DARWIN_IMPORT(false, g_pfnHvVCpuCreate, hv_vcpu_create),
366 NEM_DARWIN_IMPORT(false, g_pfnHvVCpuDestroy, hv_vcpu_destroy),
367 NEM_DARWIN_IMPORT(true, g_pfnHvVCpuSetSpace, hv_vcpu_set_space),
368 NEM_DARWIN_IMPORT(false, g_pfnHvVCpuReadRegister, hv_vcpu_read_register),
369 NEM_DARWIN_IMPORT(false, g_pfnHvVCpuWriteRegister, hv_vcpu_write_register),
370 NEM_DARWIN_IMPORT(false, g_pfnHvVCpuReadFpState, hv_vcpu_read_fpstate),
371 NEM_DARWIN_IMPORT(false, g_pfnHvVCpuWriteFpState, hv_vcpu_write_fpstate),
372 NEM_DARWIN_IMPORT(false, g_pfnHvVCpuEnableNativeMsr, hv_vcpu_enable_native_msr),
373 NEM_DARWIN_IMPORT(false, g_pfnHvVCpuReadMsr, hv_vcpu_read_msr),
374 NEM_DARWIN_IMPORT(false, g_pfnHvVCpuWriteMsr, hv_vcpu_write_msr),
375 NEM_DARWIN_IMPORT(false, g_pfnHvVCpuFlush, hv_vcpu_flush),
376 NEM_DARWIN_IMPORT(false, g_pfnHvVCpuInvalidateTlb, hv_vcpu_invalidate_tlb),
377 NEM_DARWIN_IMPORT(false, g_pfnHvVCpuRun, hv_vcpu_run),
378 NEM_DARWIN_IMPORT(true, g_pfnHvVCpuRunUntil, hv_vcpu_run_until),
379 NEM_DARWIN_IMPORT(false, g_pfnHvVCpuInterrupt, hv_vcpu_interrupt),
380 NEM_DARWIN_IMPORT(true, g_pfnHvVCpuGetExecTime, hv_vcpu_get_exec_time),
381 NEM_DARWIN_IMPORT(false, g_pfnHvVmxReadCapability, hv_vmx_read_capability),
382 NEM_DARWIN_IMPORT(false, g_pfnHvVmxVCpuReadVmcs, hv_vmx_vcpu_read_vmcs),
383 NEM_DARWIN_IMPORT(false, g_pfnHvVmxVCpuWriteVmcs, hv_vmx_vcpu_write_vmcs),
384 NEM_DARWIN_IMPORT(true, g_pfnHvVmxVCpuReadShadowVmcs, hv_vmx_vcpu_read_shadow_vmcs),
385 NEM_DARWIN_IMPORT(true, g_pfnHvVmxVCpuWriteShadowVmcs, hv_vmx_vcpu_write_shadow_vmcs),
386 NEM_DARWIN_IMPORT(true, g_pfnHvVmxVCpuSetShadowAccess, hv_vmx_vcpu_set_shadow_access),
387 NEM_DARWIN_IMPORT(false, g_pfnHvVmxVCpuSetApicAddress, hv_vmx_vcpu_set_apic_address),
388 NEM_DARWIN_IMPORT(true, g_pfnHvVmxVCpuGetCapWriteVmcs, hv_vmx_vcpu_get_cap_write_vmcs),
389 NEM_DARWIN_IMPORT(true, g_pfnHvVCpuEnableManagedMsr, hv_vcpu_enable_managed_msr),
390 NEM_DARWIN_IMPORT(true, g_pfnHvVCpuSetMsrAccess, hv_vcpu_set_msr_access)
391#undef NEM_DARWIN_IMPORT
392};
393
394
395/*
396 * Let the preprocessor alias the APIs to import variables for better autocompletion.
397 */
398#ifndef IN_SLICKEDIT
399# define hv_capability g_pfnHvCapability
400# define hv_vm_create g_pfnHvVmCreate
401# define hv_vm_destroy g_pfnHvVmDestroy
402# define hv_vm_space_create g_pfnHvVmSpaceCreate
403# define hv_vm_space_destroy g_pfnHvVmSpaceDestroy
404# define hv_vm_map g_pfnHvVmMap
405# define hv_vm_unmap g_pfnHvVmUnmap
406# define hv_vm_protect g_pfnHvVmProtect
407# define hv_vm_map_space g_pfnHvVmMapSpace
408# define hv_vm_unmap_space g_pfnHvVmUnmapSpace
409# define hv_vm_protect_space g_pfnHvVmProtectSpace
410# define hv_vm_sync_tsc g_pfnHvVmSyncTsc
411
412# define hv_vcpu_create g_pfnHvVCpuCreate
413# define hv_vcpu_destroy g_pfnHvVCpuDestroy
414# define hv_vcpu_set_space g_pfnHvVCpuSetSpace
415# define hv_vcpu_read_register g_pfnHvVCpuReadRegister
416# define hv_vcpu_write_register g_pfnHvVCpuWriteRegister
417# define hv_vcpu_read_fpstate g_pfnHvVCpuReadFpState
418# define hv_vcpu_write_fpstate g_pfnHvVCpuWriteFpState
419# define hv_vcpu_enable_native_msr g_pfnHvVCpuEnableNativeMsr
420# define hv_vcpu_read_msr g_pfnHvVCpuReadMsr
421# define hv_vcpu_write_msr g_pfnHvVCpuWriteMsr
422# define hv_vcpu_flush g_pfnHvVCpuFlush
423# define hv_vcpu_invalidate_tlb g_pfnHvVCpuInvalidateTlb
424# define hv_vcpu_run g_pfnHvVCpuRun
425# define hv_vcpu_run_until g_pfnHvVCpuRunUntil
426# define hv_vcpu_interrupt g_pfnHvVCpuInterrupt
427# define hv_vcpu_get_exec_time g_pfnHvVCpuGetExecTime
428
429# define hv_vmx_read_capability g_pfnHvVmxReadCapability
430# define hv_vmx_vcpu_read_vmcs g_pfnHvVmxVCpuReadVmcs
431# define hv_vmx_vcpu_write_vmcs g_pfnHvVmxVCpuWriteVmcs
432# define hv_vmx_vcpu_read_shadow_vmcs g_pfnHvVmxVCpuReadShadowVmcs
433# define hv_vmx_vcpu_write_shadow_vmcs g_pfnHvVmxVCpuWriteShadowVmcs
434# define hv_vmx_vcpu_set_shadow_access g_pfnHvVmxVCpuSetShadowAccess
435# define hv_vmx_vcpu_set_apic_address g_pfnHvVmxVCpuSetApicAddress
436
437# define hv_vmx_vcpu_get_cap_write_vmcs g_pfnHvVmxVCpuGetCapWriteVmcs
438# define hv_vcpu_enable_managed_msr g_pfnHvVCpuEnableManagedMsr
439# define hv_vcpu_set_msr_access g_pfnHvVCpuSetMsrAccess
440#endif
441
442static const struct
443{
444 uint32_t u32VmcsFieldId; /**< The VMCS field identifier. */
445 const char *pszVmcsField; /**< The VMCS field name. */
446 bool f64Bit; /**< Set if the field is 64-bit wide. */
447} g_aVmcsFieldsCap[] =
448{
449#define NEM_DARWIN_VMCS64_FIELD_CAP(a_u32VmcsFieldId) { (a_u32VmcsFieldId), #a_u32VmcsFieldId, true }
450#define NEM_DARWIN_VMCS32_FIELD_CAP(a_u32VmcsFieldId) { (a_u32VmcsFieldId), #a_u32VmcsFieldId, false }
451
452 NEM_DARWIN_VMCS32_FIELD_CAP(VMX_VMCS32_CTRL_PIN_EXEC),
453 NEM_DARWIN_VMCS32_FIELD_CAP(VMX_VMCS32_CTRL_PROC_EXEC),
454 NEM_DARWIN_VMCS32_FIELD_CAP(VMX_VMCS32_CTRL_EXCEPTION_BITMAP),
455 NEM_DARWIN_VMCS32_FIELD_CAP(VMX_VMCS32_CTRL_EXIT),
456 NEM_DARWIN_VMCS32_FIELD_CAP(VMX_VMCS32_CTRL_ENTRY),
457 NEM_DARWIN_VMCS32_FIELD_CAP(VMX_VMCS32_CTRL_PROC_EXEC2),
458 NEM_DARWIN_VMCS32_FIELD_CAP(VMX_VMCS32_CTRL_PLE_GAP),
459 NEM_DARWIN_VMCS32_FIELD_CAP(VMX_VMCS32_CTRL_PLE_WINDOW),
460 NEM_DARWIN_VMCS64_FIELD_CAP(VMX_VMCS64_CTRL_TSC_OFFSET_FULL),
461 NEM_DARWIN_VMCS64_FIELD_CAP(VMX_VMCS64_GUEST_DEBUGCTL_FULL)
462#undef NEM_DARWIN_VMCS64_FIELD_CAP
463#undef NEM_DARWIN_VMCS32_FIELD_CAP
464};
465
466
467/*********************************************************************************************************************************
468* Internal Functions *
469*********************************************************************************************************************************/
470DECLINLINE(void) vmxHCImportGuestIntrState(PVMCPUCC pVCpu, PCVMXVMCSINFO pVmcsInfo);
471
472
473/**
474 * Converts a HV return code to a VBox status code.
475 *
476 * @returns VBox status code.
477 * @param hrc The HV return code to convert.
478 */
479DECLINLINE(int) nemR3DarwinHvSts2Rc(hv_return_t hrc)
480{
481 if (hrc == HV_SUCCESS)
482 return VINF_SUCCESS;
483
484 switch (hrc)
485 {
486 case HV_ERROR: return VERR_INVALID_STATE;
487 case HV_BUSY: return VERR_RESOURCE_BUSY;
488 case HV_BAD_ARGUMENT: return VERR_INVALID_PARAMETER;
489 case HV_NO_RESOURCES: return VERR_OUT_OF_RESOURCES;
490 case HV_NO_DEVICE: return VERR_NOT_FOUND;
491 case HV_UNSUPPORTED: return VERR_NOT_SUPPORTED;
492 }
493
494 return VERR_IPE_UNEXPECTED_STATUS;
495}
496
497
498/**
499 * Unmaps the given guest physical address range (page aligned).
500 *
501 * @returns VBox status code.
502 * @param pVM The cross context VM structure.
503 * @param GCPhys The guest physical address to start unmapping at.
504 * @param cb The size of the range to unmap in bytes.
505 * @param pu2State Where to store the new state of the unmapped page, optional.
506 */
507DECLINLINE(int) nemR3DarwinUnmap(PVM pVM, RTGCPHYS GCPhys, size_t cb, uint8_t *pu2State)
508{
509 if (*pu2State <= NEM_DARWIN_PAGE_STATE_UNMAPPED)
510 {
511 Log5(("nemR3DarwinUnmap: %RGp == unmapped\n", GCPhys));
512 *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
513 return VINF_SUCCESS;
514 }
515
516 LogFlowFunc(("Unmapping %RGp LB %zu\n", GCPhys, cb));
517 hv_return_t hrc;
518 if (pVM->nem.s.fCreatedAsid)
519 hrc = hv_vm_unmap_space(pVM->nem.s.uVmAsid, GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, cb);
520 else
521 hrc = hv_vm_unmap(GCPhys, cb);
522 if (RT_LIKELY(hrc == HV_SUCCESS))
523 {
524 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
525 if (pu2State)
526 *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
527 Log5(("nemR3DarwinUnmap: %RGp => unmapped\n", GCPhys));
528 return VINF_SUCCESS;
529 }
530
531 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
532 LogRel(("nemR3DarwinUnmap(%RGp): failed! hrc=%#x\n",
533 GCPhys, hrc));
534 return VERR_NEM_IPE_6;
535}
536
537
538/**
539 * Maps a given guest physical address range backed by the given memory with the given
540 * protection flags.
541 *
542 * @returns VBox status code.
543 * @param pVM The cross context VM structure.
544 * @param GCPhys The guest physical address to start mapping.
545 * @param pvRam The R3 pointer of the memory to back the range with.
546 * @param cb The size of the range, page aligned.
547 * @param fPageProt The page protection flags to use for this range, combination of NEM_PAGE_PROT_XXX
548 * @param pu2State Where to store the state for the new page, optional.
549 */
550DECLINLINE(int) nemR3DarwinMap(PVM pVM, RTGCPHYS GCPhys, const void *pvRam, size_t cb, uint32_t fPageProt, uint8_t *pu2State)
551{
552 LogFlowFunc(("Mapping %RGp LB %zu fProt=%#x\n", GCPhys, cb, fPageProt));
553
554 Assert(fPageProt != NEM_PAGE_PROT_NONE);
555
556 hv_memory_flags_t fHvMemProt = 0;
557 if (fPageProt & NEM_PAGE_PROT_READ)
558 fHvMemProt |= HV_MEMORY_READ;
559 if (fPageProt & NEM_PAGE_PROT_WRITE)
560 fHvMemProt |= HV_MEMORY_WRITE;
561 if (fPageProt & NEM_PAGE_PROT_EXECUTE)
562 fHvMemProt |= HV_MEMORY_EXEC;
563
564 hv_return_t hrc;
565 if (pVM->nem.s.fCreatedAsid)
566 hrc = hv_vm_map_space(pVM->nem.s.uVmAsid, pvRam, GCPhys, cb, fHvMemProt);
567 else
568 hrc = hv_vm_map(pvRam, GCPhys, cb, fHvMemProt);
569 if (hrc == HV_SUCCESS)
570 {
571 if (pu2State)
572 *pu2State = (fPageProt & NEM_PAGE_PROT_WRITE)
573 ? NEM_DARWIN_PAGE_STATE_WRITABLE
574 : NEM_DARWIN_PAGE_STATE_READABLE;
575 return VINF_SUCCESS;
576 }
577
578 return nemR3DarwinHvSts2Rc(hrc);
579}
580
581#if 0 /* unused */
582DECLINLINE(int) nemR3DarwinProtectPage(PVM pVM, RTGCPHYS GCPhys, size_t cb, uint32_t fPageProt)
583{
584 hv_memory_flags_t fHvMemProt = 0;
585 if (fPageProt & NEM_PAGE_PROT_READ)
586 fHvMemProt |= HV_MEMORY_READ;
587 if (fPageProt & NEM_PAGE_PROT_WRITE)
588 fHvMemProt |= HV_MEMORY_WRITE;
589 if (fPageProt & NEM_PAGE_PROT_EXECUTE)
590 fHvMemProt |= HV_MEMORY_EXEC;
591
592 hv_return_t hrc;
593 if (pVM->nem.s.fCreatedAsid)
594 hrc = hv_vm_protect_space(pVM->nem.s.uVmAsid, GCPhys, cb, fHvMemProt);
595 else
596 hrc = hv_vm_protect(GCPhys, cb, fHvMemProt);
597
598 return nemR3DarwinHvSts2Rc(hrc);
599}
600#endif
601
602DECLINLINE(int) nemR3NativeGCPhys2R3PtrReadOnly(PVM pVM, RTGCPHYS GCPhys, const void **ppv)
603{
604 PGMPAGEMAPLOCK Lock;
605 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, ppv, &Lock);
606 if (RT_SUCCESS(rc))
607 PGMPhysReleasePageMappingLock(pVM, &Lock);
608 return rc;
609}
610
611
612DECLINLINE(int) nemR3NativeGCPhys2R3PtrWriteable(PVM pVM, RTGCPHYS GCPhys, void **ppv)
613{
614 PGMPAGEMAPLOCK Lock;
615 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, ppv, &Lock);
616 if (RT_SUCCESS(rc))
617 PGMPhysReleasePageMappingLock(pVM, &Lock);
618 return rc;
619}
620
621
622#ifdef LOG_ENABLED
623/**
624 * Logs the current CPU state.
625 */
626static void nemR3DarwinLogState(PVMCC pVM, PVMCPUCC pVCpu)
627{
628 if (LogIs3Enabled())
629 {
630#if 0
631 char szRegs[4096];
632 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
633 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
634 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
635 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
636 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
637 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
638 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
639 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
640 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
641 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
642 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
643 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
644 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
645 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
646 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
647 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
648 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
649 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
650 " efer=%016VR{efer}\n"
651 " pat=%016VR{pat}\n"
652 " sf_mask=%016VR{sf_mask}\n"
653 "krnl_gs_base=%016VR{krnl_gs_base}\n"
654 " lstar=%016VR{lstar}\n"
655 " star=%016VR{star} cstar=%016VR{cstar}\n"
656 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
657 );
658
659 char szInstr[256];
660 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
661 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
662 szInstr, sizeof(szInstr), NULL);
663 Log3(("%s%s\n", szRegs, szInstr));
664#else
665 RT_NOREF(pVM, pVCpu);
666#endif
667 }
668}
669#endif /* LOG_ENABLED */
670
671
672DECLINLINE(int) nemR3DarwinReadVmcs16(PVMCPUCC pVCpu, uint32_t uFieldEnc, uint16_t *pData)
673{
674 uint64_t u64Data;
675 hv_return_t hrc = hv_vmx_vcpu_read_vmcs(pVCpu->nem.s.hVCpuId, uFieldEnc, &u64Data);
676 if (RT_LIKELY(hrc == HV_SUCCESS))
677 {
678 *pData = (uint16_t)u64Data;
679 return VINF_SUCCESS;
680 }
681
682 return nemR3DarwinHvSts2Rc(hrc);
683}
684
685
686DECLINLINE(int) nemR3DarwinReadVmcs32(PVMCPUCC pVCpu, uint32_t uFieldEnc, uint32_t *pData)
687{
688 uint64_t u64Data;
689 hv_return_t hrc = hv_vmx_vcpu_read_vmcs(pVCpu->nem.s.hVCpuId, uFieldEnc, &u64Data);
690 if (RT_LIKELY(hrc == HV_SUCCESS))
691 {
692 *pData = (uint32_t)u64Data;
693 return VINF_SUCCESS;
694 }
695
696 return nemR3DarwinHvSts2Rc(hrc);
697}
698
699
700DECLINLINE(int) nemR3DarwinReadVmcs64(PVMCPUCC pVCpu, uint32_t uFieldEnc, uint64_t *pData)
701{
702 hv_return_t hrc = hv_vmx_vcpu_read_vmcs(pVCpu->nem.s.hVCpuId, uFieldEnc, pData);
703 if (RT_LIKELY(hrc == HV_SUCCESS))
704 return VINF_SUCCESS;
705
706 return nemR3DarwinHvSts2Rc(hrc);
707}
708
709
710DECLINLINE(int) nemR3DarwinWriteVmcs16(PVMCPUCC pVCpu, uint32_t uFieldEnc, uint16_t u16Val)
711{
712 hv_return_t hrc = hv_vmx_vcpu_write_vmcs(pVCpu->nem.s.hVCpuId, uFieldEnc, u16Val);
713 if (RT_LIKELY(hrc == HV_SUCCESS))
714 return VINF_SUCCESS;
715
716 return nemR3DarwinHvSts2Rc(hrc);
717}
718
719
720DECLINLINE(int) nemR3DarwinWriteVmcs32(PVMCPUCC pVCpu, uint32_t uFieldEnc, uint32_t u32Val)
721{
722 hv_return_t hrc = hv_vmx_vcpu_write_vmcs(pVCpu->nem.s.hVCpuId, uFieldEnc, u32Val);
723 if (RT_LIKELY(hrc == HV_SUCCESS))
724 return VINF_SUCCESS;
725
726 return nemR3DarwinHvSts2Rc(hrc);
727}
728
729
730DECLINLINE(int) nemR3DarwinWriteVmcs64(PVMCPUCC pVCpu, uint32_t uFieldEnc, uint64_t u64Val)
731{
732 hv_return_t hrc = hv_vmx_vcpu_write_vmcs(pVCpu->nem.s.hVCpuId, uFieldEnc, u64Val);
733 if (RT_LIKELY(hrc == HV_SUCCESS))
734 return VINF_SUCCESS;
735
736 return nemR3DarwinHvSts2Rc(hrc);
737}
738
739DECLINLINE(int) nemR3DarwinMsrRead(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Val)
740{
741 hv_return_t hrc = hv_vcpu_read_msr(pVCpu->nem.s.hVCpuId, idMsr, pu64Val);
742 if (RT_LIKELY(hrc == HV_SUCCESS))
743 return VINF_SUCCESS;
744
745 return nemR3DarwinHvSts2Rc(hrc);
746}
747
748#if 0 /*unused*/
749DECLINLINE(int) nemR3DarwinMsrWrite(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t u64Val)
750{
751 hv_return_t hrc = hv_vcpu_write_msr(pVCpu->nem.s.hVCpuId, idMsr, u64Val);
752 if (RT_LIKELY(hrc == HV_SUCCESS))
753 return VINF_SUCCESS;
754
755 return nemR3DarwinHvSts2Rc(hrc);
756}
757#endif
758
759static int nemR3DarwinCopyStateFromHv(PVMCC pVM, PVMCPUCC pVCpu, uint64_t fWhat)
760{
761#define READ_GREG(a_GReg, a_Value) \
762 do \
763 { \
764 hrc = hv_vcpu_read_register(pVCpu->nem.s.hVCpuId, (a_GReg), &(a_Value)); \
765 if (RT_LIKELY(hrc == HV_SUCCESS)) \
766 { /* likely */ } \
767 else \
768 return VERR_INTERNAL_ERROR; \
769 } while(0)
770#define READ_VMCS_FIELD(a_Field, a_Value) \
771 do \
772 { \
773 hrc = hv_vmx_vcpu_read_vmcs(pVCpu->nem.s.hVCpuId, (a_Field), &(a_Value)); \
774 if (RT_LIKELY(hrc == HV_SUCCESS)) \
775 { /* likely */ } \
776 else \
777 return VERR_INTERNAL_ERROR; \
778 } while(0)
779#define READ_VMCS16_FIELD(a_Field, a_Value) \
780 do \
781 { \
782 uint64_t u64Data; \
783 hrc = hv_vmx_vcpu_read_vmcs(pVCpu->nem.s.hVCpuId, (a_Field), &u64Data); \
784 if (RT_LIKELY(hrc == HV_SUCCESS)) \
785 { (a_Value) = (uint16_t)u64Data; } \
786 else \
787 return VERR_INTERNAL_ERROR; \
788 } while(0)
789#define READ_VMCS32_FIELD(a_Field, a_Value) \
790 do \
791 { \
792 uint64_t u64Data; \
793 hrc = hv_vmx_vcpu_read_vmcs(pVCpu->nem.s.hVCpuId, (a_Field), &u64Data); \
794 if (RT_LIKELY(hrc == HV_SUCCESS)) \
795 { (a_Value) = (uint32_t)u64Data; } \
796 else \
797 return VERR_INTERNAL_ERROR; \
798 } while(0)
799#define READ_MSR(a_Msr, a_Value) \
800 do \
801 { \
802 hrc = hv_vcpu_read_msr(pVCpu->nem.s.hVCpuId, (a_Msr), &(a_Value)); \
803 if (RT_LIKELY(hrc == HV_SUCCESS)) \
804 { /* likely */ } \
805 else \
806 AssertFailedReturn(VERR_INTERNAL_ERROR); \
807 } while(0)
808
809 STAM_PROFILE_ADV_START(&pVCpu->nem.s.StatProfGstStateImport, x);
810
811 RT_NOREF(pVM);
812 fWhat &= pVCpu->cpum.GstCtx.fExtrn;
813
814 if (fWhat & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI))
815 vmxHCImportGuestIntrState(pVCpu, &pVCpu->nem.s.VmcsInfo);
816
817 /* GPRs */
818 hv_return_t hrc;
819 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
820 {
821 if (fWhat & CPUMCTX_EXTRN_RAX)
822 READ_GREG(HV_X86_RAX, pVCpu->cpum.GstCtx.rax);
823 if (fWhat & CPUMCTX_EXTRN_RCX)
824 READ_GREG(HV_X86_RCX, pVCpu->cpum.GstCtx.rcx);
825 if (fWhat & CPUMCTX_EXTRN_RDX)
826 READ_GREG(HV_X86_RDX, pVCpu->cpum.GstCtx.rdx);
827 if (fWhat & CPUMCTX_EXTRN_RBX)
828 READ_GREG(HV_X86_RBX, pVCpu->cpum.GstCtx.rbx);
829 if (fWhat & CPUMCTX_EXTRN_RSP)
830 READ_GREG(HV_X86_RSP, pVCpu->cpum.GstCtx.rsp);
831 if (fWhat & CPUMCTX_EXTRN_RBP)
832 READ_GREG(HV_X86_RBP, pVCpu->cpum.GstCtx.rbp);
833 if (fWhat & CPUMCTX_EXTRN_RSI)
834 READ_GREG(HV_X86_RSI, pVCpu->cpum.GstCtx.rsi);
835 if (fWhat & CPUMCTX_EXTRN_RDI)
836 READ_GREG(HV_X86_RDI, pVCpu->cpum.GstCtx.rdi);
837 if (fWhat & CPUMCTX_EXTRN_R8_R15)
838 {
839 READ_GREG(HV_X86_R8, pVCpu->cpum.GstCtx.r8);
840 READ_GREG(HV_X86_R9, pVCpu->cpum.GstCtx.r9);
841 READ_GREG(HV_X86_R10, pVCpu->cpum.GstCtx.r10);
842 READ_GREG(HV_X86_R11, pVCpu->cpum.GstCtx.r11);
843 READ_GREG(HV_X86_R12, pVCpu->cpum.GstCtx.r12);
844 READ_GREG(HV_X86_R13, pVCpu->cpum.GstCtx.r13);
845 READ_GREG(HV_X86_R14, pVCpu->cpum.GstCtx.r14);
846 READ_GREG(HV_X86_R15, pVCpu->cpum.GstCtx.r15);
847 }
848 }
849
850 /* RIP & Flags */
851 if (fWhat & CPUMCTX_EXTRN_RIP)
852 READ_GREG(HV_X86_RIP, pVCpu->cpum.GstCtx.rip);
853 if (fWhat & CPUMCTX_EXTRN_RFLAGS)
854 {
855 uint64_t fRFlagsTmp = 0;
856 READ_GREG(HV_X86_RFLAGS, fRFlagsTmp);
857 pVCpu->cpum.GstCtx.rflags.u = fRFlagsTmp;
858 }
859
860 /* Segments */
861#define READ_SEG(a_SReg, a_enmName) \
862 do { \
863 READ_VMCS16_FIELD(VMX_VMCS16_GUEST_ ## a_enmName ## _SEL, (a_SReg).Sel); \
864 READ_VMCS32_FIELD(VMX_VMCS32_GUEST_ ## a_enmName ## _LIMIT, (a_SReg).u32Limit); \
865 READ_VMCS32_FIELD(VMX_VMCS32_GUEST_ ## a_enmName ## _ACCESS_RIGHTS, (a_SReg).Attr.u); \
866 READ_VMCS_FIELD(VMX_VMCS_GUEST_ ## a_enmName ## _BASE, (a_SReg).u64Base); \
867 (a_SReg).ValidSel = (a_SReg).Sel; \
868 } while (0)
869 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
870 {
871 if (fWhat & CPUMCTX_EXTRN_ES)
872 READ_SEG(pVCpu->cpum.GstCtx.es, ES);
873 if (fWhat & CPUMCTX_EXTRN_CS)
874 READ_SEG(pVCpu->cpum.GstCtx.cs, CS);
875 if (fWhat & CPUMCTX_EXTRN_SS)
876 READ_SEG(pVCpu->cpum.GstCtx.ss, SS);
877 if (fWhat & CPUMCTX_EXTRN_DS)
878 READ_SEG(pVCpu->cpum.GstCtx.ds, DS);
879 if (fWhat & CPUMCTX_EXTRN_FS)
880 READ_SEG(pVCpu->cpum.GstCtx.fs, FS);
881 if (fWhat & CPUMCTX_EXTRN_GS)
882 READ_SEG(pVCpu->cpum.GstCtx.gs, GS);
883 }
884
885 /* Descriptor tables and the task segment. */
886 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
887 {
888 if (fWhat & CPUMCTX_EXTRN_LDTR)
889 READ_SEG(pVCpu->cpum.GstCtx.ldtr, LDTR);
890
891 if (fWhat & CPUMCTX_EXTRN_TR)
892 {
893 /* AMD-V likes loading TR in the AVAIL state, whereas Intel insists on BUSY. To
894 avoid triggering sanity assertions around the code, always fix this up. */
895 READ_SEG(pVCpu->cpum.GstCtx.tr, TR);
896 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
897 {
898 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
899 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
900 break;
901 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
902 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
903 break;
904 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
905 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;
906 break;
907 }
908 }
909 if (fWhat & CPUMCTX_EXTRN_IDTR)
910 {
911 READ_VMCS32_FIELD(VMX_VMCS32_GUEST_IDTR_LIMIT, pVCpu->cpum.GstCtx.idtr.cbIdt);
912 READ_VMCS_FIELD(VMX_VMCS_GUEST_IDTR_BASE, pVCpu->cpum.GstCtx.idtr.pIdt);
913 }
914 if (fWhat & CPUMCTX_EXTRN_GDTR)
915 {
916 READ_VMCS32_FIELD(VMX_VMCS32_GUEST_GDTR_LIMIT, pVCpu->cpum.GstCtx.gdtr.cbGdt);
917 READ_VMCS_FIELD(VMX_VMCS_GUEST_GDTR_BASE, pVCpu->cpum.GstCtx.gdtr.pGdt);
918 }
919 }
920
921 /* Control registers. */
922 bool fMaybeChangedMode = false;
923 bool fUpdateCr3 = false;
924 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
925 {
926 uint64_t u64CrTmp = 0;
927
928 if (fWhat & CPUMCTX_EXTRN_CR0)
929 {
930 READ_GREG(HV_X86_CR0, u64CrTmp);
931 if (pVCpu->cpum.GstCtx.cr0 != u64CrTmp)
932 {
933 CPUMSetGuestCR0(pVCpu, u64CrTmp);
934 fMaybeChangedMode = true;
935 }
936 }
937 if (fWhat & CPUMCTX_EXTRN_CR2)
938 READ_GREG(HV_X86_CR2, pVCpu->cpum.GstCtx.cr2);
939 if (fWhat & CPUMCTX_EXTRN_CR3)
940 {
941 READ_GREG(HV_X86_CR3, u64CrTmp);
942 if (pVCpu->cpum.GstCtx.cr3 != u64CrTmp)
943 {
944 CPUMSetGuestCR3(pVCpu, u64CrTmp);
945 fUpdateCr3 = true;
946 }
947
948 /*
949 * If the guest is in PAE mode, sync back the PDPE's into the guest state.
950 * CR4.PAE, CR0.PG, EFER MSR changes are always intercepted, so they're up to date.
951 */
952 if (CPUMIsGuestInPAEModeEx(&pVCpu->cpum.GstCtx))
953 {
954 X86PDPE aPaePdpes[4];
955 READ_VMCS_FIELD(VMX_VMCS64_GUEST_PDPTE0_FULL, aPaePdpes[0].u);
956 READ_VMCS_FIELD(VMX_VMCS64_GUEST_PDPTE1_FULL, aPaePdpes[1].u);
957 READ_VMCS_FIELD(VMX_VMCS64_GUEST_PDPTE2_FULL, aPaePdpes[2].u);
958 READ_VMCS_FIELD(VMX_VMCS64_GUEST_PDPTE3_FULL, aPaePdpes[3].u);
959 if (memcmp(&aPaePdpes[0], &pVCpu->cpum.GstCtx.aPaePdpes[0], sizeof(aPaePdpes)))
960 {
961 memcpy(&pVCpu->cpum.GstCtx.aPaePdpes[0], &aPaePdpes[0], sizeof(aPaePdpes));
962 fUpdateCr3 = true;
963 }
964 }
965 }
966 if (fWhat & CPUMCTX_EXTRN_CR4)
967 {
968 READ_GREG(HV_X86_CR4, u64CrTmp);
969 u64CrTmp &= ~VMX_V_CR4_FIXED0;
970
971 if (pVCpu->cpum.GstCtx.cr4 != u64CrTmp)
972 {
973 CPUMSetGuestCR4(pVCpu, u64CrTmp);
974 fMaybeChangedMode = true;
975 }
976 }
977 }
978
979#if 0 /* Always done. */
980 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
981 {
982 uint64_t u64Cr8 = 0;
983
984 READ_GREG(HV_X86_TPR, u64Cr8);
985 APICSetTpr(pVCpu, u64Cr8 << 4);
986 }
987#endif
988
989 if (fWhat & CPUMCTX_EXTRN_XCRx)
990 READ_GREG(HV_X86_XCR0, pVCpu->cpum.GstCtx.aXcr[0]);
991
992 /* Debug registers. */
993 if (fWhat & CPUMCTX_EXTRN_DR7)
994 {
995 uint64_t u64Dr7;
996 READ_GREG(HV_X86_DR7, u64Dr7);
997 if (pVCpu->cpum.GstCtx.dr[7] != u64Dr7)
998 CPUMSetGuestDR7(pVCpu, u64Dr7);
999 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_DR7; /* Hack alert! Avoids asserting when processing CPUMCTX_EXTRN_DR0_DR3. */
1000 }
1001 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
1002 {
1003 uint64_t u64DrTmp;
1004
1005 READ_GREG(HV_X86_DR0, u64DrTmp);
1006 if (pVCpu->cpum.GstCtx.dr[0] != u64DrTmp)
1007 CPUMSetGuestDR0(pVCpu, u64DrTmp);
1008 READ_GREG(HV_X86_DR1, u64DrTmp);
1009 if (pVCpu->cpum.GstCtx.dr[1] != u64DrTmp)
1010 CPUMSetGuestDR1(pVCpu, u64DrTmp);
1011 READ_GREG(HV_X86_DR2, u64DrTmp);
1012 if (pVCpu->cpum.GstCtx.dr[2] != u64DrTmp)
1013 CPUMSetGuestDR2(pVCpu, u64DrTmp);
1014 READ_GREG(HV_X86_DR3, u64DrTmp);
1015 if (pVCpu->cpum.GstCtx.dr[3] != u64DrTmp)
1016 CPUMSetGuestDR3(pVCpu, u64DrTmp);
1017 }
1018 if (fWhat & CPUMCTX_EXTRN_DR6)
1019 {
1020 uint64_t u64Dr6;
1021 READ_GREG(HV_X86_DR6, u64Dr6);
1022 if (pVCpu->cpum.GstCtx.dr[6] != u64Dr6)
1023 CPUMSetGuestDR6(pVCpu, u64Dr6);
1024 }
1025
1026 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
1027 {
1028 hrc = hv_vcpu_read_fpstate(pVCpu->nem.s.hVCpuId, &pVCpu->cpum.GstCtx.XState, sizeof(pVCpu->cpum.GstCtx.XState));
1029 if (hrc == HV_SUCCESS)
1030 { /* likely */ }
1031 else
1032 {
1033 STAM_PROFILE_ADV_STOP(&pVCpu->nem.s.StatProfGstStateImport, x);
1034 return nemR3DarwinHvSts2Rc(hrc);
1035 }
1036 }
1037
1038 /* MSRs */
1039 if (fWhat & CPUMCTX_EXTRN_EFER)
1040 {
1041 uint64_t u64Efer;
1042
1043 READ_VMCS_FIELD(VMX_VMCS64_GUEST_EFER_FULL, u64Efer);
1044 if (u64Efer != pVCpu->cpum.GstCtx.msrEFER)
1045 {
1046 Log7(("NEM/%u: MSR EFER changed %RX64 -> %RX64\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.msrEFER, u64Efer));
1047 if ((u64Efer ^ pVCpu->cpum.GstCtx.msrEFER) & MSR_K6_EFER_NXE)
1048 PGMNotifyNxeChanged(pVCpu, RT_BOOL(u64Efer & MSR_K6_EFER_NXE));
1049 pVCpu->cpum.GstCtx.msrEFER = u64Efer;
1050 fMaybeChangedMode = true;
1051 }
1052 }
1053
1054 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1055 READ_MSR(MSR_K8_KERNEL_GS_BASE, pVCpu->cpum.GstCtx.msrKERNELGSBASE);
1056 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1057 {
1058 uint64_t u64Tmp;
1059 READ_MSR(MSR_IA32_SYSENTER_EIP, u64Tmp);
1060 pVCpu->cpum.GstCtx.SysEnter.eip = u64Tmp;
1061 READ_MSR(MSR_IA32_SYSENTER_ESP, u64Tmp);
1062 pVCpu->cpum.GstCtx.SysEnter.esp = u64Tmp;
1063 READ_MSR(MSR_IA32_SYSENTER_CS, u64Tmp);
1064 pVCpu->cpum.GstCtx.SysEnter.cs = u64Tmp;
1065 }
1066 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1067 {
1068 READ_MSR(MSR_K6_STAR, pVCpu->cpum.GstCtx.msrSTAR);
1069 READ_MSR(MSR_K8_LSTAR, pVCpu->cpum.GstCtx.msrLSTAR);
1070 READ_MSR(MSR_K8_CSTAR, pVCpu->cpum.GstCtx.msrCSTAR);
1071 READ_MSR(MSR_K8_SF_MASK, pVCpu->cpum.GstCtx.msrSFMASK);
1072 }
1073 if (fWhat & CPUMCTX_EXTRN_TSC_AUX)
1074 {
1075 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
1076 READ_MSR(MSR_K8_TSC_AUX, pCtxMsrs->msr.TscAux);
1077 }
1078 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1079 {
1080 /* Last Branch Record. */
1081 if (pVM->nem.s.fLbr)
1082 {
1083 PVMXVMCSINFOSHARED const pVmcsInfoShared = &pVCpu->nem.s.vmx.VmcsInfo;
1084 uint32_t const idFromIpMsrStart = pVM->nem.s.idLbrFromIpMsrFirst;
1085 uint32_t const idToIpMsrStart = pVM->nem.s.idLbrToIpMsrFirst;
1086 uint32_t const idInfoMsrStart = pVM->nem.s.idLbrInfoMsrFirst;
1087 uint32_t const cLbrStack = pVM->nem.s.idLbrFromIpMsrLast - pVM->nem.s.idLbrFromIpMsrFirst + 1;
1088 Assert(cLbrStack <= 32);
1089 for (uint32_t i = 0; i < cLbrStack; i++)
1090 {
1091 READ_MSR(idFromIpMsrStart + i, pVmcsInfoShared->au64LbrFromIpMsr[i]);
1092
1093 /* Some CPUs don't have a Branch-To-IP MSR (P4 and related Xeons). */
1094 if (idToIpMsrStart != 0)
1095 READ_MSR(idToIpMsrStart + i, pVmcsInfoShared->au64LbrToIpMsr[i]);
1096 if (idInfoMsrStart != 0)
1097 READ_MSR(idInfoMsrStart + i, pVmcsInfoShared->au64LbrInfoMsr[i]);
1098 }
1099
1100 READ_MSR(pVM->nem.s.idLbrTosMsr, pVmcsInfoShared->u64LbrTosMsr);
1101
1102 if (pVM->nem.s.idLerFromIpMsr)
1103 READ_MSR(pVM->nem.s.idLerFromIpMsr, pVmcsInfoShared->u64LerFromIpMsr);
1104 if (pVM->nem.s.idLerToIpMsr)
1105 READ_MSR(pVM->nem.s.idLerToIpMsr, pVmcsInfoShared->u64LerToIpMsr);
1106 }
1107 }
1108
1109 /* Almost done, just update extrn flags and maybe change PGM mode. */
1110 pVCpu->cpum.GstCtx.fExtrn &= ~fWhat;
1111 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
1112 pVCpu->cpum.GstCtx.fExtrn = 0;
1113
1114#ifdef LOG_ENABLED
1115 nemR3DarwinLogState(pVM, pVCpu);
1116#endif
1117
1118 /* Typical. */
1119 if (!fMaybeChangedMode && !fUpdateCr3)
1120 {
1121 STAM_PROFILE_ADV_STOP(&pVCpu->nem.s.StatProfGstStateImport, x);
1122 return VINF_SUCCESS;
1123 }
1124
1125 /*
1126 * Slow.
1127 */
1128 if (fMaybeChangedMode)
1129 {
1130 int rc = PGMChangeMode(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr4, pVCpu->cpum.GstCtx.msrEFER,
1131 false /* fForce */);
1132 AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_1);
1133 }
1134
1135 if (fUpdateCr3)
1136 {
1137 int rc = PGMUpdateCR3(pVCpu, pVCpu->cpum.GstCtx.cr3);
1138 if (rc == VINF_SUCCESS)
1139 { /* likely */ }
1140 else
1141 AssertMsgFailedReturn(("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_NEM_IPE_2);
1142 }
1143
1144 STAM_PROFILE_ADV_STOP(&pVCpu->nem.s.StatProfGstStateImport, x);
1145
1146 return VINF_SUCCESS;
1147#undef READ_GREG
1148#undef READ_VMCS_FIELD
1149#undef READ_VMCS32_FIELD
1150#undef READ_SEG
1151#undef READ_MSR
1152}
1153
1154
1155/**
1156 * State to pass between vmxHCExitEptViolation
1157 * and nemR3DarwinHandleMemoryAccessPageCheckerCallback.
1158 */
1159typedef struct NEMHCDARWINHMACPCCSTATE
1160{
1161 /** Input: Write access. */
1162 bool fWriteAccess;
1163 /** Output: Set if we did something. */
1164 bool fDidSomething;
1165 /** Output: Set if we should resume. */
1166 bool fCanResume;
1167} NEMHCDARWINHMACPCCSTATE;
1168
1169/**
1170 * @callback_method_impl{FNPGMPHYSNEMCHECKPAGE,
1171 * Worker for vmxHCExitEptViolation; pvUser points to a
1172 * NEMHCDARWINHMACPCCSTATE structure. }
1173 */
1174static DECLCALLBACK(int)
1175nemR3DarwinHandleMemoryAccessPageCheckerCallback(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
1176{
1177 RT_NOREF(pVCpu);
1178
1179 NEMHCDARWINHMACPCCSTATE *pState = (NEMHCDARWINHMACPCCSTATE *)pvUser;
1180 pState->fDidSomething = false;
1181 pState->fCanResume = false;
1182
1183 uint8_t u2State = pInfo->u2NemState;
1184
1185 /*
1186 * Consolidate current page state with actual page protection and access type.
1187 * We don't really consider downgrades here, as they shouldn't happen.
1188 */
1189 switch (u2State)
1190 {
1191 case NEM_DARWIN_PAGE_STATE_UNMAPPED:
1192 case NEM_DARWIN_PAGE_STATE_NOT_SET:
1193 {
1194 if (pInfo->fNemProt == NEM_PAGE_PROT_NONE)
1195 {
1196 Log4(("nemR3DarwinHandleMemoryAccessPageCheckerCallback: %RGp - #1\n", GCPhys));
1197 return VINF_SUCCESS;
1198 }
1199
1200 /* Don't bother remapping it if it's a write request to a non-writable page. */
1201 if ( pState->fWriteAccess
1202 && !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE))
1203 {
1204 Log4(("nemR3DarwinHandleMemoryAccessPageCheckerCallback: %RGp - #1w\n", GCPhys));
1205 return VINF_SUCCESS;
1206 }
1207
1208 int rc = VINF_SUCCESS;
1209 if (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1210 {
1211 void *pvPage;
1212 rc = nemR3NativeGCPhys2R3PtrWriteable(pVM, GCPhys, &pvPage);
1213 if (RT_SUCCESS(rc))
1214 rc = nemR3DarwinMap(pVM, GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, pvPage, X86_PAGE_SIZE, pInfo->fNemProt, &u2State);
1215 }
1216 else if (pInfo->fNemProt & NEM_PAGE_PROT_READ)
1217 {
1218 const void *pvPage;
1219 rc = nemR3NativeGCPhys2R3PtrReadOnly(pVM, GCPhys, &pvPage);
1220 if (RT_SUCCESS(rc))
1221 rc = nemR3DarwinMap(pVM, GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, pvPage, X86_PAGE_SIZE, pInfo->fNemProt, &u2State);
1222 }
1223 else /* Only EXECUTE doesn't work. */
1224 AssertReleaseFailed();
1225
1226 pInfo->u2NemState = u2State;
1227 Log4(("nemR3DarwinHandleMemoryAccessPageCheckerCallback: %RGp - synced => %s + %Rrc\n",
1228 GCPhys, g_apszPageStates[u2State], rc));
1229 pState->fDidSomething = true;
1230 pState->fCanResume = true;
1231 return rc;
1232 }
1233 case NEM_DARWIN_PAGE_STATE_READABLE:
1234 if ( !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1235 && (pInfo->fNemProt & (NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE)))
1236 {
1237 pState->fCanResume = true;
1238 Log4(("nemR3DarwinHandleMemoryAccessPageCheckerCallback: %RGp - #2\n", GCPhys));
1239 return VINF_SUCCESS;
1240 }
1241 break;
1242
1243 case NEM_DARWIN_PAGE_STATE_WRITABLE:
1244 if (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1245 {
1246 pState->fCanResume = true;
1247 if (pInfo->u2OldNemState == NEM_DARWIN_PAGE_STATE_WRITABLE)
1248 Log4(("nemR3DarwinHandleMemoryAccessPageCheckerCallback: Spurious EPT fault\n", GCPhys));
1249 return VINF_SUCCESS;
1250 }
1251 break;
1252
1253 default:
1254 AssertLogRelMsgFailedReturn(("u2State=%#x\n", u2State), VERR_NEM_IPE_4);
1255 }
1256
1257 /* Unmap and restart the instruction. */
1258 int rc = nemR3DarwinUnmap(pVM, GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, X86_PAGE_SIZE, &u2State);
1259 if (RT_SUCCESS(rc))
1260 {
1261 pInfo->u2NemState = u2State;
1262 pState->fDidSomething = true;
1263 pState->fCanResume = true;
1264 Log5(("NEM GPA unmapped/exit: %RGp (was %s)\n", GCPhys, g_apszPageStates[u2State]));
1265 return VINF_SUCCESS;
1266 }
1267
1268 LogRel(("nemR3DarwinHandleMemoryAccessPageCheckerCallback/unmap: GCPhys=%RGp %s rc=%Rrc\n",
1269 GCPhys, g_apszPageStates[u2State], rc));
1270 return VERR_NEM_UNMAP_PAGES_FAILED;
1271}
1272
1273
1274DECL_FORCE_INLINE(bool) nemR3DarwinIsUnrestrictedGuest(PCVMCC pVM)
1275{
1276 RT_NOREF(pVM);
1277 return true;
1278}
1279
1280
1281DECL_FORCE_INLINE(bool) nemR3DarwinIsNestedPaging(PCVMCC pVM)
1282{
1283 RT_NOREF(pVM);
1284 return true;
1285}
1286
1287
1288DECL_FORCE_INLINE(bool) nemR3DarwinIsPreemptTimerUsed(PCVMCC pVM)
1289{
1290 RT_NOREF(pVM);
1291 return false;
1292}
1293
1294
1295#if 0 /* unused */
1296DECL_FORCE_INLINE(bool) nemR3DarwinIsVmxLbr(PCVMCC pVM)
1297{
1298 RT_NOREF(pVM);
1299 return false;
1300}
1301#endif
1302
1303
1304/*
1305 * Instantiate the code we share with ring-0.
1306 */
1307#define IN_NEM_DARWIN
1308//#define HMVMX_ALWAYS_TRAP_ALL_XCPTS
1309//#define HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE
1310//#define HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
1311#define VCPU_2_VMXSTATE(a_pVCpu) (a_pVCpu)->nem.s
1312#define VCPU_2_VMXSTATS(a_pVCpu) (*(a_pVCpu)->nem.s.pVmxStats)
1313
1314#define VM_IS_VMX_UNRESTRICTED_GUEST(a_pVM) nemR3DarwinIsUnrestrictedGuest((a_pVM))
1315#define VM_IS_VMX_NESTED_PAGING(a_pVM) nemR3DarwinIsNestedPaging((a_pVM))
1316#define VM_IS_VMX_PREEMPT_TIMER_USED(a_pVM) nemR3DarwinIsPreemptTimerUsed((a_pVM))
1317#define VM_IS_VMX_LBR(a_pVM) nemR3DarwinIsVmxLbr((a_pVM))
1318
1319#define VMX_VMCS_WRITE_16(a_pVCpu, a_FieldEnc, a_Val) nemR3DarwinWriteVmcs16((a_pVCpu), (a_FieldEnc), (a_Val))
1320#define VMX_VMCS_WRITE_32(a_pVCpu, a_FieldEnc, a_Val) nemR3DarwinWriteVmcs32((a_pVCpu), (a_FieldEnc), (a_Val))
1321#define VMX_VMCS_WRITE_64(a_pVCpu, a_FieldEnc, a_Val) nemR3DarwinWriteVmcs64((a_pVCpu), (a_FieldEnc), (a_Val))
1322#define VMX_VMCS_WRITE_NW(a_pVCpu, a_FieldEnc, a_Val) nemR3DarwinWriteVmcs64((a_pVCpu), (a_FieldEnc), (a_Val))
1323
1324#define VMX_VMCS_READ_16(a_pVCpu, a_FieldEnc, a_pVal) nemR3DarwinReadVmcs16((a_pVCpu), (a_FieldEnc), (a_pVal))
1325#define VMX_VMCS_READ_32(a_pVCpu, a_FieldEnc, a_pVal) nemR3DarwinReadVmcs32((a_pVCpu), (a_FieldEnc), (a_pVal))
1326#define VMX_VMCS_READ_64(a_pVCpu, a_FieldEnc, a_pVal) nemR3DarwinReadVmcs64((a_pVCpu), (a_FieldEnc), (a_pVal))
1327#define VMX_VMCS_READ_NW(a_pVCpu, a_FieldEnc, a_pVal) nemR3DarwinReadVmcs64((a_pVCpu), (a_FieldEnc), (a_pVal))
1328
1329#include "../VMMAll/VMXAllTemplate.cpp.h"
1330
1331#undef VMX_VMCS_WRITE_16
1332#undef VMX_VMCS_WRITE_32
1333#undef VMX_VMCS_WRITE_64
1334#undef VMX_VMCS_WRITE_NW
1335
1336#undef VMX_VMCS_READ_16
1337#undef VMX_VMCS_READ_32
1338#undef VMX_VMCS_READ_64
1339#undef VMX_VMCS_READ_NW
1340
1341#undef VM_IS_VMX_PREEMPT_TIMER_USED
1342#undef VM_IS_VMX_NESTED_PAGING
1343#undef VM_IS_VMX_UNRESTRICTED_GUEST
1344#undef VCPU_2_VMXSTATS
1345#undef VCPU_2_VMXSTATE
1346
1347
1348/**
1349 * Exports the guest GP registers to HV for execution.
1350 *
1351 * @returns VBox status code.
1352 * @param pVCpu The cross context virtual CPU structure of the
1353 * calling EMT.
1354 */
1355static int nemR3DarwinExportGuestGprs(PVMCPUCC pVCpu)
1356{
1357#define WRITE_GREG(a_GReg, a_Value) \
1358 do \
1359 { \
1360 hv_return_t hrc = hv_vcpu_write_register(pVCpu->nem.s.hVCpuId, (a_GReg), (a_Value)); \
1361 if (RT_LIKELY(hrc == HV_SUCCESS)) \
1362 { /* likely */ } \
1363 else \
1364 return VERR_INTERNAL_ERROR; \
1365 } while(0)
1366
1367 uint64_t fCtxChanged = ASMAtomicUoReadU64(&pVCpu->nem.s.fCtxChanged);
1368 if (fCtxChanged & HM_CHANGED_GUEST_GPRS_MASK)
1369 {
1370 if (fCtxChanged & HM_CHANGED_GUEST_RAX)
1371 WRITE_GREG(HV_X86_RAX, pVCpu->cpum.GstCtx.rax);
1372 if (fCtxChanged & HM_CHANGED_GUEST_RCX)
1373 WRITE_GREG(HV_X86_RCX, pVCpu->cpum.GstCtx.rcx);
1374 if (fCtxChanged & HM_CHANGED_GUEST_RDX)
1375 WRITE_GREG(HV_X86_RDX, pVCpu->cpum.GstCtx.rdx);
1376 if (fCtxChanged & HM_CHANGED_GUEST_RBX)
1377 WRITE_GREG(HV_X86_RBX, pVCpu->cpum.GstCtx.rbx);
1378 if (fCtxChanged & HM_CHANGED_GUEST_RSP)
1379 WRITE_GREG(HV_X86_RSP, pVCpu->cpum.GstCtx.rsp);
1380 if (fCtxChanged & HM_CHANGED_GUEST_RBP)
1381 WRITE_GREG(HV_X86_RBP, pVCpu->cpum.GstCtx.rbp);
1382 if (fCtxChanged & HM_CHANGED_GUEST_RSI)
1383 WRITE_GREG(HV_X86_RSI, pVCpu->cpum.GstCtx.rsi);
1384 if (fCtxChanged & HM_CHANGED_GUEST_RDI)
1385 WRITE_GREG(HV_X86_RDI, pVCpu->cpum.GstCtx.rdi);
1386 if (fCtxChanged & HM_CHANGED_GUEST_R8_R15)
1387 {
1388 WRITE_GREG(HV_X86_R8, pVCpu->cpum.GstCtx.r8);
1389 WRITE_GREG(HV_X86_R9, pVCpu->cpum.GstCtx.r9);
1390 WRITE_GREG(HV_X86_R10, pVCpu->cpum.GstCtx.r10);
1391 WRITE_GREG(HV_X86_R11, pVCpu->cpum.GstCtx.r11);
1392 WRITE_GREG(HV_X86_R12, pVCpu->cpum.GstCtx.r12);
1393 WRITE_GREG(HV_X86_R13, pVCpu->cpum.GstCtx.r13);
1394 WRITE_GREG(HV_X86_R14, pVCpu->cpum.GstCtx.r14);
1395 WRITE_GREG(HV_X86_R15, pVCpu->cpum.GstCtx.r15);
1396 }
1397
1398 ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_GPRS_MASK);
1399 }
1400
1401 if (fCtxChanged & HM_CHANGED_GUEST_CR2)
1402 {
1403 WRITE_GREG(HV_X86_CR2, pVCpu->cpum.GstCtx.cr2);
1404 ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_CR2);
1405 }
1406
1407 return VINF_SUCCESS;
1408#undef WRITE_GREG
1409}
1410
1411
1412/**
1413 * Exports the guest debug registers into the guest-state applying any hypervisor
1414 * debug related states (hardware breakpoints from the debugger, etc.).
1415 *
1416 * This also sets up whether \#DB and MOV DRx accesses cause VM-exits.
1417 *
1418 * @returns VBox status code.
1419 * @param pVCpu The cross context virtual CPU structure.
1420 * @param pVmxTransient The VMX-transient structure.
1421 */
1422static int nemR3DarwinExportDebugState(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1423{
1424 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;
1425
1426#ifdef VBOX_STRICT
1427 /* Validate. Intel spec. 26.3.1.1 "Checks on Guest Controls Registers, Debug Registers, MSRs" */
1428 if (pVmcsInfo->u32EntryCtls & VMX_ENTRY_CTLS_LOAD_DEBUG)
1429 {
1430 /* Validate. Intel spec. 17.2 "Debug Registers", recompiler paranoia checks. */
1431 Assert((pVCpu->cpum.GstCtx.dr[7] & (X86_DR7_MBZ_MASK | X86_DR7_RAZ_MASK)) == 0);
1432 Assert((pVCpu->cpum.GstCtx.dr[7] & X86_DR7_RA1_MASK) == X86_DR7_RA1_MASK);
1433 }
1434#endif
1435
1436 bool fSteppingDB = false;
1437 bool fInterceptMovDRx = false;
1438 uint32_t uProcCtls = pVmcsInfo->u32ProcCtls;
1439 if (pVCpu->nem.s.fSingleInstruction)
1440 {
1441 /* If the CPU supports the monitor trap flag, use it for single stepping in DBGF and avoid intercepting #DB. */
1442 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_MONITOR_TRAP_FLAG)
1443 {
1444 uProcCtls |= VMX_PROC_CTLS_MONITOR_TRAP_FLAG;
1445 Assert(fSteppingDB == false);
1446 }
1447 else
1448 {
1449 pVCpu->cpum.GstCtx.eflags.u |= X86_EFL_TF;
1450 pVCpu->nem.s.fCtxChanged |= HM_CHANGED_GUEST_RFLAGS;
1451 pVCpu->nem.s.fClearTrapFlag = true;
1452 fSteppingDB = true;
1453 }
1454 }
1455
1456 uint64_t u64GuestDr7;
1457 if ( fSteppingDB
1458 || (CPUMGetHyperDR7(pVCpu) & X86_DR7_ENABLED_MASK))
1459 {
1460 /*
1461 * Use the combined guest and host DRx values found in the hypervisor register set
1462 * because the hypervisor debugger has breakpoints active or someone is single stepping
1463 * on the host side without a monitor trap flag.
1464 *
1465 * Note! DBGF expects a clean DR6 state before executing guest code.
1466 */
1467 if (!CPUMIsHyperDebugStateActive(pVCpu))
1468 {
1469 /*
1470 * Make sure the hypervisor values are up to date.
1471 */
1472 CPUMRecalcHyperDRx(pVCpu, UINT8_MAX /* no loading, please */);
1473
1474 CPUMR3NemActivateHyperDebugState(pVCpu);
1475
1476 Assert(CPUMIsHyperDebugStateActive(pVCpu));
1477 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
1478 }
1479
1480 /* Update DR7 with the hypervisor value (other DRx registers are handled by CPUM one way or another). */
1481 u64GuestDr7 = CPUMGetHyperDR7(pVCpu);
1482 pVCpu->nem.s.fUsingHyperDR7 = true;
1483 fInterceptMovDRx = true;
1484 }
1485 else
1486 {
1487 /*
1488 * If the guest has enabled debug registers, we need to load them prior to
1489 * executing guest code so they'll trigger at the right time.
1490 */
1491 HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
1492 if (pVCpu->cpum.GstCtx.dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
1493 {
1494 if (!CPUMIsGuestDebugStateActive(pVCpu))
1495 {
1496 CPUMR3NemActivateGuestDebugState(pVCpu);
1497
1498 Assert(CPUMIsGuestDebugStateActive(pVCpu));
1499 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
1500 }
1501 Assert(!fInterceptMovDRx);
1502 }
1503 else if (!CPUMIsGuestDebugStateActive(pVCpu))
1504 {
1505 /*
1506 * If no debugging is enabled, we'll lazy load DR0-3. Unlike on AMD-V, we
1507 * must intercept #DB in order to maintain a correct DR6 guest value, and
1508 * because we need to intercept it to prevent nested #DBs from hanging the
1509 * CPU, we end up always having to intercept it. See hmR0VmxSetupVmcsXcptBitmap().
1510 */
1511 fInterceptMovDRx = true;
1512 }
1513
1514 /* Update DR7 with the actual guest value. */
1515 u64GuestDr7 = pVCpu->cpum.GstCtx.dr[7];
1516 pVCpu->nem.s.fUsingHyperDR7 = false;
1517 }
1518
1519 if (fInterceptMovDRx)
1520 uProcCtls |= VMX_PROC_CTLS_MOV_DR_EXIT;
1521 else
1522 uProcCtls &= ~VMX_PROC_CTLS_MOV_DR_EXIT;
1523
1524 /*
1525 * Update the processor-based VM-execution controls with the MOV-DRx intercepts and the
1526 * monitor-trap flag and update our cache.
1527 */
1528 if (uProcCtls != pVmcsInfo->u32ProcCtls)
1529 {
1530 int rc = nemR3DarwinWriteVmcs32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls);
1531 AssertRC(rc);
1532 pVmcsInfo->u32ProcCtls = uProcCtls;
1533 }
1534
1535 /*
1536 * If we have forced EFLAGS.TF to be set because we're single-stepping in the hypervisor debugger,
1537     * we need to clear any interrupt inhibition as it otherwise causes a VM-entry failure.
1538 *
1539 * See Intel spec. 26.3.1.5 "Checks on Guest Non-Register State".
1540 */
1541 if (fSteppingDB)
1542 {
1543 Assert(pVCpu->nem.s.fSingleInstruction);
1544 Assert(pVCpu->cpum.GstCtx.eflags.Bits.u1TF);
1545
1546 uint32_t fIntrState = 0;
1547 int rc = nemR3DarwinReadVmcs32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, &fIntrState);
1548 AssertRC(rc);
1549
1550 if (fIntrState & (VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS))
1551 {
1552 fIntrState &= ~(VMX_VMCS_GUEST_INT_STATE_BLOCK_STI | VMX_VMCS_GUEST_INT_STATE_BLOCK_MOVSS);
1553 rc = nemR3DarwinWriteVmcs32(pVCpu, VMX_VMCS32_GUEST_INT_STATE, fIntrState);
1554 AssertRC(rc);
1555 }
1556 }
1557
1558 /*
1559 * Store status of the shared guest/host debug state at the time of VM-entry.
1560 */
1561 pVmxTransient->fWasGuestDebugStateActive = CPUMIsGuestDebugStateActive(pVCpu);
1562 pVmxTransient->fWasHyperDebugStateActive = CPUMIsHyperDebugStateActive(pVCpu);
1563
1564 return VINF_SUCCESS;
1565}
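/*
 * Annotation (summary of the decision logic in nemR3DarwinExportDebugState above):
 * if the hypervisor debugger owns DR7 or we are single stepping without the monitor
 * trap flag, the hyper DRx set is activated and MOV DRx exits are enabled; if the
 * guest has armed DR7 itself, the guest DRx set is activated without MOV DRx exits;
 * otherwise DR0-3 stay lazily loaded and MOV DRx exits remain enabled so the first
 * guest access can trigger the lazy load.
 */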
1566
1567
1568/**
1569 * Converts the given CPUM externalized bitmask to the appropriate HM changed bitmask.
1570 *
1571 * @returns Bitmask of HM changed flags.
1572 * @param fCpumExtrn The CPUM extern bitmask.
1573 */
1574static uint64_t nemR3DarwinCpumExtrnToHmChanged(uint64_t fCpumExtrn)
1575{
1576 uint64_t fHmChanged = 0;
1577
1578    /* Invert to get a mask of things which are kept in CPUM. */
1579 uint64_t fCpumIntern = ~fCpumExtrn;
1580
1581 if (fCpumIntern & CPUMCTX_EXTRN_GPRS_MASK)
1582 {
1583 if (fCpumIntern & CPUMCTX_EXTRN_RAX)
1584 fHmChanged |= HM_CHANGED_GUEST_RAX;
1585 if (fCpumIntern & CPUMCTX_EXTRN_RCX)
1586 fHmChanged |= HM_CHANGED_GUEST_RCX;
1587 if (fCpumIntern & CPUMCTX_EXTRN_RDX)
1588 fHmChanged |= HM_CHANGED_GUEST_RDX;
1589 if (fCpumIntern & CPUMCTX_EXTRN_RBX)
1590 fHmChanged |= HM_CHANGED_GUEST_RBX;
1591 if (fCpumIntern & CPUMCTX_EXTRN_RSP)
1592 fHmChanged |= HM_CHANGED_GUEST_RSP;
1593 if (fCpumIntern & CPUMCTX_EXTRN_RBP)
1594 fHmChanged |= HM_CHANGED_GUEST_RBP;
1595 if (fCpumIntern & CPUMCTX_EXTRN_RSI)
1596 fHmChanged |= HM_CHANGED_GUEST_RSI;
1597 if (fCpumIntern & CPUMCTX_EXTRN_RDI)
1598 fHmChanged |= HM_CHANGED_GUEST_RDI;
1599 if (fCpumIntern & CPUMCTX_EXTRN_R8_R15)
1600 fHmChanged |= HM_CHANGED_GUEST_R8_R15;
1601 }
1602
1603 /* RIP & Flags */
1604 if (fCpumIntern & CPUMCTX_EXTRN_RIP)
1605 fHmChanged |= HM_CHANGED_GUEST_RIP;
1606 if (fCpumIntern & CPUMCTX_EXTRN_RFLAGS)
1607 fHmChanged |= HM_CHANGED_GUEST_RFLAGS;
1608
1609 /* Segments */
1610 if (fCpumIntern & CPUMCTX_EXTRN_SREG_MASK)
1611 {
1612 if (fCpumIntern & CPUMCTX_EXTRN_ES)
1613 fHmChanged |= HM_CHANGED_GUEST_ES;
1614 if (fCpumIntern & CPUMCTX_EXTRN_CS)
1615 fHmChanged |= HM_CHANGED_GUEST_CS;
1616 if (fCpumIntern & CPUMCTX_EXTRN_SS)
1617 fHmChanged |= HM_CHANGED_GUEST_SS;
1618 if (fCpumIntern & CPUMCTX_EXTRN_DS)
1619 fHmChanged |= HM_CHANGED_GUEST_DS;
1620 if (fCpumIntern & CPUMCTX_EXTRN_FS)
1621 fHmChanged |= HM_CHANGED_GUEST_FS;
1622 if (fCpumIntern & CPUMCTX_EXTRN_GS)
1623 fHmChanged |= HM_CHANGED_GUEST_GS;
1624 }
1625
1626 /* Descriptor tables & task segment. */
1627 if (fCpumIntern & CPUMCTX_EXTRN_TABLE_MASK)
1628 {
1629 if (fCpumIntern & CPUMCTX_EXTRN_LDTR)
1630 fHmChanged |= HM_CHANGED_GUEST_LDTR;
1631 if (fCpumIntern & CPUMCTX_EXTRN_TR)
1632 fHmChanged |= HM_CHANGED_GUEST_TR;
1633 if (fCpumIntern & CPUMCTX_EXTRN_IDTR)
1634 fHmChanged |= HM_CHANGED_GUEST_IDTR;
1635 if (fCpumIntern & CPUMCTX_EXTRN_GDTR)
1636 fHmChanged |= HM_CHANGED_GUEST_GDTR;
1637 }
1638
1639 /* Control registers. */
1640 if (fCpumIntern & CPUMCTX_EXTRN_CR_MASK)
1641 {
1642 if (fCpumIntern & CPUMCTX_EXTRN_CR0)
1643 fHmChanged |= HM_CHANGED_GUEST_CR0;
1644 if (fCpumIntern & CPUMCTX_EXTRN_CR2)
1645 fHmChanged |= HM_CHANGED_GUEST_CR2;
1646 if (fCpumIntern & CPUMCTX_EXTRN_CR3)
1647 fHmChanged |= HM_CHANGED_GUEST_CR3;
1648 if (fCpumIntern & CPUMCTX_EXTRN_CR4)
1649 fHmChanged |= HM_CHANGED_GUEST_CR4;
1650 }
1651 if (fCpumIntern & CPUMCTX_EXTRN_APIC_TPR)
1652 fHmChanged |= HM_CHANGED_GUEST_APIC_TPR;
1653
1654 /* Debug registers. */
1655 if (fCpumIntern & CPUMCTX_EXTRN_DR0_DR3)
1656 fHmChanged |= HM_CHANGED_GUEST_DR0_DR3;
1657 if (fCpumIntern & CPUMCTX_EXTRN_DR6)
1658 fHmChanged |= HM_CHANGED_GUEST_DR6;
1659 if (fCpumIntern & CPUMCTX_EXTRN_DR7)
1660 fHmChanged |= HM_CHANGED_GUEST_DR7;
1661
1662 /* Floating point state. */
1663 if (fCpumIntern & CPUMCTX_EXTRN_X87)
1664 fHmChanged |= HM_CHANGED_GUEST_X87;
1665 if (fCpumIntern & CPUMCTX_EXTRN_SSE_AVX)
1666 fHmChanged |= HM_CHANGED_GUEST_SSE_AVX;
1667 if (fCpumIntern & CPUMCTX_EXTRN_OTHER_XSAVE)
1668 fHmChanged |= HM_CHANGED_GUEST_OTHER_XSAVE;
1669 if (fCpumIntern & CPUMCTX_EXTRN_XCRx)
1670 fHmChanged |= HM_CHANGED_GUEST_XCRx;
1671
1672 /* MSRs */
1673 if (fCpumIntern & CPUMCTX_EXTRN_EFER)
1674 fHmChanged |= HM_CHANGED_GUEST_EFER_MSR;
1675 if (fCpumIntern & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1676 fHmChanged |= HM_CHANGED_GUEST_KERNEL_GS_BASE;
1677 if (fCpumIntern & CPUMCTX_EXTRN_SYSENTER_MSRS)
1678 fHmChanged |= HM_CHANGED_GUEST_SYSENTER_MSR_MASK;
1679 if (fCpumIntern & CPUMCTX_EXTRN_SYSCALL_MSRS)
1680 fHmChanged |= HM_CHANGED_GUEST_SYSCALL_MSRS;
1681 if (fCpumIntern & CPUMCTX_EXTRN_TSC_AUX)
1682 fHmChanged |= HM_CHANGED_GUEST_TSC_AUX;
1683 if (fCpumIntern & CPUMCTX_EXTRN_OTHER_MSRS)
1684 fHmChanged |= HM_CHANGED_GUEST_OTHER_MSRS;
1685
1686 return fHmChanged;
1687}
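/*
 * Illustrative use (this is how the export path below consumes the helper): if
 * CPUMCTX_EXTRN_RIP and CPUMCTX_EXTRN_CR0 are clear in fExtrn (i.e. those values
 * currently live in CPUM), the returned mask includes HM_CHANGED_GUEST_RIP and
 * HM_CHANGED_GUEST_CR0, and it is OR'ed into the per-vCPU change mask:
 *
 *     pVCpu->nem.s.fCtxChanged |= nemR3DarwinCpumExtrnToHmChanged(pVCpu->cpum.GstCtx.fExtrn);
 */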
1688
1689
1690/**
1691 * Exports the guest state to HV for execution.
1692 *
1693 * @returns VBox status code.
1694 * @param pVM The cross context VM structure.
1695 * @param pVCpu The cross context virtual CPU structure of the
1696 * calling EMT.
1697 * @param pVmxTransient The transient VMX structure.
1698 */
1699static int nemR3DarwinExportGuestState(PVMCC pVM, PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
1700{
1701#define WRITE_GREG(a_GReg, a_Value) \
1702 do \
1703 { \
1704 hv_return_t hrc = hv_vcpu_write_register(pVCpu->nem.s.hVCpuId, (a_GReg), (a_Value)); \
1705 if (RT_LIKELY(hrc == HV_SUCCESS)) \
1706 { /* likely */ } \
1707 else \
1708 return VERR_INTERNAL_ERROR; \
1709 } while(0)
1710#define WRITE_VMCS_FIELD(a_Field, a_Value) \
1711 do \
1712 { \
1713 hv_return_t hrc = hv_vmx_vcpu_write_vmcs(pVCpu->nem.s.hVCpuId, (a_Field), (a_Value)); \
1714 if (RT_LIKELY(hrc == HV_SUCCESS)) \
1715 { /* likely */ } \
1716 else \
1717 return VERR_INTERNAL_ERROR; \
1718 } while(0)
1719#define WRITE_MSR(a_Msr, a_Value) \
1720 do \
1721 { \
1722 hv_return_t hrc = hv_vcpu_write_msr(pVCpu->nem.s.hVCpuId, (a_Msr), (a_Value)); \
1723 if (RT_LIKELY(hrc == HV_SUCCESS)) \
1724 { /* likely */ } \
1725 else \
1726 AssertFailedReturn(VERR_INTERNAL_ERROR); \
1727 } while(0)
1728
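    /*
     * Annotation: the three helper macros above funnel every Hypervisor.framework
     * write through the same error policy - any hrc != HV_SUCCESS makes this function
     * return VERR_INTERNAL_ERROR (WRITE_MSR additionally asserts).  For example,
     * WRITE_VMCS_FIELD(VMX_VMCS_GUEST_RIP, x) expands to a checked
     * hv_vmx_vcpu_write_vmcs() call on the current vCPU handle.
     */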
1729 RT_NOREF(pVM);
1730
1731#ifdef LOG_ENABLED
1732 nemR3DarwinLogState(pVM, pVCpu);
1733#endif
1734
1735 STAM_PROFILE_ADV_START(&pVCpu->nem.s.StatProfGstStateExport, x);
1736
1737 uint64_t const fWhat = ~pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL;
1738 if (!fWhat)
1739 return VINF_SUCCESS;
1740
1741 pVCpu->nem.s.fCtxChanged |= nemR3DarwinCpumExtrnToHmChanged(pVCpu->cpum.GstCtx.fExtrn);
1742
1743 int rc = vmxHCExportGuestEntryExitCtls(pVCpu, pVmxTransient);
1744 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
1745
1746 rc = nemR3DarwinExportGuestGprs(pVCpu);
1747 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
1748
1749 rc = vmxHCExportGuestCR0(pVCpu, pVmxTransient);
1750 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
1751
1752 VBOXSTRICTRC rcStrict = vmxHCExportGuestCR3AndCR4(pVCpu, pVmxTransient);
1753 if (rcStrict == VINF_SUCCESS)
1754 { /* likely */ }
1755 else
1756 {
1757 Assert(rcStrict == VINF_EM_RESCHEDULE_REM || RT_FAILURE_NP(rcStrict));
1758 return VBOXSTRICTRC_VAL(rcStrict);
1759 }
1760
1761 rc = nemR3DarwinExportDebugState(pVCpu, pVmxTransient);
1762 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
1763
1764 vmxHCExportGuestXcptIntercepts(pVCpu, pVmxTransient);
1765 vmxHCExportGuestRip(pVCpu);
1766 //vmxHCExportGuestRsp(pVCpu);
1767 vmxHCExportGuestRflags(pVCpu, pVmxTransient);
1768
1769 rc = vmxHCExportGuestSegRegsXdtr(pVCpu, pVmxTransient);
1770 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
1771
1772 if (fWhat & CPUMCTX_EXTRN_XCRx)
1773 {
1774 WRITE_GREG(HV_X86_XCR0, pVCpu->cpum.GstCtx.aXcr[0]);
1775 ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_XCRx);
1776 }
1777
1778 if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
1779 {
1780 Assert(pVCpu->nem.s.fCtxChanged & HM_CHANGED_GUEST_APIC_TPR);
1781 vmxHCExportGuestApicTpr(pVCpu, pVmxTransient);
1782
1783 rc = APICGetTpr(pVCpu, &pVmxTransient->u8GuestTpr, NULL /*pfPending*/, NULL /*pu8PendingIntr*/);
1784 AssertRC(rc);
1785
1786 WRITE_GREG(HV_X86_TPR, pVmxTransient->u8GuestTpr);
1787 ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR);
1788 }
1789
1790 /* Debug registers. */
1791 if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
1792 {
1793 WRITE_GREG(HV_X86_DR0, CPUMGetHyperDR0(pVCpu));
1794 WRITE_GREG(HV_X86_DR1, CPUMGetHyperDR1(pVCpu));
1795 WRITE_GREG(HV_X86_DR2, CPUMGetHyperDR2(pVCpu));
1796 WRITE_GREG(HV_X86_DR3, CPUMGetHyperDR3(pVCpu));
1797 ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_DR0_DR3);
1798 }
1799 if (fWhat & CPUMCTX_EXTRN_DR6)
1800 {
1801 WRITE_GREG(HV_X86_DR6, CPUMGetHyperDR6(pVCpu));
1802 ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_DR6);
1803 }
1804 if (fWhat & CPUMCTX_EXTRN_DR7)
1805 {
1806 WRITE_GREG(HV_X86_DR7, CPUMGetHyperDR7(pVCpu));
1807 ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_DR7);
1808 }
1809
1810 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE))
1811 {
1812 hv_return_t hrc = hv_vcpu_write_fpstate(pVCpu->nem.s.hVCpuId, &pVCpu->cpum.GstCtx.XState, sizeof(pVCpu->cpum.GstCtx.XState));
1813 if (hrc == HV_SUCCESS)
1814 { /* likely */ }
1815 else
1816 return nemR3DarwinHvSts2Rc(hrc);
1817
1818        ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~(HM_CHANGED_GUEST_X87 | HM_CHANGED_GUEST_SSE_AVX | HM_CHANGED_GUEST_OTHER_XSAVE));
1819 }
1820
1821 /* MSRs */
1822 if (fWhat & CPUMCTX_EXTRN_EFER)
1823 {
1824 WRITE_VMCS_FIELD(VMX_VMCS64_GUEST_EFER_FULL, pVCpu->cpum.GstCtx.msrEFER);
1825 ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_EFER_MSR);
1826 }
1827 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
1828 {
1829 WRITE_MSR(MSR_K8_KERNEL_GS_BASE, pVCpu->cpum.GstCtx.msrKERNELGSBASE);
1830 ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_KERNEL_GS_BASE);
1831 }
1832 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
1833 {
1834 WRITE_MSR(MSR_IA32_SYSENTER_CS, pVCpu->cpum.GstCtx.SysEnter.cs);
1835 WRITE_MSR(MSR_IA32_SYSENTER_EIP, pVCpu->cpum.GstCtx.SysEnter.eip);
1836 WRITE_MSR(MSR_IA32_SYSENTER_ESP, pVCpu->cpum.GstCtx.SysEnter.esp);
1837 ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_MSR_MASK);
1838 }
1839 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
1840 {
1841 WRITE_MSR(MSR_K6_STAR, pVCpu->cpum.GstCtx.msrSTAR);
1842 WRITE_MSR(MSR_K8_LSTAR, pVCpu->cpum.GstCtx.msrLSTAR);
1843 WRITE_MSR(MSR_K8_CSTAR, pVCpu->cpum.GstCtx.msrCSTAR);
1844 WRITE_MSR(MSR_K8_SF_MASK, pVCpu->cpum.GstCtx.msrSFMASK);
1845 ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSCALL_MSRS);
1846 }
1847 if (fWhat & CPUMCTX_EXTRN_TSC_AUX)
1848 {
1849 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
1850
1851 WRITE_MSR(MSR_K8_TSC_AUX, pCtxMsrs->msr.TscAux);
1852 ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_TSC_AUX);
1853 }
1854 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
1855 {
1856 /* Last Branch Record. */
1857 if (pVM->nem.s.fLbr)
1858 {
1859 PVMXVMCSINFOSHARED const pVmcsInfoShared = &pVCpu->nem.s.vmx.VmcsInfo;
1860 uint32_t const idFromIpMsrStart = pVM->nem.s.idLbrFromIpMsrFirst;
1861 uint32_t const idToIpMsrStart = pVM->nem.s.idLbrToIpMsrFirst;
1862 uint32_t const idInfoMsrStart = pVM->nem.s.idLbrInfoMsrFirst;
1863 uint32_t const cLbrStack = pVM->nem.s.idLbrFromIpMsrLast - pVM->nem.s.idLbrFromIpMsrFirst + 1;
1864 Assert(cLbrStack <= 32);
1865 for (uint32_t i = 0; i < cLbrStack; i++)
1866 {
1867 WRITE_MSR(idFromIpMsrStart + i, pVmcsInfoShared->au64LbrFromIpMsr[i]);
1868
1869 /* Some CPUs don't have a Branch-To-IP MSR (P4 and related Xeons). */
1870 if (idToIpMsrStart != 0)
1871 WRITE_MSR(idToIpMsrStart + i, pVmcsInfoShared->au64LbrToIpMsr[i]);
1872 if (idInfoMsrStart != 0)
1873 WRITE_MSR(idInfoMsrStart + i, pVmcsInfoShared->au64LbrInfoMsr[i]);
1874 }
1875
1876 WRITE_MSR(pVM->nem.s.idLbrTosMsr, pVmcsInfoShared->u64LbrTosMsr);
1877 if (pVM->nem.s.idLerFromIpMsr)
1878 WRITE_MSR(pVM->nem.s.idLerFromIpMsr, pVmcsInfoShared->u64LerFromIpMsr);
1879 if (pVM->nem.s.idLerToIpMsr)
1880 WRITE_MSR(pVM->nem.s.idLerToIpMsr, pVmcsInfoShared->u64LerToIpMsr);
1881 }
1882
1883 ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_OTHER_MSRS);
1884 }
1885
1886 hv_vcpu_invalidate_tlb(pVCpu->nem.s.hVCpuId);
1887 hv_vcpu_flush(pVCpu->nem.s.hVCpuId);
1888
1889 pVCpu->cpum.GstCtx.fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_NEM;
1890
1891 /* Clear any bits that may be set but exported unconditionally or unused/reserved bits. */
1892 ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~( HM_CHANGED_GUEST_HWVIRT
1893 | HM_CHANGED_VMX_GUEST_AUTO_MSRS
1894 | HM_CHANGED_VMX_GUEST_LAZY_MSRS
1895 | (HM_CHANGED_KEEPER_STATE_MASK & ~HM_CHANGED_VMX_MASK)));
1896
1897 STAM_PROFILE_ADV_STOP(&pVCpu->nem.s.StatProfGstStateExport, x);
1898 return VINF_SUCCESS;
1899#undef WRITE_GREG
1900#undef WRITE_VMCS_FIELD
#undef WRITE_MSR
1901}
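/*
 * Rough usage sketch (not the actual run loop, which lives elsewhere in this file):
 * the export above is paired with hv_vcpu_run()/hv_vcpu_run_until() and the exit
 * handling below, roughly along these lines:
 *
 *     rc = nemR3DarwinExportGuestState(pVM, pVCpu, &VmxTransient);
 *     if (RT_SUCCESS(rc))
 *     {
 *         hv_return_t hrc = hv_vcpu_run(pVCpu->nem.s.hVCpuId);
 *         if (hrc == HV_SUCCESS)
 *             rcStrict = nemR3DarwinHandleExit(pVM, pVCpu, &VmxTransient);
 *     }
 */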
1902
1903
1904/**
1905 * Common worker for both nemR3DarwinHandleExit() and nemR3DarwinHandleExitDebug().
1906 *
1907 * @returns VBox strict status code.
1908 * @param pVM The cross context VM structure.
1909 * @param pVCpu The cross context virtual CPU structure of the
1910 * calling EMT.
1911 * @param pVmxTransient The transient VMX structure.
1912 */
1913DECLINLINE(int) nemR3DarwinHandleExitCommon(PVM pVM, PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
1914{
1915 uint32_t uExitReason;
1916 int rc = nemR3DarwinReadVmcs32(pVCpu, VMX_VMCS32_RO_EXIT_REASON, &uExitReason);
1917 AssertRC(rc);
1918 pVmxTransient->fVmcsFieldsRead = 0;
1919 pVmxTransient->fIsNestedGuest = false;
1920 pVmxTransient->uExitReason = VMX_EXIT_REASON_BASIC(uExitReason);
1921 pVmxTransient->fVMEntryFailed = VMX_EXIT_REASON_HAS_ENTRY_FAILED(uExitReason);
1922
1923 if (RT_UNLIKELY(pVmxTransient->fVMEntryFailed))
1924 AssertLogRelMsgFailedReturn(("Running guest failed for CPU #%u: %#x %u\n",
1925 pVCpu->idCpu, pVmxTransient->uExitReason, vmxHCCheckGuestState(pVCpu, &pVCpu->nem.s.VmcsInfo)),
1926 VERR_NEM_IPE_0);
1927
1928 /** @todo Only copy the state on demand (the R0 VT-x code saves some stuff unconditionally and the VMX template assumes that
1929 * when handling exits). */
1930 /*
1931 * Note! What is being fetched here must match the default value for the
1932 * a_fDonePostExit parameter of vmxHCImportGuestState exactly!
1933 */
1934 rc = nemR3DarwinCopyStateFromHv(pVM, pVCpu, CPUMCTX_EXTRN_ALL);
1935 AssertRCReturn(rc, rc);
1936
1937 STAM_COUNTER_INC(&pVCpu->nem.s.pVmxStats->aStatExitReason[pVmxTransient->uExitReason & MASK_EXITREASON_STAT]);
1938 STAM_REL_COUNTER_INC(&pVCpu->nem.s.pVmxStats->StatExitAll);
1939 return VINF_SUCCESS;
1940}
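/*
 * Annotation: the raw VMX_VMCS32_RO_EXIT_REASON value packs the basic exit reason
 * into bits 15:0 and the VM-entry failure indicator into bit 31, which is what
 * VMX_EXIT_REASON_BASIC() and VMX_EXIT_REASON_HAS_ENTRY_FAILED() extract above.
 */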
1941
1942
1943/**
1944 * Handles an exit from hv_vcpu_run().
1945 *
1946 * @returns VBox strict status code.
1947 * @param pVM The cross context VM structure.
1948 * @param pVCpu The cross context virtual CPU structure of the
1949 * calling EMT.
1950 * @param pVmxTransient The transient VMX structure.
1951 */
1952static VBOXSTRICTRC nemR3DarwinHandleExit(PVM pVM, PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
1953{
1954 int rc = nemR3DarwinHandleExitCommon(pVM, pVCpu, pVmxTransient);
1955 AssertRCReturn(rc, rc);
1956
1957#ifndef HMVMX_USE_FUNCTION_TABLE
1958 return vmxHCHandleExit(pVCpu, pVmxTransient);
1959#else
1960 return g_aVMExitHandlers[pVmxTransient->uExitReason].pfn(pVCpu, pVmxTransient);
1961#endif
1962}
1963
1964
1965/**
1966 * Handles an exit from hv_vcpu_run() - debug runloop variant.
1967 *
1968 * @returns VBox strict status code.
1969 * @param pVM The cross context VM structure.
1970 * @param pVCpu The cross context virtual CPU structure of the
1971 * calling EMT.
1972 * @param pVmxTransient The transient VMX structure.
1973 * @param pDbgState The debug state structure.
1974 */
1975static VBOXSTRICTRC nemR3DarwinHandleExitDebug(PVM pVM, PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)
1976{
1977 int rc = nemR3DarwinHandleExitCommon(pVM, pVCpu, pVmxTransient);
1978 AssertRCReturn(rc, rc);
1979
1980 return vmxHCRunDebugHandleExit(pVCpu, pVmxTransient, pDbgState);
1981}
1982
1983
1984/**
1985 * Worker for nemR3NativeInit that loads the Hypervisor.framework shared library.
1986 *
1987 * @returns VBox status code.
1988 * @param fForced Whether the HMForced flag is set and we should
1989 * fail if we cannot initialize.
1990 * @param pErrInfo Where to always return error info.
1991 */
1992static int nemR3DarwinLoadHv(bool fForced, PRTERRINFO pErrInfo)
1993{
1994 RTLDRMOD hMod = NIL_RTLDRMOD;
1995 static const char *s_pszHvPath = "/System/Library/Frameworks/Hypervisor.framework/Hypervisor";
1996
1997 int rc = RTLdrLoadEx(s_pszHvPath, &hMod, RTLDRLOAD_FLAGS_NO_UNLOAD | RTLDRLOAD_FLAGS_NO_SUFFIX, pErrInfo);
1998 if (RT_SUCCESS(rc))
1999 {
2000 for (unsigned i = 0; i < RT_ELEMENTS(g_aImports); i++)
2001 {
2002 int rc2 = RTLdrGetSymbol(hMod, g_aImports[i].pszName, (void **)g_aImports[i].ppfn);
2003 if (RT_SUCCESS(rc2))
2004 {
2005 if (g_aImports[i].fOptional)
2006 LogRel(("NEM: info: Found optional import Hypervisor!%s.\n",
2007 g_aImports[i].pszName));
2008 }
2009 else
2010 {
2011 *g_aImports[i].ppfn = NULL;
2012
2013 LogRel(("NEM: %s: Failed to import Hypervisor!%s: %Rrc\n",
2014 g_aImports[i].fOptional ? "info" : fForced ? "fatal" : "error",
2015 g_aImports[i].pszName, rc2));
2016 if (!g_aImports[i].fOptional)
2017 {
2018 if (RTErrInfoIsSet(pErrInfo))
2019 RTErrInfoAddF(pErrInfo, rc2, ", Hypervisor!%s", g_aImports[i].pszName);
2020 else
2021 rc = RTErrInfoSetF(pErrInfo, rc2, "Failed to import: Hypervisor!%s", g_aImports[i].pszName);
2022 Assert(RT_FAILURE(rc));
2023 }
2024 }
2025 }
2026 if (RT_SUCCESS(rc))
2027 {
2028 Assert(!RTErrInfoIsSet(pErrInfo));
2029 }
2030
2031 RTLdrClose(hMod);
2032 }
2033 else
2034 {
2035        RTErrInfoAddF(pErrInfo, rc, "Failed to load Hypervisor.framework: %s: %Rrc", s_pszHvPath, rc);
2036 rc = VERR_NEM_INIT_FAILED;
2037 }
2038
2039 return rc;
2040}
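/*
 * Illustrative sketch of what an entry in the g_aImports table used above could look
 * like (the real table and its struct are defined earlier in this file and are not
 * reproduced here; only the pszName/ppfn/fOptional member names come from the code
 * above, the layout and pointer variable names are assumptions):
 *
 *     { "hv_vm_create",      &g_pfnHvVmCreate,     false },    mandatory import
 *     { "hv_vcpu_run_until", &g_pfnHvVcpuRunUntil, true  },    optional import
 */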
2041
2042
2043/**
2044 * Read and initialize the global capabilities supported by this CPU.
2045 *
2046 * @returns VBox status code.
2047 */
2048static int nemR3DarwinCapsInit(void)
2049{
2050 RT_ZERO(g_HmMsrs);
2051
2052 hv_return_t hrc = hv_vmx_read_capability(HV_VMX_CAP_PINBASED, &g_HmMsrs.u.vmx.PinCtls.u);
2053 if (hrc == HV_SUCCESS)
2054 hrc = hv_vmx_read_capability(HV_VMX_CAP_PROCBASED, &g_HmMsrs.u.vmx.ProcCtls.u);
2055 if (hrc == HV_SUCCESS)
2056 hrc = hv_vmx_read_capability(HV_VMX_CAP_ENTRY, &g_HmMsrs.u.vmx.EntryCtls.u);
2057 if (hrc == HV_SUCCESS)
2058 hrc = hv_vmx_read_capability(HV_VMX_CAP_EXIT, &g_HmMsrs.u.vmx.ExitCtls.u);
2059 if (hrc == HV_SUCCESS)
2060 {
2061 hrc = hv_vmx_read_capability(HV_VMX_CAP_BASIC, &g_HmMsrs.u.vmx.u64Basic);
2062 if (hrc == HV_SUCCESS)
2063 {
2064 if (hrc == HV_SUCCESS)
2065 hrc = hv_vmx_read_capability(HV_VMX_CAP_MISC, &g_HmMsrs.u.vmx.u64Misc);
2066 if (hrc == HV_SUCCESS)
2067 hrc = hv_vmx_read_capability(HV_VMX_CAP_CR0_FIXED0, &g_HmMsrs.u.vmx.u64Cr0Fixed0);
2068 if (hrc == HV_SUCCESS)
2069 hrc = hv_vmx_read_capability(HV_VMX_CAP_CR0_FIXED1, &g_HmMsrs.u.vmx.u64Cr0Fixed1);
2070 if (hrc == HV_SUCCESS)
2071 hrc = hv_vmx_read_capability(HV_VMX_CAP_CR4_FIXED0, &g_HmMsrs.u.vmx.u64Cr4Fixed0);
2072 if (hrc == HV_SUCCESS)
2073 hrc = hv_vmx_read_capability(HV_VMX_CAP_CR4_FIXED1, &g_HmMsrs.u.vmx.u64Cr4Fixed1);
2074 if (hrc == HV_SUCCESS)
2075 hrc = hv_vmx_read_capability(HV_VMX_CAP_VMCS_ENUM, &g_HmMsrs.u.vmx.u64VmcsEnum);
2076 if ( hrc == HV_SUCCESS
2077 && RT_BF_GET(g_HmMsrs.u.vmx.u64Basic, VMX_BF_BASIC_TRUE_CTLS))
2078 {
2079 hrc = hv_vmx_read_capability(HV_VMX_CAP_TRUE_PINBASED, &g_HmMsrs.u.vmx.TruePinCtls.u);
2080 if (hrc == HV_SUCCESS)
2081 hrc = hv_vmx_read_capability(HV_VMX_CAP_TRUE_PROCBASED, &g_HmMsrs.u.vmx.TrueProcCtls.u);
2082 if (hrc == HV_SUCCESS)
2083 hrc = hv_vmx_read_capability(HV_VMX_CAP_TRUE_ENTRY, &g_HmMsrs.u.vmx.TrueEntryCtls.u);
2084 if (hrc == HV_SUCCESS)
2085 hrc = hv_vmx_read_capability(HV_VMX_CAP_TRUE_EXIT, &g_HmMsrs.u.vmx.TrueExitCtls.u);
2086 }
2087 }
2088 else
2089 {
2090            /* Likely running on anything < 11.0 (Big Sur), so provide some sensible defaults. */
2091 g_HmMsrs.u.vmx.u64Cr0Fixed0 = 0x80000021;
2092 g_HmMsrs.u.vmx.u64Cr0Fixed1 = 0xffffffff;
2093 g_HmMsrs.u.vmx.u64Cr4Fixed0 = 0x2000;
2094 g_HmMsrs.u.vmx.u64Cr4Fixed1 = 0x1767ff;
2095 hrc = HV_SUCCESS;
2096 }
2097 }
2098
2099 if ( hrc == HV_SUCCESS
2100 && g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
2101 {
2102 hrc = hv_vmx_read_capability(HV_VMX_CAP_PROCBASED2, &g_HmMsrs.u.vmx.ProcCtls2.u);
2103
2104 if ( hrc == HV_SUCCESS
2105 && g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & (VMX_PROC_CTLS2_EPT | VMX_PROC_CTLS2_VPID))
2106 {
2107 hrc = hv_vmx_read_capability(HV_VMX_CAP_EPT_VPID_CAP, &g_HmMsrs.u.vmx.u64EptVpidCaps);
2108 if (hrc != HV_SUCCESS)
2109 hrc = HV_SUCCESS; /* Probably just outdated OS. */
2110 }
2111
2112 g_HmMsrs.u.vmx.u64VmFunc = 0; /* No way to read that on macOS. */
2113 }
2114
2115 if (hrc == HV_SUCCESS)
2116 {
2117 /*
2118 * Check for EFER swapping support.
2119 */
2120 g_fHmVmxSupportsVmcsEfer = true; //(g_HmMsrs.u.vmx.EntryCtls.n.allowed1 & VMX_ENTRY_CTLS_LOAD_EFER_MSR)
2121 //&& (g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_LOAD_EFER_MSR)
2122 //&& (g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_EFER_MSR);
2123 }
2124
2125 return nemR3DarwinHvSts2Rc(hrc);
2126}
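/*
 * Annotation: each capability pair read above follows the usual VMX convention,
 * allowed0 = bits that must be 1, allowed1 = bits that may be 1.  Feature checks
 * elsewhere in this file therefore test allowed1, e.g.:
 *
 *     if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
 *         ... secondary processor-based controls can be enabled ...
 */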
2127
2128
2129/**
2130 * Sets up the LBR MSR ranges based on the host CPU.
2131 *
2132 * @returns VBox status code.
2133 * @param pVM The cross context VM structure.
2134 *
2135 * @sa hmR0VmxSetupLbrMsrRange
2136 */
2137static int nemR3DarwinSetupLbrMsrRange(PVMCC pVM)
2138{
2139 Assert(pVM->nem.s.fLbr);
2140 uint32_t idLbrFromIpMsrFirst;
2141 uint32_t idLbrFromIpMsrLast;
2142 uint32_t idLbrToIpMsrFirst;
2143 uint32_t idLbrToIpMsrLast;
2144 uint32_t idLbrInfoMsrFirst;
2145 uint32_t idLbrInfoMsrLast;
2146 uint32_t idLbrTosMsr;
2147 uint32_t idLbrSelectMsr;
2148 uint32_t idLerFromIpMsr;
2149 uint32_t idLerToIpMsr;
2150
2151 /*
2152 * Determine the LBR MSRs supported for this host CPU family and model.
2153 *
2154 * See Intel spec. 17.4.8 "LBR Stack".
2155 * See Intel "Model-Specific Registers" spec.
2156 */
2157 uint32_t const uFamilyModel = (g_CpumHostFeatures.s.uFamily << 8)
2158 | g_CpumHostFeatures.s.uModel;
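    /* For example, a host CPU with family 6 / model 0x9E encodes as 0x069e here and
       is handled by the second case block below (32-deep LBR stack). */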
2159 switch (uFamilyModel)
2160 {
2161 case 0x0f01: case 0x0f02:
2162 idLbrFromIpMsrFirst = MSR_P4_LASTBRANCH_0;
2163 idLbrFromIpMsrLast = MSR_P4_LASTBRANCH_3;
2164 idLbrToIpMsrFirst = 0x0;
2165 idLbrToIpMsrLast = 0x0;
2166 idLbrInfoMsrFirst = 0x0;
2167 idLbrInfoMsrLast = 0x0;
2168 idLbrTosMsr = MSR_P4_LASTBRANCH_TOS;
2169 idLbrSelectMsr = 0x0;
2170 idLerFromIpMsr = 0x0;
2171 idLerToIpMsr = 0x0;
2172 break;
2173
2174 case 0x065c: case 0x065f: case 0x064e: case 0x065e: case 0x068e:
2175 case 0x069e: case 0x0655: case 0x0666: case 0x067a: case 0x0667:
2176 case 0x066a: case 0x066c: case 0x067d: case 0x067e:
2177 idLbrFromIpMsrFirst = MSR_LASTBRANCH_0_FROM_IP;
2178 idLbrFromIpMsrLast = MSR_LASTBRANCH_31_FROM_IP;
2179 idLbrToIpMsrFirst = MSR_LASTBRANCH_0_TO_IP;
2180 idLbrToIpMsrLast = MSR_LASTBRANCH_31_TO_IP;
2181 idLbrInfoMsrFirst = MSR_LASTBRANCH_0_INFO;
2182 idLbrInfoMsrLast = MSR_LASTBRANCH_31_INFO;
2183 idLbrTosMsr = MSR_LASTBRANCH_TOS;
2184 idLbrSelectMsr = MSR_LASTBRANCH_SELECT;
2185 idLerFromIpMsr = MSR_LER_FROM_IP;
2186 idLerToIpMsr = MSR_LER_TO_IP;
2187 break;
2188
2189 case 0x063d: case 0x0647: case 0x064f: case 0x0656: case 0x063c:
2190 case 0x0645: case 0x0646: case 0x063f: case 0x062a: case 0x062d:
2191 case 0x063a: case 0x063e: case 0x061a: case 0x061e: case 0x061f:
2192 case 0x062e: case 0x0625: case 0x062c: case 0x062f:
2193 idLbrFromIpMsrFirst = MSR_LASTBRANCH_0_FROM_IP;
2194 idLbrFromIpMsrLast = MSR_LASTBRANCH_15_FROM_IP;
2195 idLbrToIpMsrFirst = MSR_LASTBRANCH_0_TO_IP;
2196 idLbrToIpMsrLast = MSR_LASTBRANCH_15_TO_IP;
2197 idLbrInfoMsrFirst = MSR_LASTBRANCH_0_INFO;
2198 idLbrInfoMsrLast = MSR_LASTBRANCH_15_INFO;
2199 idLbrTosMsr = MSR_LASTBRANCH_TOS;
2200 idLbrSelectMsr = MSR_LASTBRANCH_SELECT;
2201 idLerFromIpMsr = MSR_LER_FROM_IP;
2202 idLerToIpMsr = MSR_LER_TO_IP;
2203 break;
2204
2205 case 0x0617: case 0x061d: case 0x060f:
2206 idLbrFromIpMsrFirst = MSR_CORE2_LASTBRANCH_0_FROM_IP;
2207 idLbrFromIpMsrLast = MSR_CORE2_LASTBRANCH_3_FROM_IP;
2208 idLbrToIpMsrFirst = MSR_CORE2_LASTBRANCH_0_TO_IP;
2209 idLbrToIpMsrLast = MSR_CORE2_LASTBRANCH_3_TO_IP;
2210 idLbrInfoMsrFirst = 0x0;
2211 idLbrInfoMsrLast = 0x0;
2212 idLbrTosMsr = MSR_CORE2_LASTBRANCH_TOS;
2213 idLbrSelectMsr = 0x0;
2214 idLerFromIpMsr = 0x0;
2215 idLerToIpMsr = 0x0;
2216 break;
2217
2218 /* Atom and related microarchitectures we don't care about:
2219 case 0x0637: case 0x064a: case 0x064c: case 0x064d: case 0x065a:
2220 case 0x065d: case 0x061c: case 0x0626: case 0x0627: case 0x0635:
2221 case 0x0636: */
2222 /* All other CPUs: */
2223 default:
2224 {
2225 LogRelFunc(("Could not determine LBR stack size for the CPU model %#x\n", uFamilyModel));
2226 VMCC_GET_CPU_0(pVM)->nem.s.u32HMError = VMX_UFC_LBR_STACK_SIZE_UNKNOWN;
2227 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2228 }
2229 }
2230
2231 /*
2232 * Validate.
2233 */
2234 uint32_t const cLbrStack = idLbrFromIpMsrLast - idLbrFromIpMsrFirst + 1;
2235 PCVMCPU pVCpu0 = VMCC_GET_CPU_0(pVM);
2236 AssertCompile( RT_ELEMENTS(pVCpu0->nem.s.vmx.VmcsInfo.au64LbrFromIpMsr)
2237 == RT_ELEMENTS(pVCpu0->nem.s.vmx.VmcsInfo.au64LbrToIpMsr));
2238 AssertCompile( RT_ELEMENTS(pVCpu0->nem.s.vmx.VmcsInfo.au64LbrFromIpMsr)
2239 == RT_ELEMENTS(pVCpu0->nem.s.vmx.VmcsInfo.au64LbrInfoMsr));
2240 if (cLbrStack > RT_ELEMENTS(pVCpu0->nem.s.vmx.VmcsInfo.au64LbrFromIpMsr))
2241 {
2242 LogRelFunc(("LBR stack size of the CPU (%u) exceeds our buffer size\n", cLbrStack));
2243 VMCC_GET_CPU_0(pVM)->nem.s.u32HMError = VMX_UFC_LBR_STACK_SIZE_OVERFLOW;
2244 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2245 }
2246 NOREF(pVCpu0);
2247
2248 /*
2249 * Update the LBR info. to the VM struct. for use later.
2250 */
2251 pVM->nem.s.idLbrTosMsr = idLbrTosMsr;
2252 pVM->nem.s.idLbrSelectMsr = idLbrSelectMsr;
2253
2254 pVM->nem.s.idLbrFromIpMsrFirst = idLbrFromIpMsrFirst;
2255 pVM->nem.s.idLbrFromIpMsrLast = idLbrFromIpMsrLast;
2256
2257 pVM->nem.s.idLbrToIpMsrFirst = idLbrToIpMsrFirst;
2258 pVM->nem.s.idLbrToIpMsrLast = idLbrToIpMsrLast;
2259
2260 pVM->nem.s.idLbrInfoMsrFirst = idLbrInfoMsrFirst;
2261 pVM->nem.s.idLbrInfoMsrLast = idLbrInfoMsrLast;
2262
2263 pVM->nem.s.idLerFromIpMsr = idLerFromIpMsr;
2264 pVM->nem.s.idLerToIpMsr = idLerToIpMsr;
2265 return VINF_SUCCESS;
2266}
2267
2268
2269/**
2270 * Sets up pin-based VM-execution controls in the VMCS.
2271 *
2272 * @returns VBox status code.
2273 * @param pVCpu The cross context virtual CPU structure.
2274 * @param pVmcsInfo The VMCS info. object.
2275 */
2276static int nemR3DarwinVmxSetupVmcsPinCtls(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2277{
2278 //PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2279 uint32_t fVal = g_HmMsrs.u.vmx.PinCtls.n.allowed0; /* Bits set here must always be set. */
2280 uint32_t const fZap = g_HmMsrs.u.vmx.PinCtls.n.allowed1; /* Bits cleared here must always be cleared. */
2281
2282 if (g_HmMsrs.u.vmx.PinCtls.n.allowed1 & VMX_PIN_CTLS_VIRT_NMI)
2283 fVal |= VMX_PIN_CTLS_VIRT_NMI; /* Use virtual NMIs and virtual-NMI blocking features. */
2284
2285#if 0 /** @todo Use preemption timer */
2286 /* Enable the VMX-preemption timer. */
2287 if (pVM->hmr0.s.vmx.fUsePreemptTimer)
2288 {
2289 Assert(g_HmMsrs.u.vmx.PinCtls.n.allowed1 & VMX_PIN_CTLS_PREEMPT_TIMER);
2290 fVal |= VMX_PIN_CTLS_PREEMPT_TIMER;
2291 }
2292
2293 /* Enable posted-interrupt processing. */
2294 if (pVM->hm.s.fPostedIntrs)
2295 {
2296 Assert(g_HmMsrs.u.vmx.PinCtls.n.allowed1 & VMX_PIN_CTLS_POSTED_INT);
2297 Assert(g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_ACK_EXT_INT);
2298 fVal |= VMX_PIN_CTLS_POSTED_INT;
2299 }
2300#endif
2301
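    /* Annotation: fVal starts from allowed0 (bits the CPU requires to be 1) and fZap is
       allowed1 (bits the CPU permits to be 1).  The check below catches any control we
       want to set that the CPU cannot actually allow, i.e. (fVal & fZap) != fVal. */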
2302 if ((fVal & fZap) != fVal)
2303 {
2304 LogRelFunc(("Invalid pin-based VM-execution controls combo! Cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
2305 g_HmMsrs.u.vmx.PinCtls.n.allowed0, fVal, fZap));
2306 pVCpu->nem.s.u32HMError = VMX_UFC_CTRL_PIN_EXEC;
2307 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2308 }
2309
2310 /* Commit it to the VMCS and update our cache. */
2311 int rc = nemR3DarwinWriteVmcs32(pVCpu, VMX_VMCS32_CTRL_PIN_EXEC, fVal);
2312 AssertRC(rc);
2313 pVmcsInfo->u32PinCtls = fVal;
2314
2315 return VINF_SUCCESS;
2316}
2317
2318
2319/**
2320 * Sets up secondary processor-based VM-execution controls in the VMCS.
2321 *
2322 * @returns VBox status code.
2323 * @param pVCpu The cross context virtual CPU structure.
2324 * @param pVmcsInfo The VMCS info. object.
2325 */
2326static int nemR3DarwinVmxSetupVmcsProcCtls2(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2327{
2328 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2329 uint32_t fVal = g_HmMsrs.u.vmx.ProcCtls2.n.allowed0; /* Bits set here must be set in the VMCS. */
2330 uint32_t const fZap = g_HmMsrs.u.vmx.ProcCtls2.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2331
2332 /* WBINVD causes a VM-exit. */
2333 if (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_WBINVD_EXIT)
2334 fVal |= VMX_PROC_CTLS2_WBINVD_EXIT;
2335
2336    /* Enable the INVPCID instruction if we expose it to the guest and it is supported
2337       by the hardware. Without this, a guest executing INVPCID would cause a #UD. */
2338 if ( pVM->cpum.ro.GuestFeatures.fInvpcid
2339 && (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_INVPCID))
2340 fVal |= VMX_PROC_CTLS2_INVPCID;
2341
2342#if 0 /** @todo */
2343 /* Enable VPID. */
2344 if (pVM->hmr0.s.vmx.fVpid)
2345 fVal |= VMX_PROC_CTLS2_VPID;
2346
2347 if (pVM->hm.s.fVirtApicRegs)
2348 {
2349 /* Enable APIC-register virtualization. */
2350 Assert(g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_APIC_REG_VIRT);
2351 fVal |= VMX_PROC_CTLS2_APIC_REG_VIRT;
2352
2353 /* Enable virtual-interrupt delivery. */
2354 Assert(g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_INTR_DELIVERY);
2355 fVal |= VMX_PROC_CTLS2_VIRT_INTR_DELIVERY;
2356 }
2357
2358 /* Virtualize-APIC accesses if supported by the CPU. The virtual-APIC page is
2359 where the TPR shadow resides. */
2360 /** @todo VIRT_X2APIC support, it's mutually exclusive with this. So must be
2361 * done dynamically. */
2362 if (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VIRT_APIC_ACCESS)
2363 {
2364 fVal |= VMX_PROC_CTLS2_VIRT_APIC_ACCESS;
2365 hmR0VmxSetupVmcsApicAccessAddr(pVCpu);
2366 }
2367#endif
2368
2369    /* Enable the RDTSCP instruction if we expose it to the guest and it is supported
2370       by the hardware. Without this, a guest executing RDTSCP would cause a #UD. */
2371 if ( pVM->cpum.ro.GuestFeatures.fRdTscP
2372 && (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_RDTSCP))
2373 fVal |= VMX_PROC_CTLS2_RDTSCP;
2374
2375 /* Enable Pause-Loop exiting. */
2376 if ( (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT)
2377 && pVM->nem.s.cPleGapTicks
2378 && pVM->nem.s.cPleWindowTicks)
2379 {
2380 fVal |= VMX_PROC_CTLS2_PAUSE_LOOP_EXIT;
2381
2382 int rc = nemR3DarwinWriteVmcs32(pVCpu, VMX_VMCS32_CTRL_PLE_GAP, pVM->nem.s.cPleGapTicks); AssertRC(rc);
2383 rc = nemR3DarwinWriteVmcs32(pVCpu, VMX_VMCS32_CTRL_PLE_WINDOW, pVM->nem.s.cPleWindowTicks); AssertRC(rc);
2384 }
2385
2386 if ((fVal & fZap) != fVal)
2387 {
2388 LogRelFunc(("Invalid secondary processor-based VM-execution controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
2389 g_HmMsrs.u.vmx.ProcCtls2.n.allowed0, fVal, fZap));
2390 pVCpu->nem.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC2;
2391 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2392 }
2393
2394 /* Commit it to the VMCS and update our cache. */
2395 int rc = nemR3DarwinWriteVmcs32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, fVal);
2396 AssertRC(rc);
2397 pVmcsInfo->u32ProcCtls2 = fVal;
2398
2399 return VINF_SUCCESS;
2400}
2401
2402
2403/**
2404 * Enables native access for the given MSR.
2405 *
2406 * @returns VBox status code.
2407 * @param pVCpu The cross context virtual CPU structure.
2408 * @param idMsr The MSR to enable native access for.
2409 */
2410static int nemR3DarwinMsrSetNative(PVMCPUCC pVCpu, uint32_t idMsr)
2411{
2412 hv_return_t hrc = hv_vcpu_enable_native_msr(pVCpu->nem.s.hVCpuId, idMsr, true /*enable*/);
2413 if (hrc == HV_SUCCESS)
2414 return VINF_SUCCESS;
2415
2416 return nemR3DarwinHvSts2Rc(hrc);
2417}
2418
2419
2420/**
2421 * Sets the MSR to managed for the given vCPU allowing the guest to access it.
2422 *
2423 * @returns VBox status code.
2424 * @param pVCpu The cross context virtual CPU structure.
2425 * @param idMsr The MSR to enable managed access for.
2426 * @param fMsrPerm The MSR permissions flags.
2427 */
2428static int nemR3DarwinMsrSetManaged(PVMCPUCC pVCpu, uint32_t idMsr, hv_msr_flags_t fMsrPerm)
2429{
2430 Assert(hv_vcpu_enable_managed_msr);
2431
2432 hv_return_t hrc = hv_vcpu_enable_managed_msr(pVCpu->nem.s.hVCpuId, idMsr, true /*enable*/);
2433 if (hrc == HV_SUCCESS)
2434 {
2435 hrc = hv_vcpu_set_msr_access(pVCpu->nem.s.hVCpuId, idMsr, fMsrPerm);
2436 if (hrc == HV_SUCCESS)
2437 return VINF_SUCCESS;
2438 }
2439
2440 return nemR3DarwinHvSts2Rc(hrc);
2441}
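/*
 * Illustrative use of the two helpers above, mirroring the permission setup below:
 * pass-through MSRs use native access, while the LBR MSRs use managed access with
 * explicit read/write permissions, e.g.:
 *
 *     rc = nemR3DarwinMsrSetNative(pVCpu, MSR_K8_LSTAR);
 *     rc = nemR3DarwinMsrSetManaged(pVCpu, pVM->nem.s.idLbrTosMsr, HV_MSR_READ | HV_MSR_WRITE);
 */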
2442
2443
2444/**
2445 * Sets up the MSR permissions which don't change through the lifetime of the VM.
2446 *
2447 * @returns VBox status code.
2448 * @param pVCpu The cross context virtual CPU structure.
2449 * @param pVmcsInfo The VMCS info. object.
2450 */
2451static int nemR3DarwinSetupVmcsMsrPermissions(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2452{
2453 RT_NOREF(pVmcsInfo);
2454
2455 /*
2456 * The guest can access the following MSRs (read, write) without causing
2457 * VM-exits; they are loaded/stored automatically using fields in the VMCS.
2458 */
2459 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2460 int rc;
2461 rc = nemR3DarwinMsrSetNative(pVCpu, MSR_IA32_SYSENTER_CS); AssertRCReturn(rc, rc);
2462 rc = nemR3DarwinMsrSetNative(pVCpu, MSR_IA32_SYSENTER_ESP); AssertRCReturn(rc, rc);
2463 rc = nemR3DarwinMsrSetNative(pVCpu, MSR_IA32_SYSENTER_EIP); AssertRCReturn(rc, rc);
2464 rc = nemR3DarwinMsrSetNative(pVCpu, MSR_K8_GS_BASE); AssertRCReturn(rc, rc);
2465 rc = nemR3DarwinMsrSetNative(pVCpu, MSR_K8_FS_BASE); AssertRCReturn(rc, rc);
2466
2467 /*
2468     * The IA32_PRED_CMD and IA32_FLUSH_CMD MSRs are write-only and have no state
2469     * associated with them. We never need to intercept access (writes need to be
2470 * executed without causing a VM-exit, reads will #GP fault anyway).
2471 *
2472 * The IA32_SPEC_CTRL MSR is read/write and has state. We allow the guest to
2473 * read/write them. We swap the guest/host MSR value using the
2474 * auto-load/store MSR area.
2475 */
2476 if (pVM->cpum.ro.GuestFeatures.fIbpb)
2477 {
2478 rc = nemR3DarwinMsrSetNative(pVCpu, MSR_IA32_PRED_CMD);
2479 AssertRCReturn(rc, rc);
2480 }
2481#if 0 /* Doesn't work. */
2482 if (pVM->cpum.ro.GuestFeatures.fFlushCmd)
2483 {
2484 rc = nemR3DarwinMsrSetNative(pVCpu, MSR_IA32_FLUSH_CMD);
2485 AssertRCReturn(rc, rc);
2486 }
2487#endif
2488 if (pVM->cpum.ro.GuestFeatures.fIbrs)
2489 {
2490 rc = nemR3DarwinMsrSetNative(pVCpu, MSR_IA32_SPEC_CTRL);
2491 AssertRCReturn(rc, rc);
2492 }
2493
2494 /*
2495     * Allow full read/write access for the following MSRs (mandatory for VT-x),
2496     * which are required for 64-bit guests.
2497 */
2498 rc = nemR3DarwinMsrSetNative(pVCpu, MSR_K8_LSTAR); AssertRCReturn(rc, rc);
2499 rc = nemR3DarwinMsrSetNative(pVCpu, MSR_K6_STAR); AssertRCReturn(rc, rc);
2500 rc = nemR3DarwinMsrSetNative(pVCpu, MSR_K8_SF_MASK); AssertRCReturn(rc, rc);
2501 rc = nemR3DarwinMsrSetNative(pVCpu, MSR_K8_KERNEL_GS_BASE); AssertRCReturn(rc, rc);
2502
2503 /* Required for enabling the RDTSCP instruction. */
2504 rc = nemR3DarwinMsrSetNative(pVCpu, MSR_K8_TSC_AUX); AssertRCReturn(rc, rc);
2505
2506 /* Last Branch Record. */
2507 if (pVM->nem.s.fLbr)
2508 {
2509 uint32_t const idFromIpMsrStart = pVM->nem.s.idLbrFromIpMsrFirst;
2510 uint32_t const idToIpMsrStart = pVM->nem.s.idLbrToIpMsrFirst;
2511 uint32_t const idInfoMsrStart = pVM->nem.s.idLbrInfoMsrFirst;
2512 uint32_t const cLbrStack = pVM->nem.s.idLbrFromIpMsrLast - pVM->nem.s.idLbrFromIpMsrFirst + 1;
2513 Assert(cLbrStack <= 32);
2514 for (uint32_t i = 0; i < cLbrStack; i++)
2515 {
2516 rc = nemR3DarwinMsrSetManaged(pVCpu, idFromIpMsrStart + i, HV_MSR_READ | HV_MSR_WRITE);
2517 AssertRCReturn(rc, rc);
2518
2519 /* Some CPUs don't have a Branch-To-IP MSR (P4 and related Xeons). */
2520 if (idToIpMsrStart != 0)
2521 {
2522 rc = nemR3DarwinMsrSetManaged(pVCpu, idToIpMsrStart + i, HV_MSR_READ | HV_MSR_WRITE);
2523 AssertRCReturn(rc, rc);
2524 }
2525
2526 if (idInfoMsrStart != 0)
2527 {
2528 rc = nemR3DarwinMsrSetManaged(pVCpu, idInfoMsrStart + i, HV_MSR_READ | HV_MSR_WRITE);
2529 AssertRCReturn(rc, rc);
2530 }
2531 }
2532
2533 rc = nemR3DarwinMsrSetManaged(pVCpu, pVM->nem.s.idLbrTosMsr, HV_MSR_READ | HV_MSR_WRITE);
2534 AssertRCReturn(rc, rc);
2535
2536 if (pVM->nem.s.idLerFromIpMsr)
2537 {
2538 rc = nemR3DarwinMsrSetManaged(pVCpu, pVM->nem.s.idLerFromIpMsr, HV_MSR_READ | HV_MSR_WRITE);
2539 AssertRCReturn(rc, rc);
2540 }
2541
2542 if (pVM->nem.s.idLerToIpMsr)
2543 {
2544 rc = nemR3DarwinMsrSetManaged(pVCpu, pVM->nem.s.idLerToIpMsr, HV_MSR_READ | HV_MSR_WRITE);
2545 AssertRCReturn(rc, rc);
2546 }
2547
2548 if (pVM->nem.s.idLbrSelectMsr)
2549 {
2550 rc = nemR3DarwinMsrSetManaged(pVCpu, pVM->nem.s.idLbrSelectMsr, HV_MSR_READ | HV_MSR_WRITE);
2551 AssertRCReturn(rc, rc);
2552 }
2553 }
2554
2555 return VINF_SUCCESS;
2556}
2557
2558
2559/**
2560 * Sets up processor-based VM-execution controls in the VMCS.
2561 *
2562 * @returns VBox status code.
2563 * @param pVCpu The cross context virtual CPU structure.
2564 * @param pVmcsInfo The VMCS info. object.
2565 */
2566static int nemR3DarwinVmxSetupVmcsProcCtls(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2567{
2568 uint32_t fVal = g_HmMsrs.u.vmx.ProcCtls.n.allowed0; /* Bits set here must be set in the VMCS. */
2569 uint32_t const fZap = g_HmMsrs.u.vmx.ProcCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2570
2571 fVal |= VMX_PROC_CTLS_HLT_EXIT /* HLT causes a VM-exit. */
2572// | VMX_PROC_CTLS_USE_TSC_OFFSETTING /* Use TSC-offsetting. */
2573 | VMX_PROC_CTLS_MOV_DR_EXIT /* MOV DRx causes a VM-exit. */
2574 | VMX_PROC_CTLS_UNCOND_IO_EXIT /* All IO instructions cause a VM-exit. */
2575 | VMX_PROC_CTLS_RDPMC_EXIT /* RDPMC causes a VM-exit. */
2576 | VMX_PROC_CTLS_MONITOR_EXIT /* MONITOR causes a VM-exit. */
2577 | VMX_PROC_CTLS_MWAIT_EXIT; /* MWAIT causes a VM-exit. */
2578
2579#ifdef HMVMX_ALWAYS_INTERCEPT_CR3_ACCESS
2580 fVal |= VMX_PROC_CTLS_CR3_LOAD_EXIT
2581 | VMX_PROC_CTLS_CR3_STORE_EXIT;
2582#endif
2583
2584    /* We toggle VMX_PROC_CTLS_MOV_DR_EXIT later; check that it is not -always- required to be set or cleared. */
2585 if ( !(g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_MOV_DR_EXIT)
2586 || (g_HmMsrs.u.vmx.ProcCtls.n.allowed0 & VMX_PROC_CTLS_MOV_DR_EXIT))
2587 {
2588 pVCpu->nem.s.u32HMError = VMX_UFC_CTRL_PROC_MOV_DRX_EXIT;
2589 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2590 }
2591
2592 /* Use the secondary processor-based VM-execution controls if supported by the CPU. */
2593 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
2594 fVal |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;
2595
2596 if ((fVal & fZap) != fVal)
2597 {
2598 LogRelFunc(("Invalid processor-based VM-execution controls combo! cpu=%#RX32 fVal=%#RX32 fZap=%#RX32\n",
2599 g_HmMsrs.u.vmx.ProcCtls.n.allowed0, fVal, fZap));
2600 pVCpu->nem.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC;
2601 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2602 }
2603
2604 /* Commit it to the VMCS and update our cache. */
2605 int rc = nemR3DarwinWriteVmcs32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, fVal);
2606 AssertRC(rc);
2607 pVmcsInfo->u32ProcCtls = fVal;
2608
2609 /* Set up MSR permissions that don't change through the lifetime of the VM. */
2610 rc = nemR3DarwinSetupVmcsMsrPermissions(pVCpu, pVmcsInfo);
2611 AssertRCReturn(rc, rc);
2612
2613 /*
2614 * Set up secondary processor-based VM-execution controls
2615     * (we assume the CPU always supports it as we rely on unrestricted guest execution support).
2616 */
2617 Assert(pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_SECONDARY_CTLS);
2618 return nemR3DarwinVmxSetupVmcsProcCtls2(pVCpu, pVmcsInfo);
2619}
2620
2621
2622/**
2623 * Sets up miscellaneous (everything other than Pin, Processor and secondary
2624 * Processor-based VM-execution) control fields in the VMCS.
2625 *
2626 * @returns VBox status code.
2627 * @param pVCpu The cross context virtual CPU structure.
2628 * @param pVmcsInfo The VMCS info. object.
2629 */
2630static int nemR3DarwinVmxSetupVmcsMiscCtls(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2631{
2632 int rc = VINF_SUCCESS;
2633 //rc = hmR0VmxSetupVmcsAutoLoadStoreMsrAddrs(pVmcsInfo); TODO
2634 if (RT_SUCCESS(rc))
2635 {
2636 uint64_t const u64Cr0Mask = vmxHCGetFixedCr0Mask(pVCpu);
2637 uint64_t const u64Cr4Mask = vmxHCGetFixedCr4Mask(pVCpu);
2638
2639 rc = nemR3DarwinWriteVmcs64(pVCpu, VMX_VMCS_CTRL_CR0_MASK, u64Cr0Mask); AssertRC(rc);
2640 rc = nemR3DarwinWriteVmcs64(pVCpu, VMX_VMCS_CTRL_CR4_MASK, u64Cr4Mask); AssertRC(rc);
2641
2642 pVmcsInfo->u64Cr0Mask = u64Cr0Mask;
2643 pVmcsInfo->u64Cr4Mask = u64Cr4Mask;
2644
2645 if (pVCpu->CTX_SUFF(pVM)->nem.s.fLbr)
2646 {
2647 rc = nemR3DarwinWriteVmcs64(pVCpu, VMX_VMCS64_GUEST_DEBUGCTL_FULL, MSR_IA32_DEBUGCTL_LBR);
2648 AssertRC(rc);
2649 }
2650 return VINF_SUCCESS;
2651 }
2652 else
2653 LogRelFunc(("Failed to initialize VMCS auto-load/store MSR addresses. rc=%Rrc\n", rc));
2654 return rc;
2655}
2656
2657
2658/**
2659 * Sets up the initial exception bitmap in the VMCS based on static conditions.
2660 *
2661 * We shall setup those exception intercepts that don't change during the
2662 * lifetime of the VM here. The rest are done dynamically while loading the
2663 * guest state.
2664 *
2665 * @param pVCpu The cross context virtual CPU structure.
2666 * @param pVmcsInfo The VMCS info. object.
2667 */
2668static void nemR3DarwinVmxSetupVmcsXcptBitmap(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo)
2669{
2670 /*
2671 * The following exceptions are always intercepted:
2672 *
2673 * #AC - To prevent the guest from hanging the CPU and for dealing with
2674 * split-lock detecting host configs.
2675 * #DB - To maintain the DR6 state even when intercepting DRx reads/writes and
2676 * recursive #DBs can cause a CPU hang.
2677 */
2678 uint32_t const uXcptBitmap = RT_BIT(X86_XCPT_AC)
2679 | RT_BIT(X86_XCPT_DB);
2680
2681 /* Commit it to the VMCS. */
2682 int rc = nemR3DarwinWriteVmcs32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, uXcptBitmap);
2683 AssertRC(rc);
2684
2685 /* Update our cache of the exception bitmap. */
2686 pVmcsInfo->u32XcptBitmap = uXcptBitmap;
2687}
2688
2689
2690/**
2691 * Initialize the VMCS information field for the given vCPU.
2692 *
2693 * @returns VBox status code.
2694 * @param pVCpu The cross context virtual CPU structure of the
2695 * calling EMT.
2696 */
2697static int nemR3DarwinInitVmcs(PVMCPU pVCpu)
2698{
2699 int rc = nemR3DarwinVmxSetupVmcsPinCtls(pVCpu, &pVCpu->nem.s.VmcsInfo);
2700 if (RT_SUCCESS(rc))
2701 {
2702 rc = nemR3DarwinVmxSetupVmcsProcCtls(pVCpu, &pVCpu->nem.s.VmcsInfo);
2703 if (RT_SUCCESS(rc))
2704 {
2705 rc = nemR3DarwinVmxSetupVmcsMiscCtls(pVCpu, &pVCpu->nem.s.VmcsInfo);
2706 if (RT_SUCCESS(rc))
2707 {
2708 rc = nemR3DarwinReadVmcs32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &pVCpu->nem.s.VmcsInfo.u32EntryCtls);
2709 if (RT_SUCCESS(rc))
2710 {
2711 rc = nemR3DarwinReadVmcs32(pVCpu, VMX_VMCS32_CTRL_EXIT, &pVCpu->nem.s.VmcsInfo.u32ExitCtls);
2712 if (RT_SUCCESS(rc))
2713 {
2714 nemR3DarwinVmxSetupVmcsXcptBitmap(pVCpu, &pVCpu->nem.s.VmcsInfo);
2715 return VINF_SUCCESS;
2716 }
2717 else
2718 LogRelFunc(("Failed to read the exit controls. rc=%Rrc\n", rc));
2719 }
2720 else
2721 LogRelFunc(("Failed to read the entry controls. rc=%Rrc\n", rc));
2722 }
2723 else
2724 LogRelFunc(("Failed to setup miscellaneous controls. rc=%Rrc\n", rc));
2725 }
2726 else
2727 LogRelFunc(("Failed to setup processor-based VM-execution controls. rc=%Rrc\n", rc));
2728 }
2729 else
2730 LogRelFunc(("Failed to setup pin-based controls. rc=%Rrc\n", rc));
2731
2732 return rc;
2733}
2734
2735
2736/**
2737 * Registers statistics for the given vCPU.
2738 *
2739 * @returns VBox status code.
2740 * @param pVM The cross context VM structure.
2741 * @param idCpu The CPU ID.
2742 * @param pNemCpu The NEM CPU structure.
2743 */
2744static int nemR3DarwinStatisticsRegister(PVM pVM, VMCPUID idCpu, PNEMCPU pNemCpu)
2745{
2746#define NEM_REG_STAT(a_pVar, a_enmType, s_enmVisibility, a_enmUnit, a_szNmFmt, a_szDesc) do { \
2747 int rc = STAMR3RegisterF(pVM, a_pVar, a_enmType, s_enmVisibility, a_enmUnit, a_szDesc, a_szNmFmt, idCpu); \
2748 AssertRC(rc); \
2749 } while (0)
2750#define NEM_REG_PROFILE(a_pVar, a_szNmFmt, a_szDesc) \
2751 NEM_REG_STAT(a_pVar, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, a_szNmFmt, a_szDesc)
2752#define NEM_REG_COUNTER(a, b, desc) NEM_REG_STAT(a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, b, desc)
2753
2754 PVMXSTATISTICS const pVmxStats = pNemCpu->pVmxStats;
2755
2756 NEM_REG_COUNTER(&pVmxStats->StatExitCR0Read, "/NEM/CPU%u/Exit/Instr/CR-Read/CR0", "CR0 read.");
2757 NEM_REG_COUNTER(&pVmxStats->StatExitCR2Read, "/NEM/CPU%u/Exit/Instr/CR-Read/CR2", "CR2 read.");
2758 NEM_REG_COUNTER(&pVmxStats->StatExitCR3Read, "/NEM/CPU%u/Exit/Instr/CR-Read/CR3", "CR3 read.");
2759 NEM_REG_COUNTER(&pVmxStats->StatExitCR4Read, "/NEM/CPU%u/Exit/Instr/CR-Read/CR4", "CR4 read.");
2760 NEM_REG_COUNTER(&pVmxStats->StatExitCR8Read, "/NEM/CPU%u/Exit/Instr/CR-Read/CR8", "CR8 read.");
2761 NEM_REG_COUNTER(&pVmxStats->StatExitCR0Write, "/NEM/CPU%u/Exit/Instr/CR-Write/CR0", "CR0 write.");
2762 NEM_REG_COUNTER(&pVmxStats->StatExitCR2Write, "/NEM/CPU%u/Exit/Instr/CR-Write/CR2", "CR2 write.");
2763 NEM_REG_COUNTER(&pVmxStats->StatExitCR3Write, "/NEM/CPU%u/Exit/Instr/CR-Write/CR3", "CR3 write.");
2764 NEM_REG_COUNTER(&pVmxStats->StatExitCR4Write, "/NEM/CPU%u/Exit/Instr/CR-Write/CR4", "CR4 write.");
2765 NEM_REG_COUNTER(&pVmxStats->StatExitCR8Write, "/NEM/CPU%u/Exit/Instr/CR-Write/CR8", "CR8 write.");
2766
2767 NEM_REG_COUNTER(&pVmxStats->StatExitAll, "/NEM/CPU%u/Exit/All", "Total exits (including nested-guest exits).");
2768
2769 NEM_REG_COUNTER(&pVmxStats->StatImportGuestStateFallback, "/NEM/CPU%u/ImportGuestStateFallback", "Times vmxHCImportGuestState took the fallback code path.");
2770 NEM_REG_COUNTER(&pVmxStats->StatReadToTransientFallback, "/NEM/CPU%u/ReadToTransientFallback", "Times vmxHCReadToTransient took the fallback code path.");
2771
2772#ifdef VBOX_WITH_STATISTICS
2773 NEM_REG_PROFILE(&pNemCpu->StatProfGstStateImport, "/NEM/CPU%u/ImportGuestState", "Profiling of importing guest state from hardware after VM-exit.");
2774    NEM_REG_PROFILE(&pNemCpu->StatProfGstStateExport, "/NEM/CPU%u/ExportGuestState", "Profiling of exporting guest state to hardware before VM-entry.");
2775
2776 for (int j = 0; j < MAX_EXITREASON_STAT; j++)
2777 {
2778 const char *pszExitName = HMGetVmxExitName(j);
2779 if (pszExitName)
2780 {
2781 int rc = STAMR3RegisterF(pVM, &pVmxStats->aStatExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED,
2782 STAMUNIT_OCCURENCES, pszExitName, "/NEM/CPU%u/Exit/Reason/%02x", idCpu, j);
2783 AssertRCReturn(rc, rc);
2784 }
2785 }
2786#endif
2787
2788 return VINF_SUCCESS;
2789
2790#undef NEM_REG_COUNTER
2791#undef NEM_REG_PROFILE
2792#undef NEM_REG_STAT
2793}
2794
2795
2796/**
2797 * Displays the HM Last-Branch-Record info. for the guest.
2798 *
2799 * @param pVM The cross context VM structure.
2800 * @param pHlp The info helper functions.
2801 * @param pszArgs Arguments, ignored.
2802 */
2803static DECLCALLBACK(void) nemR3DarwinInfoLbr(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
2804{
2805 NOREF(pszArgs);
2806 PVMCPU pVCpu = VMMGetCpu(pVM);
2807 if (!pVCpu)
2808 pVCpu = pVM->apCpusR3[0];
2809
2810 Assert(pVM->nem.s.fLbr);
2811
2812 PCVMXVMCSINFOSHARED pVmcsInfoShared = &pVCpu->nem.s.vmx.VmcsInfo;
2813 uint32_t const cLbrStack = pVM->nem.s.idLbrFromIpMsrLast - pVM->nem.s.idLbrFromIpMsrFirst + 1;
2814
2815 /** @todo r=ramshankar: The index technically varies depending on the CPU, but
2816 * 0xf should cover everything we support thus far. Fix if necessary
2817 * later. */
2818 uint32_t const idxTopOfStack = pVmcsInfoShared->u64LbrTosMsr & 0xf;
2819 if (idxTopOfStack > cLbrStack)
2820 {
2821 pHlp->pfnPrintf(pHlp, "Top-of-stack LBR MSR seems corrupt (index=%u, msr=%#RX64) expected index < %u\n",
2822 idxTopOfStack, pVmcsInfoShared->u64LbrTosMsr, cLbrStack);
2823 return;
2824 }
2825
2826 /*
2827 * Dump the circular buffer of LBR records starting from the most recent record (contained in idxTopOfStack).
2828 */
2829 pHlp->pfnPrintf(pHlp, "CPU[%u]: LBRs (most-recent first)\n", pVCpu->idCpu);
2830 if (pVM->nem.s.idLerFromIpMsr)
2831 pHlp->pfnPrintf(pHlp, "LER: From IP=%#016RX64 - To IP=%#016RX64\n",
2832 pVmcsInfoShared->u64LerFromIpMsr, pVmcsInfoShared->u64LerToIpMsr);
2833 uint32_t idxCurrent = idxTopOfStack;
2834 Assert(idxTopOfStack < cLbrStack);
2835 Assert(RT_ELEMENTS(pVmcsInfoShared->au64LbrFromIpMsr) <= cLbrStack);
2836 Assert(RT_ELEMENTS(pVmcsInfoShared->au64LbrToIpMsr) <= cLbrStack);
2837 for (;;)
2838 {
2839 if (pVM->nem.s.idLbrToIpMsrFirst)
2840 pHlp->pfnPrintf(pHlp, " Branch (%2u): From IP=%#016RX64 - To IP=%#016RX64 (Info: %#016RX64)\n", idxCurrent,
2841 pVmcsInfoShared->au64LbrFromIpMsr[idxCurrent],
2842 pVmcsInfoShared->au64LbrToIpMsr[idxCurrent],
2843 pVmcsInfoShared->au64LbrInfoMsr[idxCurrent]);
2844 else
2845 pHlp->pfnPrintf(pHlp, " Branch (%2u): LBR=%#RX64\n", idxCurrent, pVmcsInfoShared->au64LbrFromIpMsr[idxCurrent]);
2846
2847 idxCurrent = (idxCurrent - 1) % cLbrStack;
2848 if (idxCurrent == idxTopOfStack)
2849 break;
2850 }
2851}
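/*
 * Annotation: this handler is registered under the name "lbr" in nemR3NativeInit()
 * below (via DBGFR3InfoRegisterInternalEx), so the LBR stack can presumably be dumped
 * from the VM debugger with the 'info lbr' command.
 */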
2852
2853
2854/**
2855 * Try initialize the native API.
2856 *
2857 * This may only do part of the job, more can be done in
2858 * nemR3NativeInitAfterCPUM() and nemR3NativeInitCompleted().
2859 *
2860 * @returns VBox status code.
2861 * @param pVM The cross context VM structure.
2862 * @param fFallback Whether we're in fallback mode or use-NEM mode. In
2863 * the latter we'll fail if we cannot initialize.
2864 * @param fForced Whether the HMForced flag is set and we should
2865 * fail if we cannot initialize.
2866 */
2867int nemR3NativeInit(PVM pVM, bool fFallback, bool fForced)
2868{
2869 AssertReturn(!pVM->nem.s.fCreatedVm, VERR_WRONG_ORDER);
2870
2871 /*
2872 * Some state init.
2873 */
2874 PCFGMNODE pCfgNem = CFGMR3GetChild(CFGMR3GetRoot(pVM), "NEM/");
2875
2876 /** @cfgm{/NEM/VmxPleGap, uint32_t, 0}
2877 * The pause-filter exiting gap in TSC ticks. When the number of ticks between
2878 * two successive PAUSE instructions exceeds VmxPleGap, the CPU considers the
2879 * latest PAUSE instruction to be start of a new PAUSE loop.
2880 */
2881 int rc = CFGMR3QueryU32Def(pCfgNem, "VmxPleGap", &pVM->nem.s.cPleGapTicks, 0);
2882 AssertRCReturn(rc, rc);
2883
2884 /** @cfgm{/NEM/VmxPleWindow, uint32_t, 0}
2885 * The pause-filter exiting window in TSC ticks. When the number of ticks
2886 * between the current PAUSE instruction and first PAUSE of a loop exceeds
2887 * VmxPleWindow, a VM-exit is triggered.
2888 *
2889 * Setting VmxPleGap and VmxPleWindow to 0 disables pause-filter exiting.
2890 */
2891 rc = CFGMR3QueryU32Def(pCfgNem, "VmxPleWindow", &pVM->nem.s.cPleWindowTicks, 0);
2892 AssertRCReturn(rc, rc);
2893
2894 /** @cfgm{/NEM/VmxLbr, bool, false}
2895 * Whether to enable LBR for the guest. This is disabled by default as it's only
2896 * useful while debugging and enabling it causes a noticeable performance hit. */
2897 rc = CFGMR3QueryBoolDef(pCfgNem, "VmxLbr", &pVM->nem.s.fLbr, false);
2898 AssertRCReturn(rc, rc);
2899
2900 /*
2901 * Error state.
2902 * The error message will be non-empty on failure and 'rc' will be set too.
2903 */
2904 RTERRINFOSTATIC ErrInfo;
2905 PRTERRINFO pErrInfo = RTErrInfoInitStatic(&ErrInfo);
2906 rc = nemR3DarwinLoadHv(fForced, pErrInfo);
2907 if (RT_SUCCESS(rc))
2908 {
2909 if ( !hv_vcpu_enable_managed_msr
2910 && pVM->nem.s.fLbr)
2911 {
2912 LogRel(("NEM: LBR recording is disabled because the Hypervisor API misses hv_vcpu_enable_managed_msr/hv_vcpu_set_msr_access functionality\n"));
2913 pVM->nem.s.fLbr = false;
2914 }
2915
2916 if (hv_vcpu_run_until)
2917 {
2918 struct mach_timebase_info TimeInfo;
2919
2920 if (mach_timebase_info(&TimeInfo) == KERN_SUCCESS)
2921 {
2922 pVM->nem.s.cMachTimePerNs = RT_MIN(1, (double)TimeInfo.denom / (double)TimeInfo.numer);
2923 LogRel(("NEM: cMachTimePerNs=%llu (TimeInfo.numer=%u TimeInfo.denom=%u)\n",
2924 pVM->nem.s.cMachTimePerNs, TimeInfo.numer, TimeInfo.denom));
2925 }
2926 else
2927 hv_vcpu_run_until = NULL; /* To avoid running forever (TM asserts when the guest runs for longer than 4 seconds). */
2928 }
2929
2930 hv_return_t hrc = hv_vm_create(HV_VM_DEFAULT);
2931 if (hrc == HV_SUCCESS)
2932 {
2933 if (hv_vm_space_create)
2934 {
2935 hrc = hv_vm_space_create(&pVM->nem.s.uVmAsid);
2936 if (hrc == HV_SUCCESS)
2937 {
2938 LogRel(("NEM: Successfully created ASID: %u\n", pVM->nem.s.uVmAsid));
2939 pVM->nem.s.fCreatedAsid = true;
2940 }
2941 else
2942                LogRel(("NEM: Failed to create ASID for VM (hrc=%#x), continuing...\n", hrc));
2943 }
2944 pVM->nem.s.fCreatedVm = true;
2945
2946 /* Register release statistics */
2947 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
2948 {
2949 PNEMCPU pNemCpu = &pVM->apCpusR3[idCpu]->nem.s;
2950 PVMXSTATISTICS pVmxStats = (PVMXSTATISTICS)RTMemAllocZ(sizeof(*pVmxStats));
2951 if (RT_LIKELY(pVmxStats))
2952 {
2953 pNemCpu->pVmxStats = pVmxStats;
2954 rc = nemR3DarwinStatisticsRegister(pVM, idCpu, pNemCpu);
2955 AssertRC(rc);
2956 }
2957 else
2958 {
2959 rc = VERR_NO_MEMORY;
2960 break;
2961 }
2962 }
2963
2964 if (RT_SUCCESS(rc))
2965 {
2966 VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_NATIVE_API);
2967 Log(("NEM: Marked active!\n"));
2968 PGMR3EnableNemMode(pVM);
2969 }
2970 }
2971 else
2972 rc = RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
2973 "hv_vm_create() failed: %#x", hrc);
2974 }
2975
2976 /*
2977 * We only fail if in forced mode, otherwise just log the complaint and return.
2978 */
2979 Assert(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API || RTErrInfoIsSet(pErrInfo));
2980 if ( (fForced || !fFallback)
2981 && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NATIVE_API)
2982 return VMSetError(pVM, RT_SUCCESS_NP(rc) ? VERR_NEM_NOT_AVAILABLE : rc, RT_SRC_POS, "%s", pErrInfo->pszMsg);
2983
2984 if (pVM->nem.s.fLbr)
2985 {
2986 rc = DBGFR3InfoRegisterInternalEx(pVM, "lbr", "Dumps the NEM LBR info.", nemR3DarwinInfoLbr, DBGFINFO_FLAGS_ALL_EMTS);
2987 AssertRCReturn(rc, rc);
2988 }
2989
2990 if (RTErrInfoIsSet(pErrInfo))
2991 LogRel(("NEM: Not available: %s\n", pErrInfo->pszMsg));
2992 return VINF_SUCCESS;
2993}
2994
2995
2996/**
2997 * Worker to create the vCPU handle on the EMT running it later on (as required by HV).
2998 *
2999 * @returns VBox status code
3000 * @param pVM The VM handle.
3001 * @param pVCpu The vCPU handle.
3002 * @param idCpu ID of the CPU to create.
3003 */
3004static DECLCALLBACK(int) nemR3DarwinNativeInitVCpuOnEmt(PVM pVM, PVMCPU pVCpu, VMCPUID idCpu)
3005{
3006 hv_return_t hrc = hv_vcpu_create(&pVCpu->nem.s.hVCpuId, HV_VCPU_DEFAULT);
3007 if (hrc != HV_SUCCESS)
3008 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
3009 "Call to hv_vcpu_create failed on vCPU %u: %#x (%Rrc)", idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));
3010
3011 if (idCpu == 0)
3012 {
3013        /* First call initializes the MSR structure holding the capabilities of the host CPU. */
3014 int rc = nemR3DarwinCapsInit();
3015 AssertRCReturn(rc, rc);
3016
3017 if (hv_vmx_vcpu_get_cap_write_vmcs)
3018 {
3019 /* Log the VMCS field write capabilities. */
3020 for (uint32_t i = 0; i < RT_ELEMENTS(g_aVmcsFieldsCap); i++)
3021 {
3022 uint64_t u64Allowed0 = 0;
3023 uint64_t u64Allowed1 = 0;
3024
3025 hrc = hv_vmx_vcpu_get_cap_write_vmcs(pVCpu->nem.s.hVCpuId, g_aVmcsFieldsCap[i].u32VmcsFieldId,
3026 &u64Allowed0, &u64Allowed1);
3027 if (hrc == HV_SUCCESS)
3028 {
3029 if (g_aVmcsFieldsCap[i].f64Bit)
3030 LogRel(("NEM: %s = (allowed_0=%#016RX64 allowed_1=%#016RX64)\n",
3031 g_aVmcsFieldsCap[i].pszVmcsField, u64Allowed0, u64Allowed1));
3032 else
3033 LogRel(("NEM: %s = (allowed_0=%#08RX32 allowed_1=%#08RX32)\n",
3034 g_aVmcsFieldsCap[i].pszVmcsField, (uint32_t)u64Allowed0, (uint32_t)u64Allowed1));
3035
3036 uint32_t cBits = g_aVmcsFieldsCap[i].f64Bit ? 64 : 32;
3037 for (uint32_t iBit = 0; iBit < cBits; iBit++)
3038 {
3039 bool fAllowed0 = RT_BOOL(u64Allowed0 & RT_BIT_64(iBit));
3040 bool fAllowed1 = RT_BOOL(u64Allowed1 & RT_BIT_64(iBit));
3041
3042 if (!fAllowed0 && !fAllowed1)
3043 LogRel(("NEM: Bit %02u = Must NOT be set\n", iBit));
3044 else if (!fAllowed0 && fAllowed1)
3045 LogRel(("NEM: Bit %02u = Can be set or not be set\n", iBit));
3046 else if (fAllowed0 && !fAllowed1)
3047 LogRel(("NEM: Bit %02u = UNDEFINED (AppleHV error)!\n", iBit));
3048 else if (fAllowed0 && fAllowed1)
3049 LogRel(("NEM: Bit %02u = MUST be set\n", iBit));
3050 else
3051 AssertFailed();
3052 }
3053 }
3054 else
3055 LogRel(("NEM: %s = failed to query (hrc=%d)\n", g_aVmcsFieldsCap[i].pszVmcsField, hrc));
3056 }
3057 }
3058 }
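/*
 * Descriptive note: the (allowed_0, allowed_1) pairs logged above follow the usual VMX
 * capability-MSR convention, i.e. per control bit:
 *
 *     allowed_0  allowed_1   resulting constraint
 *     ---------  ---------   --------------------
 *         0          0       bit must be 0
 *         0          1       bit may be 0 or 1
 *         1          1       bit must be 1
 *         1          0       contradictory (reported as an AppleHV error above)
 */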
3059
3060 int rc = nemR3DarwinInitVmcs(pVCpu);
3061 AssertRCReturn(rc, rc);
3062
3063 if (pVM->nem.s.fCreatedAsid)
3064 {
3065 hrc = hv_vcpu_set_space(pVCpu->nem.s.hVCpuId, pVM->nem.s.uVmAsid);
3066 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_VM_CREATE_FAILED);
3067 }
3068
3069 ASMAtomicUoOrU64(&pVCpu->nem.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
3070
3071 return VINF_SUCCESS;
3072}
3073
3074
3075/**
3076 * Worker to destroy the vCPU handle on the EMT that ran it (as required by HV).
3077 *
3078 * @returns VBox status code
3079 * @param pVCpu The vCPU handle.
3080 */
3081static DECLCALLBACK(int) nemR3DarwinNativeTermVCpuOnEmt(PVMCPU pVCpu)
3082{
3083 hv_return_t hrc = hv_vcpu_set_space(pVCpu->nem.s.hVCpuId, 0 /*asid*/);
3084 Assert(hrc == HV_SUCCESS);
3085
3086 hrc = hv_vcpu_destroy(pVCpu->nem.s.hVCpuId);
3087 Assert(hrc == HV_SUCCESS); RT_NOREF(hrc);
3088 return VINF_SUCCESS;
3089}
3090
3091
3092/**
3093 * Worker to set up the TPR shadowing feature if it is available on the CPU and the VM has an APIC enabled.
3094 *
3095 * @returns VBox status code
3096 * @param pVM The VM handle.
3097 * @param pVCpu The vCPU handle.
3098 */
3099static DECLCALLBACK(int) nemR3DarwinNativeInitTprShadowing(PVM pVM, PVMCPU pVCpu)
3100{
3101 PVMXVMCSINFO pVmcsInfo = &pVCpu->nem.s.VmcsInfo;
3102 uint32_t fVal = pVmcsInfo->u32ProcCtls;
3103
3104 /* Use TPR shadowing if supported by the CPU. */
3105 if ( PDMHasApic(pVM)
3106 && (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_TPR_SHADOW))
3107 {
3108 fVal |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* CR8 reads from the Virtual-APIC page. */
3109 /* CR8 writes cause a VM-exit based on TPR threshold. */
3110 Assert(!(fVal & VMX_PROC_CTLS_CR8_STORE_EXIT));
3111 Assert(!(fVal & VMX_PROC_CTLS_CR8_LOAD_EXIT));
3112 }
3113 else
3114 {
3115 fVal |= VMX_PROC_CTLS_CR8_STORE_EXIT /* CR8 reads cause a VM-exit. */
3116 | VMX_PROC_CTLS_CR8_LOAD_EXIT; /* CR8 writes cause a VM-exit. */
3117 }
3118
3119 /* Commit it to the VMCS and update our cache. */
3120 int rc = nemR3DarwinWriteVmcs32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, fVal);
3121 AssertRC(rc);
3122 pVmcsInfo->u32ProcCtls = fVal;
3123
3124 return VINF_SUCCESS;
3125}
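/*
 * Descriptive note (summary of the two configurations above): with VMX_PROC_CTLS_USE_TPR_SHADOW the
 * guest reads CR8 straight from the virtual-APIC page, and CR8 writes only trigger a VM-exit when
 * they lower the TPR below the configured threshold; without it, both VMX_PROC_CTLS_CR8_STORE_EXIT
 * and VMX_PROC_CTLS_CR8_LOAD_EXIT are set so every CR8 access exits and the TPR is emulated by hand.
 */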
3126
3127
3128/**
3129 * This is called after CPUMR3Init is done.
3130 *
3131 * @returns VBox status code.
3132 * @param pVM The VM handle.
3133 */
3134int nemR3NativeInitAfterCPUM(PVM pVM)
3135{
3136 /*
3137 * Validate sanity.
3138 */
3139 AssertReturn(!pVM->nem.s.fCreatedEmts, VERR_WRONG_ORDER);
3140 AssertReturn(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API, VERR_WRONG_ORDER);
3141
3142 if (pVM->nem.s.fLbr)
3143 {
3144 int rc = nemR3DarwinSetupLbrMsrRange(pVM);
3145 AssertRCReturn(rc, rc);
3146 }
3147
3148 /*
3149 * Setup the EMTs.
3150 */
3151 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
3152 {
3153 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
3154
3155 int rc = VMR3ReqCallWait(pVM, idCpu, (PFNRT)nemR3DarwinNativeInitVCpuOnEmt, 3, pVM, pVCpu, idCpu);
3156 if (RT_FAILURE(rc))
3157 {
3158 /* Rollback. */
3159 while (idCpu--)
3160 VMR3ReqCallWait(pVM, idCpu, (PFNRT)nemR3DarwinNativeTermVCpuOnEmt, 1, pVM->apCpusR3[idCpu]);
3161
3162 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS, "Call to hv_vcpu_create failed: %Rrc", rc);
3163 }
3164 }
3165
3166 pVM->nem.s.fCreatedEmts = true;
3167 return VINF_SUCCESS;
3168}
3169
3170
3171int nemR3NativeInitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
3172{
3173 if (enmWhat == VMINITCOMPLETED_RING3)
3174 {
3175 /* Now that PDM is initialized the APIC state is known in order to enable the TPR shadowing feature on all EMTs. */
3176 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
3177 {
3178 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
3179
3180 int rc = VMR3ReqCallWait(pVM, idCpu, (PFNRT)nemR3DarwinNativeInitTprShadowing, 2, pVM, pVCpu);
3181 if (RT_FAILURE(rc))
3182 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS, "Setting up TPR shadowing failed: %Rrc", rc);
3183 }
3184 }
3185 return VINF_SUCCESS;
3186}
3187
3188
3189int nemR3NativeTerm(PVM pVM)
3190{
3191 /*
3192 * Delete the VM.
3193 */
3194
3195 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
3196 {
3197 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
3198
3199 /*
3200 * Need to do this or hv_vm_space_destroy() fails later on (on 10.15 at least). This could have been documented
3201 * in the API reference so I wouldn't have to decompile the kext to find it out, but we are talking
3202 * about Apple here unfortunately; API documentation is not their strong suit...
3203 * Of course it would have been even better to just drop the address space reference automatically when the vCPU
3204 * gets destroyed.
3205 */
3206 hv_return_t hrc = hv_vcpu_set_space(pVCpu->nem.s.hVCpuId, 0 /*asid*/);
3207 Assert(hrc == HV_SUCCESS);
3208
3209 /*
3210 * Apple's documentation states that the vCPU should be destroyed
3211 * on the thread running the vCPU but as all the other EMTs are gone
3212 * at this point, destroying the VM would hang.
3213 *
3214 * We seem to be in luck here though, as destroying apparently works
3215 * from EMT(0) as well.
3216 */
3217 hrc = hv_vcpu_destroy(pVCpu->nem.s.hVCpuId);
3218 Assert(hrc == HV_SUCCESS); RT_NOREF(hrc);
3219
3220 if (pVCpu->nem.s.pVmxStats)
3221 {
3222 RTMemFree(pVCpu->nem.s.pVmxStats);
3223 pVCpu->nem.s.pVmxStats = NULL;
3224 }
3225 }
3226
3227 pVM->nem.s.fCreatedEmts = false;
3228
3229 if (pVM->nem.s.fCreatedAsid)
3230 {
3231 hv_return_t hrc = hv_vm_space_destroy(pVM->nem.s.uVmAsid);
3232 Assert(hrc == HV_SUCCESS); RT_NOREF(hrc);
3233 pVM->nem.s.fCreatedAsid = false;
3234 }
3235
3236 if (pVM->nem.s.fCreatedVm)
3237 {
3238 hv_return_t hrc = hv_vm_destroy();
3239 if (hrc != HV_SUCCESS)
3240 LogRel(("NEM: hv_vm_destroy() failed with %#x\n", hrc));
3241
3242 pVM->nem.s.fCreatedVm = false;
3243 }
3244 return VINF_SUCCESS;
3245}
3246
3247
3248/**
3249 * VM reset notification.
3250 *
3251 * @param pVM The cross context VM structure.
3252 */
3253void nemR3NativeReset(PVM pVM)
3254{
3255 RT_NOREF(pVM);
3256}
3257
3258
3259/**
3260 * Reset CPU due to INIT IPI or hot (un)plugging.
3261 *
3262 * @param pVCpu The cross context virtual CPU structure of the CPU being
3263 * reset.
3264 * @param fInitIpi Whether this is the INIT IPI or hot (un)plugging case.
3265 */
3266void nemR3NativeResetCpu(PVMCPU pVCpu, bool fInitIpi)
3267{
3268 RT_NOREF(fInitIpi);
3269 ASMAtomicUoOrU64(&pVCpu->nem.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
3270}
3271
3272
3273/**
3274 * Runs the guest once until an exit occurs.
3275 *
3276 * @returns HV status code.
3277 * @param pVM The cross context VM structure.
3278 * @param pVCpu The cross context virtual CPU structure.
3279 * @param pVmxTransient The transient VMX execution structure.
3280 */
3281static hv_return_t nemR3DarwinRunGuest(PVM pVM, PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
3282{
3283 TMNotifyStartOfExecution(pVM, pVCpu);
3284
3285 Assert(!pVCpu->nem.s.fCtxChanged);
3286 hv_return_t hrc;
3287 if (hv_vcpu_run_until) /** @todo Configure the deadline dynamically based on when the next timer triggers. */
3288 hrc = hv_vcpu_run_until(pVCpu->nem.s.hVCpuId, mach_absolute_time() + 2 * RT_NS_1SEC_64 * pVM->nem.s.cMachTimePerNs);
3289 else
3290 hrc = hv_vcpu_run(pVCpu->nem.s.hVCpuId);
3291
3292 TMNotifyEndOfExecution(pVM, pVCpu, ASMReadTSC());
3293
3294 /*
3295 * Sync the TPR shadow with our APIC state.
3296 */
3297 if ( !pVmxTransient->fIsNestedGuest
3298 && (pVCpu->nem.s.VmcsInfo.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW))
3299 {
3300 uint64_t u64Tpr;
3301 hv_return_t hrc2 = hv_vcpu_read_register(pVCpu->nem.s.hVCpuId, HV_X86_TPR, &u64Tpr);
3302 Assert(hrc2 == HV_SUCCESS); RT_NOREF(hrc2);
3303
3304 if (pVmxTransient->u8GuestTpr != (uint8_t)u64Tpr)
3305 {
3306 int rc = APICSetTpr(pVCpu, (uint8_t)u64Tpr);
3307 AssertRC(rc);
3308 ASMAtomicUoOrU64(&pVCpu->nem.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR);
3309 }
3310 }
3311
3312 return hrc;
3313}
3314
3315
3316/**
3317 * Prepares the VM to run the guest.
3318 *
3319 * @returns Strict VBox status code.
3320 * @param pVM The cross context VM structure.
3321 * @param pVCpu The cross context virtual CPU structure.
3322 * @param pVmxTransient The VMX transient state.
3323 * @param fSingleStepping Flag whether we run in single stepping mode.
3324 */
3325static VBOXSTRICTRC nemR3DarwinPreRunGuest(PVM pVM, PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, bool fSingleStepping)
3326{
3327 /*
3328 * Check and process force flag actions, some of which might require us to go back to ring-3.
3329 */
3330 VBOXSTRICTRC rcStrict = vmxHCCheckForceFlags(pVCpu, false /*fIsNestedGuest*/, fSingleStepping);
3331 if (rcStrict == VINF_SUCCESS)
3332 { /* likely */ }
3333 else
3334 return rcStrict;
3335
3336 /*
3337 * Do not execute in HV if the A20 gate isn't enabled.
3338 */
3339 if (PGMPhysIsA20Enabled(pVCpu))
3340 { /* likely */ }
3341 else
3342 {
3343 LogFlow(("NEM/%u: breaking: A20 disabled\n", pVCpu->idCpu));
3344 return VINF_EM_RESCHEDULE_REM;
3345 }
3346
3347 /*
3348 * Evaluate events to be injected into the guest.
3349 *
3350 * Events in TRPM can be injected without inspecting the guest state.
3351 * If any new events (interrupts/NMI) are pending currently, we try to set up the
3352 * guest to cause a VM-exit the next time it is ready to receive the event.
3353 */
3354 if (TRPMHasTrap(pVCpu))
3355 vmxHCTrpmTrapToPendingEvent(pVCpu);
3356
3357 uint32_t fIntrState;
3358 rcStrict = vmxHCEvaluatePendingEvent(pVCpu, &pVCpu->nem.s.VmcsInfo, false /*fIsNestedGuest*/, &fIntrState);
3359
3360 /*
3361 * Event injection may take locks (currently the PGM lock for real-on-v86 case) and thus
3362 * needs to be done with longjmps or interrupts + preemption enabled. Event injection might
3363 * also result in triple-faulting the VM.
3364 *
3365 * With nested-guests, the above does not apply since unrestricted guest execution is a
3366 * requirement. Regardless, we do this here to avoid duplicating code elsewhere.
3367 */
3368 rcStrict = vmxHCInjectPendingEvent(pVCpu, &pVCpu->nem.s.VmcsInfo, false /*fIsNestedGuest*/, fIntrState, fSingleStepping);
3369 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3370 { /* likely */ }
3371 else
3372 return rcStrict;
3373
3374 int rc = nemR3DarwinExportGuestState(pVM, pVCpu, pVmxTransient);
3375 AssertRCReturn(rc, rc);
3376
3377 LogFlowFunc(("Running vCPU\n"));
3378 pVCpu->nem.s.Event.fPending = false;
3379 return VINF_SUCCESS;
3380}
3381
3382
3383/**
3384 * The normal runloop (no debugging features enabled).
3385 *
3386 * @returns Strict VBox status code.
3387 * @param pVM The cross context VM structure.
3388 * @param pVCpu The cross context virtual CPU structure.
3389 */
3390static VBOXSTRICTRC nemR3DarwinRunGuestNormal(PVM pVM, PVMCPU pVCpu)
3391{
3392 /*
3393 * The run loop.
3394 *
3395 * The current approach to state updating is to use the sledgehammer and sync
3396 * everything every time. This will be optimized later.
3397 */
3398 VMXTRANSIENT VmxTransient;
3399 RT_ZERO(VmxTransient);
3400 VmxTransient.pVmcsInfo = &pVCpu->nem.s.VmcsInfo;
3401
3402 /*
3403 * Poll timers and run for a bit.
3404 */
3405 /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
3406 * the whole polling job when timers have changed... */
3407 uint64_t offDeltaIgnored;
3408 uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
3409 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
3410 for (unsigned iLoop = 0;; iLoop++)
3411 {
3412 rcStrict = nemR3DarwinPreRunGuest(pVM, pVCpu, &VmxTransient, false /* fSingleStepping */);
3413 if (rcStrict != VINF_SUCCESS)
3414 break;
3415
3416 hv_return_t hrc = nemR3DarwinRunGuest(pVM, pVCpu, &VmxTransient);
3417 if (hrc == HV_SUCCESS)
3418 {
3419 /*
3420 * Deal with the message.
3421 */
3422 rcStrict = nemR3DarwinHandleExit(pVM, pVCpu, &VmxTransient);
3423 if (rcStrict == VINF_SUCCESS)
3424 { /* hopefully likely */ }
3425 else
3426 {
3427 LogFlow(("NEM/%u: breaking: nemR3DarwinHandleExit -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
3428 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
3429 break;
3430 }
3431 }
3432 else
3433 {
3434 AssertLogRelMsgFailedReturn(("hv_vcpu_run() failed for CPU #%u: %#x %u\n",
3435 pVCpu->idCpu, hrc, vmxHCCheckGuestState(pVCpu, &pVCpu->nem.s.VmcsInfo)),
3436 VERR_NEM_IPE_0);
3437 }
3438 } /* the run loop */
3439
3440 return rcStrict;
3441}
3442
3443
3444/**
3445 * Checks if any expensive dtrace probes are enabled and we should go to the
3446 * debug loop.
3447 *
3448 * @returns true if we should use debug loop, false if not.
3449 */
3450static bool nemR3DarwinAnyExpensiveProbesEnabled(void)
3451{
3452 /** @todo Check performance penalty when checking these over and over */
3453 return ( VBOXVMM_R0_HMVMX_VMEXIT_ENABLED() /* expensive too due to context */
3454 | VBOXVMM_XCPT_DE_ENABLED()
3455 | VBOXVMM_XCPT_DB_ENABLED()
3456 | VBOXVMM_XCPT_BP_ENABLED()
3457 | VBOXVMM_XCPT_OF_ENABLED()
3458 | VBOXVMM_XCPT_BR_ENABLED()
3459 | VBOXVMM_XCPT_UD_ENABLED()
3460 | VBOXVMM_XCPT_NM_ENABLED()
3461 | VBOXVMM_XCPT_DF_ENABLED()
3462 | VBOXVMM_XCPT_TS_ENABLED()
3463 | VBOXVMM_XCPT_NP_ENABLED()
3464 | VBOXVMM_XCPT_SS_ENABLED()
3465 | VBOXVMM_XCPT_GP_ENABLED()
3466 | VBOXVMM_XCPT_PF_ENABLED()
3467 | VBOXVMM_XCPT_MF_ENABLED()
3468 | VBOXVMM_XCPT_AC_ENABLED()
3469 | VBOXVMM_XCPT_XF_ENABLED()
3470 | VBOXVMM_XCPT_VE_ENABLED()
3471 | VBOXVMM_XCPT_SX_ENABLED()
3472 | VBOXVMM_INT_SOFTWARE_ENABLED()
3473 /* not available in R3 | VBOXVMM_INT_HARDWARE_ENABLED()*/
3474 ) != 0
3475 || ( VBOXVMM_INSTR_HALT_ENABLED()
3476 | VBOXVMM_INSTR_MWAIT_ENABLED()
3477 | VBOXVMM_INSTR_MONITOR_ENABLED()
3478 | VBOXVMM_INSTR_CPUID_ENABLED()
3479 | VBOXVMM_INSTR_INVD_ENABLED()
3480 | VBOXVMM_INSTR_WBINVD_ENABLED()
3481 | VBOXVMM_INSTR_INVLPG_ENABLED()
3482 | VBOXVMM_INSTR_RDTSC_ENABLED()
3483 | VBOXVMM_INSTR_RDTSCP_ENABLED()
3484 | VBOXVMM_INSTR_RDPMC_ENABLED()
3485 | VBOXVMM_INSTR_RDMSR_ENABLED()
3486 | VBOXVMM_INSTR_WRMSR_ENABLED()
3487 | VBOXVMM_INSTR_CRX_READ_ENABLED()
3488 | VBOXVMM_INSTR_CRX_WRITE_ENABLED()
3489 | VBOXVMM_INSTR_DRX_READ_ENABLED()
3490 | VBOXVMM_INSTR_DRX_WRITE_ENABLED()
3491 | VBOXVMM_INSTR_PAUSE_ENABLED()
3492 | VBOXVMM_INSTR_XSETBV_ENABLED()
3493 | VBOXVMM_INSTR_SIDT_ENABLED()
3494 | VBOXVMM_INSTR_LIDT_ENABLED()
3495 | VBOXVMM_INSTR_SGDT_ENABLED()
3496 | VBOXVMM_INSTR_LGDT_ENABLED()
3497 | VBOXVMM_INSTR_SLDT_ENABLED()
3498 | VBOXVMM_INSTR_LLDT_ENABLED()
3499 | VBOXVMM_INSTR_STR_ENABLED()
3500 | VBOXVMM_INSTR_LTR_ENABLED()
3501 | VBOXVMM_INSTR_GETSEC_ENABLED()
3502 | VBOXVMM_INSTR_RSM_ENABLED()
3503 | VBOXVMM_INSTR_RDRAND_ENABLED()
3504 | VBOXVMM_INSTR_RDSEED_ENABLED()
3505 | VBOXVMM_INSTR_XSAVES_ENABLED()
3506 | VBOXVMM_INSTR_XRSTORS_ENABLED()
3507 | VBOXVMM_INSTR_VMM_CALL_ENABLED()
3508 | VBOXVMM_INSTR_VMX_VMCLEAR_ENABLED()
3509 | VBOXVMM_INSTR_VMX_VMLAUNCH_ENABLED()
3510 | VBOXVMM_INSTR_VMX_VMPTRLD_ENABLED()
3511 | VBOXVMM_INSTR_VMX_VMPTRST_ENABLED()
3512 | VBOXVMM_INSTR_VMX_VMREAD_ENABLED()
3513 | VBOXVMM_INSTR_VMX_VMRESUME_ENABLED()
3514 | VBOXVMM_INSTR_VMX_VMWRITE_ENABLED()
3515 | VBOXVMM_INSTR_VMX_VMXOFF_ENABLED()
3516 | VBOXVMM_INSTR_VMX_VMXON_ENABLED()
3517 | VBOXVMM_INSTR_VMX_VMFUNC_ENABLED()
3518 | VBOXVMM_INSTR_VMX_INVEPT_ENABLED()
3519 | VBOXVMM_INSTR_VMX_INVVPID_ENABLED()
3520 | VBOXVMM_INSTR_VMX_INVPCID_ENABLED()
3521 ) != 0
3522 || ( VBOXVMM_EXIT_TASK_SWITCH_ENABLED()
3523 | VBOXVMM_EXIT_HALT_ENABLED()
3524 | VBOXVMM_EXIT_MWAIT_ENABLED()
3525 | VBOXVMM_EXIT_MONITOR_ENABLED()
3526 | VBOXVMM_EXIT_CPUID_ENABLED()
3527 | VBOXVMM_EXIT_INVD_ENABLED()
3528 | VBOXVMM_EXIT_WBINVD_ENABLED()
3529 | VBOXVMM_EXIT_INVLPG_ENABLED()
3530 | VBOXVMM_EXIT_RDTSC_ENABLED()
3531 | VBOXVMM_EXIT_RDTSCP_ENABLED()
3532 | VBOXVMM_EXIT_RDPMC_ENABLED()
3533 | VBOXVMM_EXIT_RDMSR_ENABLED()
3534 | VBOXVMM_EXIT_WRMSR_ENABLED()
3535 | VBOXVMM_EXIT_CRX_READ_ENABLED()
3536 | VBOXVMM_EXIT_CRX_WRITE_ENABLED()
3537 | VBOXVMM_EXIT_DRX_READ_ENABLED()
3538 | VBOXVMM_EXIT_DRX_WRITE_ENABLED()
3539 | VBOXVMM_EXIT_PAUSE_ENABLED()
3540 | VBOXVMM_EXIT_XSETBV_ENABLED()
3541 | VBOXVMM_EXIT_SIDT_ENABLED()
3542 | VBOXVMM_EXIT_LIDT_ENABLED()
3543 | VBOXVMM_EXIT_SGDT_ENABLED()
3544 | VBOXVMM_EXIT_LGDT_ENABLED()
3545 | VBOXVMM_EXIT_SLDT_ENABLED()
3546 | VBOXVMM_EXIT_LLDT_ENABLED()
3547 | VBOXVMM_EXIT_STR_ENABLED()
3548 | VBOXVMM_EXIT_LTR_ENABLED()
3549 | VBOXVMM_EXIT_GETSEC_ENABLED()
3550 | VBOXVMM_EXIT_RSM_ENABLED()
3551 | VBOXVMM_EXIT_RDRAND_ENABLED()
3552 | VBOXVMM_EXIT_RDSEED_ENABLED()
3553 | VBOXVMM_EXIT_XSAVES_ENABLED()
3554 | VBOXVMM_EXIT_XRSTORS_ENABLED()
3555 | VBOXVMM_EXIT_VMM_CALL_ENABLED()
3556 | VBOXVMM_EXIT_VMX_VMCLEAR_ENABLED()
3557 | VBOXVMM_EXIT_VMX_VMLAUNCH_ENABLED()
3558 | VBOXVMM_EXIT_VMX_VMPTRLD_ENABLED()
3559 | VBOXVMM_EXIT_VMX_VMPTRST_ENABLED()
3560 | VBOXVMM_EXIT_VMX_VMREAD_ENABLED()
3561 | VBOXVMM_EXIT_VMX_VMRESUME_ENABLED()
3562 | VBOXVMM_EXIT_VMX_VMWRITE_ENABLED()
3563 | VBOXVMM_EXIT_VMX_VMXOFF_ENABLED()
3564 | VBOXVMM_EXIT_VMX_VMXON_ENABLED()
3565 | VBOXVMM_EXIT_VMX_VMFUNC_ENABLED()
3566 | VBOXVMM_EXIT_VMX_INVEPT_ENABLED()
3567 | VBOXVMM_EXIT_VMX_INVVPID_ENABLED()
3568 | VBOXVMM_EXIT_VMX_INVPCID_ENABLED()
3569 | VBOXVMM_EXIT_VMX_EPT_VIOLATION_ENABLED()
3570 | VBOXVMM_EXIT_VMX_EPT_MISCONFIG_ENABLED()
3571 | VBOXVMM_EXIT_VMX_VAPIC_ACCESS_ENABLED()
3572 | VBOXVMM_EXIT_VMX_VAPIC_WRITE_ENABLED()
3573 ) != 0;
3574}
3575
3576
3577/**
3578 * The debug runloop.
3579 *
3580 * @returns Strict VBox status code.
3581 * @param pVM The cross context VM structure.
3582 * @param pVCpu The cross context virtual CPU structure.
3583 */
3584static VBOXSTRICTRC nemR3DarwinRunGuestDebug(PVM pVM, PVMCPU pVCpu)
3585{
3586 /*
3587 * The run loop.
3588 *
3589 * The current approach to state updating is to use the sledgehammer and sync
3590 * everything every time. This will be optimized later.
3591 */
3592 VMXTRANSIENT VmxTransient;
3593 RT_ZERO(VmxTransient);
3594 VmxTransient.pVmcsInfo = &pVCpu->nem.s.VmcsInfo;
3595
3596 bool const fSavedSingleInstruction = pVCpu->nem.s.fSingleInstruction;
3597 pVCpu->nem.s.fSingleInstruction = pVCpu->nem.s.fSingleInstruction || DBGFIsStepping(pVCpu);
3598 pVCpu->nem.s.fDebugWantRdTscExit = false;
3599 pVCpu->nem.s.fUsingDebugLoop = true;
3600
3601 /* State we keep to help modify and later restore the VMCS fields we alter, and for detecting steps. */
3602 VMXRUNDBGSTATE DbgState;
3603 vmxHCRunDebugStateInit(pVCpu, &VmxTransient, &DbgState);
3604 vmxHCPreRunGuestDebugStateUpdate(pVCpu, &VmxTransient, &DbgState);
3605
3606 /*
3607 * Poll timers and run for a bit.
3608 */
3609 /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
3610 * the whole polling job when timers have changed... */
3611 uint64_t offDeltaIgnored;
3612 uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
3613 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
3614 for (unsigned iLoop = 0;; iLoop++)
3615 {
3616 bool fStepping = pVCpu->nem.s.fSingleInstruction;
3617
3618 /* Set up VM-execution controls the next two can respond to. */
3619 vmxHCPreRunGuestDebugStateApply(pVCpu, &VmxTransient, &DbgState);
3620
3621 rcStrict = nemR3DarwinPreRunGuest(pVM, pVCpu, &VmxTransient, fStepping);
3622 if (rcStrict != VINF_SUCCESS)
3623 break;
3624
3625 /* Override any obnoxious code in the above call. */
3626 vmxHCPreRunGuestDebugStateApply(pVCpu, &VmxTransient, &DbgState);
3627
3628 hv_return_t hrc = nemR3DarwinRunGuest(pVM, pVCpu, &VmxTransient);
3629 if (hrc == HV_SUCCESS)
3630 {
3631 /*
3632 * Deal with the message.
3633 */
3634 rcStrict = nemR3DarwinHandleExitDebug(pVM, pVCpu, &VmxTransient, &DbgState);
3635 if (rcStrict == VINF_SUCCESS)
3636 { /* hopefully likely */ }
3637 else
3638 {
3639 LogFlow(("NEM/%u: breaking: nemR3DarwinHandleExitDebug -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
3640 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
3641 break;
3642 }
3643
3644 /*
3645 * Stepping: Did the RIP change, if so, consider it a single step.
3646 * Otherwise, make sure one of the TFs gets set.
3647 */
3648 if (fStepping)
3649 {
3650 int rc = vmxHCImportGuestStateEx(pVCpu, VmxTransient.pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);
3651 AssertRC(rc);
3652 if ( pVCpu->cpum.GstCtx.rip != DbgState.uRipStart
3653 || pVCpu->cpum.GstCtx.cs.Sel != DbgState.uCsStart)
3654 {
3655 rcStrict = VINF_EM_DBG_STEPPED;
3656 break;
3657 }
3658 ASMAtomicUoOrU64(&pVCpu->nem.s.fCtxChanged, HM_CHANGED_GUEST_DR7);
3659 }
3660 }
3661 else
3662 {
3663 AssertLogRelMsgFailedReturn(("hv_vcpu_run() failed for CPU #%u: %#x %u\n",
3664 pVCpu->idCpu, hrc, vmxHCCheckGuestState(pVCpu, &pVCpu->nem.s.VmcsInfo)),
3665 VERR_NEM_IPE_0);
3666 }
3667 } /* the run loop */
3668
3669 /*
3670 * Clear the X86_EFL_TF if necessary.
3671 */
3672 if (pVCpu->nem.s.fClearTrapFlag)
3673 {
3674 int rc = vmxHCImportGuestStateEx(pVCpu, VmxTransient.pVmcsInfo, CPUMCTX_EXTRN_RFLAGS);
3675 AssertRC(rc);
3676 pVCpu->nem.s.fClearTrapFlag = false;
3677 pVCpu->cpum.GstCtx.eflags.Bits.u1TF = 0;
3678 }
3679
3680 pVCpu->nem.s.fUsingDebugLoop = false;
3681 pVCpu->nem.s.fDebugWantRdTscExit = false;
3682 pVCpu->nem.s.fSingleInstruction = fSavedSingleInstruction;
3683
3684 /* Restore all controls applied by vmxHCPreRunGuestDebugStateApply above. */
3685 return vmxHCRunDebugStateRevert(pVCpu, &VmxTransient, &DbgState, rcStrict);
3686}
3687
3688
3689VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu)
3690{
3691 LogFlow(("NEM/%u: %04x:%08RX64 efl=%#08RX64 <=\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags.u));
3692#ifdef LOG_ENABLED
3693 if (LogIs3Enabled())
3694 nemR3DarwinLogState(pVM, pVCpu);
3695#endif
3696
3697 AssertReturn(NEMR3CanExecuteGuest(pVM, pVCpu), VERR_NEM_IPE_9);
3698
3699 /*
3700 * Try switch to NEM runloop state.
3701 */
3702 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
3703 { /* likely */ }
3704 else
3705 {
3706 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
3707 LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
3708 return VINF_SUCCESS;
3709 }
3710
3711 VBOXSTRICTRC rcStrict;
3712 if ( !pVCpu->nem.s.fUseDebugLoop
3713 && !nemR3DarwinAnyExpensiveProbesEnabled()
3714 && !DBGFIsStepping(pVCpu)
3715 && !pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledInt3Breakpoints)
3716 rcStrict = nemR3DarwinRunGuestNormal(pVM, pVCpu);
3717 else
3718 rcStrict = nemR3DarwinRunGuestDebug(pVM, pVCpu);
3719
3720 if (rcStrict == VINF_EM_RAW_TO_R3)
3721 rcStrict = VINF_SUCCESS;
3722
3723 /*
3724 * Convert any pending HM events back to TRPM due to premature exits.
3725 *
3726 * This is because execution may continue from IEM and we would need to inject
3727 * the event from there (hence place it back in TRPM).
3728 */
3729 if (pVCpu->nem.s.Event.fPending)
3730 {
3731 vmxHCPendingEventToTrpmTrap(pVCpu);
3732 Assert(!pVCpu->nem.s.Event.fPending);
3733
3734 /* Clear the events from the VMCS. */
3735 int rc = nemR3DarwinWriteVmcs32(pVCpu, VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, 0); AssertRC(rc);
3736 rc = nemR3DarwinWriteVmcs32(pVCpu, VMX_VMCS_GUEST_PENDING_DEBUG_XCPTS, 0); AssertRC(rc);
3737 }
3738
3739
3740 if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
3741 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
3742
3743 if (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL))
3744 {
3745 /* Try to anticipate what we might need. */
3746 uint64_t fImport = NEM_DARWIN_CPUMCTX_EXTRN_MASK_FOR_IEM;
3747 if ( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
3748 || RT_FAILURE(rcStrict))
3749 fImport = CPUMCTX_EXTRN_ALL;
3750 else if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_APIC
3751 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
3752 fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;
3753
3754 if (pVCpu->cpum.GstCtx.fExtrn & fImport)
3755 {
3756 /* Only import what is external currently. */
3757 int rc2 = nemR3DarwinCopyStateFromHv(pVM, pVCpu, fImport);
3758 if (RT_SUCCESS(rc2))
3759 pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
3760 else if (RT_SUCCESS(rcStrict))
3761 rcStrict = rc2;
3762 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
3763 {
3764 pVCpu->cpum.GstCtx.fExtrn = 0;
3765 ASMAtomicUoOrU64(&pVCpu->nem.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
3766 }
3767 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
3768 }
3769 else
3770 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
3771 }
3772 else
3773 {
3774 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
3775 pVCpu->cpum.GstCtx.fExtrn = 0;
3776 ASMAtomicUoOrU64(&pVCpu->nem.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
3777 }
3778
3779 LogFlow(("NEM/%u: %04x:%08RX64 efl=%#08RX64 => %Rrc\n",
3780 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags.u, VBOXSTRICTRC_VAL(rcStrict) ));
3781 return rcStrict;
3782}
3783
3784
3785VMMR3_INT_DECL(bool) NEMR3CanExecuteGuest(PVM pVM, PVMCPU pVCpu)
3786{
3787 NOREF(pVM);
3788 return PGMPhysIsA20Enabled(pVCpu);
3789}
3790
3791
3792bool nemR3NativeSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
3793{
3794 VMCPU_ASSERT_EMT(pVCpu);
3795 bool fOld = pVCpu->nem.s.fSingleInstruction;
3796 pVCpu->nem.s.fSingleInstruction = fEnable;
3797 pVCpu->nem.s.fUseDebugLoop = fEnable || pVM->nem.s.fUseDebugLoop;
3798 return fOld;
3799}
3800
3801
3802void nemR3NativeNotifyFF(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
3803{
3804 LogFlowFunc(("pVM=%p pVCpu=%p fFlags=%#x\n", pVM, pVCpu, fFlags));
3805
3806 RT_NOREF(pVM, fFlags);
3807
3808 hv_return_t hrc = hv_vcpu_interrupt(&pVCpu->nem.s.hVCpuId, 1);
3809 if (hrc != HV_SUCCESS)
3810 LogRel(("NEM: hv_vcpu_interrupt(%u, 1) failed with %#x\n", pVCpu->nem.s.hVCpuId, hrc));
3811}
3812
3813
3814DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChanged(PVM pVM, bool fUseDebugLoop)
3815{
3816 for (DBGFEVENTTYPE enmEvent = DBGFEVENT_EXIT_VMX_FIRST;
3817 !fUseDebugLoop && enmEvent <= DBGFEVENT_EXIT_VMX_LAST;
3818 enmEvent = (DBGFEVENTTYPE)(enmEvent + 1))
3819 fUseDebugLoop = DBGF_IS_EVENT_ENABLED(pVM, enmEvent);
3820
3821 return fUseDebugLoop;
3822}
3823
3824
3825DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu, bool fUseDebugLoop)
3826{
3827 RT_NOREF(pVM, pVCpu);
3828 return fUseDebugLoop;
3829}
3830
3831
3832VMMR3_INT_DECL(int) NEMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvR3,
3833 uint8_t *pu2State, uint32_t *puNemRange)
3834{
3835 RT_NOREF(pVM, puNemRange);
3836
3837 Log5(("NEMR3NotifyPhysRamRegister: %RGp LB %RGp, pvR3=%p\n", GCPhys, cb, pvR3));
3838#if defined(VBOX_WITH_PGM_NEM_MODE)
3839 if (pvR3)
3840 {
3841 int rc = nemR3DarwinMap(pVM, GCPhys, pvR3, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
3842 if (RT_FAILURE(rc))
3843 {
3844 LogRel(("NEMR3NotifyPhysRamRegister: GCPhys=%RGp LB %RGp pvR3=%p rc=%Rrc\n", GCPhys, cb, pvR3, rc));
3845 return VERR_NEM_MAP_PAGES_FAILED;
3846 }
3847 }
3848 return VINF_SUCCESS;
3849#else
3850 RT_NOREF(pVM, GCPhys, cb, pvR3);
3851 return VERR_NEM_MAP_PAGES_FAILED;
3852#endif
3853}
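/*
 * Illustrative sketch (assumption; this is not the actual nemR3DarwinMap() body, which lives earlier
 * in this file): backing a guest-physical range with host ring-3 memory through Hypervisor.framework
 * boils down to something along the lines of
 *
 *     hv_return_t hrc = hv_vm_map(pvR3, GCPhys, cb, HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC);
 *
 * with hv_vm_unmap(GCPhys, cb) as the counterpart used when a range is torn down again.
 */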
3854
3855
3856VMMR3_INT_DECL(bool) NEMR3IsMmio2DirtyPageTrackingSupported(PVM pVM)
3857{
3858 RT_NOREF(pVM);
3859 return false;
3860}
3861
3862
3863VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
3864 void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
3865{
3866 RT_NOREF(pVM, puNemRange, pvRam, fFlags);
3867
3868 Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p (%d)\n",
3869 GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, *pu2State));
3870
3871#if defined(VBOX_WITH_PGM_NEM_MODE)
3872 /*
3873 * Unmap the RAM we're replacing.
3874 */
3875 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
3876 {
3877 int rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
3878 if (RT_SUCCESS(rc))
3879 { /* likely */ }
3880 else if (pvMmio2)
3881 LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc (ignored)\n",
3882 GCPhys, cb, fFlags, rc));
3883 else
3884 {
3885 LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc\n",
3886 GCPhys, cb, fFlags, rc));
3887 return VERR_NEM_UNMAP_PAGES_FAILED;
3888 }
3889 }
3890
3891 /*
3892 * Map MMIO2 if any.
3893 */
3894 if (pvMmio2)
3895 {
3896 Assert(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2);
3897 int rc = nemR3DarwinMap(pVM, GCPhys, pvMmio2, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
3898 if (RT_FAILURE(rc))
3899 {
3900 LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x pvMmio2=%p: Map -> rc=%Rrc\n",
3901 GCPhys, cb, fFlags, pvMmio2, rc));
3902 return VERR_NEM_MAP_PAGES_FAILED;
3903 }
3904 }
3905 else
3906 Assert(!(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2));
3907
3908#else
3909 RT_NOREF(pVM, GCPhys, cb, pvRam, pvMmio2);
3910 *pu2State = (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE) ? UINT8_MAX : NEM_DARWIN_PAGE_STATE_UNMAPPED;
3911#endif
3912 return VINF_SUCCESS;
3913}
3914
3915
3916VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
3917 void *pvRam, void *pvMmio2, uint32_t *puNemRange)
3918{
3919 RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, puNemRange);
3920 return VINF_SUCCESS;
3921}
3922
3923
3924VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExUnmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, void *pvRam,
3925 void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
3926{
3927 RT_NOREF(pVM, puNemRange);
3928
3929 Log5(("NEMR3NotifyPhysMmioExUnmap: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p uNemRange=%#x (%#x)\n",
3930 GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, puNemRange, *puNemRange));
3931
3932 int rc = VINF_SUCCESS;
3933#if defined(VBOX_WITH_PGM_NEM_MODE)
3934 /*
3935 * Unmap the MMIO2 pages.
3936 */
3937 /** @todo If we implement aliasing (MMIO2 page aliased into MMIO range),
3938 * we may have more stuff to unmap even in case of pure MMIO... */
3939 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2)
3940 {
3941 rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
3942 if (RT_FAILURE(rc))
3943 {
3944 LogRel2(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc\n",
3945 GCPhys, cb, fFlags, rc));
3946 rc = VERR_NEM_UNMAP_PAGES_FAILED;
3947 }
3948 }
3949
3950 /* Ensure the page is marked as unmapped if relevant. */
3951 Assert(!pu2State || *pu2State == NEM_DARWIN_PAGE_STATE_UNMAPPED);
3952
3953 /*
3954 * Restore the RAM we replaced.
3955 */
3956 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
3957 {
3958 AssertPtr(pvRam);
3959 rc = nemR3DarwinMap(pVM, GCPhys, pvRam, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
3960 if (RT_SUCCESS(rc))
3961 { /* likely */ }
3962 else
3963 {
3964 LogRel(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp pvMmio2=%p rc=%Rrc\n", GCPhys, cb, pvMmio2, rc));
3965 rc = VERR_NEM_MAP_PAGES_FAILED;
3966 }
3967 }
3968
3969 RT_NOREF(pvMmio2);
3970#else
3971 RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State);
3972 if (pu2State)
3973 *pu2State = UINT8_MAX;
3974 rc = VERR_NEM_UNMAP_PAGES_FAILED;
3975#endif
3976 return rc;
3977}
3978
3979
3980VMMR3_INT_DECL(int) NEMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t uNemRange,
3981 void *pvBitmap, size_t cbBitmap)
3982{
3983 RT_NOREF(pVM, GCPhys, cb, uNemRange, pvBitmap, cbBitmap);
3984 AssertFailed();
3985 return VERR_NOT_IMPLEMENTED;
3986}
3987
3988
3989VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages, uint32_t fFlags,
3990 uint8_t *pu2State, uint32_t *puNemRange)
3991{
3992 RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);
3993
3994 Log5(("nemR3NativeNotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags));
3995 *pu2State = UINT8_MAX;
3996 *puNemRange = 0;
3997 return VINF_SUCCESS;
3998}
3999
4000
4001VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages,
4002 uint32_t fFlags, uint8_t *pu2State, uint32_t *puNemRange)
4003{
4004 Log5(("nemR3NativeNotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p (%d) puNemRange=%p (%#x)\n",
4005 GCPhys, cb, pvPages, fFlags, pu2State, *pu2State, puNemRange, *puNemRange));
4006 *pu2State = UINT8_MAX;
4007
4008#if defined(VBOX_WITH_PGM_NEM_MODE)
4009 /*
4010 * (Re-)map readonly.
4011 */
4012 AssertPtrReturn(pvPages, VERR_INVALID_POINTER);
4013 int rc = nemR3DarwinMap(pVM, GCPhys, pvPages, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE, pu2State);
4014 if (RT_FAILURE(rc))
4015 {
4016 LogRel(("nemR3NativeNotifyPhysRomRegisterLate: GCPhys=%RGp LB %RGp pvPages=%p fFlags=%#x rc=%Rrc\n",
4017 GCPhys, cb, pvPages, fFlags, rc));
4018 return VERR_NEM_MAP_PAGES_FAILED;
4019 }
4020 RT_NOREF(fFlags, puNemRange);
4021 return VINF_SUCCESS;
4022#else
4023 RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);
4024 return VERR_NEM_MAP_PAGES_FAILED;
4025#endif
4026}
4027
4028
4029VMM_INT_DECL(void) NEMHCNotifyHandlerPhysicalDeregister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
4030 RTR3PTR pvMemR3, uint8_t *pu2State)
4031{
4032 RT_NOREF(pVM);
4033
4034 Log5(("NEMHCNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d pvMemR3=%p pu2State=%p (%d)\n",
4035 GCPhys, cb, enmKind, pvMemR3, pu2State, *pu2State));
4036
4037 *pu2State = UINT8_MAX;
4038#if defined(VBOX_WITH_PGM_NEM_MODE)
4039 if (pvMemR3)
4040 {
4041 int rc = nemR3DarwinMap(pVM, GCPhys, pvMemR3, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
4042 AssertLogRelMsgRC(rc, ("NEMHCNotifyHandlerPhysicalDeregister: nemR3DarwinMap(,%p,%RGp,%RGp,) -> %Rrc\n",
4043 pvMemR3, GCPhys, cb, rc));
4044 }
4045 RT_NOREF(enmKind);
4046#else
4047 RT_NOREF(pVM, enmKind, GCPhys, cb, pvMemR3);
4048 AssertFailed();
4049#endif
4050}
4051
4052
4053VMMR3_INT_DECL(void) NEMR3NotifySetA20(PVMCPU pVCpu, bool fEnabled)
4054{
4055 Log(("NEMR3NotifySetA20: fEnabled=%RTbool\n", fEnabled));
4056 RT_NOREF(pVCpu, fEnabled);
4057}
4058
4059
4060void nemHCNativeNotifyHandlerPhysicalRegister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
4061{
4062 Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
4063 NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb);
4064}
4065
4066
4067void nemHCNativeNotifyHandlerPhysicalModify(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
4068 RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
4069{
4070 Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
4071 GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
4072 NOREF(pVM); NOREF(enmKind); NOREF(GCPhysOld); NOREF(GCPhysNew); NOREF(cb); NOREF(fRestoreAsRAM);
4073}
4074
4075
4076int nemHCNativeNotifyPhysPageAllocated(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
4077 PGMPAGETYPE enmType, uint8_t *pu2State)
4078{
4079 Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
4080 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
4081 RT_NOREF(HCPhys, fPageProt, enmType);
4082
4083 return nemR3DarwinUnmap(pVM, GCPhys, X86_PAGE_SIZE, pu2State);
4084}
4085
4086
4087VMM_INT_DECL(void) NEMHCNotifyPhysPageProtChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, RTR3PTR pvR3, uint32_t fPageProt,
4088 PGMPAGETYPE enmType, uint8_t *pu2State)
4089{
4090 Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
4091 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
4092 RT_NOREF(HCPhys, pvR3, fPageProt, enmType);
4093
4094 nemR3DarwinUnmap(pVM, GCPhys, X86_PAGE_SIZE, pu2State);
4095}
4096
4097
4098VMM_INT_DECL(void) NEMHCNotifyPhysPageChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
4099 RTR3PTR pvNewR3, uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
4100{
4101 Log5(("NEMHCNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
4102 GCPhys, HCPhysPrev, HCPhysNew, fPageProt, enmType, *pu2State));
4103 RT_NOREF(HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType);
4104
4105 nemR3DarwinUnmap(pVM, GCPhys, X86_PAGE_SIZE, pu2State);
4106}
4107
4108
4109/**
4110 * Interface for importing state on demand (used by IEM).
4111 *
4112 * @returns VBox status code.
4113 * @param pVCpu The cross context CPU structure.
4114 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
4115 */
4116VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
4117{
4118 LogFlowFunc(("pVCpu=%p fWhat=%RX64\n", pVCpu, fWhat));
4119 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);
4120
4121 return nemR3DarwinCopyStateFromHv(pVCpu->pVMR3, pVCpu, fWhat);
4122}
4123
4124
4125/**
4126 * Query the CPU tick counter and optionally the TSC_AUX MSR value.
4127 *
4128 * @returns VBox status code.
4129 * @param pVCpu The cross context CPU structure.
4130 * @param pcTicks Where to return the CPU tick count.
4131 * @param puAux Where to return the TSC_AUX register value.
4132 */
4133VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPUCC pVCpu, uint64_t *pcTicks, uint32_t *puAux)
4134{
4135 LogFlowFunc(("pVCpu=%p pcTicks=%RX64 puAux=%RX32\n", pVCpu, pcTicks, puAux));
4136 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatQueryCpuTick);
4137
4138 int rc = nemR3DarwinMsrRead(pVCpu, MSR_IA32_TSC, pcTicks);
4139 if ( RT_SUCCESS(rc)
4140 && puAux)
4141 {
4142 if (pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_TSC_AUX)
4143 {
4144 uint64_t u64Aux;
4145 rc = nemR3DarwinMsrRead(pVCpu, MSR_K8_TSC_AUX, &u64Aux);
4146 if (RT_SUCCESS(rc))
4147 *puAux = (uint32_t)u64Aux;
4148 }
4149 else
4150 *puAux = CPUMGetGuestTscAux(pVCpu);
4151 }
4152
4153 return rc;
4154}
4155
4156
4157/**
4158 * Resumes CPU clock (TSC) on all virtual CPUs.
4159 *
4160 * This is called by TM when the VM is started, restored, resumed or similar.
4161 *
4162 * @returns VBox status code.
4163 * @param pVM The cross context VM structure.
4164 * @param pVCpu The cross context CPU structure of the calling EMT.
4165 * @param uPausedTscValue The TSC value at the time of pausing.
4166 */
4167VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uPausedTscValue)
4168{
4169 LogFlowFunc(("pVM=%p pVCpu=%p uPausedTscValue=%RX64\n", pVM, pVCpu, uPausedTscValue));
4170 VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
4171 AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);
4172
4173 hv_return_t hrc = hv_vm_sync_tsc(uPausedTscValue);
4174 if (RT_LIKELY(hrc == HV_SUCCESS))
4175 {
4176 ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_TSC_AUX);
4177 return VINF_SUCCESS;
4178 }
4179
4180 return nemR3DarwinHvSts2Rc(hrc);
4181}
4182
4183
4184/**
4185 * Returns features supported by the NEM backend.
4186 *
4187 * @returns Flags of features supported by the native NEM backend.
4188 * @param pVM The cross context VM structure.
4189 */
4190VMM_INT_DECL(uint32_t) NEMHCGetFeatures(PVMCC pVM)
4191{
4192 RT_NOREF(pVM);
4193 /*
4194 * Apple's Hypervisor.framework is not supported if the CPU doesn't support nested paging
4195 * and unrestricted guest execution, so we can safely return these flags here always.
4196 */
4197 return NEM_FEAT_F_NESTED_PAGING | NEM_FEAT_F_FULL_GST_EXEC | NEM_FEAT_F_XSAVE_XRSTOR;
4198}
4199
4200
4201/** @page pg_nem_darwin NEM/darwin - Native Execution Manager, macOS.
4202 *
4203 * @todo Add notes as the implementation progresses...
4204 */
4205