VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/NEMR3Native-darwin-armv8.cpp@ 99492

Last change on this file since 99492 was 99379, checked in by vboxsync, 2 years ago

VMM/ARMv8: Sync and log the TCR_EL1 register as well, bugref:10390, bugref:10387, bugref:10388

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 61.9 KB
 
1/* $Id: NEMR3Native-darwin-armv8.cpp 99379 2023-04-12 10:30:59Z vboxsync $ */
2/** @file
3 * NEM - Native execution manager, native ring-3 macOS backend using Hypervisor.framework, ARMv8 variant.
4 *
5 * Log group 2: Exit logging.
6 * Log group 3: Log context on exit.
7 * Log group 5: Ring-3 memory management
8 */
9
10/*
11 * Copyright (C) 2023 Oracle and/or its affiliates.
12 *
13 * This file is part of VirtualBox base platform packages, as
14 * available from https://www.alldomusa.eu.org.
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License
18 * as published by the Free Software Foundation, in version 3 of the
19 * License.
20 *
21 * This program is distributed in the hope that it will be useful, but
22 * WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
24 * General Public License for more details.
25 *
26 * You should have received a copy of the GNU General Public License
27 * along with this program; if not, see <https://www.gnu.org/licenses>.
28 *
29 * SPDX-License-Identifier: GPL-3.0-only
30 */
31
32
33/*********************************************************************************************************************************
34* Header Files *
35*********************************************************************************************************************************/
36#define LOG_GROUP LOG_GROUP_NEM
37#define VMCPU_INCL_CPUM_GST_CTX
38#define CPUM_WITH_NONCONST_HOST_FEATURES /* required for initializing parts of the g_CpumHostFeatures structure here. */
39#include <VBox/vmm/nem.h>
40#include <VBox/vmm/iem.h>
41#include <VBox/vmm/em.h>
42#include <VBox/vmm/apic.h>
43#include <VBox/vmm/pdm.h>
44#include <VBox/vmm/hm.h>
45#include <VBox/vmm/hm_vmx.h>
46#include <VBox/vmm/dbgftrace.h>
47#include <VBox/vmm/gcm.h>
48#include "NEMInternal.h"
49#include <VBox/vmm/vmcc.h>
50#include "dtrace/VBoxVMM.h"
51
52#include <iprt/armv8.h>
53#include <iprt/asm.h>
54#include <iprt/ldr.h>
55#include <iprt/mem.h>
56#include <iprt/path.h>
57#include <iprt/string.h>
58#include <iprt/system.h>
59#include <iprt/utf16.h>
60
61#include <mach/mach_time.h>
62#include <mach/kern_return.h>
63
64#include <Hypervisor/Hypervisor.h>
65
66
67/*********************************************************************************************************************************
68* Defined Constants And Macros *
69*********************************************************************************************************************************/
70
71
72/*********************************************************************************************************************************
73* Structures and Typedefs *
74*********************************************************************************************************************************/
75
76
77/*********************************************************************************************************************************
78* Global Variables *
79*********************************************************************************************************************************/
/** NEM_DARWIN_PAGE_STATE_XXX names. */
NEM_TMPL_STATIC const char * const g_apszPageStates[4] = { "not-set", "unmapped", "readable", "writable" };
/** The general registers.
 *
 * Table driving the GPR/PC/FP-status sync loops: maps each
 * Hypervisor.framework register enum to its CPUMCTX_EXTRN_XXX flag and the
 * byte offset of the backing field inside CPUMCTX.  X4..X28 share a single
 * extern flag (CPUMCTX_EXTRN_X4_X28), X0..X3 have individual flags. */
static const struct
{
    /** The Hypervisor.framework register identifier. */
    hv_reg_t enmHvReg;
    /** The CPUMCTX_EXTRN_XXX flag guarding this register. */
    uint32_t fCpumExtrn;
    /** Offset of the register field within CPUMCTX. */
    uint32_t offCpumCtx;
} s_aCpumRegs[] =
{
#define CPUM_GREG_EMIT_X0_X3(a_Idx)  { HV_REG_X ## a_Idx, CPUMCTX_EXTRN_X ## a_Idx, RT_UOFFSETOF(CPUMCTX, aGRegs[a_Idx].x) }
#define CPUM_GREG_EMIT_X4_X28(a_Idx) { HV_REG_X ## a_Idx, CPUMCTX_EXTRN_X4_X28,     RT_UOFFSETOF(CPUMCTX, aGRegs[a_Idx].x) }
    CPUM_GREG_EMIT_X0_X3(0),
    CPUM_GREG_EMIT_X0_X3(1),
    CPUM_GREG_EMIT_X0_X3(2),
    CPUM_GREG_EMIT_X0_X3(3),
    CPUM_GREG_EMIT_X4_X28(4),
    CPUM_GREG_EMIT_X4_X28(5),
    CPUM_GREG_EMIT_X4_X28(6),
    CPUM_GREG_EMIT_X4_X28(7),
    CPUM_GREG_EMIT_X4_X28(8),
    CPUM_GREG_EMIT_X4_X28(9),
    CPUM_GREG_EMIT_X4_X28(10),
    CPUM_GREG_EMIT_X4_X28(11),
    CPUM_GREG_EMIT_X4_X28(12),
    CPUM_GREG_EMIT_X4_X28(13),
    CPUM_GREG_EMIT_X4_X28(14),
    CPUM_GREG_EMIT_X4_X28(15),
    CPUM_GREG_EMIT_X4_X28(16),
    CPUM_GREG_EMIT_X4_X28(17),
    CPUM_GREG_EMIT_X4_X28(18),
    CPUM_GREG_EMIT_X4_X28(19),
    CPUM_GREG_EMIT_X4_X28(20),
    CPUM_GREG_EMIT_X4_X28(21),
    CPUM_GREG_EMIT_X4_X28(22),
    CPUM_GREG_EMIT_X4_X28(23),
    CPUM_GREG_EMIT_X4_X28(24),
    CPUM_GREG_EMIT_X4_X28(25),
    CPUM_GREG_EMIT_X4_X28(26),
    CPUM_GREG_EMIT_X4_X28(27),
    CPUM_GREG_EMIT_X4_X28(28),
    /* FP (x29), LR (x30), PC and the FP status/control registers are synced
       through the same loop even though FPCR/FPSR are not GPRs. */
    { HV_REG_FP,   CPUMCTX_EXTRN_FP,   RT_UOFFSETOF(CPUMCTX, aGRegs[29].x) },
    { HV_REG_LR,   CPUMCTX_EXTRN_LR,   RT_UOFFSETOF(CPUMCTX, aGRegs[30].x) },
    { HV_REG_PC,   CPUMCTX_EXTRN_PC,   RT_UOFFSETOF(CPUMCTX, Pc.u64)       },
    { HV_REG_FPCR, CPUMCTX_EXTRN_FPCR, RT_UOFFSETOF(CPUMCTX, fpcr)         },
    { HV_REG_FPSR, CPUMCTX_EXTRN_FPSR, RT_UOFFSETOF(CPUMCTX, fpsr)         }
#undef CPUM_GREG_EMIT_X0_X3
#undef CPUM_GREG_EMIT_X4_X28
};
/** SIMD/FP registers.
 *
 * Table driving the Q0..Q31 vector register sync loops; no extern flag per
 * entry because all vector registers are guarded by the single
 * CPUMCTX_EXTRN_V0_V31 flag. */
static const struct
{
    /** The Hypervisor.framework SIMD/FP register identifier. */
    hv_simd_fp_reg_t enmHvReg;
    /** Offset of the vector register field within CPUMCTX. */
    uint32_t         offCpumCtx;
} s_aCpumFpRegs[] =
{
#define CPUM_VREG_EMIT(a_Idx) { HV_SIMD_FP_REG_Q ## a_Idx, RT_UOFFSETOF(CPUMCTX, aVRegs[a_Idx].v) }
    CPUM_VREG_EMIT(0),
    CPUM_VREG_EMIT(1),
    CPUM_VREG_EMIT(2),
    CPUM_VREG_EMIT(3),
    CPUM_VREG_EMIT(4),
    CPUM_VREG_EMIT(5),
    CPUM_VREG_EMIT(6),
    CPUM_VREG_EMIT(7),
    CPUM_VREG_EMIT(8),
    CPUM_VREG_EMIT(9),
    CPUM_VREG_EMIT(10),
    CPUM_VREG_EMIT(11),
    CPUM_VREG_EMIT(12),
    CPUM_VREG_EMIT(13),
    CPUM_VREG_EMIT(14),
    CPUM_VREG_EMIT(15),
    CPUM_VREG_EMIT(16),
    CPUM_VREG_EMIT(17),
    CPUM_VREG_EMIT(18),
    CPUM_VREG_EMIT(19),
    CPUM_VREG_EMIT(20),
    CPUM_VREG_EMIT(21),
    CPUM_VREG_EMIT(22),
    CPUM_VREG_EMIT(23),
    CPUM_VREG_EMIT(24),
    CPUM_VREG_EMIT(25),
    CPUM_VREG_EMIT(26),
    CPUM_VREG_EMIT(27),
    CPUM_VREG_EMIT(28),
    CPUM_VREG_EMIT(29),
    CPUM_VREG_EMIT(30),
    CPUM_VREG_EMIT(31)
#undef CPUM_VREG_EMIT
};
/** System registers.
 *
 * Table driving the EL0/EL1 system register sync loops.  The stack pointers
 * share CPUMCTX_EXTRN_SP, and SCTLR/TCR/TTBR0/TTBR1 share
 * CPUMCTX_EXTRN_SCTLR_TCR_TTBR, so several entries carry the same flag. */
static const struct
{
    /** The Hypervisor.framework system register identifier. */
    hv_sys_reg_t enmHvReg;
    /** The CPUMCTX_EXTRN_XXX flag guarding this register. */
    uint32_t     fCpumExtrn;
    /** Offset of the register field within CPUMCTX. */
    uint32_t     offCpumCtx;
} s_aCpumSysRegs[] =
{
    { HV_SYS_REG_SP_EL0,    CPUMCTX_EXTRN_SP,             RT_UOFFSETOF(CPUMCTX, aSpReg[0].u64) },
    { HV_SYS_REG_SP_EL1,    CPUMCTX_EXTRN_SP,             RT_UOFFSETOF(CPUMCTX, aSpReg[1].u64) },
    { HV_SYS_REG_SPSR_EL1,  CPUMCTX_EXTRN_SPSR,           RT_UOFFSETOF(CPUMCTX, Spsr.u64)      },
    { HV_SYS_REG_ELR_EL1,   CPUMCTX_EXTRN_ELR,            RT_UOFFSETOF(CPUMCTX, Elr.u64)       },
    { HV_SYS_REG_SCTLR_EL1, CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Sctlr.u64)     },
    { HV_SYS_REG_TCR_EL1,   CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Tcr.u64)       },
    { HV_SYS_REG_TTBR0_EL1, CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Ttbr0.u64)     },
    { HV_SYS_REG_TTBR1_EL1, CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Ttbr1.u64)     },
};
188
189
190/*********************************************************************************************************************************
191* Internal Functions *
192*********************************************************************************************************************************/
193
194
195/**
196 * Converts a HV return code to a VBox status code.
197 *
198 * @returns VBox status code.
199 * @param hrc The HV return code to convert.
200 */
201DECLINLINE(int) nemR3DarwinHvSts2Rc(hv_return_t hrc)
202{
203 if (hrc == HV_SUCCESS)
204 return VINF_SUCCESS;
205
206 switch (hrc)
207 {
208 case HV_ERROR: return VERR_INVALID_STATE;
209 case HV_BUSY: return VERR_RESOURCE_BUSY;
210 case HV_BAD_ARGUMENT: return VERR_INVALID_PARAMETER;
211 case HV_NO_RESOURCES: return VERR_OUT_OF_RESOURCES;
212 case HV_NO_DEVICE: return VERR_NOT_FOUND;
213 case HV_UNSUPPORTED: return VERR_NOT_SUPPORTED;
214 }
215
216 return VERR_IPE_UNEXPECTED_STATUS;
217}
218
219
/**
 * Returns a human readable string of the given exception class.
 *
 * Used for logging; the returned pointer references a string literal and must
 * not be freed.
 *
 * @returns Pointer to the string matching the given EC.
 * @param   u32Ec   The exception class to return the string for.
 */
static const char *nemR3DarwinEsrEl2EcStringify(uint32_t u32Ec)
{
    switch (u32Ec)
    {
        /* Emit one case per known ARMV8_ESR_EL2 exception class, returning the
           stringified constant name. */
#define ARMV8_EC_CASE(a_Ec) case a_Ec: return #a_Ec
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_UNKNOWN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_TRAPPED_WFX);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCR_MRC_COPROC_15);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCRR_MRRC_COPROC15);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCR_MRC_COPROC_14);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_LDC_STC);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_SME_SVE_NEON);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_VMRS);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_PA_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_LS64_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MRRC_COPROC14);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_BTI_BRANCH_TARGET_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_ILLEGAL_EXECUTION_STATE);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_SVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_HVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_SMC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_SVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_HVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_SMC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_SYS_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_SVE_TRAPPED);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_PAUTH_NV_TRAPPED_ERET_ERETAA_ERETAB);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_TME_TSTART_INSN_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_FPAC_PA_INSN_FAILURE_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_SME_TRAPPED_SME_ACCESS);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_RME_GRANULE_PROT_CHECK_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_INSN_ABORT_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_INSN_ABORT_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_PC_ALIGNMENT_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_DATA_ABORT_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_DATA_ABORT_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SP_ALIGNMENT_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_MOPS_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_FP_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_FP_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SERROR_INTERRUPT);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_BKPT_EXCEPTION_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_BKPT_EXCEPTION_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SS_EXCEPTION_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SS_EXCEPTION_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_WATCHPOINT_EXCEPTION_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_WATCHPOINT_EXCEPTION_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_BKPT_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_VEC_CATCH_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_BRK_INSN);
#undef ARMV8_EC_CASE
        default:
            break;
    }

    return "<INVALID>";
}
283
284
285/**
286 * Unmaps the given guest physical address range (page aligned).
287 *
288 * @returns VBox status code.
289 * @param pVM The cross context VM structure.
290 * @param GCPhys The guest physical address to start unmapping at.
291 * @param cb The size of the range to unmap in bytes.
292 * @param pu2State Where to store the new state of the unmappd page, optional.
293 */
294DECLINLINE(int) nemR3DarwinUnmap(PVM pVM, RTGCPHYS GCPhys, size_t cb, uint8_t *pu2State)
295{
296 if (*pu2State <= NEM_DARWIN_PAGE_STATE_UNMAPPED)
297 {
298 Log5(("nemR3DarwinUnmap: %RGp == unmapped\n", GCPhys));
299 *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
300 return VINF_SUCCESS;
301 }
302
303 LogFlowFunc(("Unmapping %RGp LB %zu\n", GCPhys, cb));
304 hv_return_t hrc = hv_vm_unmap(GCPhys, cb);
305 if (RT_LIKELY(hrc == HV_SUCCESS))
306 {
307 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
308 if (pu2State)
309 *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
310 Log5(("nemR3DarwinUnmap: %RGp => unmapped\n", GCPhys));
311 return VINF_SUCCESS;
312 }
313
314 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
315 LogRel(("nemR3DarwinUnmap(%RGp): failed! hrc=%#x\n",
316 GCPhys, hrc));
317 return VERR_NEM_IPE_6;
318}
319
320
321/**
322 * Maps a given guest physical address range backed by the given memory with the given
323 * protection flags.
324 *
325 * @returns VBox status code.
326 * @param pVM The cross context VM structure.
327 * @param GCPhys The guest physical address to start mapping.
328 * @param pvRam The R3 pointer of the memory to back the range with.
329 * @param cb The size of the range, page aligned.
330 * @param fPageProt The page protection flags to use for this range, combination of NEM_PAGE_PROT_XXX
331 * @param pu2State Where to store the state for the new page, optional.
332 */
333DECLINLINE(int) nemR3DarwinMap(PVM pVM, RTGCPHYS GCPhys, const void *pvRam, size_t cb, uint32_t fPageProt, uint8_t *pu2State)
334{
335 LogFlowFunc(("Mapping %RGp LB %zu fProt=%#x\n", GCPhys, cb, fPageProt));
336
337 Assert(fPageProt != NEM_PAGE_PROT_NONE);
338 RT_NOREF(pVM);
339
340 hv_memory_flags_t fHvMemProt = 0;
341 if (fPageProt & NEM_PAGE_PROT_READ)
342 fHvMemProt |= HV_MEMORY_READ;
343 if (fPageProt & NEM_PAGE_PROT_WRITE)
344 fHvMemProt |= HV_MEMORY_WRITE;
345 if (fPageProt & NEM_PAGE_PROT_EXECUTE)
346 fHvMemProt |= HV_MEMORY_EXEC;
347
348 hv_return_t hrc = hv_vm_map((void *)pvRam, GCPhys, cb, fHvMemProt);
349 if (hrc == HV_SUCCESS)
350 {
351 if (pu2State)
352 *pu2State = (fPageProt & NEM_PAGE_PROT_WRITE)
353 ? NEM_DARWIN_PAGE_STATE_WRITABLE
354 : NEM_DARWIN_PAGE_STATE_READABLE;
355 return VINF_SUCCESS;
356 }
357
358 return nemR3DarwinHvSts2Rc(hrc);
359}
360
#if 0 /* unused */
/**
 * Changes the protection of an already mapped guest physical address range.
 *
 * NOTE(review): references pVM->nem.s.fCreatedAsid and hv_vm_protect_space(),
 * which look carried over from the x86 darwin backend -- confirm these exist
 * in the ARMv8 Hypervisor.framework variant before re-enabling this code.
 */
DECLINLINE(int) nemR3DarwinProtectPage(PVM pVM, RTGCPHYS GCPhys, size_t cb, uint32_t fPageProt)
{
    /* Translate the NEM protection flags into HV memory flags. */
    hv_memory_flags_t fHvMemProt = 0;
    if (fPageProt & NEM_PAGE_PROT_READ)
        fHvMemProt |= HV_MEMORY_READ;
    if (fPageProt & NEM_PAGE_PROT_WRITE)
        fHvMemProt |= HV_MEMORY_WRITE;
    if (fPageProt & NEM_PAGE_PROT_EXECUTE)
        fHvMemProt |= HV_MEMORY_EXEC;

    hv_return_t hrc;
    if (pVM->nem.s.fCreatedAsid)
        hrc = hv_vm_protect_space(pVM->nem.s.uVmAsid, GCPhys, cb, fHvMemProt);
    else
        hrc = hv_vm_protect(GCPhys, cb, fHvMemProt);

    return nemR3DarwinHvSts2Rc(hrc);
}
#endif
381
382DECLINLINE(int) nemR3NativeGCPhys2R3PtrReadOnly(PVM pVM, RTGCPHYS GCPhys, const void **ppv)
383{
384 PGMPAGEMAPLOCK Lock;
385 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, ppv, &Lock);
386 if (RT_SUCCESS(rc))
387 PGMPhysReleasePageMappingLock(pVM, &Lock);
388 return rc;
389}
390
391
392DECLINLINE(int) nemR3NativeGCPhys2R3PtrWriteable(PVM pVM, RTGCPHYS GCPhys, void **ppv)
393{
394 PGMPAGEMAPLOCK Lock;
395 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, ppv, &Lock);
396 if (RT_SUCCESS(rc))
397 PGMPhysReleasePageMappingLock(pVM, &Lock);
398 return rc;
399}
400
401
#ifdef LOG_ENABLED
/**
 * Logs the current CPU state.
 *
 * Only acts when log level 3 is enabled for this log group; formats the
 * GPRs, PC/PSTATE and the synced EL1 system registers via DBGF.
 */
static void nemR3DarwinLogState(PVMCC pVM, PVMCPUCC pVCpu)
{
    if (LogIs3Enabled())
    {
        char szRegs[4096];
        DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
                        "x0=%016VR{x0} x1=%016VR{x1} x2=%016VR{x2} x3=%016VR{x3}\n"
                        "x4=%016VR{x4} x5=%016VR{x5} x6=%016VR{x6} x7=%016VR{x7}\n"
                        "x8=%016VR{x8} x9=%016VR{x9} x10=%016VR{x10} x11=%016VR{x11}\n"
                        "x12=%016VR{x12} x13=%016VR{x13} x14=%016VR{x14} x15=%016VR{x15}\n"
                        "x16=%016VR{x16} x17=%016VR{x17} x18=%016VR{x18} x19=%016VR{x19}\n"
                        "x20=%016VR{x20} x21=%016VR{x21} x22=%016VR{x22} x23=%016VR{x23}\n"
                        "x24=%016VR{x24} x25=%016VR{x25} x26=%016VR{x26} x27=%016VR{x27}\n"
                        "x28=%016VR{x28} x29=%016VR{x29} x30=%016VR{x30}\n"
                        "pc=%016VR{pc} pstate=%016VR{pstate}\n"
                        "sp_el0=%016VR{sp_el0} sp_el1=%016VR{sp_el1} elr_el1=%016VR{elr_el1}\n"
                        "sctlr_el1=%016VR{sctlr_el1} tcr_el1=%016VR{tcr_el1}\n"
                        "ttbr0_el1=%016VR{ttbr0_el1} ttbr1_el1=%016VR{ttbr1_el1}\n"
                        );
        char szInstr[256]; RT_ZERO(szInstr);
#if 0
        /* Disassembly of the current instruction is disabled for now. */
        DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
                           DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
                           szInstr, sizeof(szInstr), NULL);
#endif
        Log3(("%s%s\n", szRegs, szInstr));
    }
}
#endif /* LOG_ENABLED */
435
436
/**
 * Imports guest register state from Hypervisor.framework into CPUMCTX.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure (currently unused).
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   fWhat   CPUMCTX_EXTRN_XXX mask selecting which state to import.
 */
static int nemR3DarwinCopyStateFromHv(PVMCC pVM, PVMCPUCC pVCpu, uint64_t fWhat)
{
    RT_NOREF(pVM);
    hv_return_t hrc = HV_SUCCESS;

    /* General purpose registers, PC and FP control/status (table driven).
       Note that hv_return_t results are OR'ed together: any failure leaves
       hrc != HV_SUCCESS and skips the remaining groups. */
    if (fWhat & (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR))
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumRegs); i++)
        {
            if (s_aCpumRegs[i].fCpumExtrn & fWhat)
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumRegs[i].offCpumCtx);
                hrc |= hv_vcpu_get_reg(pVCpu->nem.s.hVCpu, s_aCpumRegs[i].enmHvReg, pu64);
            }
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & CPUMCTX_EXTRN_V0_V31))
    {
        /* SIMD/FP registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumFpRegs); i++)
        {
            hv_simd_fp_uchar16_t *pu128 = (hv_simd_fp_uchar16_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumFpRegs[i].offCpumCtx);
            hrc |= hv_vcpu_get_simd_fp_reg(pVCpu->nem.s.hVCpu, s_aCpumFpRegs[i].enmHvReg, pu128);
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR)))
    {
        /* System registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegs); i++)
        {
            if (s_aCpumSysRegs[i].fCpumExtrn & fWhat)
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumSysRegs[i].offCpumCtx);
                hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumSysRegs[i].enmHvReg, pu64);
            }
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & CPUMCTX_EXTRN_PSTATE))
    {
        /* PSTATE is read through the 64-bit CPSR register and truncated to 32 bits. */
        uint64_t u64Tmp;
        hrc |= hv_vcpu_get_reg(pVCpu->nem.s.hVCpu, HV_REG_CPSR, &u64Tmp);
        if (hrc == HV_SUCCESS)
            pVCpu->cpum.GstCtx.fPState = (uint32_t)u64Tmp;
    }

    /* Almost done, just update extern flags.  When everything has been
       imported the whole mask (incl. keeper bits) is cleared. */
    pVCpu->cpum.GstCtx.fExtrn &= ~fWhat;
    if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
        pVCpu->cpum.GstCtx.fExtrn = 0;

    return nemR3DarwinHvSts2Rc(hrc);
}
495
496
/**
 * State to pass between vmxHCExitEptViolation
 * and nemR3DarwinHandleMemoryAccessPageCheckerCallback.
 */
typedef struct NEMHCDARWINHMACPCCSTATE
{
    /** Input: Write access. */
    bool fWriteAccess;
    /** Output: Set if we did something. */
    bool fDidSomething;
    /** Output: Set if we should resume. */
    bool fCanResume;
} NEMHCDARWINHMACPCCSTATE;
510
511/**
512 * @callback_method_impl{FNPGMPHYSNEMCHECKPAGE,
513 * Worker for vmxHCExitEptViolation; pvUser points to a
514 * NEMHCDARWINHMACPCCSTATE structure. }
515 */
516static DECLCALLBACK(int)
517nemR3DarwinHandleMemoryAccessPageCheckerCallback(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
518{
519 RT_NOREF(pVCpu);
520
521 NEMHCDARWINHMACPCCSTATE *pState = (NEMHCDARWINHMACPCCSTATE *)pvUser;
522 pState->fDidSomething = false;
523 pState->fCanResume = false;
524
525 uint8_t u2State = pInfo->u2NemState;
526
527 /*
528 * Consolidate current page state with actual page protection and access type.
529 * We don't really consider downgrades here, as they shouldn't happen.
530 */
531 switch (u2State)
532 {
533 case NEM_DARWIN_PAGE_STATE_UNMAPPED:
534 case NEM_DARWIN_PAGE_STATE_NOT_SET:
535 {
536 if (pInfo->fNemProt == NEM_PAGE_PROT_NONE)
537 {
538 Log4(("nemR3DarwinHandleMemoryAccessPageCheckerCallback: %RGp - #1\n", GCPhys));
539 return VINF_SUCCESS;
540 }
541
542 /* Don't bother remapping it if it's a write request to a non-writable page. */
543 if ( pState->fWriteAccess
544 && !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE))
545 {
546 Log4(("nemR3DarwinHandleMemoryAccessPageCheckerCallback: %RGp - #1w\n", GCPhys));
547 return VINF_SUCCESS;
548 }
549
550 int rc = VINF_SUCCESS;
551 if (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
552 {
553 void *pvPage;
554 rc = nemR3NativeGCPhys2R3PtrWriteable(pVM, GCPhys, &pvPage);
555 if (RT_SUCCESS(rc))
556 rc = nemR3DarwinMap(pVM, GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, pvPage, X86_PAGE_SIZE, pInfo->fNemProt, &u2State);
557 }
558 else if (pInfo->fNemProt & NEM_PAGE_PROT_READ)
559 {
560 const void *pvPage;
561 rc = nemR3NativeGCPhys2R3PtrReadOnly(pVM, GCPhys, &pvPage);
562 if (RT_SUCCESS(rc))
563 rc = nemR3DarwinMap(pVM, GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, pvPage, X86_PAGE_SIZE, pInfo->fNemProt, &u2State);
564 }
565 else /* Only EXECUTE doesn't work. */
566 AssertReleaseFailed();
567
568 pInfo->u2NemState = u2State;
569 Log4(("nemR3DarwinHandleMemoryAccessPageCheckerCallback: %RGp - synced => %s + %Rrc\n",
570 GCPhys, g_apszPageStates[u2State], rc));
571 pState->fDidSomething = true;
572 pState->fCanResume = true;
573 return rc;
574 }
575 case NEM_DARWIN_PAGE_STATE_READABLE:
576 if ( !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
577 && (pInfo->fNemProt & (NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE)))
578 {
579 pState->fCanResume = true;
580 Log4(("nemR3DarwinHandleMemoryAccessPageCheckerCallback: %RGp - #2\n", GCPhys));
581 return VINF_SUCCESS;
582 }
583 break;
584
585 case NEM_DARWIN_PAGE_STATE_WRITABLE:
586 if (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
587 {
588 pState->fCanResume = true;
589 if (pInfo->u2OldNemState == NEM_DARWIN_PAGE_STATE_WRITABLE)
590 Log4(("nemR3DarwinHandleMemoryAccessPageCheckerCallback: Spurious EPT fault\n", GCPhys));
591 return VINF_SUCCESS;
592 }
593 break;
594
595 default:
596 AssertLogRelMsgFailedReturn(("u2State=%#x\n", u2State), VERR_NEM_IPE_4);
597 }
598
599 /* Unmap and restart the instruction. */
600 int rc = nemR3DarwinUnmap(pVM, GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, X86_PAGE_SIZE, &u2State);
601 if (RT_SUCCESS(rc))
602 {
603 pInfo->u2NemState = u2State;
604 pState->fDidSomething = true;
605 pState->fCanResume = true;
606 Log5(("NEM GPA unmapped/exit: %RGp (was %s)\n", GCPhys, g_apszPageStates[u2State]));
607 return VINF_SUCCESS;
608 }
609
610 LogRel(("nemR3DarwinHandleMemoryAccessPageCheckerCallback/unmap: GCPhys=%RGp %s rc=%Rrc\n",
611 GCPhys, g_apszPageStates[u2State], rc));
612 return VERR_NEM_UNMAP_PAGES_FAILED;
613}
614
615
/**
 * Exports the guest state to HV for execution.
 *
 * Registers whose CPUMCTX_EXTRN_XXX flag is clear (i.e. the CPUMCTX copy is
 * current) are written to Hypervisor.framework; fully-extern groups are
 * skipped.  On return the whole context is marked extern again (owned by HV).
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling EMT.
 */
static int nemR3DarwinExportGuestState(PVMCC pVM, PVMCPUCC pVCpu)
{
    RT_NOREF(pVM);
    hv_return_t hrc = HV_SUCCESS;

    /* GPRs/PC/FPCR/FPSR: only enter the loop when at least one of them is
       not extern.  hv_return_t results are OR'ed together. */
    if (   (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR))
        != (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR))
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumRegs); i++)
        {
            if (!(s_aCpumRegs[i].fCpumExtrn & pVCpu->cpum.GstCtx.fExtrn))
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumRegs[i].offCpumCtx);
                hrc |= hv_vcpu_set_reg(pVCpu->nem.s.hVCpu, s_aCpumRegs[i].enmHvReg, *pu64);
            }
        }
    }

    if (   hrc == HV_SUCCESS
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_V0_V31))
    {
        /* SIMD/FP registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumFpRegs); i++)
        {
            hv_simd_fp_uchar16_t *pu128 = (hv_simd_fp_uchar16_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumFpRegs[i].offCpumCtx);
            hrc |= hv_vcpu_set_simd_fp_reg(pVCpu->nem.s.hVCpu, s_aCpumFpRegs[i].enmHvReg, *pu128);
        }
    }

    if (   hrc == HV_SUCCESS
        && (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR))
        != (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR))
    {
        /* System registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegs); i++)
        {
            if (!(s_aCpumSysRegs[i].fCpumExtrn & pVCpu->cpum.GstCtx.fExtrn))
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumSysRegs[i].offCpumCtx);
                hrc |= hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumSysRegs[i].enmHvReg, *pu64);
            }
        }
    }

    /* PSTATE is written through the CPSR register (32-bit value widened). */
    if (   hrc == HV_SUCCESS
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_PSTATE))
        hrc = hv_vcpu_set_reg(pVCpu->nem.s.hVCpu, HV_REG_CPSR, pVCpu->cpum.GstCtx.fPState);

    /* Everything now lives in HV again. */
    pVCpu->cpum.GstCtx.fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_NEM;
    return nemR3DarwinHvSts2Rc(hrc);
}
675
676
677/**
678 * Try initialize the native API.
679 *
680 * This may only do part of the job, more can be done in
681 * nemR3NativeInitAfterCPUM() and nemR3NativeInitCompleted().
682 *
683 * @returns VBox status code.
684 * @param pVM The cross context VM structure.
685 * @param fFallback Whether we're in fallback mode or use-NEM mode. In
686 * the latter we'll fail if we cannot initialize.
687 * @param fForced Whether the HMForced flag is set and we should
688 * fail if we cannot initialize.
689 */
690int nemR3NativeInit(PVM pVM, bool fFallback, bool fForced)
691{
692 AssertReturn(!pVM->nem.s.fCreatedVm, VERR_WRONG_ORDER);
693
694 /*
695 * Some state init.
696 */
697 PCFGMNODE pCfgNem = CFGMR3GetChild(CFGMR3GetRoot(pVM), "NEM/");
698 RT_NOREF(pCfgNem);
699
700 /*
701 * Error state.
702 * The error message will be non-empty on failure and 'rc' will be set too.
703 */
704 RTERRINFOSTATIC ErrInfo;
705 PRTERRINFO pErrInfo = RTErrInfoInitStatic(&ErrInfo);
706
707 int rc = VINF_SUCCESS;
708 hv_return_t hrc = hv_vm_create(NULL);
709 if (hrc == HV_SUCCESS)
710 {
711 pVM->nem.s.fCreatedVm = true;
712 VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_NATIVE_API);
713 Log(("NEM: Marked active!\n"));
714 PGMR3EnableNemMode(pVM);
715 }
716 else
717 rc = RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
718 "hv_vm_create() failed: %#x", hrc);
719
720 /*
721 * We only fail if in forced mode, otherwise just log the complaint and return.
722 */
723 Assert(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API || RTErrInfoIsSet(pErrInfo));
724 if ( (fForced || !fFallback)
725 && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NATIVE_API)
726 return VMSetError(pVM, RT_SUCCESS_NP(rc) ? VERR_NEM_NOT_AVAILABLE : rc, RT_SRC_POS, "%s", pErrInfo->pszMsg);
727
728if (RTErrInfoIsSet(pErrInfo))
729 LogRel(("NEM: Not available: %s\n", pErrInfo->pszMsg));
730 return VINF_SUCCESS;
731}
732
733
734/**
735 * Worker to create the vCPU handle on the EMT running it later on (as required by HV).
736 *
737 * @returns VBox status code
738 * @param pVM The VM handle.
739 * @param pVCpu The vCPU handle.
740 * @param idCpu ID of the CPU to create.
741 */
742static DECLCALLBACK(int) nemR3DarwinNativeInitVCpuOnEmt(PVM pVM, PVMCPU pVCpu, VMCPUID idCpu)
743{
744 hv_return_t hrc = hv_vcpu_create(&pVCpu->nem.s.hVCpu, &pVCpu->nem.s.pHvExit, NULL);
745 if (hrc != HV_SUCCESS)
746 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
747 "Call to hv_vcpu_create failed on vCPU %u: %#x (%Rrc)", idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));
748
749 if (idCpu == 0)
750 {
751 /** @todo */
752 }
753
754 return VINF_SUCCESS;
755}
756
757
758/**
759 * Worker to destroy the vCPU handle on the EMT running it later on (as required by HV).
760 *
761 * @returns VBox status code
762 * @param pVCpu The vCPU handle.
763 */
764static DECLCALLBACK(int) nemR3DarwinNativeTermVCpuOnEmt(PVMCPU pVCpu)
765{
766 hv_return_t hrc = hv_vcpu_destroy(pVCpu->nem.s.hVCpu);
767 Assert(hrc == HV_SUCCESS); RT_NOREF(hrc);
768 return VINF_SUCCESS;
769}
770
771
772/**
773 * This is called after CPUMR3Init is done.
774 *
775 * @returns VBox status code.
776 * @param pVM The VM handle..
777 */
778int nemR3NativeInitAfterCPUM(PVM pVM)
779{
780 /*
781 * Validate sanity.
782 */
783 AssertReturn(!pVM->nem.s.fCreatedEmts, VERR_WRONG_ORDER);
784 AssertReturn(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API, VERR_WRONG_ORDER);
785
786 /*
787 * Setup the EMTs.
788 */
789 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
790 {
791 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
792
793 int rc = VMR3ReqCallWait(pVM, idCpu, (PFNRT)nemR3DarwinNativeInitVCpuOnEmt, 3, pVM, pVCpu, idCpu);
794 if (RT_FAILURE(rc))
795 {
796 /* Rollback. */
797 while (idCpu--)
798 VMR3ReqCallWait(pVM, idCpu, (PFNRT)nemR3DarwinNativeTermVCpuOnEmt, 1, pVCpu);
799
800 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS, "Call to hv_vcpu_create failed: %Rrc", rc);
801 }
802 }
803
804 pVM->nem.s.fCreatedEmts = true;
805 return VINF_SUCCESS;
806}
807
808
/**
 * Init-completed notification; currently nothing to do for this backend.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   enmWhat Which init phase just completed.
 */
int nemR3NativeInitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
{
    RT_NOREF(pVM, enmWhat);
    return VINF_SUCCESS;
}
814
815
816int nemR3NativeTerm(PVM pVM)
817{
818 /*
819 * Delete the VM.
820 */
821
822 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu--)
823 {
824 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
825
826 /*
827 * Apple's documentation states that the vCPU should be destroyed
828 * on the thread running the vCPU but as all the other EMTs are gone
829 * at this point, destroying the VM would hang.
830 *
831 * We seem to be at luck here though as destroying apparently works
832 * from EMT(0) as well.
833 */
834 hv_return_t hrc = hv_vcpu_destroy(pVCpu->nem.s.hVCpu);
835 Assert(hrc == HV_SUCCESS); RT_NOREF(hrc);
836 }
837
838 pVM->nem.s.fCreatedEmts = false;
839 if (pVM->nem.s.fCreatedVm)
840 {
841 hv_return_t hrc = hv_vm_destroy();
842 if (hrc != HV_SUCCESS)
843 LogRel(("NEM: hv_vm_destroy() failed with %#x\n", hrc));
844
845 pVM->nem.s.fCreatedVm = false;
846 }
847 return VINF_SUCCESS;
848}
849
850
/**
 * VM reset notification.
 *
 * Nothing to do for this backend yet.
 *
 * @param   pVM The cross context VM structure.
 */
void nemR3NativeReset(PVM pVM)
{
    RT_NOREF(pVM);
}
860
861
/**
 * Reset CPU due to INIT IPI or hot (un)plugging.
 *
 * Nothing to do for this backend yet.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the CPU being
 *                      reset.
 * @param   fInitIpi    Whether this is the INIT IPI or hot (un)plugging case.
 */
void nemR3NativeResetCpu(PVMCPU pVCpu, bool fInitIpi)
{
    RT_NOREF(pVCpu, fInitIpi);
}
873
874
875/**
876 * Returns the byte size from the given access SAS value.
877 *
878 * @returns Number of bytes to transfer.
879 * @param uSas The SAS value to convert.
880 */
881DECLINLINE(size_t) nemR3DarwinGetByteCountFromSas(uint8_t uSas)
882{
883 switch (uSas)
884 {
885 case ARMV8_EC_ISS_DATA_ABRT_SAS_BYTE: return sizeof(uint8_t);
886 case ARMV8_EC_ISS_DATA_ABRT_SAS_HALFWORD: return sizeof(uint16_t);
887 case ARMV8_EC_ISS_DATA_ABRT_SAS_WORD: return sizeof(uint32_t);
888 case ARMV8_EC_ISS_DATA_ABRT_SAS_DWORD: return sizeof(uint64_t);
889 default:
890 AssertReleaseFailed();
891 }
892
893 return 0;
894}
895
896
/**
 * Sets the given general purpose register to the given value.
 *
 * @returns nothing.
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling EMT.
 * @param   uReg        The register index (0..30, the zero register x31 is rejected).
 * @param   f64BitReg   Flag whether to operate on a 64-bit or 32-bit register.
 * @param   fSignExtend Flag whether to sign extend the value.
 * @param   u64Val      The value.
 */
DECLINLINE(void) nemR3DarwinSetGReg(PVMCPU pVCpu, uint8_t uReg, bool f64BitReg, bool fSignExtend, uint64_t u64Val)
{
    AssertReturnVoid(uReg < 31);

    if (f64BitReg)
        pVCpu->cpum.GstCtx.aGRegs[uReg].x = fSignExtend ? (int64_t)u64Val : u64Val;
    else
        pVCpu->cpum.GstCtx.aGRegs[uReg].w = fSignExtend ? (int32_t)u64Val : u64Val; /** @todo Does this clear the upper half on real hardware? */

    /* Mark the register as not extern anymore (x0..x3 have individual flags;
       x4..x28 share one flag that must already be clear). */
    switch (uReg)
    {
        case 0:
            pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X0;
            break;
        case 1:
            pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X1;
            break;
        case 2:
            pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X2;
            break;
        case 3:
            pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X3;
            break;
        default:
            AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_X4_X28));
            /** @todo We need to import all missing registers in order to clear this flag (or just set it in HV from here). */
    }
}
937
938
939/**
940 * Gets the given general purpose register and returns the value.
941 *
942 * @returns Value from the given register.
943 * @param pVCpu The cross context virtual CPU structure of the
944 * calling EMT.
945 * @param uReg The register index.
946 */
947DECLINLINE(uint64_t) nemR3DarwinGetGReg(PVMCPU pVCpu, uint8_t uReg)
948{
949 AssertReturn(uReg < 31, 0);
950
951 /** @todo Import the register if extern. */
952 AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_GPRS_MASK));
953
954 return pVCpu->cpum.GstCtx.aGRegs[uReg].x;
955}
956
957
958/**
959 * Works on the data abort exception (which will be a MMIO access most of the time).
960 *
961 * @returns VBox strict status code.
962 * @param pVM The cross context VM structure.
963 * @param pVCpu The cross context virtual CPU structure of the
964 * calling EMT.
965 * @param uIss The instruction specific syndrome value.
966 * @param fInsn32Bit Flag whether the exception was caused by a 32-bit or 16-bit instruction.
967 * @param GCPtrDataAbrt The virtual GC address causing the data abort.
968 * @param GCPhysDataAbrt The physical GC address which caused the data abort.
969 */
970static VBOXSTRICTRC nemR3DarwinHandleExitExceptionDataAbort(PVM pVM, PVMCPU pVCpu, uint32_t uIss, bool fInsn32Bit,
971 RTGCPTR GCPtrDataAbrt, RTGCPHYS GCPhysDataAbrt)
972{
973 bool fIsv = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_ISV);
974 bool fL2Fault = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_S1PTW);
975 bool fWrite = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_WNR);
976 bool f64BitReg = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SF);
977 bool fSignExtend = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SSE);
978 uint8_t uReg = ARMV8_EC_ISS_DATA_ABRT_SRT_GET(uIss);
979 uint8_t uAcc = ARMV8_EC_ISS_DATA_ABRT_SAS_GET(uIss);
980 size_t cbAcc = nemR3DarwinGetByteCountFromSas(uAcc);
981 LogFlowFunc(("fIsv=%RTbool fL2Fault=%RTbool fWrite=%RTbool f64BitReg=%RTbool fSignExtend=%RTbool uReg=%u uAcc=%u GCPtrDataAbrt=%RGv GCPhysDataAbrt=%RGp\n",
982 fIsv, fL2Fault, fWrite, f64BitReg, fSignExtend, uReg, uAcc, GCPtrDataAbrt, GCPhysDataAbrt));
983
984 AssertReturn(fIsv, VERR_NOT_SUPPORTED); /** @todo Implement using IEM when this should occur. */
985
986 EMHistoryAddExit(pVCpu,
987 fWrite
988 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
989 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
990 pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());
991
992 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
993 uint64_t u64Val;
994 if (fWrite)
995 {
996 u64Val = nemR3DarwinGetGReg(pVCpu, uReg);
997 rcStrict = PGMPhysWrite(pVM, GCPhysDataAbrt, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
998 Log4(("MmioExit/%u: %08RX64: WRITE %#x LB %u, %.*Rhxs -> rcStrict=%Rrc\n",
999 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhysDataAbrt, cbAcc, cbAcc,
1000 &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
1001 }
1002 else
1003 {
1004 rcStrict = PGMPhysRead(pVM, GCPhysDataAbrt, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
1005 Log4(("MmioExit/%u: %08RX64: READ %#x LB %u -> %.*Rhxs rcStrict=%Rrc\n",
1006 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhysDataAbrt, cbAcc, cbAcc,
1007 &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
1008 if (rcStrict == VINF_SUCCESS)
1009 nemR3DarwinSetGReg(pVCpu, uReg, f64BitReg, fSignExtend, u64Val);
1010 }
1011
1012 if (rcStrict == VINF_SUCCESS)
1013 pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);
1014
1015 return rcStrict;
1016}
1017
1018
1019/**
1020 * Works on the trapped MRS, MSR and system instruction exception.
1021 *
1022 * @returns VBox strict status code.
1023 * @param pVM The cross context VM structure.
1024 * @param pVCpu The cross context virtual CPU structure of the
1025 * calling EMT.
1026 * @param uIss The instruction specific syndrome value.
1027 * @param fInsn32Bit Flag whether the exception was caused by a 32-bit or 16-bit instruction.
1028 */
1029static VBOXSTRICTRC nemR3DarwinHandleExitExceptionTrappedSysInsn(PVM pVM, PVMCPU pVCpu, uint32_t uIss, bool fInsn32Bit)
1030{
1031 bool fRead = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_DIRECTION_IS_READ(uIss);
1032 uint8_t uCRm = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_CRM_GET(uIss);
1033 uint8_t uReg = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_RT_GET(uIss);
1034 uint8_t uCRn = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_CRN_GET(uIss);
1035 uint8_t uOp1 = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP1_GET(uIss);
1036 uint8_t uOp2 = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP2_GET(uIss);
1037 uint8_t uOp0 = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP0_GET(uIss);
1038 uint16_t idSysReg = ARMV8_AARCH64_SYSREG_ID_CREATE(uOp0, uOp1, uCRn, uCRm, uOp2);
1039 LogFlowFunc(("fRead=%RTbool uCRm=%u uReg=%u uCRn=%u uOp1=%u uOp2=%u uOp0=%u idSysReg=%#x\n",
1040 fRead, uCRm, uReg, uCRn, uOp1, uOp2, uOp0, idSysReg));
1041
1042 /** @todo EMEXITTYPE_MSR_READ/EMEXITTYPE_MSR_WRITE are misnomers. */
1043 EMHistoryAddExit(pVCpu,
1044 fRead
1045 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ)
1046 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE),
1047 pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());
1048
1049 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1050 uint64_t u64Val = 0;
1051 if (fRead)
1052 {
1053 RT_NOREF(pVM);
1054 rcStrict = CPUMQueryGuestSysReg(pVCpu, idSysReg, &u64Val);
1055 Log4(("SysInsnExit/%u: %08RX64: READ %u:%u:%u:%u:%u -> %#RX64 rcStrict=%Rrc\n",
1056 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, uOp0, uOp1, uCRn, uCRm, uOp2, u64Val,
1057 VBOXSTRICTRC_VAL(rcStrict) ));
1058 if (rcStrict == VINF_SUCCESS)
1059 nemR3DarwinSetGReg(pVCpu, uReg, true /*f64BitReg*/, false /*fSignExtend*/, u64Val);
1060 }
1061 else
1062 {
1063 u64Val = nemR3DarwinGetGReg(pVCpu, uReg);
1064 rcStrict = CPUMSetGuestSysReg(pVCpu, idSysReg, u64Val);
1065 Log4(("SysInsnExit/%u: %08RX64: WRITE %u:%u:%u:%u:%u %#RX64 -> rcStrict=%Rrc\n",
1066 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, uOp0, uOp1, uCRn, uCRm, uOp2, u64Val,
1067 VBOXSTRICTRC_VAL(rcStrict) ));
1068 }
1069
1070 if (rcStrict == VINF_SUCCESS)
1071 pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);
1072
1073 return rcStrict;
1074}
1075
1076
1077/**
1078 * Handles an exception VM exit.
1079 *
1080 * @returns VBox strict status code.
1081 * @param pVM The cross context VM structure.
1082 * @param pVCpu The cross context virtual CPU structure of the
1083 * calling EMT.
1084 * @param pExit Pointer to the exit information.
1085 */
1086static VBOXSTRICTRC nemR3DarwinHandleExitException(PVM pVM, PVMCPU pVCpu, const hv_vcpu_exit_t *pExit)
1087{
1088 uint32_t uEc = ARMV8_ESR_EL2_EC_GET(pExit->exception.syndrome);
1089 uint32_t uIss = ARMV8_ESR_EL2_ISS_GET(pExit->exception.syndrome);
1090 bool fInsn32Bit = ARMV8_ESR_EL2_IL_IS_32BIT(pExit->exception.syndrome);
1091
1092 LogFlowFunc(("pVM=%p pVCpu=%p{.idCpu=%u} uEc=%u{%s} uIss=%#RX32 fInsn32Bit=%RTbool\n",
1093 pVM, pVCpu, pVCpu->idCpu, uEc, nemR3DarwinEsrEl2EcStringify(uEc), uIss, fInsn32Bit));
1094
1095 switch (uEc)
1096 {
1097 case ARMV8_ESR_EL2_DATA_ABORT_FROM_LOWER_EL:
1098 return nemR3DarwinHandleExitExceptionDataAbort(pVM, pVCpu, uIss, fInsn32Bit, pExit->exception.virtual_address,
1099 pExit->exception.physical_address);
1100 case ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_SYS_INSN:
1101 return nemR3DarwinHandleExitExceptionTrappedSysInsn(pVM, pVCpu, uIss, fInsn32Bit);
1102 case ARMV8_ESR_EL2_EC_UNKNOWN:
1103 default:
1104 LogRel(("NEM/Darwin: Unknown Exception Class in syndrome: uEc=%u{%s} uIss=%#RX32 fInsn32Bit=%RTbool\n",
1105 uEc, nemR3DarwinEsrEl2EcStringify(uEc), uIss, fInsn32Bit));
1106 return VERR_NOT_IMPLEMENTED;
1107 }
1108
1109 return VINF_SUCCESS;
1110}
1111
1112
1113/**
1114 * Handles an exit from hv_vcpu_run().
1115 *
1116 * @returns VBox strict status code.
1117 * @param pVM The cross context VM structure.
1118 * @param pVCpu The cross context virtual CPU structure of the
1119 * calling EMT.
1120 */
1121static VBOXSTRICTRC nemR3DarwinHandleExit(PVM pVM, PVMCPU pVCpu)
1122{
1123 int rc = nemR3DarwinCopyStateFromHv(pVM, pVCpu, CPUMCTX_EXTRN_ALL);
1124 if (RT_FAILURE(rc))
1125 return rc;
1126
1127#ifdef LOG_ENABLED
1128 if (LogIs3Enabled())
1129 nemR3DarwinLogState(pVM, pVCpu);
1130#endif
1131
1132 hv_vcpu_exit_t *pExit = pVCpu->nem.s.pHvExit;
1133 switch (pExit->reason)
1134 {
1135 case HV_EXIT_REASON_CANCELED:
1136 return VINF_EM_RAW_INTERRUPT;
1137 case HV_EXIT_REASON_EXCEPTION:
1138 return nemR3DarwinHandleExitException(pVM, pVCpu, pExit);
1139 default:
1140 AssertReleaseFailed();
1141 break;
1142 }
1143
1144 return VERR_INVALID_STATE;
1145}
1146
1147
1148/**
1149 * Runs the guest once until an exit occurs.
1150 *
1151 * @returns HV status code.
1152 * @param pVM The cross context VM structure.
1153 * @param pVCpu The cross context virtual CPU structure.
1154 */
1155static hv_return_t nemR3DarwinRunGuest(PVM pVM, PVMCPU pVCpu)
1156{
1157 TMNotifyStartOfExecution(pVM, pVCpu);
1158
1159 hv_return_t hrc = hv_vcpu_run(pVCpu->nem.s.hVCpu);
1160
1161 TMNotifyEndOfExecution(pVM, pVCpu, ASMReadTSC());
1162
1163 return hrc;
1164}
1165
1166
1167/**
1168 * Prepares the VM to run the guest.
1169 *
1170 * @returns Strict VBox status code.
1171 * @param pVM The cross context VM structure.
1172 * @param pVCpu The cross context virtual CPU structure.
1173 * @param fSingleStepping Flag whether we run in single stepping mode.
1174 */
1175static VBOXSTRICTRC nemR3DarwinPreRunGuest(PVM pVM, PVMCPU pVCpu, bool fSingleStepping)
1176{
1177#ifdef LOG_ENABLED
1178 if (LogIs3Enabled())
1179 nemR3DarwinLogState(pVM, pVCpu);
1180#endif
1181
1182 /** @todo */ RT_NOREF(fSingleStepping);
1183 int rc = nemR3DarwinExportGuestState(pVM, pVCpu);
1184 AssertRCReturn(rc, rc);
1185
1186 LogFlowFunc(("Running vCPU\n"));
1187 pVCpu->nem.s.fEventPending = false;
1188 return VINF_SUCCESS;
1189}
1190
1191
1192/**
1193 * The normal runloop (no debugging features enabled).
1194 *
1195 * @returns Strict VBox status code.
1196 * @param pVM The cross context VM structure.
1197 * @param pVCpu The cross context virtual CPU structure.
1198 */
1199static VBOXSTRICTRC nemR3DarwinRunGuestNormal(PVM pVM, PVMCPU pVCpu)
1200{
1201 /*
1202 * The run loop.
1203 *
1204 * Current approach to state updating to use the sledgehammer and sync
1205 * everything every time. This will be optimized later.
1206 */
1207
1208 /*
1209 * Poll timers and run for a bit.
1210 */
1211 /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
1212 * the whole polling job when timers have changed... */
1213 uint64_t offDeltaIgnored;
1214 uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
1215 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1216 for (unsigned iLoop = 0;; iLoop++)
1217 {
1218 rcStrict = nemR3DarwinPreRunGuest(pVM, pVCpu, false /* fSingleStepping */);
1219 if (rcStrict != VINF_SUCCESS)
1220 break;
1221
1222 hv_return_t hrc = nemR3DarwinRunGuest(pVM, pVCpu);
1223 if (hrc == HV_SUCCESS)
1224 {
1225 /*
1226 * Deal with the message.
1227 */
1228 rcStrict = nemR3DarwinHandleExit(pVM, pVCpu);
1229 if (rcStrict == VINF_SUCCESS)
1230 { /* hopefully likely */ }
1231 else
1232 {
1233 LogFlow(("NEM/%u: breaking: nemR3DarwinHandleExit -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
1234 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
1235 break;
1236 }
1237 }
1238 else
1239 {
1240 AssertLogRelMsgFailedReturn(("hv_vcpu_run()) failed for CPU #%u: %#x \n",
1241 pVCpu->idCpu, hrc), VERR_NEM_IPE_0);
1242 }
1243 } /* the run loop */
1244
1245 return rcStrict;
1246}
1247
1248
/**
 * Outer guest execution entry point for the NEM backend.
 *
 * Transitions the vCPU into the NEM-exec state, runs the inner loop, and on
 * return converts state and imports whatever guest context the caller (EM/IEM)
 * will likely need.
 */
VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu)
{
#ifdef LOG_ENABLED
    if (LogIs3Enabled())
        nemR3DarwinLogState(pVM, pVCpu);
#endif

    AssertReturn(NEMR3CanExecuteGuest(pVM, pVCpu), VERR_NEM_IPE_9);

    /*
     * Try switch to NEM runloop state.
     */
    /* CMPXCHG fails if another thread already canceled us (moved us to *_CANCELED);
       in that case acknowledge the cancellation and bail out immediately. */
    if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
    { /* likely */ }
    else
    {
        VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
        LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
        return VINF_SUCCESS;
    }

    VBOXSTRICTRC rcStrict;
    /* The debug runloop is not implemented yet for this backend, hence the #if 0. */
#if 0
    if (   !pVCpu->nem.s.fUseDebugLoop
        && !nemR3DarwinAnyExpensiveProbesEnabled()
        && !DBGFIsStepping(pVCpu)
        && !pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledInt3Breakpoints)
#endif
        rcStrict = nemR3DarwinRunGuestNormal(pVM, pVCpu);
#if 0
    else
        rcStrict = nemR3DarwinRunGuestDebug(pVM, pVCpu);
#endif

    /* A ring-3 return request is not an error for the caller. */
    if (rcStrict == VINF_EM_RAW_TO_R3)
        rcStrict = VINF_SUCCESS;

    /*
     * Convert any pending HM events back to TRPM due to premature exits.
     *
     * This is because execution may continue from IEM and we would need to inject
     * the event from there (hence place it back in TRPM).
     */
    if (pVCpu->nem.s.fEventPending)
    {
        /** @todo */
    }


    /* Leave the NEM-exec state again, honoring a possible concurrent cancel. */
    if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
        VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);

    if (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL))
    {
        /* Try anticipate what we might need. */
        uint64_t fImport = NEM_DARWIN_CPUMCTX_EXTRN_MASK_FOR_IEM;
        if (   (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
            || RT_FAILURE(rcStrict))
            fImport = CPUMCTX_EXTRN_ALL;
        /* NOTE(review): these interrupt force-flags look x86-centric (PIC/APIC/NMI/SMI)
           for an ARMv8 backend — presumably placeholders; confirm against VMCPU_FF defs. */
        else if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_APIC
                                            | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
            fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;

        if (pVCpu->cpum.GstCtx.fExtrn & fImport)
        {
            /* Only import what is external currently. */
            int rc2 = nemR3DarwinCopyStateFromHv(pVM, pVCpu, fImport);
            if (RT_SUCCESS(rc2))
                pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
            else if (RT_SUCCESS(rcStrict))
                rcStrict = rc2;
            if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
                pVCpu->cpum.GstCtx.fExtrn = 0;
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
        }
        else
            STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
    }
    else
    {
        STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
        pVCpu->cpum.GstCtx.fExtrn = 0;
    }

    return rcStrict;
}
1335
1336
1337VMMR3_INT_DECL(bool) NEMR3CanExecuteGuest(PVM pVM, PVMCPU pVCpu)
1338{
1339 RT_NOREF(pVM, pVCpu);
1340 return true; /** @todo Are there any cases where we have to emulate? */
1341}
1342
1343
1344bool nemR3NativeSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
1345{
1346 VMCPU_ASSERT_EMT(pVCpu);
1347 bool fOld = pVCpu->nem.s.fSingleInstruction;
1348 pVCpu->nem.s.fSingleInstruction = fEnable;
1349 pVCpu->nem.s.fUseDebugLoop = fEnable || pVM->nem.s.fUseDebugLoop;
1350 return fOld;
1351}
1352
1353
1354void nemR3NativeNotifyFF(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
1355{
1356 LogFlowFunc(("pVM=%p pVCpu=%p fFlags=%#x\n", pVM, pVCpu, fFlags));
1357
1358 RT_NOREF(pVM, fFlags);
1359
1360 hv_return_t hrc = hv_vcpus_exit(&pVCpu->nem.s.hVCpu, 1);
1361 if (hrc != HV_SUCCESS)
1362 LogRel(("NEM: hv_vcpus_exit(%u, 1) failed with %#x\n", pVCpu->nem.s.hVCpu, hrc));
1363}
1364
1365
1366DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChanged(PVM pVM, bool fUseDebugLoop)
1367{
1368 RT_NOREF(pVM, fUseDebugLoop);
1369 AssertReleaseFailed();
1370 return false;
1371}
1372
1373
1374DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu, bool fUseDebugLoop)
1375{
1376 RT_NOREF(pVM, pVCpu, fUseDebugLoop);
1377 return fUseDebugLoop;
1378}
1379
1380
1381VMMR3_INT_DECL(int) NEMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvR3,
1382 uint8_t *pu2State, uint32_t *puNemRange)
1383{
1384 RT_NOREF(pVM, puNemRange);
1385
1386 Log5(("NEMR3NotifyPhysRamRegister: %RGp LB %RGp, pvR3=%p\n", GCPhys, cb, pvR3));
1387#if defined(VBOX_WITH_PGM_NEM_MODE)
1388 if (pvR3)
1389 {
1390 int rc = nemR3DarwinMap(pVM, GCPhys, pvR3, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
1391 if (RT_FAILURE(rc))
1392 {
1393 LogRel(("NEMR3NotifyPhysRamRegister: GCPhys=%RGp LB %RGp pvR3=%p rc=%Rrc\n", GCPhys, cb, pvR3, rc));
1394 return VERR_NEM_MAP_PAGES_FAILED;
1395 }
1396 }
1397 return VINF_SUCCESS;
1398#else
1399 RT_NOREF(pVM, GCPhys, cb, pvR3);
1400 return VERR_NEM_MAP_PAGES_FAILED;
1401#endif
1402}
1403
1404
1405VMMR3_INT_DECL(bool) NEMR3IsMmio2DirtyPageTrackingSupported(PVM pVM)
1406{
1407 RT_NOREF(pVM);
1408 return false;
1409}
1410
1411
1412VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
1413 void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
1414{
1415 RT_NOREF(pVM, puNemRange, pvRam, fFlags);
1416
1417 Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p (%d)\n",
1418 GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, *pu2State));
1419
1420#if defined(VBOX_WITH_PGM_NEM_MODE)
1421 /*
1422 * Unmap the RAM we're replacing.
1423 */
1424 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
1425 {
1426 int rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
1427 if (RT_SUCCESS(rc))
1428 { /* likely */ }
1429 else if (pvMmio2)
1430 LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rc(ignored)\n",
1431 GCPhys, cb, fFlags, rc));
1432 else
1433 {
1434 LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc\n",
1435 GCPhys, cb, fFlags, rc));
1436 return VERR_NEM_UNMAP_PAGES_FAILED;
1437 }
1438 }
1439
1440 /*
1441 * Map MMIO2 if any.
1442 */
1443 if (pvMmio2)
1444 {
1445 Assert(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2);
1446 int rc = nemR3DarwinMap(pVM, GCPhys, pvMmio2, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
1447 if (RT_FAILURE(rc))
1448 {
1449 LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x pvMmio2=%p: Map -> rc=%Rrc\n",
1450 GCPhys, cb, fFlags, pvMmio2, rc));
1451 return VERR_NEM_MAP_PAGES_FAILED;
1452 }
1453 }
1454 else
1455 Assert(!(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2));
1456
1457#else
1458 RT_NOREF(pVM, GCPhys, cb, pvRam, pvMmio2);
1459 *pu2State = (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE) ? UINT8_MAX : NEM_DARWIN_PAGE_STATE_UNMAPPED;
1460#endif
1461 return VINF_SUCCESS;
1462}
1463
1464
1465VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
1466 void *pvRam, void *pvMmio2, uint32_t *puNemRange)
1467{
1468 RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, puNemRange);
1469 return VINF_SUCCESS;
1470}
1471
1472
/**
 * Unmaps an MMIO/MMIO2 range and, for the replace case, restores the RAM
 * mapping underneath it.
 *
 * NOTE(review): a later successful nemR3DarwinMap() overwrites an earlier
 * VERR_NEM_UNMAP_PAGES_FAILED in rc — confirm this is intentional.
 */
VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExUnmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, void *pvRam,
                                               void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
{
    RT_NOREF(pVM, puNemRange);

    Log5(("NEMR3NotifyPhysMmioExUnmap: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p uNemRange=%#x (%#x)\n",
          GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, puNemRange, *puNemRange));

    int rc = VINF_SUCCESS;
#if defined(VBOX_WITH_PGM_NEM_MODE)
    /*
     * Unmap the MMIO2 pages.
     */
    /** @todo If we implement aliasing (MMIO2 page aliased into MMIO range),
     *        we may have more stuff to unmap even in case of pure MMIO... */
    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2)
    {
        rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
        if (RT_FAILURE(rc))
        {
            LogRel2(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc\n",
                     GCPhys, cb, fFlags, rc));
            rc = VERR_NEM_UNMAP_PAGES_FAILED;
        }
    }

    /* Ensure the page is marked as unmapped if relevant. */
    Assert(!pu2State || *pu2State == NEM_DARWIN_PAGE_STATE_UNMAPPED);

    /*
     * Restore the RAM we replaced.
     */
    if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
    {
        AssertPtr(pvRam);
        /* Remap the original RAM pages with full RWX access. */
        rc = nemR3DarwinMap(pVM, GCPhys, pvRam, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
        if (RT_SUCCESS(rc))
        { /* likely */ }
        else
        {
            LogRel(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp pvMmio2=%p rc=%Rrc\n", GCPhys, cb, pvMmio2, rc));
            rc = VERR_NEM_MAP_PAGES_FAILED;
        }
    }

    RT_NOREF(pvMmio2);
#else
    RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State);
    if (pu2State)
        *pu2State = UINT8_MAX;
    rc = VERR_NEM_UNMAP_PAGES_FAILED;
#endif
    return rc;
}
1527
1528
1529VMMR3_INT_DECL(int) NEMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t uNemRange,
1530 void *pvBitmap, size_t cbBitmap)
1531{
1532 RT_NOREF(pVM, GCPhys, cb, uNemRange, pvBitmap, cbBitmap);
1533 AssertReleaseFailed();
1534 return VERR_NOT_IMPLEMENTED;
1535}
1536
1537
1538VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages, uint32_t fFlags,
1539 uint8_t *pu2State, uint32_t *puNemRange)
1540{
1541 RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);
1542
1543 Log5(("nemR3NativeNotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags));
1544 *pu2State = UINT8_MAX;
1545 *puNemRange = 0;
1546 return VINF_SUCCESS;
1547}
1548
1549
1550VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages,
1551 uint32_t fFlags, uint8_t *pu2State, uint32_t *puNemRange)
1552{
1553 Log5(("nemR3NativeNotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p (%d) puNemRange=%p (%#x)\n",
1554 GCPhys, cb, pvPages, fFlags, pu2State, *pu2State, puNemRange, *puNemRange));
1555 *pu2State = UINT8_MAX;
1556
1557#if defined(VBOX_WITH_PGM_NEM_MODE)
1558 /*
1559 * (Re-)map readonly.
1560 */
1561 AssertPtrReturn(pvPages, VERR_INVALID_POINTER);
1562 int rc = nemR3DarwinMap(pVM, GCPhys, pvPages, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE, pu2State);
1563 if (RT_FAILURE(rc))
1564 {
1565 LogRel(("nemR3NativeNotifyPhysRomRegisterLate: GCPhys=%RGp LB %RGp pvPages=%p fFlags=%#x rc=%Rrc\n",
1566 GCPhys, cb, pvPages, fFlags, rc));
1567 return VERR_NEM_MAP_PAGES_FAILED;
1568 }
1569 RT_NOREF(fFlags, puNemRange);
1570 return VINF_SUCCESS;
1571#else
1572 RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);
1573 return VERR_NEM_MAP_PAGES_FAILED;
1574#endif
1575}
1576
1577
1578VMM_INT_DECL(void) NEMHCNotifyHandlerPhysicalDeregister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
1579 RTR3PTR pvMemR3, uint8_t *pu2State)
1580{
1581 RT_NOREF(pVM);
1582
1583 Log5(("NEMHCNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d pvMemR3=%p pu2State=%p (%d)\n",
1584 GCPhys, cb, enmKind, pvMemR3, pu2State, *pu2State));
1585
1586 *pu2State = UINT8_MAX;
1587#if defined(VBOX_WITH_PGM_NEM_MODE)
1588 if (pvMemR3)
1589 {
1590 int rc = nemR3DarwinMap(pVM, GCPhys, pvMemR3, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
1591 AssertLogRelMsgRC(rc, ("NEMHCNotifyHandlerPhysicalDeregister: nemR3DarwinMap(,%p,%RGp,%RGp,) -> %Rrc\n",
1592 pvMemR3, GCPhys, cb, rc));
1593 }
1594 RT_NOREF(enmKind);
1595#else
1596 RT_NOREF(pVM, enmKind, GCPhys, cb, pvMemR3);
1597 AssertFailed();
1598#endif
1599}
1600
1601
1602VMMR3_INT_DECL(void) NEMR3NotifySetA20(PVMCPU pVCpu, bool fEnabled)
1603{
1604 Log(("NEMR3NotifySetA20: fEnabled=%RTbool\n", fEnabled));
1605 RT_NOREF(pVCpu, fEnabled);
1606}
1607
1608
1609void nemHCNativeNotifyHandlerPhysicalRegister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
1610{
1611 Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
1612 NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb);
1613}
1614
1615
1616void nemHCNativeNotifyHandlerPhysicalModify(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
1617 RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
1618{
1619 Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
1620 GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
1621 NOREF(pVM); NOREF(enmKind); NOREF(GCPhysOld); NOREF(GCPhysNew); NOREF(cb); NOREF(fRestoreAsRAM);
1622}
1623
1624
1625int nemHCNativeNotifyPhysPageAllocated(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
1626 PGMPAGETYPE enmType, uint8_t *pu2State)
1627{
1628 Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
1629 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
1630 RT_NOREF(HCPhys, fPageProt, enmType);
1631
1632 return nemR3DarwinUnmap(pVM, GCPhys, GUEST_PAGE_SIZE, pu2State);
1633}
1634
1635
1636VMM_INT_DECL(void) NEMHCNotifyPhysPageProtChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, RTR3PTR pvR3, uint32_t fPageProt,
1637 PGMPAGETYPE enmType, uint8_t *pu2State)
1638{
1639 Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
1640 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
1641 RT_NOREF(HCPhys, pvR3, fPageProt, enmType)
1642
1643 nemR3DarwinUnmap(pVM, GCPhys, GUEST_PAGE_SIZE, pu2State);
1644}
1645
1646
1647VMM_INT_DECL(void) NEMHCNotifyPhysPageChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
1648 RTR3PTR pvNewR3, uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
1649{
1650 Log5(("NEMHCNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
1651 GCPhys, HCPhysPrev, HCPhysNew, fPageProt, enmType, *pu2State));
1652 RT_NOREF(HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType);
1653
1654 nemR3DarwinUnmap(pVM, GCPhys, GUEST_PAGE_SIZE, pu2State);
1655}
1656
1657
1658/**
1659 * Interface for importing state on demand (used by IEM).
1660 *
1661 * @returns VBox status code.
1662 * @param pVCpu The cross context CPU structure.
1663 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
1664 */
1665VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
1666{
1667 LogFlowFunc(("pVCpu=%p fWhat=%RX64\n", pVCpu, fWhat));
1668 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);
1669
1670 return nemR3DarwinCopyStateFromHv(pVCpu->pVMR3, pVCpu, fWhat);
1671}
1672
1673
1674/**
1675 * Query the CPU tick counter and optionally the TSC_AUX MSR value.
1676 *
1677 * @returns VBox status code.
1678 * @param pVCpu The cross context CPU structure.
1679 * @param pcTicks Where to return the CPU tick count.
1680 * @param puAux Where to return the TSC_AUX register value.
1681 */
1682VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPUCC pVCpu, uint64_t *pcTicks, uint32_t *puAux)
1683{
1684 LogFlowFunc(("pVCpu=%p pcTicks=%RX64 puAux=%RX32\n", pVCpu, pcTicks, puAux));
1685 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatQueryCpuTick);
1686
1687 AssertReleaseFailed();
1688 return VERR_NOT_IMPLEMENTED;
1689}
1690
1691
1692/**
1693 * Resumes CPU clock (TSC) on all virtual CPUs.
1694 *
1695 * This is called by TM when the VM is started, restored, resumed or similar.
1696 *
1697 * @returns VBox status code.
1698 * @param pVM The cross context VM structure.
1699 * @param pVCpu The cross context CPU structure of the calling EMT.
1700 * @param uPausedTscValue The TSC value at the time of pausing.
1701 */
1702VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uPausedTscValue)
1703{
1704 LogFlowFunc(("pVM=%p pVCpu=%p uPausedTscValue=%RX64\n", pVCpu, uPausedTscValue));
1705 VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
1706 AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);
1707
1708 //AssertReleaseFailed();
1709 return VINF_SUCCESS;
1710}
1711
1712
1713/**
1714 * Returns features supported by the NEM backend.
1715 *
1716 * @returns Flags of features supported by the native NEM backend.
1717 * @param pVM The cross context VM structure.
1718 */
1719VMM_INT_DECL(uint32_t) NEMHCGetFeatures(PVMCC pVM)
1720{
1721 RT_NOREF(pVM);
1722 /*
1723 * Apple's Hypervisor.framework is not supported if the CPU doesn't support nested paging
1724 * and unrestricted guest execution support so we can safely return these flags here always.
1725 */
1726 return NEM_FEAT_F_NESTED_PAGING | NEM_FEAT_F_FULL_GST_EXEC | NEM_FEAT_F_XSAVE_XRSTOR;
1727}
1728
1729
1730/** @page pg_nem_darwin NEM/darwin - Native Execution Manager, macOS.
1731 *
1732 * @todo Add notes as the implementation progresses...
1733 */
1734
注意: 瀏覽 TracBrowser 來幫助您使用儲存庫瀏覽器

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette