VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/NEMR3Native-darwin-armv8.cpp@99995

Last change on this file since 99995 was 99976, checked in by vboxsync, 22 months ago

VMM/NEMR3Native-darwin-armv8: Set the vTimer offset to account for suspending VMs, bugref:10387 bugref:10390

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 63.3 KB
 
1/* $Id: NEMR3Native-darwin-armv8.cpp 99976 2023-05-25 11:44:00Z vboxsync $ */
2/** @file
3 * NEM - Native execution manager, native ring-3 macOS backend using Hypervisor.framework, ARMv8 variant.
4 *
5 * Log group 2: Exit logging.
6 * Log group 3: Log context on exit.
7 * Log group 5: Ring-3 memory management.
8 */
9
10/*
11 * Copyright (C) 2023 Oracle and/or its affiliates.
12 *
13 * This file is part of VirtualBox base platform packages, as
14 * available from https://www.virtualbox.org.
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License
18 * as published by the Free Software Foundation, in version 3 of the
19 * License.
20 *
21 * This program is distributed in the hope that it will be useful, but
22 * WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
24 * General Public License for more details.
25 *
26 * You should have received a copy of the GNU General Public License
27 * along with this program; if not, see <https://www.gnu.org/licenses>.
28 *
29 * SPDX-License-Identifier: GPL-3.0-only
30 */
31
32
33/*********************************************************************************************************************************
34* Header Files *
35*********************************************************************************************************************************/
36#define LOG_GROUP LOG_GROUP_NEM
37#define VMCPU_INCL_CPUM_GST_CTX
38#define CPUM_WITH_NONCONST_HOST_FEATURES /* required for initializing parts of the g_CpumHostFeatures structure here. */
39#include <VBox/vmm/nem.h>
40#include <VBox/vmm/iem.h>
41#include <VBox/vmm/em.h>
42#include <VBox/vmm/gic.h>
43#include <VBox/vmm/pdm.h>
44#include <VBox/vmm/dbgftrace.h>
45#include <VBox/vmm/gcm.h>
46#include "NEMInternal.h"
47#include <VBox/vmm/vmcc.h>
48#include "dtrace/VBoxVMM.h"
49
50#include <iprt/armv8.h>
51#include <iprt/asm.h>
52#include <iprt/asm-arm.h>
53#include <iprt/ldr.h>
54#include <iprt/mem.h>
55#include <iprt/path.h>
56#include <iprt/string.h>
57#include <iprt/system.h>
58#include <iprt/utf16.h>
59
60#include <mach/mach_time.h>
61#include <mach/kern_return.h>
62
63#include <Hypervisor/Hypervisor.h>
64
65
66/*********************************************************************************************************************************
67* Defined Constants And Macros *
68*********************************************************************************************************************************/
69
70
71/** @todo The vTimer PPI for the virt platform, make it configurable. */
72#define NEM_DARWIN_VTIMER_GIC_PPI_IRQ 11
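/* Note: GIC PPIs occupy INTIDs 16..31, so PPI index 11 corresponds to INTID 27,
 * the standard EL1 virtual timer interrupt on ARM's virt platform. */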
73
74
75/*********************************************************************************************************************************
76* Structures and Typedefs *
77*********************************************************************************************************************************/
78
79
80/*********************************************************************************************************************************
81* Global Variables *
82*********************************************************************************************************************************/
83/** The general registers. */
84static const struct
85{
86 hv_reg_t enmHvReg;
87 uint32_t fCpumExtrn;
88 uint32_t offCpumCtx;
89} s_aCpumRegs[] =
90{
91#define CPUM_GREG_EMIT_X0_X3(a_Idx) { HV_REG_X ## a_Idx, CPUMCTX_EXTRN_X ## a_Idx, RT_UOFFSETOF(CPUMCTX, aGRegs[a_Idx].x) }
92#define CPUM_GREG_EMIT_X4_X28(a_Idx) { HV_REG_X ## a_Idx, CPUMCTX_EXTRN_X4_X28, RT_UOFFSETOF(CPUMCTX, aGRegs[a_Idx].x) }
93 CPUM_GREG_EMIT_X0_X3(0),
94 CPUM_GREG_EMIT_X0_X3(1),
95 CPUM_GREG_EMIT_X0_X3(2),
96 CPUM_GREG_EMIT_X0_X3(3),
97 CPUM_GREG_EMIT_X4_X28(4),
98 CPUM_GREG_EMIT_X4_X28(5),
99 CPUM_GREG_EMIT_X4_X28(6),
100 CPUM_GREG_EMIT_X4_X28(7),
101 CPUM_GREG_EMIT_X4_X28(8),
102 CPUM_GREG_EMIT_X4_X28(9),
103 CPUM_GREG_EMIT_X4_X28(10),
104 CPUM_GREG_EMIT_X4_X28(11),
105 CPUM_GREG_EMIT_X4_X28(12),
106 CPUM_GREG_EMIT_X4_X28(13),
107 CPUM_GREG_EMIT_X4_X28(14),
108 CPUM_GREG_EMIT_X4_X28(15),
109 CPUM_GREG_EMIT_X4_X28(16),
110 CPUM_GREG_EMIT_X4_X28(17),
111 CPUM_GREG_EMIT_X4_X28(18),
112 CPUM_GREG_EMIT_X4_X28(19),
113 CPUM_GREG_EMIT_X4_X28(20),
114 CPUM_GREG_EMIT_X4_X28(21),
115 CPUM_GREG_EMIT_X4_X28(22),
116 CPUM_GREG_EMIT_X4_X28(23),
117 CPUM_GREG_EMIT_X4_X28(24),
118 CPUM_GREG_EMIT_X4_X28(25),
119 CPUM_GREG_EMIT_X4_X28(26),
120 CPUM_GREG_EMIT_X4_X28(27),
121 CPUM_GREG_EMIT_X4_X28(28),
122 { HV_REG_FP, CPUMCTX_EXTRN_FP, RT_UOFFSETOF(CPUMCTX, aGRegs[29].x) },
123 { HV_REG_LR, CPUMCTX_EXTRN_LR, RT_UOFFSETOF(CPUMCTX, aGRegs[30].x) },
124 { HV_REG_PC, CPUMCTX_EXTRN_PC, RT_UOFFSETOF(CPUMCTX, Pc.u64) },
125 { HV_REG_FPCR, CPUMCTX_EXTRN_FPCR, RT_UOFFSETOF(CPUMCTX, fpcr) },
126 { HV_REG_FPSR, CPUMCTX_EXTRN_FPSR, RT_UOFFSETOF(CPUMCTX, fpsr) }
127#undef CPUM_GREG_EMIT_X0_X3
128#undef CPUM_GREG_EMIT_X4_X28
129};
130/** SIMD/FP registers. */
131static const struct
132{
133 hv_simd_fp_reg_t enmHvReg;
134 uint32_t offCpumCtx;
135} s_aCpumFpRegs[] =
136{
137#define CPUM_VREG_EMIT(a_Idx) { HV_SIMD_FP_REG_Q ## a_Idx, RT_UOFFSETOF(CPUMCTX, aVRegs[a_Idx].v) }
138 CPUM_VREG_EMIT(0),
139 CPUM_VREG_EMIT(1),
140 CPUM_VREG_EMIT(2),
141 CPUM_VREG_EMIT(3),
142 CPUM_VREG_EMIT(4),
143 CPUM_VREG_EMIT(5),
144 CPUM_VREG_EMIT(6),
145 CPUM_VREG_EMIT(7),
146 CPUM_VREG_EMIT(8),
147 CPUM_VREG_EMIT(9),
148 CPUM_VREG_EMIT(10),
149 CPUM_VREG_EMIT(11),
150 CPUM_VREG_EMIT(12),
151 CPUM_VREG_EMIT(13),
152 CPUM_VREG_EMIT(14),
153 CPUM_VREG_EMIT(15),
154 CPUM_VREG_EMIT(16),
155 CPUM_VREG_EMIT(17),
156 CPUM_VREG_EMIT(18),
157 CPUM_VREG_EMIT(19),
158 CPUM_VREG_EMIT(20),
159 CPUM_VREG_EMIT(21),
160 CPUM_VREG_EMIT(22),
161 CPUM_VREG_EMIT(23),
162 CPUM_VREG_EMIT(24),
163 CPUM_VREG_EMIT(25),
164 CPUM_VREG_EMIT(26),
165 CPUM_VREG_EMIT(27),
166 CPUM_VREG_EMIT(28),
167 CPUM_VREG_EMIT(29),
168 CPUM_VREG_EMIT(30),
169 CPUM_VREG_EMIT(31)
170#undef CPUM_VREG_EMIT
171};
172/** System registers. */
173static const struct
174{
175 hv_sys_reg_t enmHvReg;
176 uint32_t fCpumExtrn;
177 uint32_t offCpumCtx;
178} s_aCpumSysRegs[] =
179{
180 { HV_SYS_REG_SP_EL0, CPUMCTX_EXTRN_SP, RT_UOFFSETOF(CPUMCTX, aSpReg[0].u64) },
181 { HV_SYS_REG_SP_EL1, CPUMCTX_EXTRN_SP, RT_UOFFSETOF(CPUMCTX, aSpReg[1].u64) },
182 { HV_SYS_REG_SPSR_EL1, CPUMCTX_EXTRN_SPSR, RT_UOFFSETOF(CPUMCTX, Spsr.u64) },
183 { HV_SYS_REG_ELR_EL1, CPUMCTX_EXTRN_ELR, RT_UOFFSETOF(CPUMCTX, Elr.u64) },
184 { HV_SYS_REG_SCTLR_EL1, CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Sctlr.u64) },
185 { HV_SYS_REG_TCR_EL1, CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Tcr.u64) },
186 { HV_SYS_REG_TTBR0_EL1, CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Ttbr0.u64) },
187 { HV_SYS_REG_TTBR1_EL1, CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Ttbr1.u64) },
188};
189
190
191/*********************************************************************************************************************************
192* Internal Functions *
193*********************************************************************************************************************************/
194
195
196/**
197 * Converts a HV return code to a VBox status code.
198 *
199 * @returns VBox status code.
200 * @param hrc The HV return code to convert.
201 */
202DECLINLINE(int) nemR3DarwinHvSts2Rc(hv_return_t hrc)
203{
204 if (hrc == HV_SUCCESS)
205 return VINF_SUCCESS;
206
207 switch (hrc)
208 {
209 case HV_ERROR: return VERR_INVALID_STATE;
210 case HV_BUSY: return VERR_RESOURCE_BUSY;
211 case HV_BAD_ARGUMENT: return VERR_INVALID_PARAMETER;
212 case HV_NO_RESOURCES: return VERR_OUT_OF_RESOURCES;
213 case HV_NO_DEVICE: return VERR_NOT_FOUND;
214 case HV_UNSUPPORTED: return VERR_NOT_SUPPORTED;
215 }
216
217 return VERR_IPE_UNEXPECTED_STATUS;
218}
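/* A minimal usage sketch (hypothetical caller, not taken from this file): the
 * pattern throughout this backend is to convert only on the failure edge:
 *
 *     hv_return_t hrc = hv_vcpu_set_vtimer_mask(pVCpu->nem.s.hVCpu, false);
 *     if (hrc != HV_SUCCESS)
 *         return nemR3DarwinHvSts2Rc(hrc);
 *     return VINF_SUCCESS;
 */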
219
220
221/**
222 * Returns a human readable string of the given exception class.
223 *
224 * @returns Pointer to the string matching the given EC.
225 * @param u32Ec The exception class to return the string for.
226 */
227static const char *nemR3DarwinEsrEl2EcStringify(uint32_t u32Ec)
228{
229 switch (u32Ec)
230 {
231#define ARMV8_EC_CASE(a_Ec) case a_Ec: return #a_Ec
232 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_UNKNOWN);
233 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_TRAPPED_WFX);
234 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCR_MRC_COPROC_15);
235 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCRR_MRRC_COPROC15);
236 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCR_MRC_COPROC_14);
237 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_LDC_STC);
238 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_SME_SVE_NEON);
239 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_VMRS);
240 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_PA_INSN);
241 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_LS64_EXCEPTION);
242 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MRRC_COPROC14);
243 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_BTI_BRANCH_TARGET_EXCEPTION);
244 ARMV8_EC_CASE(ARMV8_ESR_EL2_ILLEGAL_EXECUTION_STATE);
245 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_SVC_INSN);
246 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_HVC_INSN);
247 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_SMC_INSN);
248 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_SVC_INSN);
249 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_HVC_INSN);
250 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_SMC_INSN);
251 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_SYS_INSN);
252 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_SVE_TRAPPED);
253 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_PAUTH_NV_TRAPPED_ERET_ERETAA_ERETAB);
254 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_TME_TSTART_INSN_EXCEPTION);
255 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_FPAC_PA_INSN_FAILURE_EXCEPTION);
256 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_SME_TRAPPED_SME_ACCESS);
257 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_RME_GRANULE_PROT_CHECK_EXCEPTION);
258 ARMV8_EC_CASE(ARMV8_ESR_EL2_INSN_ABORT_FROM_LOWER_EL);
259 ARMV8_EC_CASE(ARMV8_ESR_EL2_INSN_ABORT_FROM_EL2);
260 ARMV8_EC_CASE(ARMV8_ESR_EL2_PC_ALIGNMENT_EXCEPTION);
261 ARMV8_EC_CASE(ARMV8_ESR_EL2_DATA_ABORT_FROM_LOWER_EL);
262 ARMV8_EC_CASE(ARMV8_ESR_EL2_DATA_ABORT_FROM_EL2);
263 ARMV8_EC_CASE(ARMV8_ESR_EL2_SP_ALIGNMENT_EXCEPTION);
264 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_MOPS_EXCEPTION);
265 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_FP_EXCEPTION);
266 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_FP_EXCEPTION);
267 ARMV8_EC_CASE(ARMV8_ESR_EL2_SERROR_INTERRUPT);
268 ARMV8_EC_CASE(ARMV8_ESR_EL2_BKPT_EXCEPTION_FROM_LOWER_EL);
269 ARMV8_EC_CASE(ARMV8_ESR_EL2_BKPT_EXCEPTION_FROM_EL2);
270 ARMV8_EC_CASE(ARMV8_ESR_EL2_SS_EXCEPTION_FROM_LOWER_EL);
271 ARMV8_EC_CASE(ARMV8_ESR_EL2_SS_EXCEPTION_FROM_EL2);
272 ARMV8_EC_CASE(ARMV8_ESR_EL2_WATCHPOINT_EXCEPTION_FROM_LOWER_EL);
273 ARMV8_EC_CASE(ARMV8_ESR_EL2_WATCHPOINT_EXCEPTION_FROM_EL2);
274 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_BKPT_INSN);
275 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_VEC_CATCH_EXCEPTION);
276 ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_BRK_INSN);
277#undef ARMV8_EC_CASE
278 default:
279 break;
280 }
281
282 return "<INVALID>";
283}
284
285
286/**
287 * Resolves a NEM page state from the given protection flags.
288 *
289 * @returns NEM page state.
290 * @param fPageProt The page protection flags.
291 */
292DECLINLINE(uint8_t) nemR3DarwinPageStateFromProt(uint32_t fPageProt)
293{
294 switch (fPageProt)
295 {
296 case NEM_PAGE_PROT_NONE:
297 return NEM_DARWIN_PAGE_STATE_UNMAPPED;
298 case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE:
299 return NEM_DARWIN_PAGE_STATE_RX;
300 case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE:
301 return NEM_DARWIN_PAGE_STATE_RW;
302 case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE:
303 return NEM_DARWIN_PAGE_STATE_RWX;
304 default:
305 break;
306 }
307
308 AssertLogRelMsgFailed(("Invalid combination of page protection flags %#x, can't map to page state!\n", fPageProt));
309 return NEM_DARWIN_PAGE_STATE_UNMAPPED;
310}
311
312
313/**
314 * Unmaps the given guest physical address range (page aligned).
315 *
316 * @returns VBox status code.
317 * @param pVM The cross context VM structure.
318 * @param GCPhys The guest physical address to start unmapping at.
319 * @param cb The size of the range to unmap in bytes.
320 * @param pu2State Where to store the new state of the unmapped page, optional.
321 */
322DECLINLINE(int) nemR3DarwinUnmap(PVM pVM, RTGCPHYS GCPhys, size_t cb, uint8_t *pu2State)
323{
324 if (*pu2State <= NEM_DARWIN_PAGE_STATE_UNMAPPED)
325 {
326 Log5(("nemR3DarwinUnmap: %RGp == unmapped\n", GCPhys));
327 *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
328 return VINF_SUCCESS;
329 }
330
331 LogFlowFunc(("Unmapping %RGp LB %zu\n", GCPhys, cb));
332 hv_return_t hrc = hv_vm_unmap(GCPhys, cb);
333 if (RT_LIKELY(hrc == HV_SUCCESS))
334 {
335 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
336 if (pu2State)
337 *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
338 Log5(("nemR3DarwinUnmap: %RGp => unmapped\n", GCPhys));
339 return VINF_SUCCESS;
340 }
341
342 STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
343 LogRel(("nemR3DarwinUnmap(%RGp): failed! hrc=%#x\n",
344 GCPhys, hrc));
345 return VERR_NEM_IPE_6;
346}
347
348
349/**
350 * Maps a given guest physical address range backed by the given memory with the given
351 * protection flags.
352 *
353 * @returns VBox status code.
354 * @param pVM The cross context VM structure.
355 * @param GCPhys The guest physical address to start mapping.
356 * @param pvRam The R3 pointer of the memory to back the range with.
357 * @param cb The size of the range, page aligned.
358 * @param fPageProt The page protection flags to use for this range, combination of NEM_PAGE_PROT_XXX
359 * @param pu2State Where to store the state for the new page, optional.
360 */
361DECLINLINE(int) nemR3DarwinMap(PVM pVM, RTGCPHYS GCPhys, const void *pvRam, size_t cb, uint32_t fPageProt, uint8_t *pu2State)
362{
363 LogFlowFunc(("Mapping %RGp LB %zu fProt=%#x\n", GCPhys, cb, fPageProt));
364
365 Assert(fPageProt != NEM_PAGE_PROT_NONE);
366 RT_NOREF(pVM);
367
368 hv_memory_flags_t fHvMemProt = 0;
369 if (fPageProt & NEM_PAGE_PROT_READ)
370 fHvMemProt |= HV_MEMORY_READ;
371 if (fPageProt & NEM_PAGE_PROT_WRITE)
372 fHvMemProt |= HV_MEMORY_WRITE;
373 if (fPageProt & NEM_PAGE_PROT_EXECUTE)
374 fHvMemProt |= HV_MEMORY_EXEC;
375
376 hv_return_t hrc = hv_vm_map((void *)pvRam, GCPhys, cb, fHvMemProt);
377 if (hrc == HV_SUCCESS)
378 {
379 if (pu2State)
380 *pu2State = nemR3DarwinPageStateFromProt(fPageProt);
381 return VINF_SUCCESS;
382 }
383
384 return nemR3DarwinHvSts2Rc(hrc);
385}
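/* A short sketch of how a caller drives this (assumed address and size, for
 * illustration only):
 *
 *     uint8_t u2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
 *     int rc = nemR3DarwinMap(pVM, UINT64_C(0x40000000), pvChunk, _2M,
 *                             NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE,
 *                             &u2State);
 *     // On success u2State is NEM_DARWIN_PAGE_STATE_RWX, matching nemR3DarwinPageStateFromProt().
 */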
386
387#if 0 /* unused */
388DECLINLINE(int) nemR3DarwinProtectPage(PVM pVM, RTGCPHYS GCPhys, size_t cb, uint32_t fPageProt)
389{
390 hv_memory_flags_t fHvMemProt = 0;
391 if (fPageProt & NEM_PAGE_PROT_READ)
392 fHvMemProt |= HV_MEMORY_READ;
393 if (fPageProt & NEM_PAGE_PROT_WRITE)
394 fHvMemProt |= HV_MEMORY_WRITE;
395 if (fPageProt & NEM_PAGE_PROT_EXECUTE)
396 fHvMemProt |= HV_MEMORY_EXEC;
397
398 hv_return_t hrc;
399 if (pVM->nem.s.fCreatedAsid)
400 hrc = hv_vm_protect_space(pVM->nem.s.uVmAsid, GCPhys, cb, fHvMemProt);
401 else
402 hrc = hv_vm_protect(GCPhys, cb, fHvMemProt);
403
404 return nemR3DarwinHvSts2Rc(hrc);
405}
406#endif
407
408#ifdef LOG_ENABLED
409/**
410 * Logs the current CPU state.
411 */
412static void nemR3DarwinLogState(PVMCC pVM, PVMCPUCC pVCpu)
413{
414 if (LogIs3Enabled())
415 {
416 char szRegs[4096];
417 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
418 "x0=%016VR{x0} x1=%016VR{x1} x2=%016VR{x2} x3=%016VR{x3}\n"
419 "x4=%016VR{x4} x5=%016VR{x5} x6=%016VR{x6} x7=%016VR{x7}\n"
420 "x8=%016VR{x8} x9=%016VR{x9} x10=%016VR{x10} x11=%016VR{x11}\n"
421 "x12=%016VR{x12} x13=%016VR{x13} x14=%016VR{x14} x15=%016VR{x15}\n"
422 "x16=%016VR{x16} x17=%016VR{x17} x18=%016VR{x18} x19=%016VR{x19}\n"
423 "x20=%016VR{x20} x21=%016VR{x21} x22=%016VR{x22} x23=%016VR{x23}\n"
424 "x24=%016VR{x24} x25=%016VR{x25} x26=%016VR{x26} x27=%016VR{x27}\n"
425 "x28=%016VR{x28} x29=%016VR{x29} x30=%016VR{x30}\n"
426 "pc=%016VR{pc} pstate=%016VR{pstate}\n"
427 "sp_el0=%016VR{sp_el0} sp_el1=%016VR{sp_el1} elr_el1=%016VR{elr_el1}\n"
428 "sctlr_el1=%016VR{sctlr_el1} tcr_el1=%016VR{tcr_el1}\n"
429 "ttbr0_el1=%016VR{ttbr0_el1} ttbr1_el1=%016VR{ttbr1_el1}\n"
430 );
431 char szInstr[256]; RT_ZERO(szInstr);
432#if 0
433 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
434 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
435 szInstr, sizeof(szInstr), NULL);
436#endif
437 Log3(("%s%s\n", szRegs, szInstr));
438 }
439}
440#endif /* LOG_ENABLED */
441
442
443static int nemR3DarwinCopyStateFromHv(PVMCC pVM, PVMCPUCC pVCpu, uint64_t fWhat)
444{
445 RT_NOREF(pVM);
446
447 hv_return_t hrc = hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CTL_EL0, &pVCpu->cpum.GstCtx.CntvCtlEl0);
448 if (hrc == HV_SUCCESS)
449 hrc = hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CVAL_EL0, &pVCpu->cpum.GstCtx.CntvCValEl0);
450
451 if ( hrc == HV_SUCCESS
452 && (fWhat & (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR)))
453 {
454 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumRegs); i++)
455 {
456 if (s_aCpumRegs[i].fCpumExtrn & fWhat)
457 {
458 uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumRegs[i].offCpumCtx);
459 hrc |= hv_vcpu_get_reg(pVCpu->nem.s.hVCpu, s_aCpumRegs[i].enmHvReg, pu64);
460 }
461 }
462 }
463
464 if ( hrc == HV_SUCCESS
465 && (fWhat & CPUMCTX_EXTRN_V0_V31))
466 {
467 /* SIMD/FP registers. */
468 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumFpRegs); i++)
469 {
470 hv_simd_fp_uchar16_t *pu128 = (hv_simd_fp_uchar16_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumFpRegs[i].offCpumCtx);
471 hrc |= hv_vcpu_get_simd_fp_reg(pVCpu->nem.s.hVCpu, s_aCpumFpRegs[i].enmHvReg, pu128);
472 }
473 }
474
475 if ( hrc == HV_SUCCESS
476 && (fWhat & (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR)))
477 {
478 /* System registers. */
479 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegs); i++)
480 {
481 if (s_aCpumSysRegs[i].fCpumExtrn & fWhat)
482 {
483 uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumSysRegs[i].offCpumCtx);
484 hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumSysRegs[i].enmHvReg, pu64);
485 }
486 }
487 }
488
489 if ( hrc == HV_SUCCESS
490 && (fWhat & CPUMCTX_EXTRN_PSTATE))
491 {
492 uint64_t u64Tmp;
493 hrc |= hv_vcpu_get_reg(pVCpu->nem.s.hVCpu, HV_REG_CPSR, &u64Tmp);
494 if (hrc == HV_SUCCESS)
495 pVCpu->cpum.GstCtx.fPState = (uint32_t)u64Tmp;
496 }
497
498 /* Almost done, just update extern flags. */
499 pVCpu->cpum.GstCtx.fExtrn &= ~fWhat;
500 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
501 pVCpu->cpum.GstCtx.fExtrn = 0;
502
503 return nemR3DarwinHvSts2Rc(hrc);
504}
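/* Note on the fExtrn contract: a set bit means the value still lives in HV and
 * the copy in CPUMCTX is stale.  A caller needing, say, PC and PSTATE up to date
 * would do something like (sketch, not from this file):
 *
 *     if (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_PSTATE))
 *         rc = nemR3DarwinCopyStateFromHv(pVM, pVCpu, CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_PSTATE);
 */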
505
506
507/**
508 * Exports the guest state to HV for execution.
509 *
510 * @returns VBox status code.
511 * @param pVM The cross context VM structure.
512 * @param pVCpu The cross context virtual CPU structure of the
513 * calling EMT.
514 */
515static int nemR3DarwinExportGuestState(PVMCC pVM, PVMCPUCC pVCpu)
516{
517 RT_NOREF(pVM);
518 hv_return_t hrc = HV_SUCCESS;
519
520 if ( (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR))
521 != (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR))
522 {
523 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumRegs); i++)
524 {
525 if (!(s_aCpumRegs[i].fCpumExtrn & pVCpu->cpum.GstCtx.fExtrn))
526 {
527 uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumRegs[i].offCpumCtx);
528 hrc |= hv_vcpu_set_reg(pVCpu->nem.s.hVCpu, s_aCpumRegs[i].enmHvReg, *pu64);
529 }
530 }
531 }
532
533 if ( hrc == HV_SUCCESS
534 && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_V0_V31))
535 {
536 /* SIMD/FP registers. */
537 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumFpRegs); i++)
538 {
539 hv_simd_fp_uchar16_t *pu128 = (hv_simd_fp_uchar16_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumFpRegs[i].offCpumCtx);
540 hrc |= hv_vcpu_set_simd_fp_reg(pVCpu->nem.s.hVCpu, s_aCpumFpRegs[i].enmHvReg, *pu128);
541 }
542 }
543
544 if ( hrc == HV_SUCCESS
545 && (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR))
546 != (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR))
547 {
548 /* System registers. */
549 for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegs); i++)
550 {
551 if (!(s_aCpumSysRegs[i].fCpumExtrn & pVCpu->cpum.GstCtx.fExtrn))
552 {
553 uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumSysRegs[i].offCpumCtx);
554 hrc |= hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumSysRegs[i].enmHvReg, *pu64);
555 }
556 }
557 }
558
559 if ( hrc == HV_SUCCESS
560 && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_PSTATE))
561 hrc = hv_vcpu_set_reg(pVCpu->nem.s.hVCpu, HV_REG_CPSR, pVCpu->cpum.GstCtx.fPState);
562
563 pVCpu->cpum.GstCtx.fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_NEM;
564 return nemR3DarwinHvSts2Rc(hrc);
565}
566
567
568/**
569 * Try initialize the native API.
570 *
571 * This may only do part of the job, more can be done in
572 * nemR3NativeInitAfterCPUM() and nemR3NativeInitCompleted().
573 *
574 * @returns VBox status code.
575 * @param pVM The cross context VM structure.
576 * @param fFallback Whether we're in fallback mode or use-NEM mode. In
577 * the latter we'll fail if we cannot initialize.
578 * @param fForced Whether the HMForced flag is set and we should
579 * fail if we cannot initialize.
580 */
581int nemR3NativeInit(PVM pVM, bool fFallback, bool fForced)
582{
583 AssertReturn(!pVM->nem.s.fCreatedVm, VERR_WRONG_ORDER);
584
585 /*
586 * Some state init.
587 */
588 PCFGMNODE pCfgNem = CFGMR3GetChild(CFGMR3GetRoot(pVM), "NEM/");
589 RT_NOREF(pCfgNem);
590
591 /*
592 * Error state.
593 * The error message will be non-empty on failure and 'rc' will be set too.
594 */
595 RTERRINFOSTATIC ErrInfo;
596 PRTERRINFO pErrInfo = RTErrInfoInitStatic(&ErrInfo);
597
598 int rc = VINF_SUCCESS;
599 hv_return_t hrc = hv_vm_create(NULL);
600 if (hrc == HV_SUCCESS)
601 {
602 pVM->nem.s.fCreatedVm = true;
603 pVM->nem.s.u64CntFrqHz = ASMReadCntFrqEl0();
604 VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_NATIVE_API);
605 Log(("NEM: Marked active!\n"));
606 PGMR3EnableNemMode(pVM);
607 }
608 else
609 rc = RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
610 "hv_vm_create() failed: %#x", hrc);
611
612 /*
613 * We only fail if in forced mode, otherwise just log the complaint and return.
614 */
615 Assert(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API || RTErrInfoIsSet(pErrInfo));
616 if ( (fForced || !fFallback)
617 && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NATIVE_API)
618 return VMSetError(pVM, RT_SUCCESS_NP(rc) ? VERR_NEM_NOT_AVAILABLE : rc, RT_SRC_POS, "%s", pErrInfo->pszMsg);
619
620 if (RTErrInfoIsSet(pErrInfo))
621 LogRel(("NEM: Not available: %s\n", pErrInfo->pszMsg));
622 return VINF_SUCCESS;
623}
624
625
626/**
627 * Worker to create the vCPU handle on the EMT running it later on (as required by HV).
628 *
629 * @returns VBox status code
630 * @param pVM The VM handle.
631 * @param pVCpu The vCPU handle.
632 * @param idCpu ID of the CPU to create.
633 */
634static DECLCALLBACK(int) nemR3DarwinNativeInitVCpuOnEmt(PVM pVM, PVMCPU pVCpu, VMCPUID idCpu)
635{
636 hv_return_t hrc = hv_vcpu_create(&pVCpu->nem.s.hVCpu, &pVCpu->nem.s.pHvExit, NULL);
637 if (hrc != HV_SUCCESS)
638 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
639 "Call to hv_vcpu_create failed on vCPU %u: %#x (%Rrc)", idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));
640
641 /* Set the vTimer offset so all vCPUs start at (nearly) 0. */
642 pVCpu->nem.s.u64VTimerOff = ASMReadTSC();
643
644 hrc = hv_vcpu_set_vtimer_offset(pVCpu->nem.s.hVCpu, pVCpu->nem.s.u64VTimerOff);
645 AssertLogRelMsg(hrc == HV_SUCCESS, ("vCPU#%u: Failed to set vTimer offset to %#RX64 -> hrc = %#x\n",
646 pVCpu->idCpu, pVCpu->nem.s.u64VTimerOff, hrc));
647
648 if (idCpu == 0)
649 {
650 /** @todo */
651 }
652
653 return VINF_SUCCESS;
654}
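/* Note on the offset math: the guest-visible CNTVCT_EL0 is the host counter
 * minus the programmed vTimer offset (see NEMHCQueryCpuTick below), so seeding
 * the offset with the host counter value sampled here makes the guest virtual
 * counter start at roughly zero on every vCPU. */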
655
656
657/**
658 * Worker to destroy the vCPU handle on the EMT running it later on (as required by HV).
659 *
660 * @returns VBox status code
661 * @param pVCpu The vCPU handle.
662 */
663static DECLCALLBACK(int) nemR3DarwinNativeTermVCpuOnEmt(PVMCPU pVCpu)
664{
665 hv_return_t hrc = hv_vcpu_destroy(pVCpu->nem.s.hVCpu);
666 Assert(hrc == HV_SUCCESS); RT_NOREF(hrc);
667 return VINF_SUCCESS;
668}
669
670
671/**
672 * This is called after CPUMR3Init is done.
673 *
674 * @returns VBox status code.
675 * @param pVM The VM handle.
676 */
677int nemR3NativeInitAfterCPUM(PVM pVM)
678{
679 /*
680 * Validate sanity.
681 */
682 AssertReturn(!pVM->nem.s.fCreatedEmts, VERR_WRONG_ORDER);
683 AssertReturn(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API, VERR_WRONG_ORDER);
684
685 /*
686 * Setup the EMTs.
687 */
688 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
689 {
690 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
691
692 int rc = VMR3ReqCallWait(pVM, idCpu, (PFNRT)nemR3DarwinNativeInitVCpuOnEmt, 3, pVM, pVCpu, idCpu);
693 if (RT_FAILURE(rc))
694 {
695 /* Rollback. */
696 while (idCpu--)
697 VMR3ReqCallWait(pVM, idCpu, (PFNRT)nemR3DarwinNativeTermVCpuOnEmt, 1, pVCpu);
698
699 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS, "Call to hv_vcpu_create failed: %Rrc", rc);
700 }
701 }
702
703 pVM->nem.s.fCreatedEmts = true;
704 return VINF_SUCCESS;
705}
706
707
708int nemR3NativeInitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
709{
710 RT_NOREF(pVM, enmWhat);
711 return VINF_SUCCESS;
712}
713
714
715int nemR3NativeTerm(PVM pVM)
716{
717 /*
718 * Delete the VM.
719 */
720
721 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
722 {
723 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
724
725 /*
726 * Apple's documentation states that the vCPU should be destroyed
727 * on the thread running the vCPU but as all the other EMTs are gone
728 * at this point, destroying the VM would hang.
729 *
730 * We seem to be in luck here though, as destroying apparently works
731 * from EMT(0) as well.
732 */
733 hv_return_t hrc = hv_vcpu_destroy(pVCpu->nem.s.hVCpu);
734 Assert(hrc == HV_SUCCESS); RT_NOREF(hrc);
735 }
736
737 pVM->nem.s.fCreatedEmts = false;
738 if (pVM->nem.s.fCreatedVm)
739 {
740 hv_return_t hrc = hv_vm_destroy();
741 if (hrc != HV_SUCCESS)
742 LogRel(("NEM: hv_vm_destroy() failed with %#x\n", hrc));
743
744 pVM->nem.s.fCreatedVm = false;
745 }
746 return VINF_SUCCESS;
747}
748
749
750/**
751 * VM reset notification.
752 *
753 * @param pVM The cross context VM structure.
754 */
755void nemR3NativeReset(PVM pVM)
756{
757 RT_NOREF(pVM);
758}
759
760
761/**
762 * Reset CPU due to INIT IPI or hot (un)plugging.
763 *
764 * @param pVCpu The cross context virtual CPU structure of the CPU being
765 * reset.
766 * @param fInitIpi Whether this is the INIT IPI or hot (un)plugging case.
767 */
768void nemR3NativeResetCpu(PVMCPU pVCpu, bool fInitIpi)
769{
770 RT_NOREF(pVCpu, fInitIpi);
771}
772
773
774/**
775 * Returns the byte size from the given access SAS value.
776 *
777 * @returns Number of bytes to transfer.
778 * @param uSas The SAS value to convert.
779 */
780DECLINLINE(size_t) nemR3DarwinGetByteCountFromSas(uint8_t uSas)
781{
782 switch (uSas)
783 {
784 case ARMV8_EC_ISS_DATA_ABRT_SAS_BYTE: return sizeof(uint8_t);
785 case ARMV8_EC_ISS_DATA_ABRT_SAS_HALFWORD: return sizeof(uint16_t);
786 case ARMV8_EC_ISS_DATA_ABRT_SAS_WORD: return sizeof(uint32_t);
787 case ARMV8_EC_ISS_DATA_ABRT_SAS_DWORD: return sizeof(uint64_t);
788 default:
789 AssertReleaseFailed();
790 }
791
792 return 0;
793}
794
795
796/**
797 * Sets the given general purpose register to the given value.
798 *
799 * @param pVCpu The cross context virtual CPU structure of the
800 * calling EMT.
801 * @param uReg The register index.
802 * @param f64BitReg Flag whether to operate on a 64-bit or 32-bit register.
803 * @param fSignExtend Flag whether to sign extend the value.
804 * @param u64Val The value.
805 */
806DECLINLINE(void) nemR3DarwinSetGReg(PVMCPU pVCpu, uint8_t uReg, bool f64BitReg, bool fSignExtend, uint64_t u64Val)
807{
808 AssertReturnVoid(uReg < 31);
809
810 if (f64BitReg)
811 pVCpu->cpum.GstCtx.aGRegs[uReg].x = fSignExtend ? (int64_t)u64Val : u64Val;
812 else
813 pVCpu->cpum.GstCtx.aGRegs[uReg].w = fSignExtend ? (int32_t)u64Val : u64Val; /** @todo Does this clear the upper half on real hardware? */
814
815 /* Mark the register as not extern anymore. */
816 switch (uReg)
817 {
818 case 0:
819 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X0;
820 break;
821 case 1:
822 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X1;
823 break;
824 case 2:
825 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X2;
826 break;
827 case 3:
828 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X3;
829 break;
830 default:
831 AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_X4_X28));
832 /** @todo We need to import all missing registers in order to clear this flag (or just set it in HV from here). */
833 }
834}
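/* Regarding the @todo in nemR3DarwinSetGReg above: on real AArch64 hardware a
 * write to a 32-bit W register always zero-extends into the full 64-bit X
 * register, so strictly the upper half of aGRegs[uReg].x should be cleared on
 * the 32-bit path as well. */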
835
836
837/**
838 * Gets the given general purpose register and returns the value.
839 *
840 * @returns Value from the given register.
841 * @param pVCpu The cross context virtual CPU structure of the
842 * calling EMT.
843 * @param uReg The register index.
844 */
845DECLINLINE(uint64_t) nemR3DarwinGetGReg(PVMCPU pVCpu, uint8_t uReg)
846{
847 AssertReturn(uReg <= ARMV8_AARCH64_REG_ZR, 0);
848
849 if (uReg == ARMV8_AARCH64_REG_ZR)
850 return 0;
851
852 /** @todo Import the register if extern. */
853 AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_GPRS_MASK));
854
855 return pVCpu->cpum.GstCtx.aGRegs[uReg].x;
856}
857
858
859/**
860 * Works on the data abort exception (which will be a MMIO access most of the time).
861 *
862 * @returns VBox strict status code.
863 * @param pVM The cross context VM structure.
864 * @param pVCpu The cross context virtual CPU structure of the
865 * calling EMT.
866 * @param uIss The instruction specific syndrome value.
867 * @param fInsn32Bit Flag whether the exception was caused by a 32-bit or 16-bit instruction.
868 * @param GCPtrDataAbrt The virtual GC address causing the data abort.
869 * @param GCPhysDataAbrt The physical GC address which caused the data abort.
870 */
871static VBOXSTRICTRC nemR3DarwinHandleExitExceptionDataAbort(PVM pVM, PVMCPU pVCpu, uint32_t uIss, bool fInsn32Bit,
872 RTGCPTR GCPtrDataAbrt, RTGCPHYS GCPhysDataAbrt)
873{
874 bool fIsv = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_ISV);
875 bool fL2Fault = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_S1PTW);
876 bool fWrite = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_WNR);
877 bool f64BitReg = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SF);
878 bool fSignExtend = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SSE);
879 uint8_t uReg = ARMV8_EC_ISS_DATA_ABRT_SRT_GET(uIss);
880 uint8_t uAcc = ARMV8_EC_ISS_DATA_ABRT_SAS_GET(uIss);
881 size_t cbAcc = nemR3DarwinGetByteCountFromSas(uAcc);
882 LogFlowFunc(("fIsv=%RTbool fL2Fault=%RTbool fWrite=%RTbool f64BitReg=%RTbool fSignExtend=%RTbool uReg=%u uAcc=%u GCPtrDataAbrt=%RGv GCPhysDataAbrt=%RGp\n",
883 fIsv, fL2Fault, fWrite, f64BitReg, fSignExtend, uReg, uAcc, GCPtrDataAbrt, GCPhysDataAbrt));
884
885 AssertReturn(fIsv, VERR_NOT_SUPPORTED); /** @todo Implement using IEM when this should occur. */
886
887 EMHistoryAddExit(pVCpu,
888 fWrite
889 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
890 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
891 pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());
892
893 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
894 uint64_t u64Val = 0;
895 if (fWrite)
896 {
897 u64Val = nemR3DarwinGetGReg(pVCpu, uReg);
898 rcStrict = PGMPhysWrite(pVM, GCPhysDataAbrt, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
899 Log4(("MmioExit/%u: %08RX64: WRITE %#x LB %u, %.*Rhxs -> rcStrict=%Rrc\n",
900 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhysDataAbrt, cbAcc, cbAcc,
901 &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
902 }
903 else
904 {
905 rcStrict = PGMPhysRead(pVM, GCPhysDataAbrt, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
906 Log4(("MmioExit/%u: %08RX64: READ %#x LB %u -> %.*Rhxs rcStrict=%Rrc\n",
907 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhysDataAbrt, cbAcc, cbAcc,
908 &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
909 if (rcStrict == VINF_SUCCESS)
910 nemR3DarwinSetGReg(pVCpu, uReg, f64BitReg, fSignExtend, u64Val);
911 }
912
913 if (rcStrict == VINF_SUCCESS)
914 pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);
915
916 return rcStrict;
917}
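/* Worked example (assumed instruction, for illustration): a guest "str w1, [x0]"
 * hitting MMIO arrives here with ISV=1, WnR=1 (write), SAS=0b10 (word) and
 * SRT=1, so cbAcc is 4 and the value written comes from W1 via
 * nemR3DarwinGetGReg(pVCpu, 1). */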
918
919
920/**
921 * Works on the trapped MRS, MSR and system instruction exception.
922 *
923 * @returns VBox strict status code.
924 * @param pVM The cross context VM structure.
925 * @param pVCpu The cross context virtual CPU structure of the
926 * calling EMT.
927 * @param uIss The instruction specific syndrome value.
928 * @param fInsn32Bit Flag whether the exception was caused by a 32-bit or 16-bit instruction.
929 */
930static VBOXSTRICTRC nemR3DarwinHandleExitExceptionTrappedSysInsn(PVM pVM, PVMCPU pVCpu, uint32_t uIss, bool fInsn32Bit)
931{
932 bool fRead = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_DIRECTION_IS_READ(uIss);
933 uint8_t uCRm = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_CRM_GET(uIss);
934 uint8_t uReg = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_RT_GET(uIss);
935 uint8_t uCRn = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_CRN_GET(uIss);
936 uint8_t uOp1 = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP1_GET(uIss);
937 uint8_t uOp2 = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP2_GET(uIss);
938 uint8_t uOp0 = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP0_GET(uIss);
939 uint16_t idSysReg = ARMV8_AARCH64_SYSREG_ID_CREATE(uOp0, uOp1, uCRn, uCRm, uOp2);
940 LogFlowFunc(("fRead=%RTbool uCRm=%u uReg=%u uCRn=%u uOp1=%u uOp2=%u uOp0=%u idSysReg=%#x\n",
941 fRead, uCRm, uReg, uCRn, uOp1, uOp2, uOp0, idSysReg));
942
943 /** @todo EMEXITTYPE_MSR_READ/EMEXITTYPE_MSR_WRITE are misnomers. */
944 EMHistoryAddExit(pVCpu,
945 fRead
946 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ)
947 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE),
948 pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());
949
950 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
951 uint64_t u64Val = 0;
952 if (fRead)
953 {
954 RT_NOREF(pVM);
955 rcStrict = CPUMQueryGuestSysReg(pVCpu, idSysReg, &u64Val);
956 Log4(("SysInsnExit/%u: %08RX64: READ %u:%u:%u:%u:%u -> %#RX64 rcStrict=%Rrc\n",
957 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, uOp0, uOp1, uCRn, uCRm, uOp2, u64Val,
958 VBOXSTRICTRC_VAL(rcStrict) ));
959 if (rcStrict == VINF_SUCCESS)
960 nemR3DarwinSetGReg(pVCpu, uReg, true /*f64BitReg*/, false /*fSignExtend*/, u64Val);
961 }
962 else
963 {
964 u64Val = nemR3DarwinGetGReg(pVCpu, uReg);
965 rcStrict = CPUMSetGuestSysReg(pVCpu, idSysReg, u64Val);
966 Log4(("SysInsnExit/%u: %08RX64: WRITE %u:%u:%u:%u:%u %#RX64 -> rcStrict=%Rrc\n",
967 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, uOp0, uOp1, uCRn, uCRm, uOp2, u64Val,
968 VBOXSTRICTRC_VAL(rcStrict) ));
969 }
970
971 if (rcStrict == VINF_SUCCESS)
972 pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);
973
974 return rcStrict;
975}
976
977
978/**
979 * Works on the trapped HVC instruction exception.
980 *
981 * @returns VBox strict status code.
982 * @param pVM The cross context VM structure.
983 * @param pVCpu The cross context virtual CPU structure of the
984 * calling EMT.
985 * @param uIss The instruction specific syndrome value.
986 */
987static VBOXSTRICTRC nemR3DarwinHandleExitExceptionTrappedHvcInsn(PVM pVM, PVMCPU pVCpu, uint32_t uIss)
988{
989 uint16_t u16Imm = ARMV8_EC_ISS_AARCH64_TRAPPED_HVC_INSN_IMM_GET(uIss);
990 LogFlowFunc(("u16Imm=%#RX16\n", u16Imm));
991
992#if 0 /** @todo For later */
993 EMHistoryAddExit(pVCpu,
994 fRead
995 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ)
996 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE),
997 pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());
998#endif
999
1000 RT_NOREF(pVM);
1001 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1002 /** @todo Raise exception to EL1 if PSCI not configured. */
1003 /** @todo Need a generic mechanism to pass this on to, GIM maybe? Always return -1 for now (PSCI). */
1004 nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, true /*f64BitReg*/, false /*fSignExtend*/, (uint64_t)-1);
1005
1006 return rcStrict;
1007}
1008
1009
1010/**
1011 * Handles an exception VM exit.
1012 *
1013 * @returns VBox strict status code.
1014 * @param pVM The cross context VM structure.
1015 * @param pVCpu The cross context virtual CPU structure of the
1016 * calling EMT.
1017 * @param pExit Pointer to the exit information.
1018 */
1019static VBOXSTRICTRC nemR3DarwinHandleExitException(PVM pVM, PVMCPU pVCpu, const hv_vcpu_exit_t *pExit)
1020{
1021 uint32_t uEc = ARMV8_ESR_EL2_EC_GET(pExit->exception.syndrome);
1022 uint32_t uIss = ARMV8_ESR_EL2_ISS_GET(pExit->exception.syndrome);
1023 bool fInsn32Bit = ARMV8_ESR_EL2_IL_IS_32BIT(pExit->exception.syndrome);
1024
1025 LogFlowFunc(("pVM=%p pVCpu=%p{.idCpu=%u} uEc=%u{%s} uIss=%#RX32 fInsn32Bit=%RTbool\n",
1026 pVM, pVCpu, pVCpu->idCpu, uEc, nemR3DarwinEsrEl2EcStringify(uEc), uIss, fInsn32Bit));
1027
1028 switch (uEc)
1029 {
1030 case ARMV8_ESR_EL2_DATA_ABORT_FROM_LOWER_EL:
1031 return nemR3DarwinHandleExitExceptionDataAbort(pVM, pVCpu, uIss, fInsn32Bit, pExit->exception.virtual_address,
1032 pExit->exception.physical_address);
1033 case ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_SYS_INSN:
1034 return nemR3DarwinHandleExitExceptionTrappedSysInsn(pVM, pVCpu, uIss, fInsn32Bit);
1035 case ARMV8_ESR_EL2_EC_AARCH64_HVC_INSN:
1036 return nemR3DarwinHandleExitExceptionTrappedHvcInsn(pVM, pVCpu, uIss);
1037 case ARMV8_ESR_EL2_EC_TRAPPED_WFX:
1038 return VINF_SUCCESS; /** @todo VINF_EM_HALT; halting here currently leads to a guest hang because we don't get notified about the vTimer... */
1039 case ARMV8_ESR_EL2_EC_UNKNOWN:
1040 default:
1041 LogRel(("NEM/Darwin: Unknown Exception Class in syndrome: uEc=%u{%s} uIss=%#RX32 fInsn32Bit=%RTbool\n",
1042 uEc, nemR3DarwinEsrEl2EcStringify(uEc), uIss, fInsn32Bit));
1043 AssertReleaseFailed();
1044 return VERR_NOT_IMPLEMENTED;
1045 }
1046
1047 return VINF_SUCCESS;
1048}
1049
1050
1051/**
1052 * Handles an exit from hv_vcpu_run().
1053 *
1054 * @returns VBox strict status code.
1055 * @param pVM The cross context VM structure.
1056 * @param pVCpu The cross context virtual CPU structure of the
1057 * calling EMT.
1058 */
1059static VBOXSTRICTRC nemR3DarwinHandleExit(PVM pVM, PVMCPU pVCpu)
1060{
1061 int rc = nemR3DarwinCopyStateFromHv(pVM, pVCpu, CPUMCTX_EXTRN_ALL);
1062 if (RT_FAILURE(rc))
1063 return rc;
1064
1065#ifdef LOG_ENABLED
1066 if (LogIs3Enabled())
1067 nemR3DarwinLogState(pVM, pVCpu);
1068#endif
1069
1070 hv_vcpu_exit_t *pExit = pVCpu->nem.s.pHvExit;
1071 switch (pExit->reason)
1072 {
1073 case HV_EXIT_REASON_CANCELED:
1074 return VINF_EM_RAW_INTERRUPT;
1075 case HV_EXIT_REASON_EXCEPTION:
1076 return nemR3DarwinHandleExitException(pVM, pVCpu, pExit);
1077 case HV_EXIT_REASON_VTIMER_ACTIVATED:
1078 pVCpu->nem.s.fVTimerActivated = true;
1079 return GICPpiSet(pVCpu, NEM_DARWIN_VTIMER_GIC_PPI_IRQ, true /*fAsserted*/);
1080 default:
1081 AssertReleaseFailed();
1082 break;
1083 }
1084
1085 return VERR_INVALID_STATE;
1086}
1087
1088
1089/**
1090 * Runs the guest once until an exit occurs.
1091 *
1092 * @returns HV status code.
1093 * @param pVM The cross context VM structure.
1094 * @param pVCpu The cross context virtual CPU structure.
1095 */
1096static hv_return_t nemR3DarwinRunGuest(PVM pVM, PVMCPU pVCpu)
1097{
1098 TMNotifyStartOfExecution(pVM, pVCpu);
1099
1100 hv_return_t hrc = hv_vcpu_run(pVCpu->nem.s.hVCpu);
1101
1102 TMNotifyEndOfExecution(pVM, pVCpu, ASMReadTSC());
1103
1104 return hrc;
1105}
1106
1107
1108/**
1109 * Prepares the VM to run the guest.
1110 *
1111 * @returns Strict VBox status code.
1112 * @param pVM The cross context VM structure.
1113 * @param pVCpu The cross context virtual CPU structure.
1114 * @param fSingleStepping Flag whether we run in single stepping mode.
1115 */
1116static VBOXSTRICTRC nemR3DarwinPreRunGuest(PVM pVM, PVMCPU pVCpu, bool fSingleStepping)
1117{
1118#ifdef LOG_ENABLED
1119 bool fIrq = false;
1120 bool fFiq = false;
1121
1122 if (LogIs3Enabled())
1123 nemR3DarwinLogState(pVM, pVCpu);
1124#endif
1125
1126 /** @todo */ RT_NOREF(fSingleStepping);
1127 int rc = nemR3DarwinExportGuestState(pVM, pVCpu);
1128 AssertRCReturn(rc, rc);
1129
1130 /* Check whether the vTimer interrupt was handled by the guest and we can unmask the vTimer. */
1131 if (pVCpu->nem.s.fVTimerActivated)
1132 {
1133 /* Read the CNTV_CTL_EL0 register. */
1134 uint64_t u64CntvCtl = 0;
1135
1136 hv_return_t hrc = hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CTL_EL0, &u64CntvCtl);
1137 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
1138
1139 if ( (u64CntvCtl & (ARMV8_CNTV_CTL_EL0_AARCH64_ENABLE | ARMV8_CNTV_CTL_EL0_AARCH64_IMASK | ARMV8_CNTV_CTL_EL0_AARCH64_ISTATUS))
1140 != (ARMV8_CNTV_CTL_EL0_AARCH64_ENABLE | ARMV8_CNTV_CTL_EL0_AARCH64_ISTATUS))
1141 {
1142 /* Clear the interrupt. */
1143 GICPpiSet(pVCpu, NEM_DARWIN_VTIMER_GIC_PPI_IRQ, false /*fAsserted*/);
1144
1145 pVCpu->nem.s.fVTimerActivated = false;
1146 hrc = hv_vcpu_set_vtimer_mask(pVCpu->nem.s.hVCpu, false /*vtimer_is_masked*/);
1147 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
1148 }
1149 }
1150
1151 /* Set the pending interrupt state. */
1152 hv_return_t hrc = HV_SUCCESS;
1153 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ))
1154 {
1155 hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_IRQ, true);
1156 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
1157#ifdef LOG_ENABLED
1158 fIrq = true;
1159#endif
1160 }
1161 else
1162 {
1163 hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_IRQ, false);
1164 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
1165 }
1166
1167 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_FIQ))
1168 {
1169 hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_FIQ, true);
1170 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
1171#ifdef LOG_ENABLED
1172 fFiq = true;
1173#endif
1174 }
1175 else
1176 {
1177 hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_FIQ, false);
1178 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
1179 }
1180
1181 LogFlowFunc(("Running vCPU [%s,%s]\n", fIrq ? "I" : "nI", fFiq ? "F" : "nF"));
1182 pVCpu->nem.s.fEventPending = false;
1183 return VINF_SUCCESS;
1184}
1185
1186
1187/**
1188 * The normal runloop (no debugging features enabled).
1189 *
1190 * @returns Strict VBox status code.
1191 * @param pVM The cross context VM structure.
1192 * @param pVCpu The cross context virtual CPU structure.
1193 */
1194static VBOXSTRICTRC nemR3DarwinRunGuestNormal(PVM pVM, PVMCPU pVCpu)
1195{
1196 /*
1197 * The run loop.
1198 *
1199 * Current approach to state updating to use the sledgehammer and sync
1200 * everything every time. This will be optimized later.
1201 */
1202
1203 /* Update the vTimer offset after resuming if instructed. */
1204 if (pVCpu->nem.s.fVTimerOffUpdate)
1205 {
1206 /*
1207 * Program the new offset: first read the current guest counter value using the old vTimer offset, then grow
1208 * the offset so that the guest doesn't notice the pause.
1209 */
1210 uint64_t u64TscNew = ASMReadTSC() - pVCpu->nem.s.u64VTimerOff;
1211 Assert(u64TscNew >= pVM->nem.s.u64VTimerValuePaused);
1212 pVCpu->nem.s.u64VTimerOff += u64TscNew - pVM->nem.s.u64VTimerValuePaused;
1213 hv_return_t hrc = hv_vcpu_set_vtimer_offset(pVCpu->nem.s.hVCpu, pVCpu->nem.s.u64VTimerOff);
1214 if (hrc != HV_SUCCESS)
1215 return nemR3DarwinHvSts2Rc(hrc);
1216
1217 pVCpu->nem.s.fVTimerOffUpdate = false;
1218 }
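/* Worked example of the adjustment above (assumed numbers): if the guest
 * counter read 1000 when the VM was paused (u64VTimerValuePaused) and the
 * first read after resuming yields u64TscNew = 5000, the offset grows by
 * 4000 and the guest counter continues from 1000, hiding the pause. */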
1219
1220 /*
1221 * Poll timers and run for a bit.
1222 */
1223 /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
1224 * the whole polling job when timers have changed... */
1225 uint64_t offDeltaIgnored;
1226 uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
1227 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1228 for (unsigned iLoop = 0;; iLoop++)
1229 {
1230 rcStrict = nemR3DarwinPreRunGuest(pVM, pVCpu, false /* fSingleStepping */);
1231 if (rcStrict != VINF_SUCCESS)
1232 break;
1233
1234 hv_return_t hrc = nemR3DarwinRunGuest(pVM, pVCpu);
1235 if (hrc == HV_SUCCESS)
1236 {
1237 /*
1238 * Deal with the message.
1239 */
1240 rcStrict = nemR3DarwinHandleExit(pVM, pVCpu);
1241 if (rcStrict == VINF_SUCCESS)
1242 { /* hopefully likely */ }
1243 else
1244 {
1245 LogFlow(("NEM/%u: breaking: nemR3DarwinHandleExit -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
1246 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
1247 break;
1248 }
1249 }
1250 else
1251 {
1252 AssertLogRelMsgFailedReturn(("hv_vcpu_run() failed for CPU #%u: %#x\n",
1253 pVCpu->idCpu, hrc), VERR_NEM_IPE_0);
1254 }
1255 } /* the run loop */
1256
1257 return rcStrict;
1258}
1259
1260
1261VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu)
1262{
1263#ifdef LOG_ENABLED
1264 if (LogIs3Enabled())
1265 nemR3DarwinLogState(pVM, pVCpu);
1266#endif
1267
1268 AssertReturn(NEMR3CanExecuteGuest(pVM, pVCpu), VERR_NEM_IPE_9);
1269
1270 /*
1271 * Try switch to NEM runloop state.
1272 */
1273 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
1274 { /* likely */ }
1275 else
1276 {
1277 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
1278 LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
1279 return VINF_SUCCESS;
1280 }
1281
1282 VBOXSTRICTRC rcStrict;
1283#if 0
1284 if ( !pVCpu->nem.s.fUseDebugLoop
1285 && !nemR3DarwinAnyExpensiveProbesEnabled()
1286 && !DBGFIsStepping(pVCpu)
1287 && !pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledInt3Breakpoints)
1288#endif
1289 rcStrict = nemR3DarwinRunGuestNormal(pVM, pVCpu);
1290#if 0
1291 else
1292 rcStrict = nemR3DarwinRunGuestDebug(pVM, pVCpu);
1293#endif
1294
1295 if (rcStrict == VINF_EM_RAW_TO_R3)
1296 rcStrict = VINF_SUCCESS;
1297
1298 /*
1299 * Convert any pending HM events back to TRPM due to premature exits.
1300 *
1301 * This is because execution may continue from IEM and we would need to inject
1302 * the event from there (hence place it back in TRPM).
1303 */
1304 if (pVCpu->nem.s.fEventPending)
1305 {
1306 /** @todo */
1307 }
1308
1309
1310 if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
1311 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
1312
1313 if (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL))
1314 {
1315 /* Try anticipate what we might need. */
1316 uint64_t fImport = NEM_DARWIN_CPUMCTX_EXTRN_MASK_FOR_IEM;
1317 if ( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
1318 || RT_FAILURE(rcStrict))
1319 fImport = CPUMCTX_EXTRN_ALL;
1320 else if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ
1321 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
1322 fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;
1323
1324 if (pVCpu->cpum.GstCtx.fExtrn & fImport)
1325 {
1326 /* Only import what is external currently. */
1327 int rc2 = nemR3DarwinCopyStateFromHv(pVM, pVCpu, fImport);
1328 if (RT_SUCCESS(rc2))
1329 pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
1330 else if (RT_SUCCESS(rcStrict))
1331 rcStrict = rc2;
1332 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
1333 pVCpu->cpum.GstCtx.fExtrn = 0;
1334 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
1335 }
1336 else
1337 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
1338 }
1339 else
1340 {
1341 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
1342 pVCpu->cpum.GstCtx.fExtrn = 0;
1343 }
1344
1345 return rcStrict;
1346}
1347
1348
1349VMMR3_INT_DECL(bool) NEMR3CanExecuteGuest(PVM pVM, PVMCPU pVCpu)
1350{
1351 RT_NOREF(pVM, pVCpu);
1352 return true; /** @todo Are there any cases where we have to emulate? */
1353}
1354
1355
1356bool nemR3NativeSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
1357{
1358 VMCPU_ASSERT_EMT(pVCpu);
1359 bool fOld = pVCpu->nem.s.fSingleInstruction;
1360 pVCpu->nem.s.fSingleInstruction = fEnable;
1361 pVCpu->nem.s.fUseDebugLoop = fEnable || pVM->nem.s.fUseDebugLoop;
1362 return fOld;
1363}
1364
1365
1366void nemR3NativeNotifyFF(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
1367{
1368 LogFlowFunc(("pVM=%p pVCpu=%p fFlags=%#x\n", pVM, pVCpu, fFlags));
1369
1370 RT_NOREF(pVM, fFlags);
1371
1372 hv_return_t hrc = hv_vcpus_exit(&pVCpu->nem.s.hVCpu, 1);
1373 if (hrc != HV_SUCCESS)
1374 LogRel(("NEM: hv_vcpus_exit(%u, 1) failed with %#x\n", pVCpu->nem.s.hVCpu, hrc));
1375}
1376
1377
1378DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChanged(PVM pVM, bool fUseDebugLoop)
1379{
1380 RT_NOREF(pVM, fUseDebugLoop);
1381 AssertReleaseFailed();
1382 return false;
1383}
1384
1385
1386DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu, bool fUseDebugLoop)
1387{
1388 RT_NOREF(pVM, pVCpu, fUseDebugLoop);
1389 return fUseDebugLoop;
1390}
1391
1392
1393VMMR3_INT_DECL(int) NEMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvR3,
1394 uint8_t *pu2State, uint32_t *puNemRange)
1395{
1396 RT_NOREF(pVM, puNemRange);
1397
1398 Log5(("NEMR3NotifyPhysRamRegister: %RGp LB %RGp, pvR3=%p\n", GCPhys, cb, pvR3));
1399#if defined(VBOX_WITH_PGM_NEM_MODE)
1400 if (pvR3)
1401 {
1402 int rc = nemR3DarwinMap(pVM, GCPhys, pvR3, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
1403 if (RT_FAILURE(rc))
1404 {
1405 LogRel(("NEMR3NotifyPhysRamRegister: GCPhys=%RGp LB %RGp pvR3=%p rc=%Rrc\n", GCPhys, cb, pvR3, rc));
1406 return VERR_NEM_MAP_PAGES_FAILED;
1407 }
1408 }
1409 return VINF_SUCCESS;
1410#else
1411 RT_NOREF(pVM, GCPhys, cb, pvR3);
1412 return VERR_NEM_MAP_PAGES_FAILED;
1413#endif
1414}
1415
1416
1417VMMR3_INT_DECL(bool) NEMR3IsMmio2DirtyPageTrackingSupported(PVM pVM)
1418{
1419 RT_NOREF(pVM);
1420 return false;
1421}
1422
1423
1424VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
1425 void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
1426{
1427 RT_NOREF(pVM, puNemRange, pvRam, fFlags);
1428
1429 Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p (%d)\n",
1430 GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, *pu2State));
1431
1432#if defined(VBOX_WITH_PGM_NEM_MODE)
1433 /*
1434 * Unmap the RAM we're replacing.
1435 */
1436 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
1437 {
1438 int rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
1439 if (RT_SUCCESS(rc))
1440 { /* likely */ }
1441 else if (pvMmio2)
1442 LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc(ignored)\n",
1443 GCPhys, cb, fFlags, rc));
1444 else
1445 {
1446 LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc\n",
1447 GCPhys, cb, fFlags, rc));
1448 return VERR_NEM_UNMAP_PAGES_FAILED;
1449 }
1450 }
1451
1452 /*
1453 * Map MMIO2 if any.
1454 */
1455 if (pvMmio2)
1456 {
1457 Assert(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2);
1458 int rc = nemR3DarwinMap(pVM, GCPhys, pvMmio2, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
1459 if (RT_FAILURE(rc))
1460 {
1461 LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x pvMmio2=%p: Map -> rc=%Rrc\n",
1462 GCPhys, cb, fFlags, pvMmio2, rc));
1463 return VERR_NEM_MAP_PAGES_FAILED;
1464 }
1465 }
1466 else
1467 Assert(!(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2));
1468
1469#else
1470 RT_NOREF(pVM, GCPhys, cb, pvRam, pvMmio2);
1471 *pu2State = (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE) ? UINT8_MAX : NEM_DARWIN_PAGE_STATE_UNMAPPED;
1472#endif
1473 return VINF_SUCCESS;
1474}
1475
1476
1477VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
1478 void *pvRam, void *pvMmio2, uint32_t *puNemRange)
1479{
1480 RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, puNemRange);
1481 return VINF_SUCCESS;
1482}
1483
1484
1485VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExUnmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, void *pvRam,
1486 void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
1487{
1488 RT_NOREF(pVM, puNemRange);
1489
1490 Log5(("NEMR3NotifyPhysMmioExUnmap: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p uNemRange=%#x (%#x)\n",
1491 GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, puNemRange, *puNemRange));
1492
1493 int rc = VINF_SUCCESS;
1494#if defined(VBOX_WITH_PGM_NEM_MODE)
1495 /*
1496 * Unmap the MMIO2 pages.
1497 */
1498 /** @todo If we implement aliasing (MMIO2 page aliased into MMIO range),
1499 * we may have more stuff to unmap even in case of pure MMIO... */
1500 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2)
1501 {
1502 rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
1503 if (RT_FAILURE(rc))
1504 {
1505 LogRel2(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc\n",
1506 GCPhys, cb, fFlags, rc));
1507 rc = VERR_NEM_UNMAP_PAGES_FAILED;
1508 }
1509 }
1510
1511 /* Ensure the page is marked as unmapped if relevant. */
1512 Assert(!pu2State || *pu2State == NEM_DARWIN_PAGE_STATE_UNMAPPED);
1513
1514 /*
1515 * Restore the RAM we replaced.
1516 */
1517 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
1518 {
1519 AssertPtr(pvRam);
1520 rc = nemR3DarwinMap(pVM, GCPhys, pvRam, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
1521 if (RT_SUCCESS(rc))
1522 { /* likely */ }
1523 else
1524 {
1525 LogRel(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp pvMmio2=%p rc=%Rrc\n", GCPhys, cb, pvMmio2, rc));
1526 rc = VERR_NEM_MAP_PAGES_FAILED;
1527 }
1528 }
1529
1530 RT_NOREF(pvMmio2);
1531#else
1532 RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State);
1533 if (pu2State)
1534 *pu2State = UINT8_MAX;
1535 rc = VERR_NEM_UNMAP_PAGES_FAILED;
1536#endif
1537 return rc;
1538}
1539
1540
1541VMMR3_INT_DECL(int) NEMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t uNemRange,
1542 void *pvBitmap, size_t cbBitmap)
1543{
1544 RT_NOREF(pVM, GCPhys, cb, uNemRange, pvBitmap, cbBitmap);
1545 AssertReleaseFailed();
1546 return VERR_NOT_IMPLEMENTED;
1547}
1548
1549
1550VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages, uint32_t fFlags,
1551 uint8_t *pu2State, uint32_t *puNemRange)
1552{
1553 RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);
1554
1555 Log5(("nemR3NativeNotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags));
1556 *pu2State = UINT8_MAX;
1557 *puNemRange = 0;
1558 return VINF_SUCCESS;
1559}
1560
1561
1562VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages,
1563 uint32_t fFlags, uint8_t *pu2State, uint32_t *puNemRange)
1564{
1565 Log5(("nemR3NativeNotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p (%d) puNemRange=%p (%#x)\n",
1566 GCPhys, cb, pvPages, fFlags, pu2State, *pu2State, puNemRange, *puNemRange));
1567 *pu2State = UINT8_MAX;
1568
1569#if defined(VBOX_WITH_PGM_NEM_MODE)
1570 /*
1571 * (Re-)map readonly.
1572 */
1573 AssertPtrReturn(pvPages, VERR_INVALID_POINTER);
1574 int rc = nemR3DarwinMap(pVM, GCPhys, pvPages, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE, pu2State);
1575 if (RT_FAILURE(rc))
1576 {
1577 LogRel(("nemR3NativeNotifyPhysRomRegisterLate: GCPhys=%RGp LB %RGp pvPages=%p fFlags=%#x rc=%Rrc\n",
1578 GCPhys, cb, pvPages, fFlags, rc));
1579 return VERR_NEM_MAP_PAGES_FAILED;
1580 }
1581 RT_NOREF(fFlags, puNemRange);
1582 return VINF_SUCCESS;
1583#else
1584 RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);
1585 return VERR_NEM_MAP_PAGES_FAILED;
1586#endif
1587}
1588
1589
1590VMM_INT_DECL(void) NEMHCNotifyHandlerPhysicalDeregister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
1591 RTR3PTR pvMemR3, uint8_t *pu2State)
1592{
1593 RT_NOREF(pVM);
1594
1595 Log5(("NEMHCNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d pvMemR3=%p pu2State=%p (%d)\n",
1596 GCPhys, cb, enmKind, pvMemR3, pu2State, *pu2State));
1597
1598 *pu2State = UINT8_MAX;
1599#if defined(VBOX_WITH_PGM_NEM_MODE)
1600 if (pvMemR3)
1601 {
1602 int rc = nemR3DarwinMap(pVM, GCPhys, pvMemR3, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
1603 AssertLogRelMsgRC(rc, ("NEMHCNotifyHandlerPhysicalDeregister: nemR3DarwinMap(,%p,%RGp,%RGp,) -> %Rrc\n",
1604 pvMemR3, GCPhys, cb, rc));
1605 }
1606 RT_NOREF(enmKind);
1607#else
1608 RT_NOREF(pVM, enmKind, GCPhys, cb, pvMemR3);
1609 AssertFailed();
1610#endif
1611}
1612
1613
1614VMMR3_INT_DECL(void) NEMR3NotifySetA20(PVMCPU pVCpu, bool fEnabled)
1615{
1616 Log(("NEMR3NotifySetA20: fEnabled=%RTbool\n", fEnabled));
1617 RT_NOREF(pVCpu, fEnabled);
1618}
1619
1620
1621void nemHCNativeNotifyHandlerPhysicalRegister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
1622{
1623 Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
1624 NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb);
1625}
1626
1627
1628void nemHCNativeNotifyHandlerPhysicalModify(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
1629 RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
1630{
1631 Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
1632 GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
1633 NOREF(pVM); NOREF(enmKind); NOREF(GCPhysOld); NOREF(GCPhysNew); NOREF(cb); NOREF(fRestoreAsRAM);
1634}
1635
1636
1637int nemHCNativeNotifyPhysPageAllocated(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
1638 PGMPAGETYPE enmType, uint8_t *pu2State)
1639{
1640 Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
1641 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
1642 RT_NOREF(HCPhys, fPageProt, enmType);
1643
1644 return nemR3DarwinUnmap(pVM, GCPhys, GUEST_PAGE_SIZE, pu2State);
1645}
1646
1647
1648VMM_INT_DECL(void) NEMHCNotifyPhysPageProtChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, RTR3PTR pvR3, uint32_t fPageProt,
1649 PGMPAGETYPE enmType, uint8_t *pu2State)
1650{
1651 Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
1652 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
1653 RT_NOREF(HCPhys, pvR3, fPageProt, enmType)
1654
1655 nemR3DarwinUnmap(pVM, GCPhys, GUEST_PAGE_SIZE, pu2State);
1656}
1657
1658
1659VMM_INT_DECL(void) NEMHCNotifyPhysPageChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
1660 RTR3PTR pvNewR3, uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
1661{
1662 Log5(("NEMHCNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
1663 GCPhys, HCPhysPrev, HCPhysNew, fPageProt, enmType, *pu2State));
1664 RT_NOREF(HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType);
1665
1666 nemR3DarwinUnmap(pVM, GCPhys, GUEST_PAGE_SIZE, pu2State);
1667}
1668
1669
1670/**
1671 * Interface for importing state on demand (used by IEM).
1672 *
1673 * @returns VBox status code.
1674 * @param pVCpu The cross context CPU structure.
1675 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
1676 */
1677VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
1678{
1679 LogFlowFunc(("pVCpu=%p fWhat=%RX64\n", pVCpu, fWhat));
1680 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);
1681
1682 return nemR3DarwinCopyStateFromHv(pVCpu->pVMR3, pVCpu, fWhat);
1683}
1684
1685
1686/**
1687 * Query the CPU tick counter and optionally the TSC_AUX MSR value.
1688 *
1689 * @returns VBox status code.
1690 * @param pVCpu The cross context CPU structure.
1691 * @param pcTicks Where to return the CPU tick count.
1692 * @param puAux Where to return the TSC_AUX register value.
1693 */
1694VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPUCC pVCpu, uint64_t *pcTicks, uint32_t *puAux)
1695{
1696 LogFlowFunc(("pVCpu=%p pcTicks=%p puAux=%p\n", pVCpu, pcTicks, puAux));
1697 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatQueryCpuTick);
1698
1699 if (puAux)
1700 *puAux = 0;
1701 *pcTicks = ASMReadTSC() - pVCpu->nem.s.u64VTimerOff; /* This is the host timer minus the offset. */
1702 return VINF_SUCCESS;
1703}
1704
1705
1706/**
1707 * Resumes CPU clock (TSC) on all virtual CPUs.
1708 *
1709 * This is called by TM when the VM is started, restored, resumed or similar.
1710 *
1711 * @returns VBox status code.
1712 * @param pVM The cross context VM structure.
1713 * @param pVCpu The cross context CPU structure of the calling EMT.
1714 * @param uPausedTscValue The TSC value at the time of pausing.
1715 */
1716VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uPausedTscValue)
1717{
1718 LogFlowFunc(("pVM=%p pVCpu=%p uPausedTscValue=%RX64\n", pVM, pVCpu, uPausedTscValue));
1719 VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
1720 AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);
1721
1722 pVM->nem.s.u64VTimerValuePaused = uPausedTscValue;
1723
1724 /*
1725 * Set the flag to update the vTimer offset when the vCPU resumes for the first time
1726 * (needs to be done on the actual EMT).
1727 */
1728 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1729 {
1730 PVMCPUCC pVCpuDst = pVM->apCpusR3[idCpu];
1731 pVCpuDst->nem.s.fVTimerOffUpdate = true;
1732 }
1733
1734 return VINF_SUCCESS;
1735}
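/* Note: the actual reprogramming is deferred to the fVTimerOffUpdate block in
 * nemR3DarwinRunGuestNormal() because hv_vcpu_set_vtimer_offset() has to be
 * issued from the EMT owning the vCPU; this function merely records the paused
 * counter value and flags all vCPUs. */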
1736
1737
1738/**
1739 * Returns features supported by the NEM backend.
1740 *
1741 * @returns Flags of features supported by the native NEM backend.
1742 * @param pVM The cross context VM structure.
1743 */
1744VMM_INT_DECL(uint32_t) NEMHCGetFeatures(PVMCC pVM)
1745{
1746 RT_NOREF(pVM);
1747 /*
1748 * Apple's Hypervisor.framework is not supported if the CPU doesn't support nested paging
1749 * and unrestricted guest execution, so we can safely return these flags here always.
1750 */
1751 return NEM_FEAT_F_NESTED_PAGING | NEM_FEAT_F_FULL_GST_EXEC | NEM_FEAT_F_XSAVE_XRSTOR;
1752}
1753
1754
1755/** @page pg_nem_darwin NEM/darwin - Native Execution Manager, macOS.
1756 *
1757 * @todo Add notes as the implementation progresses...
1758 */
1759