VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/NEMR3Native-darwin-armv8.cpp@105689

Last change on this file since 105689 was 105689, checked in by vboxsync, 7 months ago

VMMArm/NEMR3Native-darwin-armv8: Some preliminary code to support the new nested virtualization (EL2) support with the upcoming Sequoia (15.0) on M3 hardware, not yet working, bugref:10747

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 113.4 KB
 
/* $Id: NEMR3Native-darwin-armv8.cpp 105689 2024-08-15 12:47:47Z vboxsync $ */
/** @file
 * NEM - Native execution manager, native ring-3 macOS backend using Hypervisor.framework, ARMv8 variant.
 *
 * Log group 2: Exit logging.
 * Log group 3: Log context on exit.
 * Log group 5: Ring-3 memory management
 */

/*
 * Copyright (C) 2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.alldomusa.eu.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_NEM
#define VMCPU_INCL_CPUM_GST_CTX
#include <VBox/vmm/nem.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/gic.h>
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/dbgftrace.h>
#include <VBox/vmm/gcm.h>
#include "NEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/vmm/vmm.h>
#include <VBox/gic.h>
#include "dtrace/VBoxVMM.h"

#include <iprt/armv8.h>
#include <iprt/asm.h>
#include <iprt/asm-arm.h>
#include <iprt/asm-math.h>
#include <iprt/ldr.h>
#include <iprt/mem.h>
#include <iprt/path.h>
#include <iprt/string.h>
#include <iprt/system.h>
#include <iprt/utf16.h>

#include <iprt/formats/arm-psci.h>

#include <mach/mach_time.h>
#include <mach/kern_return.h>

#include <Hypervisor/Hypervisor.h>


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/

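/*
 * Note: when building against a pre-15.0 SDK the GIC/EL2 types below are not
 * in the Hypervisor.framework headers yet, so compatible declarations are
 * provided here. The corresponding functions are resolved at runtime (see
 * g_aImports below), so a binary built this way still reaches the real API
 * when running on a 15.0+ host.
 */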
#if MAC_OS_X_VERSION_MIN_REQUIRED < 150000

/* Since 15.0+ */
typedef enum hv_gic_distributor_reg_t : uint16_t
{
    HV_GIC_DISTRIBUTOR_REG_GICD_CTLR,
    HV_GIC_DISTRIBUTOR_REG_GICD_ICACTIVER0
    /** @todo */
} hv_gic_distributor_reg_t;


typedef enum hv_gic_icc_reg_t : uint16_t
{
    HV_GIC_ICC_REG_AP0R0_EL1
    /** @todo */
} hv_gic_icc_reg_t;


typedef enum hv_gic_ich_reg_t : uint16_t
{
    HV_GIC_ICH_REG_AP0R0_EL2
    /** @todo */
} hv_gic_ich_reg_t;


typedef enum hv_gic_icv_reg_t : uint16_t
{
    HV_GIC_ICV_REG_AP0R0_EL1
    /** @todo */
} hv_gic_icv_reg_t;


typedef enum hv_gic_msi_reg_t : uint16_t
{
    HV_GIC_REG_GICM_SET_SPI_NSR
    /** @todo */
} hv_gic_msi_reg_t;


typedef enum hv_gic_redistributor_reg_t : uint16_t
{
    HV_GIC_REDISTRIBUTOR_REG_GICR_ICACTIVER0
    /** @todo */
} hv_gic_redistributor_reg_t;


typedef enum hv_gic_intid_t : uint16_t
{
    HV_GIC_INT_EL1_PHYSICAL_TIMER  = 23,
    HV_GIC_INT_EL1_VIRTUAL_TIMER   = 25,
    HV_GIC_INT_EL2_PHYSICAL_TIMER  = 26,
    HV_GIC_INT_MAINTENANCE         = 27,
    HV_GIC_INT_PERFORMANCE_MONITOR = 30
} hv_gic_intid_t;

#endif

typedef hv_return_t FN_HV_VM_CONFIG_GET_EL2_SUPPORTED(bool *el2_supported);
typedef hv_return_t FN_HV_VM_CONFIG_GET_EL2_ENABLED(hv_vm_config_t config, bool *el2_enabled);
typedef hv_return_t FN_HV_VM_CONFIG_SET_EL2_ENABLED(hv_vm_config_t config, bool el2_enabled);

typedef struct hv_gic_config_s *hv_gic_config_t;
typedef hv_return_t FN_HV_GIC_CREATE(hv_gic_config_t gic_config);
typedef hv_return_t FN_HV_GIC_RESET(void);
typedef hv_gic_config_t FN_HV_GIC_CONFIG_CREATE(void);
typedef hv_return_t FN_HV_GIC_CONFIG_SET_DISTRIBUTOR_BASE(hv_gic_config_t config, hv_ipa_t distributor_base_address);
typedef hv_return_t FN_HV_GIC_CONFIG_SET_REDISTRIBUTOR_BASE(hv_gic_config_t config, hv_ipa_t redistributor_base_address);
typedef hv_return_t FN_HV_GIC_CONFIG_SET_MSI_REGION_BASE(hv_gic_config_t config, hv_ipa_t msi_region_base_address);
typedef hv_return_t FN_HV_GIC_CONFIG_SET_MSI_INTERRUPT_RANGE(hv_gic_config_t config, uint32_t msi_intid_base, uint32_t msi_intid_count);

typedef hv_return_t FN_HV_GIC_GET_REDISTRIBUTOR_BASE(hv_vcpu_t vcpu, hv_ipa_t *redistributor_base_address);
typedef hv_return_t FN_HV_GIC_GET_REDISTRIBUTOR_REGION_SIZE(size_t *redistributor_region_size);
typedef hv_return_t FN_HV_GIC_GET_REDISTRIBUTOR_SIZE(size_t *redistributor_size);
typedef hv_return_t FN_HV_GIC_GET_DISTRIBUTOR_SIZE(size_t *distributor_size);
typedef hv_return_t FN_HV_GIC_GET_DISTRIBUTOR_BASE_ALIGNMENT(size_t *distributor_base_alignment);
typedef hv_return_t FN_HV_GIC_GET_REDISTRIBUTOR_BASE_ALIGNMENT(size_t *redistributor_base_alignment);
typedef hv_return_t FN_HV_GIC_GET_MSI_REGION_BASE_ALIGNMENT(size_t *msi_region_base_alignment);
typedef hv_return_t FN_HV_GIC_GET_MSI_REGION_SIZE(size_t *msi_region_size);
typedef hv_return_t FN_HV_GIC_GET_SPI_INTERRUPT_RANGE(uint32_t *spi_intid_base, uint32_t *spi_intid_count);

typedef struct hv_gic_state_s *hv_gic_state_t;
typedef hv_gic_state_t FN_HV_GIC_STATE_CREATE(void);
typedef hv_return_t FN_HV_GIC_SET_STATE(const void *gic_state_data, size_t gic_state_size);
typedef hv_return_t FN_HV_GIC_STATE_GET_SIZE(hv_gic_state_t state, size_t *gic_state_size);
typedef hv_return_t FN_HV_GIC_STATE_GET_DATA(hv_gic_state_t state, void *gic_state_data);

typedef hv_return_t FN_HV_GIC_SEND_MSI(hv_ipa_t address, uint32_t intid);
typedef hv_return_t FN_HV_GIC_SET_SPI(uint32_t intid, bool level);

typedef hv_return_t FN_HV_GIC_GET_DISTRIBUTOR_REG(hv_gic_distributor_reg_t reg, uint64_t *value);
typedef hv_return_t FN_HV_GIC_GET_MSI_REG(hv_gic_msi_reg_t reg, uint64_t *value);
typedef hv_return_t FN_HV_GIC_GET_ICC_REG(hv_vcpu_t vcpu, hv_gic_icc_reg_t reg, uint64_t *value);
typedef hv_return_t FN_HV_GIC_GET_ICH_REG(hv_vcpu_t vcpu, hv_gic_ich_reg_t reg, uint64_t *value);
typedef hv_return_t FN_HV_GIC_GET_ICV_REG(hv_vcpu_t vcpu, hv_gic_icv_reg_t reg, uint64_t *value);
typedef hv_return_t FN_HV_GIC_GET_REDISTRIBUTOR_REG(hv_vcpu_t vcpu, hv_gic_redistributor_reg_t reg, uint64_t *value);

typedef hv_return_t FN_HV_GIC_SET_DISTRIBUTOR_REG(hv_gic_distributor_reg_t reg, uint64_t value);
typedef hv_return_t FN_HV_GIC_SET_MSI_REG(hv_gic_msi_reg_t reg, uint64_t value);
typedef hv_return_t FN_HV_GIC_SET_ICC_REG(hv_vcpu_t vcpu, hv_gic_icc_reg_t reg, uint64_t value);
typedef hv_return_t FN_HV_GIC_SET_ICH_REG(hv_vcpu_t vcpu, hv_gic_ich_reg_t reg, uint64_t value);
typedef hv_return_t FN_HV_GIC_SET_ICV_REG(hv_vcpu_t vcpu, hv_gic_icv_reg_t reg, uint64_t value);
typedef hv_return_t FN_HV_GIC_SET_REDISTRIBUTOR_REG(hv_vcpu_t vcpu, hv_gic_redistributor_reg_t reg, uint64_t value);

typedef hv_return_t FN_HV_GIC_GET_INTID(hv_gic_intid_t interrupt, uint32_t *intid);
/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
/** @name Optional APIs imported from Hypervisor.framework.
 * @{ */
static FN_HV_VM_CONFIG_GET_EL2_SUPPORTED *g_pfnHvVmConfigGetEl2Supported = NULL; /* Since 15.0 */
static FN_HV_VM_CONFIG_GET_EL2_ENABLED *g_pfnHvVmConfigGetEl2Enabled = NULL; /* Since 15.0 */
static FN_HV_VM_CONFIG_SET_EL2_ENABLED *g_pfnHvVmConfigSetEl2Enabled = NULL; /* Since 15.0 */

static FN_HV_GIC_CREATE *g_pfnHvGicCreate = NULL; /* Since 15.0 */
static FN_HV_GIC_RESET *g_pfnHvGicReset = NULL; /* Since 15.0 */
static FN_HV_GIC_CONFIG_CREATE *g_pfnHvGicConfigCreate = NULL; /* Since 15.0 */
static FN_HV_GIC_CONFIG_SET_DISTRIBUTOR_BASE *g_pfnHvGicConfigSetDistributorBase = NULL; /* Since 15.0 */
static FN_HV_GIC_CONFIG_SET_REDISTRIBUTOR_BASE *g_pfnHvGicConfigSetRedistributorBase = NULL; /* Since 15.0 */
static FN_HV_GIC_CONFIG_SET_MSI_REGION_BASE *g_pfnHvGicConfigSetMsiRegionBase = NULL; /* Since 15.0 */
static FN_HV_GIC_CONFIG_SET_MSI_INTERRUPT_RANGE *g_pfnHvGicConfigSetMsiInterruptRange = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_REDISTRIBUTOR_BASE *g_pfnHvGicGetRedistributorBase = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_REDISTRIBUTOR_REGION_SIZE *g_pfnHvGicGetRedistributorRegionSize = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_REDISTRIBUTOR_SIZE *g_pfnHvGicGetRedistributorSize = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_DISTRIBUTOR_SIZE *g_pfnHvGicGetDistributorSize = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_DISTRIBUTOR_BASE_ALIGNMENT *g_pfnHvGicGetDistributorBaseAlignment = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_REDISTRIBUTOR_BASE_ALIGNMENT *g_pfnHvGicGetRedistributorBaseAlignment = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_MSI_REGION_BASE_ALIGNMENT *g_pfnHvGicGetMsiRegionBaseAlignment = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_MSI_REGION_SIZE *g_pfnHvGicGetMsiRegionSize = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_SPI_INTERRUPT_RANGE *g_pfnHvGicGetSpiInterruptRange = NULL; /* Since 15.0 */
static FN_HV_GIC_STATE_CREATE *g_pfnHvGicStateCreate = NULL; /* Since 15.0 */
static FN_HV_GIC_SET_STATE *g_pfnHvGicSetState = NULL; /* Since 15.0 */
static FN_HV_GIC_STATE_GET_SIZE *g_pfnHvGicStateGetSize = NULL; /* Since 15.0 */
static FN_HV_GIC_STATE_GET_DATA *g_pfnHvGicStateGetData = NULL; /* Since 15.0 */
static FN_HV_GIC_SEND_MSI *g_pfnHvGicSendMsi = NULL; /* Since 15.0 */
static FN_HV_GIC_SET_SPI *g_pfnHvGicSetSpi = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_DISTRIBUTOR_REG *g_pfnHvGicGetDistributorReg = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_MSI_REG *g_pfnHvGicGetMsiReg = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_ICC_REG *g_pfnHvGicGetIccReg = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_ICH_REG *g_pfnHvGicGetIchReg = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_ICV_REG *g_pfnHvGicGetIcvReg = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_REDISTRIBUTOR_REG *g_pfnHvGicGetRedistributorReg = NULL; /* Since 15.0 */
static FN_HV_GIC_SET_DISTRIBUTOR_REG *g_pfnHvGicSetDistributorReg = NULL; /* Since 15.0 */
static FN_HV_GIC_SET_MSI_REG *g_pfnHvGicSetMsiReg = NULL; /* Since 15.0 */
static FN_HV_GIC_SET_ICC_REG *g_pfnHvGicSetIccReg = NULL; /* Since 15.0 */
static FN_HV_GIC_SET_ICH_REG *g_pfnHvGicSetIchReg = NULL; /* Since 15.0 */
static FN_HV_GIC_SET_ICV_REG *g_pfnHvGicSetIcvReg = NULL; /* Since 15.0 */
static FN_HV_GIC_SET_REDISTRIBUTOR_REG *g_pfnHvGicSetRedistributorReg = NULL; /* Since 15.0 */
static FN_HV_GIC_GET_INTID *g_pfnHvGicGetIntid = NULL; /* Since 15.0 */
/** @} */


/**
 * Import instructions.
 */
static const struct
{
    void **ppfn;         /**< The function pointer variable. */
    const char *pszName; /**< The function name. */
} g_aImports[] =
{
#define NEM_DARWIN_IMPORT(a_Pfn, a_Name) { (void **)&(a_Pfn), #a_Name }
    NEM_DARWIN_IMPORT(g_pfnHvVmConfigGetEl2Supported, hv_vm_config_get_el2_supported),
    NEM_DARWIN_IMPORT(g_pfnHvVmConfigGetEl2Enabled, hv_vm_config_get_el2_enabled),
    NEM_DARWIN_IMPORT(g_pfnHvVmConfigSetEl2Enabled, hv_vm_config_set_el2_enabled),

    NEM_DARWIN_IMPORT(g_pfnHvGicCreate, hv_gic_create),
    NEM_DARWIN_IMPORT(g_pfnHvGicReset, hv_gic_reset),
    NEM_DARWIN_IMPORT(g_pfnHvGicConfigCreate, hv_gic_config_create),
    NEM_DARWIN_IMPORT(g_pfnHvGicConfigSetDistributorBase, hv_gic_config_set_distributor_base),
    NEM_DARWIN_IMPORT(g_pfnHvGicConfigSetRedistributorBase, hv_gic_config_set_redistributor_base),
    NEM_DARWIN_IMPORT(g_pfnHvGicConfigSetMsiRegionBase, hv_gic_config_set_msi_region_base),
    NEM_DARWIN_IMPORT(g_pfnHvGicConfigSetMsiInterruptRange, hv_gic_config_set_msi_interrupt_range),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetRedistributorBase, hv_gic_get_redistributor_base),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetRedistributorRegionSize, hv_gic_get_redistributor_region_size),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetRedistributorSize, hv_gic_get_redistributor_size),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetDistributorSize, hv_gic_get_distributor_size),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetDistributorBaseAlignment, hv_gic_get_distributor_base_alignment),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetRedistributorBaseAlignment, hv_gic_get_redistributor_base_alignment),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetMsiRegionBaseAlignment, hv_gic_get_msi_region_base_alignment),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetMsiRegionSize, hv_gic_get_msi_region_size),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetSpiInterruptRange, hv_gic_get_spi_interrupt_range),
    NEM_DARWIN_IMPORT(g_pfnHvGicStateCreate, hv_gic_state_create),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetState, hv_gic_set_state),
    NEM_DARWIN_IMPORT(g_pfnHvGicStateGetSize, hv_gic_state_get_size),
    NEM_DARWIN_IMPORT(g_pfnHvGicStateGetData, hv_gic_state_get_data),
    NEM_DARWIN_IMPORT(g_pfnHvGicSendMsi, hv_gic_send_msi),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetSpi, hv_gic_set_spi),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetDistributorReg, hv_gic_get_distributor_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetMsiReg, hv_gic_get_msi_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetIccReg, hv_gic_get_icc_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetIchReg, hv_gic_get_ich_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetIcvReg, hv_gic_get_icv_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetRedistributorReg, hv_gic_get_redistributor_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetDistributorReg, hv_gic_set_distributor_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetMsiReg, hv_gic_set_msi_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetIccReg, hv_gic_set_icc_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetIchReg, hv_gic_set_ich_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetIcvReg, hv_gic_set_icv_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicSetRedistributorReg, hv_gic_set_redistributor_reg),
    NEM_DARWIN_IMPORT(g_pfnHvGicGetIntid, hv_gic_get_intid)
#undef NEM_DARWIN_IMPORT
};


/*
 * Let the preprocessor alias the APIs to import variables for better autocompletion.
 */
#ifndef IN_SLICKEDIT
# define hv_vm_config_get_el2_supported g_pfnHvVmConfigGetEl2Supported
# define hv_vm_config_get_el2_enabled g_pfnHvVmConfigGetEl2Enabled
# define hv_vm_config_set_el2_enabled g_pfnHvVmConfigSetEl2Enabled

# define hv_gic_create g_pfnHvGicCreate
# define hv_gic_reset g_pfnHvGicReset
# define hv_gic_config_create g_pfnHvGicConfigCreate
# define hv_gic_config_set_distributor_base g_pfnHvGicConfigSetDistributorBase
# define hv_gic_config_set_redistributor_base g_pfnHvGicConfigSetRedistributorBase
# define hv_gic_config_set_msi_region_base g_pfnHvGicConfigSetMsiRegionBase
# define hv_gic_config_set_msi_interrupt_range g_pfnHvGicConfigSetMsiInterruptRange
# define hv_gic_get_redistributor_base g_pfnHvGicGetRedistributorBase
# define hv_gic_get_redistributor_region_size g_pfnHvGicGetRedistributorRegionSize
# define hv_gic_get_redistributor_size g_pfnHvGicGetRedistributorSize
# define hv_gic_get_distributor_size g_pfnHvGicGetDistributorSize
# define hv_gic_get_distributor_base_alignment g_pfnHvGicGetDistributorBaseAlignment
# define hv_gic_get_redistributor_base_alignment g_pfnHvGicGetRedistributorBaseAlignment
# define hv_gic_get_msi_region_base_alignment g_pfnHvGicGetMsiRegionBaseAlignment
# define hv_gic_get_msi_region_size g_pfnHvGicGetMsiRegionSize
# define hv_gic_get_spi_interrupt_range g_pfnHvGicGetSpiInterruptRange
# define hv_gic_state_create g_pfnHvGicStateCreate
# define hv_gic_set_state g_pfnHvGicSetState
# define hv_gic_state_get_size g_pfnHvGicStateGetSize
# define hv_gic_state_get_data g_pfnHvGicStateGetData
# define hv_gic_send_msi g_pfnHvGicSendMsi
# define hv_gic_set_spi g_pfnHvGicSetSpi
# define hv_gic_get_distributor_reg g_pfnHvGicGetDistributorReg
# define hv_gic_get_msi_reg g_pfnHvGicGetMsiReg
# define hv_gic_get_icc_reg g_pfnHvGicGetIccReg
# define hv_gic_get_ich_reg g_pfnHvGicGetIchReg
# define hv_gic_get_icv_reg g_pfnHvGicGetIcvReg
# define hv_gic_get_redistributor_reg g_pfnHvGicGetRedistributorReg
# define hv_gic_set_distributor_reg g_pfnHvGicSetDistributorReg
# define hv_gic_set_msi_reg g_pfnHvGicSetMsiReg
# define hv_gic_set_icc_reg g_pfnHvGicSetIccReg
# define hv_gic_set_ich_reg g_pfnHvGicSetIchReg
# define hv_gic_set_icv_reg g_pfnHvGicSetIcvReg
# define hv_gic_set_redistributor_reg g_pfnHvGicSetRedistributorReg
# define hv_gic_get_intid g_pfnHvGicGetIntid
#endif
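
/*
 * With the aliasing above, checking whether an optional API is available on
 * the running host reduces to a NULL check on the resolved pointer. A sketch
 * of the pattern used further down in this file:
 *     if (hv_gic_create)
 *     {
 *         hv_return_t hrc = hv_gic_create(hGicCfg);
 *         ...
 *     }
 */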


/** The general registers. */
static const struct
{
    hv_reg_t enmHvReg;
    uint32_t fCpumExtrn;
    uint32_t offCpumCtx;
} s_aCpumRegs[] =
{
#define CPUM_GREG_EMIT_X0_X3(a_Idx)  { HV_REG_X ## a_Idx, CPUMCTX_EXTRN_X ## a_Idx, RT_UOFFSETOF(CPUMCTX, aGRegs[a_Idx].x) }
#define CPUM_GREG_EMIT_X4_X28(a_Idx) { HV_REG_X ## a_Idx, CPUMCTX_EXTRN_X4_X28, RT_UOFFSETOF(CPUMCTX, aGRegs[a_Idx].x) }
    CPUM_GREG_EMIT_X0_X3(0),
    CPUM_GREG_EMIT_X0_X3(1),
    CPUM_GREG_EMIT_X0_X3(2),
    CPUM_GREG_EMIT_X0_X3(3),
    CPUM_GREG_EMIT_X4_X28(4),
    CPUM_GREG_EMIT_X4_X28(5),
    CPUM_GREG_EMIT_X4_X28(6),
    CPUM_GREG_EMIT_X4_X28(7),
    CPUM_GREG_EMIT_X4_X28(8),
    CPUM_GREG_EMIT_X4_X28(9),
    CPUM_GREG_EMIT_X4_X28(10),
    CPUM_GREG_EMIT_X4_X28(11),
    CPUM_GREG_EMIT_X4_X28(12),
    CPUM_GREG_EMIT_X4_X28(13),
    CPUM_GREG_EMIT_X4_X28(14),
    CPUM_GREG_EMIT_X4_X28(15),
    CPUM_GREG_EMIT_X4_X28(16),
    CPUM_GREG_EMIT_X4_X28(17),
    CPUM_GREG_EMIT_X4_X28(18),
    CPUM_GREG_EMIT_X4_X28(19),
    CPUM_GREG_EMIT_X4_X28(20),
    CPUM_GREG_EMIT_X4_X28(21),
    CPUM_GREG_EMIT_X4_X28(22),
    CPUM_GREG_EMIT_X4_X28(23),
    CPUM_GREG_EMIT_X4_X28(24),
    CPUM_GREG_EMIT_X4_X28(25),
    CPUM_GREG_EMIT_X4_X28(26),
    CPUM_GREG_EMIT_X4_X28(27),
    CPUM_GREG_EMIT_X4_X28(28),
    { HV_REG_FP, CPUMCTX_EXTRN_FP, RT_UOFFSETOF(CPUMCTX, aGRegs[29].x) },
    { HV_REG_LR, CPUMCTX_EXTRN_LR, RT_UOFFSETOF(CPUMCTX, aGRegs[30].x) },
    { HV_REG_PC, CPUMCTX_EXTRN_PC, RT_UOFFSETOF(CPUMCTX, Pc.u64) },
    { HV_REG_FPCR, CPUMCTX_EXTRN_FPCR, RT_UOFFSETOF(CPUMCTX, fpcr) },
    { HV_REG_FPSR, CPUMCTX_EXTRN_FPSR, RT_UOFFSETOF(CPUMCTX, fpsr) }
#undef CPUM_GREG_EMIT_X0_X3
#undef CPUM_GREG_EMIT_X4_X28
};
/** SIMD/FP registers. */
static const struct
{
    hv_simd_fp_reg_t enmHvReg;
    uint32_t offCpumCtx;
} s_aCpumFpRegs[] =
{
#define CPUM_VREG_EMIT(a_Idx) { HV_SIMD_FP_REG_Q ## a_Idx, RT_UOFFSETOF(CPUMCTX, aVRegs[a_Idx].v) }
    CPUM_VREG_EMIT(0),
    CPUM_VREG_EMIT(1),
    CPUM_VREG_EMIT(2),
    CPUM_VREG_EMIT(3),
    CPUM_VREG_EMIT(4),
    CPUM_VREG_EMIT(5),
    CPUM_VREG_EMIT(6),
    CPUM_VREG_EMIT(7),
    CPUM_VREG_EMIT(8),
    CPUM_VREG_EMIT(9),
    CPUM_VREG_EMIT(10),
    CPUM_VREG_EMIT(11),
    CPUM_VREG_EMIT(12),
    CPUM_VREG_EMIT(13),
    CPUM_VREG_EMIT(14),
    CPUM_VREG_EMIT(15),
    CPUM_VREG_EMIT(16),
    CPUM_VREG_EMIT(17),
    CPUM_VREG_EMIT(18),
    CPUM_VREG_EMIT(19),
    CPUM_VREG_EMIT(20),
    CPUM_VREG_EMIT(21),
    CPUM_VREG_EMIT(22),
    CPUM_VREG_EMIT(23),
    CPUM_VREG_EMIT(24),
    CPUM_VREG_EMIT(25),
    CPUM_VREG_EMIT(26),
    CPUM_VREG_EMIT(27),
    CPUM_VREG_EMIT(28),
    CPUM_VREG_EMIT(29),
    CPUM_VREG_EMIT(30),
    CPUM_VREG_EMIT(31)
#undef CPUM_VREG_EMIT
};
/** Debug system registers. */
static const struct
{
    hv_sys_reg_t enmHvReg;
    uint32_t offCpumCtx;
} s_aCpumDbgRegs[] =
{
#define CPUM_DBGREG_EMIT(a_BorW, a_Idx) \
    { HV_SYS_REG_DBG ## a_BorW ## CR ## a_Idx ## _EL1, RT_UOFFSETOF(CPUMCTX, a ## a_BorW ## p[a_Idx].Ctrl.u64) }, \
    { HV_SYS_REG_DBG ## a_BorW ## VR ## a_Idx ## _EL1, RT_UOFFSETOF(CPUMCTX, a ## a_BorW ## p[a_Idx].Value.u64) }
    /* Breakpoint registers. */
    CPUM_DBGREG_EMIT(B, 0),
    CPUM_DBGREG_EMIT(B, 1),
    CPUM_DBGREG_EMIT(B, 2),
    CPUM_DBGREG_EMIT(B, 3),
    CPUM_DBGREG_EMIT(B, 4),
    CPUM_DBGREG_EMIT(B, 5),
    CPUM_DBGREG_EMIT(B, 6),
    CPUM_DBGREG_EMIT(B, 7),
    CPUM_DBGREG_EMIT(B, 8),
    CPUM_DBGREG_EMIT(B, 9),
    CPUM_DBGREG_EMIT(B, 10),
    CPUM_DBGREG_EMIT(B, 11),
    CPUM_DBGREG_EMIT(B, 12),
    CPUM_DBGREG_EMIT(B, 13),
    CPUM_DBGREG_EMIT(B, 14),
    CPUM_DBGREG_EMIT(B, 15),
    /* Watchpoint registers. */
    CPUM_DBGREG_EMIT(W, 0),
    CPUM_DBGREG_EMIT(W, 1),
    CPUM_DBGREG_EMIT(W, 2),
    CPUM_DBGREG_EMIT(W, 3),
    CPUM_DBGREG_EMIT(W, 4),
    CPUM_DBGREG_EMIT(W, 5),
    CPUM_DBGREG_EMIT(W, 6),
    CPUM_DBGREG_EMIT(W, 7),
    CPUM_DBGREG_EMIT(W, 8),
    CPUM_DBGREG_EMIT(W, 9),
    CPUM_DBGREG_EMIT(W, 10),
    CPUM_DBGREG_EMIT(W, 11),
    CPUM_DBGREG_EMIT(W, 12),
    CPUM_DBGREG_EMIT(W, 13),
    CPUM_DBGREG_EMIT(W, 14),
    CPUM_DBGREG_EMIT(W, 15),
    { HV_SYS_REG_MDSCR_EL1, RT_UOFFSETOF(CPUMCTX, Mdscr.u64) }
#undef CPUM_DBGREG_EMIT
};
/** PAuth key system registers. */
static const struct
{
    hv_sys_reg_t enmHvReg;
    uint32_t offCpumCtx;
} s_aCpumPAuthKeyRegs[] =
{
    { HV_SYS_REG_APDAKEYLO_EL1, RT_UOFFSETOF(CPUMCTX, Apda.Low.u64) },
    { HV_SYS_REG_APDAKEYHI_EL1, RT_UOFFSETOF(CPUMCTX, Apda.High.u64) },
    { HV_SYS_REG_APDBKEYLO_EL1, RT_UOFFSETOF(CPUMCTX, Apdb.Low.u64) },
    { HV_SYS_REG_APDBKEYHI_EL1, RT_UOFFSETOF(CPUMCTX, Apdb.High.u64) },
    { HV_SYS_REG_APGAKEYLO_EL1, RT_UOFFSETOF(CPUMCTX, Apga.Low.u64) },
    { HV_SYS_REG_APGAKEYHI_EL1, RT_UOFFSETOF(CPUMCTX, Apga.High.u64) },
    { HV_SYS_REG_APIAKEYLO_EL1, RT_UOFFSETOF(CPUMCTX, Apia.Low.u64) },
    { HV_SYS_REG_APIAKEYHI_EL1, RT_UOFFSETOF(CPUMCTX, Apia.High.u64) },
    { HV_SYS_REG_APIBKEYLO_EL1, RT_UOFFSETOF(CPUMCTX, Apib.Low.u64) },
    { HV_SYS_REG_APIBKEYHI_EL1, RT_UOFFSETOF(CPUMCTX, Apib.High.u64) }
};
/** System registers. */
static const struct
{
    hv_sys_reg_t enmHvReg;
    uint32_t fCpumExtrn;
    uint32_t offCpumCtx;
} s_aCpumSysRegs[] =
{
    { HV_SYS_REG_SP_EL0, CPUMCTX_EXTRN_SP, RT_UOFFSETOF(CPUMCTX, aSpReg[0].u64) },
    { HV_SYS_REG_SP_EL1, CPUMCTX_EXTRN_SP, RT_UOFFSETOF(CPUMCTX, aSpReg[1].u64) },
    { HV_SYS_REG_SPSR_EL1, CPUMCTX_EXTRN_SPSR, RT_UOFFSETOF(CPUMCTX, Spsr.u64) },
    { HV_SYS_REG_ELR_EL1, CPUMCTX_EXTRN_ELR, RT_UOFFSETOF(CPUMCTX, Elr.u64) },
    { HV_SYS_REG_SCTLR_EL1, CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Sctlr.u64) },
    { HV_SYS_REG_TCR_EL1, CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Tcr.u64) },
    { HV_SYS_REG_TTBR0_EL1, CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Ttbr0.u64) },
    { HV_SYS_REG_TTBR1_EL1, CPUMCTX_EXTRN_SCTLR_TCR_TTBR, RT_UOFFSETOF(CPUMCTX, Ttbr1.u64) },
    { HV_SYS_REG_VBAR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, VBar.u64) },
    { HV_SYS_REG_AFSR0_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Afsr0.u64) },
    { HV_SYS_REG_AFSR1_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Afsr1.u64) },
    { HV_SYS_REG_AMAIR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Amair.u64) },
    { HV_SYS_REG_CNTKCTL_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, CntKCtl.u64) },
    { HV_SYS_REG_CONTEXTIDR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, ContextIdr.u64) },
    { HV_SYS_REG_CPACR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Cpacr.u64) },
    { HV_SYS_REG_CSSELR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Csselr.u64) },
    { HV_SYS_REG_ESR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Esr.u64) },
    { HV_SYS_REG_FAR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Far.u64) },
    { HV_SYS_REG_MAIR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Mair.u64) },
    { HV_SYS_REG_PAR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, Par.u64) },
    { HV_SYS_REG_TPIDRRO_EL0, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, TpIdrRoEl0.u64) },
    { HV_SYS_REG_TPIDR_EL0, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, aTpIdr[0].u64) },
    { HV_SYS_REG_TPIDR_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, aTpIdr[1].u64) },
    { HV_SYS_REG_MDCCINT_EL1, CPUMCTX_EXTRN_SYSREG_MISC, RT_UOFFSETOF(CPUMCTX, MDccInt.u64) }
};
/** EL2 support system registers. */
static const struct
{
    uint16_t idSysReg;
    uint32_t offCpumCtx;
} s_aCpumEl2SysRegs[] =
{
    { ARMV8_AARCH64_SYSREG_CNTHCTL_EL2, RT_UOFFSETOF(CPUMCTX, CntHCtlEl2.u64) },
    { ARMV8_AARCH64_SYSREG_CNTHP_CTL_EL2, RT_UOFFSETOF(CPUMCTX, CntHpCtlEl2.u64) },
    { ARMV8_AARCH64_SYSREG_CNTHP_CVAL_EL2, RT_UOFFSETOF(CPUMCTX, CntHpCValEl2.u64) },
    { ARMV8_AARCH64_SYSREG_CNTHP_TVAL_EL2, RT_UOFFSETOF(CPUMCTX, CntHpTValEl2.u64) },
    { ARMV8_AARCH64_SYSREG_CNTVOFF_EL2, RT_UOFFSETOF(CPUMCTX, CntVOffEl2.u64) },
    { ARMV8_AARCH64_SYSREG_CPTR_EL2, RT_UOFFSETOF(CPUMCTX, CptrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_ELR_EL2, RT_UOFFSETOF(CPUMCTX, ElrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_ESR_EL2, RT_UOFFSETOF(CPUMCTX, EsrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_FAR_EL2, RT_UOFFSETOF(CPUMCTX, FarEl2.u64) },
    { ARMV8_AARCH64_SYSREG_HCR_EL2, RT_UOFFSETOF(CPUMCTX, HcrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_HPFAR_EL2, RT_UOFFSETOF(CPUMCTX, HpFarEl2.u64) },
    { ARMV8_AARCH64_SYSREG_MAIR_EL2, RT_UOFFSETOF(CPUMCTX, MairEl2.u64) },
    //{ ARMV8_AARCH64_SYSREG_MDCR_EL2, RT_UOFFSETOF(CPUMCTX, MdcrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_SCTLR_EL2, RT_UOFFSETOF(CPUMCTX, SctlrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_SPSR_EL2, RT_UOFFSETOF(CPUMCTX, SpsrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_SP_EL2, RT_UOFFSETOF(CPUMCTX, SpEl2.u64) },
    { ARMV8_AARCH64_SYSREG_TCR_EL2, RT_UOFFSETOF(CPUMCTX, TcrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_TPIDR_EL2, RT_UOFFSETOF(CPUMCTX, TpidrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_TTBR0_EL2, RT_UOFFSETOF(CPUMCTX, Ttbr0El2.u64) },
    { ARMV8_AARCH64_SYSREG_TTBR1_EL2, RT_UOFFSETOF(CPUMCTX, Ttbr1El2.u64) },
    { ARMV8_AARCH64_SYSREG_VBAR_EL2, RT_UOFFSETOF(CPUMCTX, VBarEl2.u64) },
    { ARMV8_AARCH64_SYSREG_VMPIDR_EL2, RT_UOFFSETOF(CPUMCTX, VMpidrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_VPIDR_EL2, RT_UOFFSETOF(CPUMCTX, VPidrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_VTCR_EL2, RT_UOFFSETOF(CPUMCTX, VTcrEl2.u64) },
    { ARMV8_AARCH64_SYSREG_VTTBR_EL2, RT_UOFFSETOF(CPUMCTX, VTtbrEl2.u64) }
};
/** ID registers. */
static const struct
{
    hv_feature_reg_t enmHvReg;
    uint32_t offIdStruct;
} s_aIdRegs[] =
{
    { HV_FEATURE_REG_ID_AA64DFR0_EL1, RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Dfr0El1) },
    { HV_FEATURE_REG_ID_AA64DFR1_EL1, RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Dfr1El1) },
    { HV_FEATURE_REG_ID_AA64ISAR0_EL1, RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Isar0El1) },
    { HV_FEATURE_REG_ID_AA64ISAR1_EL1, RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Isar1El1) },
    { HV_FEATURE_REG_ID_AA64MMFR0_EL1, RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Mmfr0El1) },
    { HV_FEATURE_REG_ID_AA64MMFR1_EL1, RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Mmfr1El1) },
    { HV_FEATURE_REG_ID_AA64MMFR2_EL1, RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Mmfr2El1) },
    { HV_FEATURE_REG_ID_AA64PFR0_EL1, RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Pfr0El1) },
    { HV_FEATURE_REG_ID_AA64PFR1_EL1, RT_UOFFSETOF(CPUMIDREGS, u64RegIdAa64Pfr1El1) },
    { HV_FEATURE_REG_CLIDR_EL1, RT_UOFFSETOF(CPUMIDREGS, u64RegClidrEl1) },
    { HV_FEATURE_REG_CTR_EL0, RT_UOFFSETOF(CPUMIDREGS, u64RegCtrEl0) },
    { HV_FEATURE_REG_DCZID_EL0, RT_UOFFSETOF(CPUMIDREGS, u64RegDczidEl0) }
};


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/


/**
 * Converts a HV return code to a VBox status code.
 *
 * @returns VBox status code.
 * @param   hrc     The HV return code to convert.
 */
DECLINLINE(int) nemR3DarwinHvSts2Rc(hv_return_t hrc)
{
    if (hrc == HV_SUCCESS)
        return VINF_SUCCESS;

    switch (hrc)
    {
        case HV_ERROR:        return VERR_INVALID_STATE;
        case HV_BUSY:         return VERR_RESOURCE_BUSY;
        case HV_BAD_ARGUMENT: return VERR_INVALID_PARAMETER;
        case HV_NO_RESOURCES: return VERR_OUT_OF_RESOURCES;
        case HV_NO_DEVICE:    return VERR_NOT_FOUND;
        case HV_UNSUPPORTED:  return VERR_NOT_SUPPORTED;
    }

    return VERR_IPE_UNEXPECTED_STATUS;
}
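
/*
 * Typical call-site pattern (a sketch of what the workers below do): convert
 * right after the Hypervisor.framework call so the rest of the code only has
 * to deal with VBox status codes:
 *     hv_return_t hrc = hv_vm_protect(GCPhys, cb, fHvMemProt);
 *     if (hrc != HV_SUCCESS)
 *         return nemR3DarwinHvSts2Rc(hrc);
 */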


/**
 * Returns a human readable string of the given exception class.
 *
 * @returns Pointer to the string matching the given EC.
 * @param   u32Ec   The exception class to return the string for.
 */
static const char *nemR3DarwinEsrEl2EcStringify(uint32_t u32Ec)
{
    switch (u32Ec)
    {
#define ARMV8_EC_CASE(a_Ec) case a_Ec: return #a_Ec
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_UNKNOWN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_TRAPPED_WFX);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCR_MRC_COPROC_15);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCRR_MRRC_COPROC15);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MCR_MRC_COPROC_14);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_LDC_STC);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_SME_SVE_NEON);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_VMRS);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_PA_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_LS64_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_MRRC_COPROC14);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_BTI_BRANCH_TARGET_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_ILLEGAL_EXECUTION_STATE);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_SVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_HVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_SMC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_SVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_HVC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_SMC_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_SYS_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_SVE_TRAPPED);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_PAUTH_NV_TRAPPED_ERET_ERETAA_ERETAB);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_TME_TSTART_INSN_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_FPAC_PA_INSN_FAILURE_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_SME_TRAPPED_SME_ACCESS);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_RME_GRANULE_PROT_CHECK_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_INSN_ABORT_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_INSN_ABORT_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_PC_ALIGNMENT_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_DATA_ABORT_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_DATA_ABORT_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SP_ALIGNMENT_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_MOPS_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_TRAPPED_FP_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_FP_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SERROR_INTERRUPT);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_BKPT_EXCEPTION_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_BKPT_EXCEPTION_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SS_EXCEPTION_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_SS_EXCEPTION_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_WATCHPOINT_EXCEPTION_FROM_LOWER_EL);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_WATCHPOINT_EXCEPTION_FROM_EL2);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_BKPT_INSN);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH32_VEC_CATCH_EXCEPTION);
        ARMV8_EC_CASE(ARMV8_ESR_EL2_EC_AARCH64_BRK_INSN);
#undef ARMV8_EC_CASE
        default:
            break;
    }

    return "<INVALID>";
}


/**
 * Resolves a NEM page state from the given protection flags.
 *
 * @returns NEM page state.
 * @param   fPageProt   The page protection flags.
 */
DECLINLINE(uint8_t) nemR3DarwinPageStateFromProt(uint32_t fPageProt)
{
    switch (fPageProt)
    {
        case NEM_PAGE_PROT_NONE:
            return NEM_DARWIN_PAGE_STATE_UNMAPPED;
        case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE:
            return NEM_DARWIN_PAGE_STATE_RX;
        case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE:
            return NEM_DARWIN_PAGE_STATE_RW;
        case NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE:
            return NEM_DARWIN_PAGE_STATE_RWX;
        default:
            break;
    }

    AssertLogRelMsgFailed(("Invalid combination of page protection flags %#x, can't map to page state!\n", fPageProt));
    return NEM_DARWIN_PAGE_STATE_UNMAPPED;
}


/**
 * Unmaps the given guest physical address range (page aligned).
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The guest physical address to start unmapping at.
 * @param   cb          The size of the range to unmap in bytes.
 * @param   pu2State    Where to store the new state of the unmapped page, optional.
 */
DECLINLINE(int) nemR3DarwinUnmap(PVM pVM, RTGCPHYS GCPhys, size_t cb, uint8_t *pu2State)
{
    if (*pu2State <= NEM_DARWIN_PAGE_STATE_UNMAPPED)
    {
        Log5(("nemR3DarwinUnmap: %RGp == unmapped\n", GCPhys));
        *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
        return VINF_SUCCESS;
    }

    LogFlowFunc(("Unmapping %RGp LB %zu\n", GCPhys, cb));
    hv_return_t hrc = hv_vm_unmap(GCPhys, cb);
    if (RT_LIKELY(hrc == HV_SUCCESS))
    {
        STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPage);
        if (pu2State)
            *pu2State = NEM_DARWIN_PAGE_STATE_UNMAPPED;
        Log5(("nemR3DarwinUnmap: %RGp => unmapped\n", GCPhys));
        return VINF_SUCCESS;
    }

    STAM_REL_COUNTER_INC(&pVM->nem.s.StatUnmapPageFailed);
    LogRel(("nemR3DarwinUnmap(%RGp): failed! hrc=%#x\n",
            GCPhys, hrc));
    return VERR_NEM_IPE_6;
}


/**
 * Maps a given guest physical address range backed by the given memory with the given
 * protection flags.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The guest physical address to start mapping.
 * @param   pvRam       The R3 pointer of the memory to back the range with.
 * @param   cb          The size of the range, page aligned.
 * @param   fPageProt   The page protection flags to use for this range, combination of NEM_PAGE_PROT_XXX
 * @param   pu2State    Where to store the state for the new page, optional.
 */
DECLINLINE(int) nemR3DarwinMap(PVM pVM, RTGCPHYS GCPhys, const void *pvRam, size_t cb, uint32_t fPageProt, uint8_t *pu2State)
{
    LogFlowFunc(("Mapping %RGp LB %zu fProt=%#x\n", GCPhys, cb, fPageProt));

    Assert(fPageProt != NEM_PAGE_PROT_NONE);
    RT_NOREF(pVM);

    hv_memory_flags_t fHvMemProt = 0;
    if (fPageProt & NEM_PAGE_PROT_READ)
        fHvMemProt |= HV_MEMORY_READ;
    if (fPageProt & NEM_PAGE_PROT_WRITE)
        fHvMemProt |= HV_MEMORY_WRITE;
    if (fPageProt & NEM_PAGE_PROT_EXECUTE)
        fHvMemProt |= HV_MEMORY_EXEC;

    hv_return_t hrc = hv_vm_map((void *)pvRam, GCPhys, cb, fHvMemProt);
    if (hrc == HV_SUCCESS)
    {
        if (pu2State)
            *pu2State = nemR3DarwinPageStateFromProt(fPageProt);
        return VINF_SUCCESS;
    }

    return nemR3DarwinHvSts2Rc(hrc);
}


/**
 * Changes the protection flags for the given guest physical address range.
 *
 * @returns VBox status code.
 * @param   GCPhys      The guest physical address to start mapping.
 * @param   cb          The size of the range, page aligned.
 * @param   fPageProt   The page protection flags to use for this range, combination of NEM_PAGE_PROT_XXX
 * @param   pu2State    Where to store the state for the new page, optional.
 */
DECLINLINE(int) nemR3DarwinProtect(RTGCPHYS GCPhys, size_t cb, uint32_t fPageProt, uint8_t *pu2State)
{
    hv_memory_flags_t fHvMemProt = 0;
    if (fPageProt & NEM_PAGE_PROT_READ)
        fHvMemProt |= HV_MEMORY_READ;
    if (fPageProt & NEM_PAGE_PROT_WRITE)
        fHvMemProt |= HV_MEMORY_WRITE;
    if (fPageProt & NEM_PAGE_PROT_EXECUTE)
        fHvMemProt |= HV_MEMORY_EXEC;

    hv_return_t hrc = hv_vm_protect(GCPhys, cb, fHvMemProt);
    if (hrc == HV_SUCCESS)
    {
        if (pu2State)
            *pu2State = nemR3DarwinPageStateFromProt(fPageProt);
        return VINF_SUCCESS;
    }

    LogRel(("nemR3DarwinProtect(%RGp,%zu,%#x): failed! hrc=%#x\n",
            GCPhys, cb, fPageProt, hrc));
    return nemR3DarwinHvSts2Rc(hrc);
}


#ifdef LOG_ENABLED
/**
 * Logs the current CPU state.
 */
static void nemR3DarwinLogState(PVMCC pVM, PVMCPUCC pVCpu)
{
    if (LogIs3Enabled())
    {
        char szRegs[4096];
        DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
                        "x0=%016VR{x0} x1=%016VR{x1} x2=%016VR{x2} x3=%016VR{x3}\n"
                        "x4=%016VR{x4} x5=%016VR{x5} x6=%016VR{x6} x7=%016VR{x7}\n"
                        "x8=%016VR{x8} x9=%016VR{x9} x10=%016VR{x10} x11=%016VR{x11}\n"
                        "x12=%016VR{x12} x13=%016VR{x13} x14=%016VR{x14} x15=%016VR{x15}\n"
                        "x16=%016VR{x16} x17=%016VR{x17} x18=%016VR{x18} x19=%016VR{x19}\n"
                        "x20=%016VR{x20} x21=%016VR{x21} x22=%016VR{x22} x23=%016VR{x23}\n"
                        "x24=%016VR{x24} x25=%016VR{x25} x26=%016VR{x26} x27=%016VR{x27}\n"
                        "x28=%016VR{x28} x29=%016VR{x29} x30=%016VR{x30}\n"
                        "pc=%016VR{pc} pstate=%016VR{pstate}\n"
                        "sp_el0=%016VR{sp_el0} sp_el1=%016VR{sp_el1} elr_el1=%016VR{elr_el1}\n"
                        "sctlr_el1=%016VR{sctlr_el1} tcr_el1=%016VR{tcr_el1}\n"
                        "ttbr0_el1=%016VR{ttbr0_el1} ttbr1_el1=%016VR{ttbr1_el1}\n"
                        "vbar_el1=%016VR{vbar_el1}\n"
                        );
        char szInstr[256]; RT_ZERO(szInstr);
#if 0
        DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
                           DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
                           szInstr, sizeof(szInstr), NULL);
#endif
        Log3(("%s%s\n", szRegs, szInstr));

        if (pVM->nem.s.fEl2Enabled)
        {
            DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
                            "sp_el2=%016VR{sp_el2} elr_el2=%016VR{elr_el2}\n"
                            "spsr_el2=%016VR{spsr_el2} tpidr_el2=%016VR{tpidr_el2}\n"
                            "sctlr_el2=%016VR{sctlr_el2} tcr_el2=%016VR{tcr_el2}\n"
                            "ttbr0_el2=%016VR{ttbr0_el2} ttbr1_el2=%016VR{ttbr1_el2}\n"
                            "esr_el2=%016VR{esr_el2} far_el2=%016VR{far_el2}\n"
                            "hcr_el2=%016VR{hcr_el2} tcr_el2=%016VR{tcr_el2}\n"
                            "vbar_el2=%016VR{vbar_el2} cptr_el2=%016VR{cptr_el2}\n"
                            );
        }
        Log3(("%s\n", szRegs));
    }
}
#endif /* LOG_ENABLED */


static int nemR3DarwinCopyStateFromHv(PVMCC pVM, PVMCPUCC pVCpu, uint64_t fWhat)
{
    RT_NOREF(pVM);

    hv_return_t hrc = hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CTL_EL0, &pVCpu->cpum.GstCtx.CntvCtlEl0);
    if (hrc == HV_SUCCESS)
        hrc = hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CVAL_EL0, &pVCpu->cpum.GstCtx.CntvCValEl0);

    if (   hrc == HV_SUCCESS
        && (fWhat & (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR)))
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumRegs); i++)
        {
            if (s_aCpumRegs[i].fCpumExtrn & fWhat)
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumRegs[i].offCpumCtx);
                hrc |= hv_vcpu_get_reg(pVCpu->nem.s.hVCpu, s_aCpumRegs[i].enmHvReg, pu64);
            }
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & CPUMCTX_EXTRN_V0_V31))
    {
        /* SIMD/FP registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumFpRegs); i++)
        {
            hv_simd_fp_uchar16_t *pu128 = (hv_simd_fp_uchar16_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumFpRegs[i].offCpumCtx);
            hrc |= hv_vcpu_get_simd_fp_reg(pVCpu->nem.s.hVCpu, s_aCpumFpRegs[i].enmHvReg, pu128);
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & CPUMCTX_EXTRN_SYSREG_DEBUG))
    {
        /* Debug registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumDbgRegs); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumDbgRegs[i].offCpumCtx);
            hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumDbgRegs[i].enmHvReg, pu64);
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS))
    {
        /* PAuth key registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumPAuthKeyRegs); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumPAuthKeyRegs[i].offCpumCtx);
            hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumPAuthKeyRegs[i].enmHvReg, pu64);
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR | CPUMCTX_EXTRN_SYSREG_MISC)))
    {
        /* System registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegs); i++)
        {
            if (s_aCpumSysRegs[i].fCpumExtrn & fWhat)
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumSysRegs[i].offCpumCtx);
                hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumSysRegs[i].enmHvReg, pu64);
            }
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & CPUMCTX_EXTRN_SYSREG_EL2)
        && pVM->nem.s.fEl2Enabled)
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumEl2SysRegs); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumEl2SysRegs[i].offCpumCtx);
            hrc |= hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, (hv_sys_reg_t)s_aCpumEl2SysRegs[i].idSysReg, pu64);
        }
    }

    if (   hrc == HV_SUCCESS
        && (fWhat & CPUMCTX_EXTRN_PSTATE))
    {
        uint64_t u64Tmp;
        hrc |= hv_vcpu_get_reg(pVCpu->nem.s.hVCpu, HV_REG_CPSR, &u64Tmp);
        if (hrc == HV_SUCCESS)
            pVCpu->cpum.GstCtx.fPState = (uint32_t)u64Tmp;
    }

    /* Almost done, just update extern flags. */
    pVCpu->cpum.GstCtx.fExtrn &= ~fWhat;
    if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
        pVCpu->cpum.GstCtx.fExtrn = 0;

    return nemR3DarwinHvSts2Rc(hrc);
}
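
/*
 * Note on the fExtrn convention used above and in the export worker below:
 * a set CPUMCTX_EXTRN_XXX bit means that piece of guest state still lives
 * only in Hypervisor.framework; once a register has been fetched its bit is
 * cleared, making the CPUMCTX copy authoritative until the state is exported
 * again before the next run.
 */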


/**
 * Exports the guest state to HV for execution.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling EMT.
 */
static int nemR3DarwinExportGuestState(PVMCC pVM, PVMCPUCC pVCpu)
{
    RT_NOREF(pVM);
    hv_return_t hrc = HV_SUCCESS;

    if (   (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR))
        != (CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_PC | CPUMCTX_EXTRN_FPCR | CPUMCTX_EXTRN_FPSR))
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumRegs); i++)
        {
            if (!(s_aCpumRegs[i].fCpumExtrn & pVCpu->cpum.GstCtx.fExtrn))
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumRegs[i].offCpumCtx);
                hrc |= hv_vcpu_set_reg(pVCpu->nem.s.hVCpu, s_aCpumRegs[i].enmHvReg, *pu64);
            }
        }
    }

    if (   hrc == HV_SUCCESS
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_V0_V31))
    {
        /* SIMD/FP registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumFpRegs); i++)
        {
            hv_simd_fp_uchar16_t *pu128 = (hv_simd_fp_uchar16_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumFpRegs[i].offCpumCtx);
            hrc |= hv_vcpu_set_simd_fp_reg(pVCpu->nem.s.hVCpu, s_aCpumFpRegs[i].enmHvReg, *pu128);
        }
    }

    if (   hrc == HV_SUCCESS
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_SYSREG_DEBUG))
    {
        /* Debug registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumDbgRegs); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumDbgRegs[i].offCpumCtx);
            hrc |= hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumDbgRegs[i].enmHvReg, *pu64);
        }
    }

    if (   hrc == HV_SUCCESS
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_SYSREG_PAUTH_KEYS))
    {
        /* PAuth key registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumPAuthKeyRegs); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumPAuthKeyRegs[i].offCpumCtx);
            hrc |= hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumPAuthKeyRegs[i].enmHvReg, *pu64);
        }
    }

    if (   hrc == HV_SUCCESS
        &&    (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR | CPUMCTX_EXTRN_SYSREG_MISC))
           != (CPUMCTX_EXTRN_SPSR | CPUMCTX_EXTRN_ELR | CPUMCTX_EXTRN_SP | CPUMCTX_EXTRN_SCTLR_TCR_TTBR | CPUMCTX_EXTRN_SYSREG_MISC))
    {
        /* System registers. */
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumSysRegs); i++)
        {
            if (!(s_aCpumSysRegs[i].fCpumExtrn & pVCpu->cpum.GstCtx.fExtrn))
            {
                uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumSysRegs[i].offCpumCtx);
                hrc |= hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, s_aCpumSysRegs[i].enmHvReg, *pu64);
            }
        }
    }

    if (   hrc == HV_SUCCESS
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_SYSREG_EL2)
        && pVM->nem.s.fEl2Enabled)
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(s_aCpumEl2SysRegs); i++)
        {
            uint64_t *pu64 = (uint64_t *)((uint8_t *)&pVCpu->cpum.GstCtx + s_aCpumEl2SysRegs[i].offCpumCtx);
            hrc |= hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, (hv_sys_reg_t)s_aCpumEl2SysRegs[i].idSysReg, *pu64);
            Assert(hrc == HV_SUCCESS);
        }
    }

    if (   hrc == HV_SUCCESS
        && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_PSTATE))
        hrc = hv_vcpu_set_reg(pVCpu->nem.s.hVCpu, HV_REG_CPSR, pVCpu->cpum.GstCtx.fPState);

    pVCpu->cpum.GstCtx.fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_NEM;
    return nemR3DarwinHvSts2Rc(hrc);
}


/**
 * Worker for nemR3NativeInit that loads the Hypervisor.framework shared library.
 *
 * @returns VBox status code.
 * @param   pErrInfo    Where to always return error info.
 */
static int nemR3DarwinLoadHv(PRTERRINFO pErrInfo)
{
    RTLDRMOD hMod = NIL_RTLDRMOD;
    static const char *s_pszHvPath = "/System/Library/Frameworks/Hypervisor.framework/Hypervisor";

    int rc = RTLdrLoadEx(s_pszHvPath, &hMod, RTLDRLOAD_FLAGS_NO_UNLOAD | RTLDRLOAD_FLAGS_NO_SUFFIX, pErrInfo);
    if (RT_SUCCESS(rc))
    {
        for (unsigned i = 0; i < RT_ELEMENTS(g_aImports); i++)
        {
            int rc2 = RTLdrGetSymbol(hMod, g_aImports[i].pszName, (void **)g_aImports[i].ppfn);
            if (RT_SUCCESS(rc2))
            {
                LogRel(("NEM: info: Found optional import Hypervisor!%s.\n",
                        g_aImports[i].pszName));
            }
            else
            {
                *g_aImports[i].ppfn = NULL;

                LogRel(("NEM: info: Failed to import Hypervisor!%s: %Rrc\n",
                        g_aImports[i].pszName, rc2));
            }
        }
        if (RT_SUCCESS(rc))
        {
            Assert(!RTErrInfoIsSet(pErrInfo));
        }

        RTLdrClose(hMod);
    }
    else
    {
        RTErrInfoAddF(pErrInfo, rc, "Failed to load Hypervisor.framework: %s: %Rrc", s_pszHvPath, rc);
        rc = VERR_NEM_INIT_FAILED;
    }

    return rc;
}


/**
 * Dumps some GIC information to the release log.
 */
static void nemR3DarwinDumpGicInfo(void)
{
    size_t val = 0;
    hv_return_t hrc = hv_gic_get_redistributor_size(&val);
    LogRel(("GICNem: hv_gic_get_redistributor_size() -> hrc=%#x / size=%zu\n", hrc, val));
    hrc = hv_gic_get_distributor_size(&val);
    LogRel(("GICNem: hv_gic_get_distributor_size() -> hrc=%#x / size=%zu\n", hrc, val));
    hrc = hv_gic_get_distributor_base_alignment(&val);
    LogRel(("GICNem: hv_gic_get_distributor_base_alignment() -> hrc=%#x / size=%zu\n", hrc, val));
    hrc = hv_gic_get_redistributor_base_alignment(&val);
    LogRel(("GICNem: hv_gic_get_redistributor_base_alignment() -> hrc=%#x / size=%zu\n", hrc, val));
    hrc = hv_gic_get_msi_region_base_alignment(&val);
    LogRel(("GICNem: hv_gic_get_msi_region_base_alignment() -> hrc=%#x / size=%zu\n", hrc, val));
    hrc = hv_gic_get_msi_region_size(&val);
    LogRel(("GICNem: hv_gic_get_msi_region_size() -> hrc=%#x / size=%zu\n", hrc, val));
    uint32_t u32SpiIntIdBase = 0;
    uint32_t cSpiIntIds = 0;
    hrc = hv_gic_get_spi_interrupt_range(&u32SpiIntIdBase, &cSpiIntIds);
    LogRel(("GICNem: hv_gic_get_spi_interrupt_range() -> hrc=%#x / SpiIntIdBase=%u, cSpiIntIds=%u\n", hrc, u32SpiIntIdBase, cSpiIntIds));

    uint32_t u32IntId = 0;
    hrc = hv_gic_get_intid(HV_GIC_INT_EL1_PHYSICAL_TIMER, &u32IntId);
    LogRel(("GICNem: hv_gic_get_intid(HV_GIC_INT_EL1_PHYSICAL_TIMER) -> hrc=%#x / IntId=%u\n", hrc, u32IntId));
    hrc = hv_gic_get_intid(HV_GIC_INT_EL1_VIRTUAL_TIMER, &u32IntId);
    LogRel(("GICNem: hv_gic_get_intid(HV_GIC_INT_EL1_VIRTUAL_TIMER) -> hrc=%#x / IntId=%u\n", hrc, u32IntId));
    hrc = hv_gic_get_intid(HV_GIC_INT_EL2_PHYSICAL_TIMER, &u32IntId);
    LogRel(("GICNem: hv_gic_get_intid(HV_GIC_INT_EL2_PHYSICAL_TIMER) -> hrc=%#x / IntId=%u\n", hrc, u32IntId));
    hrc = hv_gic_get_intid(HV_GIC_INT_MAINTENANCE, &u32IntId);
    LogRel(("GICNem: hv_gic_get_intid(HV_GIC_INT_MAINTENANCE) -> hrc=%#x / IntId=%u\n", hrc, u32IntId));
    hrc = hv_gic_get_intid(HV_GIC_INT_PERFORMANCE_MONITOR, &u32IntId);
    LogRel(("GICNem: hv_gic_get_intid(HV_GIC_INT_PERFORMANCE_MONITOR) -> hrc=%#x / IntId=%u\n", hrc, u32IntId));
}


/**
 * Sets the given SPI inside the in-kernel GIC provided by Hypervisor.framework.
 *
 * @returns VBox status code.
 * @param   pVM         The VM instance.
 * @param   uIntId      The SPI ID to update.
 * @param   fAsserted   Flag whether the interrupt is asserted (true) or not (false).
 */
VMMR3_INT_DECL(int) GICR3NemSpiSet(PVMCC pVM, uint32_t uIntId, bool fAsserted)
{
    RT_NOREF(pVM);
    Assert(hv_gic_set_spi);

    hv_return_t hrc = hv_gic_set_spi(uIntId + GIC_INTID_RANGE_SPI_START, fAsserted);
    return nemR3DarwinHvSts2Rc(hrc);
}


/**
 * Sets the given PPI inside the in-kernel GIC provided by Hypervisor.framework.
 *
 * @returns VBox status code.
 * @param   pVCpu       The vCPU for which the PPI state is updated.
 * @param   uIntId      The PPI ID to update.
 * @param   fAsserted   Flag whether the interrupt is asserted (true) or not (false).
 */
VMMR3_INT_DECL(int) GICR3NemPpiSet(PVMCPUCC pVCpu, uint32_t uIntId, bool fAsserted)
{
    RT_NOREF(pVCpu, uIntId, fAsserted);

    /* Should never be called as the PPIs are handled entirely in Hypervisor.framework/AppleHV. */
    AssertFailed();
    return VERR_NEM_IPE_9;
}


static int nemR3DarwinGicCreate(PVM pVM)
{
    nemR3DarwinDumpGicInfo();

    //PCFGMNODE pGicDev = CFGMR3GetChild(CFGMR3GetRoot(pVM), "Devices/gic/0");
    PCFGMNODE pGicCfg = CFGMR3GetChild(CFGMR3GetRoot(pVM), "Devices/gic-nem/0/Config");

    hv_gic_config_t hGicCfg = hv_gic_config_create();

    /*
     * Query the MMIO ranges.
     */
    RTGCPHYS GCPhysMmioBaseDist = 0;
    int rc = CFGMR3QueryU64(pGicCfg, "DistributorMmioBase", &GCPhysMmioBaseDist);
    if (RT_FAILURE(rc))
        return VMSetError(pVM, rc, RT_SRC_POS,
                          "Configuration error: Failed to get the \"DistributorMmioBase\" value\n");

    RTGCPHYS GCPhysMmioBaseReDist = 0;
    rc = CFGMR3QueryU64(pGicCfg, "RedistributorMmioBase", &GCPhysMmioBaseReDist);
    if (RT_FAILURE(rc))
        return VMSetError(pVM, rc, RT_SRC_POS,
                          "Configuration error: Failed to get the \"RedistributorMmioBase\" value\n");

    hv_return_t hrc = hv_gic_config_set_distributor_base(hGicCfg, GCPhysMmioBaseDist);
    if (hrc != HV_SUCCESS)
        return nemR3DarwinHvSts2Rc(hrc);

    hrc = hv_gic_config_set_redistributor_base(hGicCfg, GCPhysMmioBaseReDist);
    if (hrc != HV_SUCCESS)
        return nemR3DarwinHvSts2Rc(hrc);

    hrc = hv_gic_create(hGicCfg);
    os_release(hGicCfg);
    if (hrc != HV_SUCCESS)
        return nemR3DarwinHvSts2Rc(hrc);

    /* Make sure the device is not instantiated as Hypervisor.framework provides it. */
    //CFGMR3RemoveNode(pGicDev);
    return rc;
}


/**
 * Try initialize the native API.
 *
 * This may only do part of the job, more can be done in
 * nemR3NativeInitAfterCPUM() and nemR3NativeInitCompleted().
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   fFallback   Whether we're in fallback mode or use-NEM mode. In
 *                      the latter we'll fail if we cannot initialize.
 * @param   fForced     Whether the HMForced flag is set and we should
 *                      fail if we cannot initialize.
 */
int nemR3NativeInit(PVM pVM, bool fFallback, bool fForced)
{
    AssertReturn(!pVM->nem.s.fCreatedVm, VERR_WRONG_ORDER);

    /*
     * Some state init.
     */
    PCFGMNODE pCfgNem = CFGMR3GetChild(CFGMR3GetRoot(pVM), "NEM/");
    RT_NOREF(pCfgNem);

    /*
     * Error state.
     * The error message will be non-empty on failure and 'rc' will be set too.
     */
    RTERRINFOSTATIC ErrInfo;
    PRTERRINFO pErrInfo = RTErrInfoInitStatic(&ErrInfo);

    /* Resolve optional imports */
    int rc = nemR3DarwinLoadHv(pErrInfo);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Need to enable nested virt here if supported and reset the CFGM value to false
     * if not supported. This ASSUMES that NEM is initialized before CPUM.
     */
    PCFGMNODE pCfgCpum = CFGMR3GetChild(CFGMR3GetRoot(pVM), "CPUM/");
    hv_vm_config_t hVmCfg = hv_vm_config_create();

    if (hv_vm_config_get_el2_supported)
    {
        bool fHvEl2Supported = false;
        hv_return_t hrc = hv_vm_config_get_el2_supported(&fHvEl2Supported);
        if (   hrc == HV_SUCCESS
            && fHvEl2Supported)
        {
            /** @cfgm{/CPUM/NestedHWVirt, bool, false}
             * Whether to expose the hardware virtualization (EL2/VHE) feature to the guest.
             * The default is false. Only supported on M3 and later and macOS 15.0+ (Sequoia).
             */
            bool fNestedHWVirt = false;
            rc = CFGMR3QueryBoolDef(pCfgCpum, "NestedHWVirt", &fNestedHWVirt, false);
            AssertLogRelRCReturn(rc, rc);
            if (fNestedHWVirt)
            {
                hrc = hv_vm_config_set_el2_enabled(hVmCfg, fNestedHWVirt);
                if (hrc != HV_SUCCESS)
                    return VMSetError(pVM, VERR_CPUM_INVALID_HWVIRT_CONFIG, RT_SRC_POS,
                                      "Cannot enable nested virtualization (hrc=%#x)!\n", hrc);
                else
                {
                    pVM->nem.s.fEl2Enabled = true;
                    LogRel(("NEM: Enabled nested virtualization (EL2) support\n"));
                }
            }
        }
        else
        {
            /* Ensure nested virt is not set. */
            rc = CFGMR3RemoveValue(pCfgCpum, "NestedHWVirt");

            LogRel(("NEM: The host doesn't support nested virtualization! (hrc=%#x fHvEl2Supported=%RTbool)\n",
                    hrc, fHvEl2Supported));
        }
    }
    else
    {
        /* Ensure nested virt is not set. */
        rc = CFGMR3RemoveValue(pCfgCpum, "NestedHWVirt");
        LogRel(("NEM: Hypervisor.framework doesn't support nested virtualization!\n"));
    }

    hv_return_t hrc = hv_vm_create(hVmCfg);
    os_release(hVmCfg);
    if (hrc == HV_SUCCESS)
    {
        pVM->nem.s.fCreatedVm = true;
        pVM->nem.s.u64CntFrqHz = ASMReadCntFrqEl0();

        /* Will be initialized in NEMHCResumeCpuTickOnAll() before executing guest code. */
        pVM->nem.s.u64VTimerOff = 0;

        VM_SET_MAIN_EXECUTION_ENGINE(pVM, VM_EXEC_ENGINE_NATIVE_API);
        Log(("NEM: Marked active!\n"));
        PGMR3EnableNemMode(pVM);
    }
    else
        rc = RTErrInfoSetF(pErrInfo, VERR_NEM_INIT_FAILED,
                           "hv_vm_create() failed: %#x", hrc);

    /*
     * We only fail if in forced mode, otherwise just log the complaint and return.
     */
    Assert(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API || RTErrInfoIsSet(pErrInfo));
    if (   (fForced || !fFallback)
        && pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NATIVE_API)
        return VMSetError(pVM, RT_SUCCESS_NP(rc) ? VERR_NEM_NOT_AVAILABLE : rc, RT_SRC_POS, "%s", pErrInfo->pszMsg);

    if (RTErrInfoIsSet(pErrInfo))
        LogRel(("NEM: Not available: %s\n", pErrInfo->pszMsg));
    return VINF_SUCCESS;
}
1323
1324
1325/**
1326 * Worker to create the vCPU handle on the EMT running it later on (as required by HV).
1327 *
1328 * @returns VBox status code
1329 * @param pVM The VM handle.
1330 * @param pVCpu The vCPU handle.
1331 * @param idCpu ID of the CPU to create.
1332 */
1333static DECLCALLBACK(int) nemR3DarwinNativeInitVCpuOnEmt(PVM pVM, PVMCPU pVCpu, VMCPUID idCpu)
1334{
1335 if (idCpu == 0)
1336 {
1337 Assert(pVM->nem.s.hVCpuCfg == NULL);
1338
1339 /* Create a new vCPU config and query the ID registers. */
1340 pVM->nem.s.hVCpuCfg = hv_vcpu_config_create();
1341 if (!pVM->nem.s.hVCpuCfg)
1342 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
1343 "Call to hv_vcpu_config_create failed on vCPU %u", idCpu);
1344
1345 /* Query ID registers and hand them to CPUM. */
1346 CPUMIDREGS IdRegs; RT_ZERO(IdRegs);
1347 for (uint32_t i = 0; i < RT_ELEMENTS(s_aIdRegs); i++)
1348 {
1349 uint64_t *pu64 = (uint64_t *)((uint8_t *)&IdRegs + s_aIdRegs[i].offIdStruct);
1350 hv_return_t hrc = hv_vcpu_config_get_feature_reg(pVM->nem.s.hVCpuCfg, s_aIdRegs[i].enmHvReg, pu64);
1351 if (hrc != HV_SUCCESS)
1352 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
1353 "Call to hv_vcpu_get_feature_reg(, %#x, ) failed: %#x (%Rrc)", hrc, nemR3DarwinHvSts2Rc(hrc));
1354 }
1355
1356 int rc = CPUMR3PopulateFeaturesByIdRegisters(pVM, &IdRegs);
1357 if (RT_FAILURE(rc))
1358 return rc;
1359 }
1360
1361 hv_return_t hrc = hv_vcpu_create(&pVCpu->nem.s.hVCpu, &pVCpu->nem.s.pHvExit, pVM->nem.s.hVCpuCfg);
1362 if (hrc != HV_SUCCESS)
1363 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
1364 "Call to hv_vcpu_create failed on vCPU %u: %#x (%Rrc)", idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));
1365
1366 hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_MPIDR_EL1, idCpu);
1367 if (hrc != HV_SUCCESS)
1368 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS,
1369 "Setting MPIDR_EL1 failed on vCPU %u: %#x (%Rrc)", idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));
1370
1371 return VINF_SUCCESS;
1372}
1373
1374
1375/**
1376 * Worker to destroy the vCPU handle on the EMT running it later on (as required by HV).
1377 *
1378 * @returns VBox status code.
1379 * @param pVM The VM handle.
1380 * @param pVCpu The vCPU handle.
1381 */
1382static DECLCALLBACK(int) nemR3DarwinNativeTermVCpuOnEmt(PVM pVM, PVMCPU pVCpu)
1383{
1384 hv_return_t hrc = hv_vcpu_destroy(pVCpu->nem.s.hVCpu);
1385 Assert(hrc == HV_SUCCESS); RT_NOREF(hrc);
1386
1387 if (pVCpu->idCpu == 0)
1388 {
1389 os_release(pVM->nem.s.hVCpuCfg);
1390 pVM->nem.s.hVCpuCfg = NULL;
1391 }
1392 return VINF_SUCCESS;
1393}
1394
1395
1396/**
1397 * This is called after CPUMR3Init is done.
1398 *
1399 * @returns VBox status code.
1400 * @param pVM The VM handle.
1401 */
1402int nemR3NativeInitAfterCPUM(PVM pVM)
1403{
1404 /*
1405 * Validate sanity.
1406 */
1407 AssertReturn(!pVM->nem.s.fCreatedEmts, VERR_WRONG_ORDER);
1408 AssertReturn(pVM->bMainExecutionEngine == VM_EXEC_ENGINE_NATIVE_API, VERR_WRONG_ORDER);
1409
1410 /* Need to create the GIC here before any vCPU is created according to the Apple docs. */
1411 if (hv_gic_create)
1412 {
1413 int rc = nemR3DarwinGicCreate(pVM);
1414 if (RT_FAILURE(rc))
1415 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS, "Creating the GIC failed: %Rrc", rc);
1416 }
1417
1418 /*
1419 * Setup the EMTs.
1420 */
1421 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1422 {
1423 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
1424
1425 int rc = VMR3ReqCallWait(pVM, idCpu, (PFNRT)nemR3DarwinNativeInitVCpuOnEmt, 3, pVM, pVCpu, idCpu);
1426 if (RT_FAILURE(rc))
1427 {
1428 /* Rollback. */
1429 while (idCpu--)
1430 VMR3ReqCallWait(pVM, idCpu, (PFNRT)nemR3DarwinNativeTermVCpuOnEmt, 2, pVM, pVM->apCpusR3[idCpu]);
1431
1432 return VMSetError(pVM, VERR_NEM_VM_CREATE_FAILED, RT_SRC_POS, "Call to hv_vcpu_create failed: %Rrc", rc);
1433 }
1434 }
1435
1436 pVM->nem.s.fCreatedEmts = true;
1437 return VINF_SUCCESS;
1438}
1439
1440
1441int nemR3NativeInitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
1442{
1443 RT_NOREF(pVM, enmWhat);
1444 return VINF_SUCCESS;
1445}
1446
1447
1448int nemR3NativeTerm(PVM pVM)
1449{
1450 /*
1451 * Delete the VM.
1452 */
1453
1454 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1455 {
1456 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
1457
1458 /*
1459 * Apple's documentation states that the vCPU should be destroyed
1460 * on the thread running the vCPU but as all the other EMTs are gone
1461 * at this point, destroying the VM would hang.
1462 *
1463 * We seem to be in luck here, though, as destroying apparently works
1464 * from EMT(0) as well.
1465 */
1466 hv_return_t hrc = hv_vcpu_destroy(pVCpu->nem.s.hVCpu);
1467 Assert(hrc == HV_SUCCESS); RT_NOREF(hrc);
1468 }
1469
1470 pVM->nem.s.fCreatedEmts = false;
1471 if (pVM->nem.s.fCreatedVm)
1472 {
1473 hv_return_t hrc = hv_vm_destroy();
1474 if (hrc != HV_SUCCESS)
1475 LogRel(("NEM: hv_vm_destroy() failed with %#x\n", hrc));
1476
1477 pVM->nem.s.fCreatedVm = false;
1478 }
1479 return VINF_SUCCESS;
1480}
1481
1482
1483/**
1484 * VM reset notification.
1485 *
1486 * @param pVM The cross context VM structure.
1487 */
1488void nemR3NativeReset(PVM pVM)
1489{
1490 RT_NOREF(pVM);
1491}
1492
1493
1494/**
1495 * Reset CPU due to INIT IPI or hot (un)plugging.
1496 *
1497 * @param pVCpu The cross context virtual CPU structure of the CPU being
1498 * reset.
1499 * @param fInitIpi Whether this is the INIT IPI or hot (un)plugging case.
1500 */
1501void nemR3NativeResetCpu(PVMCPU pVCpu, bool fInitIpi)
1502{
1503 RT_NOREF(pVCpu, fInitIpi);
1504}
1505
1506
1507/**
1508 * Returns the byte size from the given access SAS value.
1509 *
1510 * @returns Number of bytes to transfer.
1511 * @param uSas The SAS value to convert.
1512 */
1513DECLINLINE(size_t) nemR3DarwinGetByteCountFromSas(uint8_t uSas)
1514{
1515 switch (uSas)
1516 {
1517 case ARMV8_EC_ISS_DATA_ABRT_SAS_BYTE: return sizeof(uint8_t);
1518 case ARMV8_EC_ISS_DATA_ABRT_SAS_HALFWORD: return sizeof(uint16_t);
1519 case ARMV8_EC_ISS_DATA_ABRT_SAS_WORD: return sizeof(uint32_t);
1520 case ARMV8_EC_ISS_DATA_ABRT_SAS_DWORD: return sizeof(uint64_t);
1521 default:
1522 AssertReleaseFailed();
1523 }
1524
1525 return 0;
1526}
1527
1528
1529/**
1530 * Sets the given general purpose register to the given value.
1531 *
1532 * @param pVCpu The cross context virtual CPU structure of the
1533 * calling EMT.
1534 * @param uReg The register index.
1535 * @param f64BitReg Flag whether to operate on a 64-bit or 32-bit register.
1536 * @param fSignExtend Flag whether to sign extend the value.
1537 * @param u64Val The value.
1538 */
1539DECLINLINE(void) nemR3DarwinSetGReg(PVMCPU pVCpu, uint8_t uReg, bool f64BitReg, bool fSignExtend, uint64_t u64Val)
1540{
1541 AssertReturnVoid(uReg < 31);
1542
1543 if (f64BitReg)
1544 pVCpu->cpum.GstCtx.aGRegs[uReg].x = fSignExtend ? (int64_t)u64Val : u64Val;
1545 else
1546 pVCpu->cpum.GstCtx.aGRegs[uReg].w = fSignExtend ? (int32_t)u64Val : u64Val; /** @todo Does this clear the upper half on real hardware? */
1547
1548 /* Mark the register as not extern anymore. */
1549 switch (uReg)
1550 {
1551 case 0:
1552 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X0;
1553 break;
1554 case 1:
1555 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X1;
1556 break;
1557 case 2:
1558 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X2;
1559 break;
1560 case 3:
1561 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_X3;
1562 break;
1563 default:
1564 AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_X4_X28));
1565 /** @todo We need to import all missing registers in order to clear this flag (or just set it in HV from here). */
1566 }
1567}
1568
1569
1570/**
1571 * Gets the given general purpose register and returns the value.
1572 *
1573 * @returns Value from the given register.
1574 * @param pVCpu The cross context virtual CPU structure of the
1575 * calling EMT.
1576 * @param uReg The register index.
1577 */
1578DECLINLINE(uint64_t) nemR3DarwinGetGReg(PVMCPU pVCpu, uint8_t uReg)
1579{
1580 AssertReturn(uReg <= ARMV8_AARCH64_REG_ZR, 0);
1581
1582 if (uReg == ARMV8_AARCH64_REG_ZR)
1583 return 0;
1584
1585 /** @todo Import the register if extern. */
1586 AssertRelease(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_GPRS_MASK));
1587
1588 return pVCpu->cpum.GstCtx.aGRegs[uReg].x;
1589}
1590
1591
1592/**
1593 * Works on the data abort exception (which will be an MMIO access most of the time).
1594 *
1595 * @returns VBox strict status code.
1596 * @param pVM The cross context VM structure.
1597 * @param pVCpu The cross context virtual CPU structure of the
1598 * calling EMT.
1599 * @param uIss The instruction specific syndrome value.
1600 * @param fInsn32Bit Flag whether the exception was caused by a 32-bit or 16-bit instruction.
1601 * @param GCPtrDataAbrt The virtual GC address causing the data abort.
1602 * @param GCPhysDataAbrt The physical GC address which caused the data abort.
1603 */
1604static VBOXSTRICTRC nemR3DarwinHandleExitExceptionDataAbort(PVM pVM, PVMCPU pVCpu, uint32_t uIss, bool fInsn32Bit,
1605 RTGCPTR GCPtrDataAbrt, RTGCPHYS GCPhysDataAbrt)
1606{
1607 bool fIsv = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_ISV);
1608 bool fL2Fault = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_S1PTW);
1609 bool fWrite = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_WNR);
1610 bool f64BitReg = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SF);
1611 bool fSignExtend = RT_BOOL(uIss & ARMV8_EC_ISS_DATA_ABRT_SSE);
1612 uint8_t uReg = ARMV8_EC_ISS_DATA_ABRT_SRT_GET(uIss);
1613 uint8_t uAcc = ARMV8_EC_ISS_DATA_ABRT_SAS_GET(uIss);
1614 size_t cbAcc = nemR3DarwinGetByteCountFromSas(uAcc);
1615 LogFlowFunc(("fIsv=%RTbool fL2Fault=%RTbool fWrite=%RTbool f64BitReg=%RTbool fSignExtend=%RTbool uReg=%u uAcc=%u GCPtrDataAbrt=%RGv GCPhysDataAbrt=%RGp\n",
1616 fIsv, fL2Fault, fWrite, f64BitReg, fSignExtend, uReg, uAcc, GCPtrDataAbrt, GCPhysDataAbrt));
1617
1618 RT_NOREF(fL2Fault, GCPtrDataAbrt);
1619
1620 if (fWrite)
1621 {
1622 /*
1623 * Check whether this is one of the dirty tracked regions, mark it as dirty
1624 * and enable write support for this region again.
1625 *
1626 * This is required for proper VRAM tracking, otherwise the display might not get updated.
1627 * The generic PGM facility cannot be used here because it operates on guest page sizes,
1628 * while Hypervisor.framework can only set protection flags on host page sized regions,
1629 * so we have to cook our own. Additionally, the VRAM region is mapped prefetchable (write-back),
1630 * which doesn't produce a valid instruction syndrome, so the instruction must be restarted
1631 * after write access is enabled again (due to a missing interpreter right now).
1632 */
1633 for (uint32_t idSlot = 0; idSlot < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking); idSlot++)
1634 {
1635 PNEMHVMMIO2REGION pMmio2Region = &pVM->nem.s.aMmio2DirtyTracking[idSlot];
1636
1637 if ( GCPhysDataAbrt >= pMmio2Region->GCPhysStart
1638 && GCPhysDataAbrt <= pMmio2Region->GCPhysLast)
1639 {
1640 pMmio2Region->fDirty = true;
1641
1642 uint8_t u2State;
1643 int rc = nemR3DarwinProtect(pMmio2Region->GCPhysStart, pMmio2Region->GCPhysLast - pMmio2Region->GCPhysStart + 1,
1644 NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE | NEM_PAGE_PROT_WRITE, &u2State);
1645
1646 /* Restart the instruction if there is no instruction syndrome available. */
1647 if (RT_FAILURE(rc) || !fIsv)
1648 return rc;
1649 }
1650 }
1651 }
1652
1653 AssertReturn(fIsv, VERR_NOT_SUPPORTED); /** @todo Implement using IEM when this should occur. */
1654
1655 EMHistoryAddExit(pVCpu,
1656 fWrite
1657 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_WRITE)
1658 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO_READ),
1659 pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());
1660
1661 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1662 uint64_t u64Val = 0;
1663 if (fWrite)
1664 {
1665 u64Val = nemR3DarwinGetGReg(pVCpu, uReg);
1666 rcStrict = PGMPhysWrite(pVM, GCPhysDataAbrt, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
1667 Log4(("MmioExit/%u: %08RX64: WRITE %#x LB %u, %.*Rhxs -> rcStrict=%Rrc\n",
1668 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhysDataAbrt, cbAcc, cbAcc,
1669 &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
1670 }
1671 else
1672 {
1673 rcStrict = PGMPhysRead(pVM, GCPhysDataAbrt, &u64Val, cbAcc, PGMACCESSORIGIN_HM);
1674 Log4(("MmioExit/%u: %08RX64: READ %#x LB %u -> %.*Rhxs rcStrict=%Rrc\n",
1675 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, GCPhysDataAbrt, cbAcc, cbAcc,
1676 &u64Val, VBOXSTRICTRC_VAL(rcStrict) ));
1677 if (rcStrict == VINF_SUCCESS)
1678 nemR3DarwinSetGReg(pVCpu, uReg, f64BitReg, fSignExtend, u64Val);
1679 }
1680
1681 if (rcStrict == VINF_SUCCESS)
1682 pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);
1683
1684 return rcStrict;
1685}
1686
1687
1688/**
1689 * Works on the trapped MRS, MSR and system instruction exception.
1690 *
1691 * @returns VBox strict status code.
1692 * @param pVM The cross context VM structure.
1693 * @param pVCpu The cross context virtual CPU structure of the
1694 * calling EMT.
1695 * @param uIss The instruction specific syndrome value.
1696 * @param fInsn32Bit Flag whether the exception was caused by a 32-bit or 16-bit instruction.
1697 */
1698static VBOXSTRICTRC nemR3DarwinHandleExitExceptionTrappedSysInsn(PVM pVM, PVMCPU pVCpu, uint32_t uIss, bool fInsn32Bit)
1699{
1700 bool fRead = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_DIRECTION_IS_READ(uIss);
1701 uint8_t uCRm = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_CRM_GET(uIss);
1702 uint8_t uReg = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_RT_GET(uIss);
1703 uint8_t uCRn = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_CRN_GET(uIss);
1704 uint8_t uOp1 = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP1_GET(uIss);
1705 uint8_t uOp2 = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP2_GET(uIss);
1706 uint8_t uOp0 = ARMV8_EC_ISS_AARCH64_TRAPPED_SYS_INSN_OP0_GET(uIss);
1707 uint16_t idSysReg = ARMV8_AARCH64_SYSREG_ID_CREATE(uOp0, uOp1, uCRn, uCRm, uOp2);
1708 LogFlowFunc(("fRead=%RTbool uCRm=%u uReg=%u uCRn=%u uOp1=%u uOp2=%u uOp0=%u idSysReg=%#x\n",
1709 fRead, uCRm, uReg, uCRn, uOp1, uOp2, uOp0, idSysReg));
1710
1711 /** @todo EMEXITTYPE_MSR_READ/EMEXITTYPE_MSR_WRITE are misnomers. */
1712 EMHistoryAddExit(pVCpu,
1713 fRead
1714 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ)
1715 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE),
1716 pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());
1717
1718 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1719 uint64_t u64Val = 0;
1720 if (fRead)
1721 {
1722 RT_NOREF(pVM);
1723 rcStrict = CPUMQueryGuestSysReg(pVCpu, idSysReg, &u64Val);
1724 Log4(("SysInsnExit/%u: %08RX64: READ %u:%u:%u:%u:%u -> %#RX64 rcStrict=%Rrc\n",
1725 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, uOp0, uOp1, uCRn, uCRm, uOp2, u64Val,
1726 VBOXSTRICTRC_VAL(rcStrict) ));
1727 if (rcStrict == VINF_SUCCESS)
1728 nemR3DarwinSetGReg(pVCpu, uReg, true /*f64BitReg*/, false /*fSignExtend*/, u64Val);
1729 }
1730 else
1731 {
1732 u64Val = nemR3DarwinGetGReg(pVCpu, uReg);
1733 rcStrict = CPUMSetGuestSysReg(pVCpu, idSysReg, u64Val);
1734 Log4(("SysInsnExit/%u: %08RX64: WRITE %u:%u:%u:%u:%u %#RX64 -> rcStrict=%Rrc\n",
1735 pVCpu->idCpu, pVCpu->cpum.GstCtx.Pc.u64, uOp0, uOp1, uCRn, uCRm, uOp2, u64Val,
1736 VBOXSTRICTRC_VAL(rcStrict) ));
1737 }
1738
1739 if (rcStrict == VINF_SUCCESS)
1740 pVCpu->cpum.GstCtx.Pc.u64 += fInsn32Bit ? sizeof(uint32_t) : sizeof(uint16_t);
1741
1742 return rcStrict;
1743}
1744
1745
1746/**
1747 * Works on the trapped HVC instruction exception.
1748 *
1749 * @returns VBox strict status code.
1750 * @param pVM The cross context VM structure.
1751 * @param pVCpu The cross context virtual CPU structure of the
1752 * calling EMT.
1753 * @param uIss The instruction specific syndrome value.
 * @param fAdvancePc Flag whether to advance the PC past the instruction after
 * handling (used for the trapped SMC case, which shares this handler).
1754 */
1755static VBOXSTRICTRC nemR3DarwinHandleExitExceptionTrappedHvcInsn(PVM pVM, PVMCPU pVCpu, uint32_t uIss, bool fAdvancePc = false)
1756{
1757 uint16_t u16Imm = ARMV8_EC_ISS_AARCH64_TRAPPED_HVC_INSN_IMM_GET(uIss);
1758 LogFlowFunc(("u16Imm=%#RX16\n", u16Imm));
1759
1760#if 0 /** @todo For later */
1761 EMHistoryAddExit(pVCpu,
1762 fRead
1763 ? EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_READ)
1764 : EMEXIT_MAKE_FT(EMEXIT_F_KIND_EM, EMEXITTYPE_MSR_WRITE),
1765 pVCpu->cpum.GstCtx.Pc.u64, ASMReadTSC());
1766#endif
1767
1768 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1769 if (u16Imm == 0)
1770 {
1771 /** @todo Raise exception to EL1 if PSCI not configured. */
1772 /** @todo Need a generic mechanism here to pass this on to, GIM maybe? */
1773 uint32_t uFunId = pVCpu->cpum.GstCtx.aGRegs[ARMV8_AARCH64_REG_X0].w;
1774 bool fHvc64 = RT_BOOL(uFunId & ARM_SMCCC_FUNC_ID_64BIT); RT_NOREF(fHvc64);
1775 uint32_t uEntity = ARM_SMCCC_FUNC_ID_ENTITY_GET(uFunId);
1776 uint32_t uFunNum = ARM_SMCCC_FUNC_ID_NUM_GET(uFunId);
1777 if (uEntity == ARM_SMCCC_FUNC_ID_ENTITY_STD_SEC_SERVICE)
1778 {
1779 switch (uFunNum)
1780 {
1781 case ARM_PSCI_FUNC_ID_PSCI_VERSION:
1782 nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_FUNC_ID_PSCI_VERSION_SET(1, 2));
1783 break;
1784 case ARM_PSCI_FUNC_ID_SYSTEM_OFF:
1785 rcStrict = VMR3PowerOff(pVM->pUVM);
1786 break;
1787 case ARM_PSCI_FUNC_ID_SYSTEM_RESET:
1788 case ARM_PSCI_FUNC_ID_SYSTEM_RESET2:
1789 {
1790 bool fHaltOnReset;
1791 int rc = CFGMR3QueryBool(CFGMR3GetChild(CFGMR3GetRoot(pVM), "PDM"), "HaltOnReset", &fHaltOnReset);
1792 if (RT_SUCCESS(rc) && fHaltOnReset)
1793 {
1794 Log(("nemR3DarwinHandleExitExceptionTrappedHvcInsn: Halt On Reset!\n"));
1795 rc = VINF_EM_HALT;
1796 }
1797 else
1798 {
1799 /** @todo pVM->pdm.s.fResetFlags = fFlags; */
1800 VM_FF_SET(pVM, VM_FF_RESET);
1801 rc = VINF_EM_RESET;
1802 }
1803 break;
1804 }
1805 case ARM_PSCI_FUNC_ID_CPU_ON:
1806 {
1807 uint64_t u64TgtCpu = nemR3DarwinGetGReg(pVCpu, ARMV8_AARCH64_REG_X1);
1808 RTGCPHYS GCPhysExecAddr = nemR3DarwinGetGReg(pVCpu, ARMV8_AARCH64_REG_X2);
1809 uint64_t u64CtxId = nemR3DarwinGetGReg(pVCpu, ARMV8_AARCH64_REG_X3);
1810 VMMR3CpuOn(pVM, u64TgtCpu & 0xff, GCPhysExecAddr, u64CtxId);
1811 nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, true /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_STS_SUCCESS);
1812 break;
1813 }
1814 case ARM_PSCI_FUNC_ID_PSCI_FEATURES:
1815 {
1816 uint32_t u32FunNum = (uint32_t)nemR3DarwinGetGReg(pVCpu, ARMV8_AARCH64_REG_X1);
1817 switch (u32FunNum)
1818 {
1819 case ARM_PSCI_FUNC_ID_PSCI_VERSION:
1820 case ARM_PSCI_FUNC_ID_SYSTEM_OFF:
1821 case ARM_PSCI_FUNC_ID_SYSTEM_RESET:
1822 case ARM_PSCI_FUNC_ID_SYSTEM_RESET2:
1823 case ARM_PSCI_FUNC_ID_CPU_ON:
1824 case ARM_PSCI_FUNC_ID_MIGRATE_INFO_TYPE:
1825 nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0,
1826 false /*f64BitReg*/, false /*fSignExtend*/,
1827 (uint64_t)ARM_PSCI_STS_SUCCESS);
1828 break;
1829 default:
1830 nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0,
1831 false /*f64BitReg*/, false /*fSignExtend*/,
1832 (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
1833 }
1834 break;
1835 }
1836 case ARM_PSCI_FUNC_ID_MIGRATE_INFO_TYPE:
1837 nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, ARM_PSCI_MIGRATE_INFO_TYPE_TOS_NOT_PRESENT);
1838 break;
1839 default:
1840 nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
1841 }
1842 }
1843 else
1844 nemR3DarwinSetGReg(pVCpu, ARMV8_AARCH64_REG_X0, false /*f64BitReg*/, false /*fSignExtend*/, (uint64_t)ARM_PSCI_STS_NOT_SUPPORTED);
1845 }
1846 /** @todo What to do if immediate is != 0? */
1847
1848 if ( rcStrict == VINF_SUCCESS
1849 && fAdvancePc)
1850 pVCpu->cpum.GstCtx.Pc.u64 += sizeof(uint32_t);
1851
1852 return rcStrict;
1853}
1854
1855
1856/**
1857 * Handles an exception VM exit.
1858 *
1859 * @returns VBox strict status code.
1860 * @param pVM The cross context VM structure.
1861 * @param pVCpu The cross context virtual CPU structure of the
1862 * calling EMT.
1863 * @param pExit Pointer to the exit information.
1864 */
1865static VBOXSTRICTRC nemR3DarwinHandleExitException(PVM pVM, PVMCPU pVCpu, const hv_vcpu_exit_t *pExit)
1866{
1867 uint32_t uEc = ARMV8_ESR_EL2_EC_GET(pExit->exception.syndrome);
1868 uint32_t uIss = ARMV8_ESR_EL2_ISS_GET(pExit->exception.syndrome);
1869 bool fInsn32Bit = ARMV8_ESR_EL2_IL_IS_32BIT(pExit->exception.syndrome);
1870
1871 LogFlowFunc(("pVM=%p pVCpu=%p{.idCpu=%u} uEc=%u{%s} uIss=%#RX32 fInsn32Bit=%RTbool\n",
1872 pVM, pVCpu, pVCpu->idCpu, uEc, nemR3DarwinEsrEl2EcStringify(uEc), uIss, fInsn32Bit));
1873
1874 switch (uEc)
1875 {
1876 case ARMV8_ESR_EL2_DATA_ABORT_FROM_LOWER_EL:
1877 return nemR3DarwinHandleExitExceptionDataAbort(pVM, pVCpu, uIss, fInsn32Bit, pExit->exception.virtual_address,
1878 pExit->exception.physical_address);
1879 case ARMV8_ESR_EL2_EC_AARCH64_TRAPPED_SYS_INSN:
1880 return nemR3DarwinHandleExitExceptionTrappedSysInsn(pVM, pVCpu, uIss, fInsn32Bit);
1881 case ARMV8_ESR_EL2_EC_AARCH64_HVC_INSN:
1882 return nemR3DarwinHandleExitExceptionTrappedHvcInsn(pVM, pVCpu, uIss);
1883 case ARMV8_ESR_EL2_EC_AARCH64_SMC_INSN:
1884 return nemR3DarwinHandleExitExceptionTrappedHvcInsn(pVM, pVCpu, uIss, true /*fAdvancePc*/);
1885 case ARMV8_ESR_EL2_EC_TRAPPED_WFX:
1886 {
1887 /* No need to halt if there is an interrupt pending already. */
1888 if (VMCPU_FF_IS_ANY_SET(pVCpu, (VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ)))
1889 return VINF_SUCCESS;
1890
1891 /* Set the vTimer expiration in order to get out of the halt at the right point in time. */
1892 if ( (pVCpu->cpum.GstCtx.CntvCtlEl0 & ARMV8_CNTV_CTL_EL0_AARCH64_ENABLE)
1893 && !(pVCpu->cpum.GstCtx.CntvCtlEl0 & ARMV8_CNTV_CTL_EL0_AARCH64_IMASK))
1894 {
1895 uint64_t cTicksVTimer = mach_absolute_time() - pVM->nem.s.u64VTimerOff;
1896
1897 /* Check whether it expired and start executing guest code. */
1898 if (cTicksVTimer >= pVCpu->cpum.GstCtx.CntvCValEl0)
1899 return VINF_SUCCESS;
1900
1901 uint64_t cTicksVTimerToExpire = pVCpu->cpum.GstCtx.CntvCValEl0 - cTicksVTimer;
1902 uint64_t cNanoSecsVTimerToExpire = ASMMultU64ByU32DivByU32(cTicksVTimerToExpire, RT_NS_1SEC, (uint32_t)pVM->nem.s.u64CntFrqHz);
1903
1904 /*
1905 * Our halt method doesn't work with sub-millisecond granularity at the moment, causing a huge slowdown
1906 * and scheduling overhead which would increase the wakeup latency.
1907 * So only halt when the remaining time exceeds the threshold below (needs more experimentation,
1908 * but 2ms turned out to be a good compromise between CPU load when the guest is idle and performance).
1909 */
1910 if (cNanoSecsVTimerToExpire < 2 * RT_NS_1MS)
1911 return VINF_SUCCESS;
1912
1913 LogFlowFunc(("Set vTimer activation to cNanoSecsVTimerToExpire=%#RX64 (CntvCValEl0=%#RX64, u64VTimerOff=%#RX64 cTicksVTimer=%#RX64 u64CntFrqHz=%#RX64)\n",
1914 cNanoSecsVTimerToExpire, pVCpu->cpum.GstCtx.CntvCValEl0, pVM->nem.s.u64VTimerOff, cTicksVTimer, pVM->nem.s.u64CntFrqHz));
1915 TMCpuSetVTimerNextActivation(pVCpu, cNanoSecsVTimerToExpire);
1916 }
1917 else
1918 TMCpuSetVTimerNextActivation(pVCpu, UINT64_MAX);
1919
1920 return VINF_EM_HALT;
1921 }
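/*
 * Editorial worked example for the conversion above: Apple silicon typically
 * runs the generic timer at CNTFRQ_EL0 = 24 MHz, so one tick is ~41.67ns and
 * the 2ms threshold corresponds to 48 000 ticks:
 *
 *     cNanoSecsVTimerToExpire = 48000 * RT_NS_1SEC / 24000000
 *                             = 48000 * 1000000000 / 24000000 = 2000000 ns
 *
 * ASMMultU64ByU32DivByU32 performs this multiply-then-divide with a wide
 * intermediate so the 64-bit product cannot silently overflow.
 */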
1922 case ARMV8_ESR_EL2_EC_UNKNOWN:
1923 default:
1924 LogRel(("NEM/Darwin: Unknown Exception Class in syndrome: uEc=%u{%s} uIss=%#RX32 fInsn32Bit=%RTbool\n",
1925 uEc, nemR3DarwinEsrEl2EcStringify(uEc), uIss, fInsn32Bit));
1926 AssertReleaseFailed();
1927 return VERR_NOT_IMPLEMENTED;
1928 }
1929
1930 return VINF_SUCCESS;
1931}
1932
1933
1934/**
1935 * Handles an exit from hv_vcpu_run().
1936 *
1937 * @returns VBox strict status code.
1938 * @param pVM The cross context VM structure.
1939 * @param pVCpu The cross context virtual CPU structure of the
1940 * calling EMT.
1941 */
1942static VBOXSTRICTRC nemR3DarwinHandleExit(PVM pVM, PVMCPU pVCpu)
1943{
1944 int rc = nemR3DarwinCopyStateFromHv(pVM, pVCpu, CPUMCTX_EXTRN_ALL);
1945 if (RT_FAILURE(rc))
1946 return rc;
1947
1948#ifdef LOG_ENABLED
1949 if (LogIs3Enabled())
1950 nemR3DarwinLogState(pVM, pVCpu);
1951#endif
1952
1953 hv_vcpu_exit_t *pExit = pVCpu->nem.s.pHvExit;
1954 switch (pExit->reason)
1955 {
1956 case HV_EXIT_REASON_CANCELED:
1957 return VINF_EM_RAW_INTERRUPT;
1958 case HV_EXIT_REASON_EXCEPTION:
1959 return nemR3DarwinHandleExitException(pVM, pVCpu, pExit);
1960 case HV_EXIT_REASON_VTIMER_ACTIVATED:
1961 {
1962 LogFlowFunc(("vTimer got activated\n"));
1963 TMCpuSetVTimerNextActivation(pVCpu, UINT64_MAX);
1964 pVCpu->nem.s.fVTimerActivated = true;
1965 return GICPpiSet(pVCpu, pVM->nem.s.u32GicPpiVTimer, true /*fAsserted*/);
1966 }
1967 default:
1968 AssertReleaseFailed();
1969 break;
1970 }
1971
1972 return VERR_INVALID_STATE;
1973}
1974
1975
1976/**
1977 * Runs the guest once until an exit occurs.
1978 *
1979 * @returns HV status code.
1980 * @param pVM The cross context VM structure.
1981 * @param pVCpu The cross context virtual CPU structure.
1982 */
1983static hv_return_t nemR3DarwinRunGuest(PVM pVM, PVMCPU pVCpu)
1984{
1985 TMNotifyStartOfExecution(pVM, pVCpu);
1986
1987 hv_return_t hrc = hv_vcpu_run(pVCpu->nem.s.hVCpu);
1988
1989 TMNotifyEndOfExecution(pVM, pVCpu, ASMReadTSC());
1990
1991 return hrc;
1992}
1993
1994
1995/**
1996 * Prepares the VM to run the guest.
1997 *
1998 * @returns Strict VBox status code.
1999 * @param pVM The cross context VM structure.
2000 * @param pVCpu The cross context virtual CPU structure.
2001 * @param fSingleStepping Flag whether we run in single stepping mode.
2002 */
2003static VBOXSTRICTRC nemR3DarwinPreRunGuest(PVM pVM, PVMCPU pVCpu, bool fSingleStepping)
2004{
2005#ifdef LOG_ENABLED
2006 bool fIrq = false;
2007 bool fFiq = false;
2008
2009 if (LogIs3Enabled())
2010 nemR3DarwinLogState(pVM, pVCpu);
2011#endif
2012
2013 /** @todo */ RT_NOREF(fSingleStepping);
2014 int rc = nemR3DarwinExportGuestState(pVM, pVCpu);
2015 AssertRCReturn(rc, rc);
2016
2017 /* Check whether the vTimer interrupt was handled by the guest and we can unmask the vTimer. */
2018 if (pVCpu->nem.s.fVTimerActivated)
2019 {
2020 /* Read the CNTV_CTL_EL0 register. */
2021 uint64_t u64CntvCtl = 0;
2022
2023 hv_return_t hrc = hv_vcpu_get_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CTL_EL0, &u64CntvCtl);
2024 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
2025
2026 if ( (u64CntvCtl & (ARMV8_CNTV_CTL_EL0_AARCH64_ENABLE | ARMV8_CNTV_CTL_EL0_AARCH64_IMASK | ARMV8_CNTV_CTL_EL0_AARCH64_ISTATUS))
2027 != (ARMV8_CNTV_CTL_EL0_AARCH64_ENABLE | ARMV8_CNTV_CTL_EL0_AARCH64_ISTATUS))
2028 {
2029 /* Clear the interrupt. */
2030 GICPpiSet(pVCpu, pVM->nem.s.u32GicPpiVTimer, false /*fAsserted*/);
2031
2032 pVCpu->nem.s.fVTimerActivated = false;
2033 hrc = hv_vcpu_set_vtimer_mask(pVCpu->nem.s.hVCpu, false /*vtimer_is_masked*/);
2034 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
2035 }
2036 }
2037
2038 /* Set the pending interrupt state. */
2039 hv_return_t hrc = HV_SUCCESS;
2040 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ))
2041 {
2042 hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_IRQ, true);
2043 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
2044#ifdef LOG_ENABLED
2045 fIrq = true;
2046#endif
2047 }
2048 else
2049 {
2050 hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_IRQ, false);
2051 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
2052 }
2053
2054 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_FIQ))
2055 {
2056 hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_FIQ, true);
2057 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
2058#ifdef LOG_ENABLED
2059 fFiq = true;
2060#endif
2061 }
2062 else
2063 {
2064 hrc = hv_vcpu_set_pending_interrupt(pVCpu->nem.s.hVCpu, HV_INTERRUPT_TYPE_FIQ, false);
2065 AssertReturn(hrc == HV_SUCCESS, VERR_NEM_IPE_9);
2066 }
2067
2068 LogFlowFunc(("Running vCPU [%s,%s]\n", fIrq ? "I" : "nI", fFiq ? "F" : "nF"));
2069 pVCpu->nem.s.fEventPending = false;
2070 return VINF_SUCCESS;
2071}
2072
2073
2074/**
2075 * The normal runloop (no debugging features enabled).
2076 *
2077 * @returns Strict VBox status code.
2078 * @param pVM The cross context VM structure.
2079 * @param pVCpu The cross context virtual CPU structure.
2080 */
2081static VBOXSTRICTRC nemR3DarwinRunGuestNormal(PVM pVM, PVMCPU pVCpu)
2082{
2083 /*
2084 * The run loop.
2085 *
2086 * Current approach to state updating to use the sledgehammer and sync
2087 * everything every time. This will be optimized later.
2088 */
2089
2090 /* Update the vTimer offset after resuming if instructed. */
2091 if (pVCpu->nem.s.fVTimerOffUpdate)
2092 {
2093 hv_return_t hrc = hv_vcpu_set_vtimer_offset(pVCpu->nem.s.hVCpu, pVM->nem.s.u64VTimerOff);
2094 if (hrc != HV_SUCCESS)
2095 return nemR3DarwinHvSts2Rc(hrc);
2096
2097 pVCpu->nem.s.fVTimerOffUpdate = false;
2098
2099 hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CTL_EL0, pVCpu->cpum.GstCtx.CntvCtlEl0);
2100 if (hrc == HV_SUCCESS)
2101 hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, HV_SYS_REG_CNTV_CVAL_EL0, pVCpu->cpum.GstCtx.CntvCValEl0);
2102 if (hrc != HV_SUCCESS)
2103 return nemR3DarwinHvSts2Rc(hrc);
2104 }
2105
2106 /*
2107 * Poll timers and run for a bit.
2108 */
2109 /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing
2110 * the whole polling job when timers have changed... */
2111 uint64_t offDeltaIgnored;
2112 uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt);
2113 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2114 for (unsigned iLoop = 0;; iLoop++)
2115 {
2116 rcStrict = nemR3DarwinPreRunGuest(pVM, pVCpu, false /* fSingleStepping */);
2117 if (rcStrict != VINF_SUCCESS)
2118 break;
2119
2120 hv_return_t hrc = nemR3DarwinRunGuest(pVM, pVCpu);
2121 if (hrc == HV_SUCCESS)
2122 {
2123 /*
2124 * Deal with the message.
2125 */
2126 rcStrict = nemR3DarwinHandleExit(pVM, pVCpu);
2127 if (rcStrict == VINF_SUCCESS)
2128 { /* hopefully likely */ }
2129 else
2130 {
2131 LogFlow(("NEM/%u: breaking: nemR3DarwinHandleExit -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
2132 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
2133 break;
2134 }
2135 }
2136 else
2137 {
2138 AssertLogRelMsgFailedReturn(("hv_vcpu_run()) failed for CPU #%u: %#x \n",
2139 pVCpu->idCpu, hrc), VERR_NEM_IPE_0);
2140 }
2141 } /* the run loop */
2142
2143 return rcStrict;
2144}
2145
2146
2147VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu)
2148{
2149#ifdef LOG_ENABLED
2150 if (LogIs3Enabled())
2151 nemR3DarwinLogState(pVM, pVCpu);
2152#endif
2153
2154 AssertReturn(NEMR3CanExecuteGuest(pVM, pVCpu), VERR_NEM_IPE_9);
2155
2156 if (RT_UNLIKELY(!pVCpu->nem.s.fIdRegsSynced))
2157 {
2158 /*
2159 * Sync the guest ID registers once per VM (they are read-only and stay constant for the VM's lifetime).
2160 * This needs to be done here rather than during init because loading a saved state might change the ID
2161 * registers from what was established by the call to CPUMR3PopulateFeaturesByIdRegisters().
2162 */
2163 static const struct
2164 {
2165 const char *pszIdReg;
2166 hv_sys_reg_t enmHvReg;
2167 uint32_t offIdStruct;
2168 } s_aSysIdRegs[] =
2169 {
2170#define ID_SYS_REG_CREATE(a_IdReg, a_CpumIdReg) { #a_IdReg, HV_SYS_REG_##a_IdReg, RT_UOFFSETOF(CPUMIDREGS, a_CpumIdReg) }
2171 ID_SYS_REG_CREATE(ID_AA64DFR0_EL1, u64RegIdAa64Dfr0El1),
2172 ID_SYS_REG_CREATE(ID_AA64DFR1_EL1, u64RegIdAa64Dfr1El1),
2173 ID_SYS_REG_CREATE(ID_AA64ISAR0_EL1, u64RegIdAa64Isar0El1),
2174 ID_SYS_REG_CREATE(ID_AA64ISAR1_EL1, u64RegIdAa64Isar1El1),
2175 ID_SYS_REG_CREATE(ID_AA64MMFR0_EL1, u64RegIdAa64Mmfr0El1),
2176 ID_SYS_REG_CREATE(ID_AA64MMFR1_EL1, u64RegIdAa64Mmfr1El1),
2177 ID_SYS_REG_CREATE(ID_AA64MMFR2_EL1, u64RegIdAa64Mmfr2El1),
2178 ID_SYS_REG_CREATE(ID_AA64PFR0_EL1, u64RegIdAa64Pfr0El1),
2179 ID_SYS_REG_CREATE(ID_AA64PFR1_EL1, u64RegIdAa64Pfr1El1),
2180#undef ID_SYS_REG_CREATE
2181 };
2182
2183 PCCPUMIDREGS pIdRegsGst = NULL;
2184 int rc = CPUMR3QueryGuestIdRegs(pVM, &pIdRegsGst);
2185 AssertRCReturn(rc, rc);
2186
2187 for (uint32_t i = 0; i < RT_ELEMENTS(s_aSysIdRegs); i++)
2188 {
2189 uint64_t *pu64 = (uint64_t *)((uint8_t *)pIdRegsGst + s_aSysIdRegs[i].offIdStruct);
2190 hv_return_t hrc = hv_vcpu_set_sys_reg(pVCpu->nem.s.hVCpu, s_aSysIdRegs[i].enmHvReg, *pu64);
2191 if (hrc != HV_SUCCESS)
2192 return VMSetError(pVM, VERR_NEM_SET_REGISTERS_FAILED, RT_SRC_POS,
2193 "Setting %s failed on vCPU %u: %#x (%Rrc)", s_aSysIdRegs[i].pszIdReg, pVCpu->idCpu, hrc, nemR3DarwinHvSts2Rc(hrc));
2194 }
2195
2196 pVCpu->nem.s.fIdRegsSynced = true;
2197 }
2198
2199 /*
2200 * Try switch to NEM runloop state.
2201 */
2202 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
2203 { /* likely */ }
2204 else
2205 {
2206 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
2207 LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
2208 return VINF_SUCCESS;
2209 }
2210
2211 VBOXSTRICTRC rcStrict;
2212#if 0
2213 if ( !pVCpu->nem.s.fUseDebugLoop
2214 && !nemR3DarwinAnyExpensiveProbesEnabled()
2215 && !DBGFIsStepping(pVCpu)
2216 && !pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledInt3Breakpoints)
2217#endif
2218 rcStrict = nemR3DarwinRunGuestNormal(pVM, pVCpu);
2219#if 0
2220 else
2221 rcStrict = nemR3DarwinRunGuestDebug(pVM, pVCpu);
2222#endif
2223
2224 if (rcStrict == VINF_EM_RAW_TO_R3)
2225 rcStrict = VINF_SUCCESS;
2226
2227 /*
2228 * Convert any pending HM events back to TRPM due to premature exits.
2229 *
2230 * This is because execution may continue from IEM and we would need to inject
2231 * the event from there (hence place it back in TRPM).
2232 */
2233 if (pVCpu->nem.s.fEventPending)
2234 {
2235 /** @todo */
2236 }
2237
2238
2239 if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
2240 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
2241
2242 if (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL))
2243 {
2244 /* Try to anticipate what we might need. */
2245 uint64_t fImport = NEM_DARWIN_CPUMCTX_EXTRN_MASK_FOR_IEM;
2246 if ( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
2247 || RT_FAILURE(rcStrict))
2248 fImport = CPUMCTX_EXTRN_ALL;
2249 else if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ
2250 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
2251 fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;
2252
2253 if (pVCpu->cpum.GstCtx.fExtrn & fImport)
2254 {
2255 /* Only import what is external currently. */
2256 int rc2 = nemR3DarwinCopyStateFromHv(pVM, pVCpu, fImport);
2257 if (RT_SUCCESS(rc2))
2258 pVCpu->cpum.GstCtx.fExtrn &= ~fImport;
2259 else if (RT_SUCCESS(rcStrict))
2260 rcStrict = rc2;
2261 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL))
2262 pVCpu->cpum.GstCtx.fExtrn = 0;
2263 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
2264 }
2265 else
2266 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
2267 }
2268 else
2269 {
2270 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
2271 pVCpu->cpum.GstCtx.fExtrn = 0;
2272 }
2273
2274 return rcStrict;
2275}
2276
2277
2278VMMR3_INT_DECL(bool) NEMR3CanExecuteGuest(PVM pVM, PVMCPU pVCpu)
2279{
2280 RT_NOREF(pVM, pVCpu);
2281 return true; /** @todo Are there any cases where we have to emulate? */
2282}
2283
2284
2285bool nemR3NativeSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
2286{
2287 VMCPU_ASSERT_EMT(pVCpu);
2288 bool fOld = pVCpu->nem.s.fSingleInstruction;
2289 pVCpu->nem.s.fSingleInstruction = fEnable;
2290 pVCpu->nem.s.fUseDebugLoop = fEnable || pVM->nem.s.fUseDebugLoop;
2291 return fOld;
2292}
2293
2294
2295void nemR3NativeNotifyFF(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
2296{
2297 LogFlowFunc(("pVM=%p pVCpu=%p fFlags=%#x\n", pVM, pVCpu, fFlags));
2298
2299 RT_NOREF(pVM, fFlags);
2300
2301 hv_return_t hrc = hv_vcpus_exit(&pVCpu->nem.s.hVCpu, 1);
2302 if (hrc != HV_SUCCESS)
2303 LogRel(("NEM: hv_vcpus_exit(%u, 1) failed with %#x\n", pVCpu->nem.s.hVCpu, hrc));
2304}
2305
2306
2307DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChanged(PVM pVM, bool fUseDebugLoop)
2308{
2309 RT_NOREF(pVM, fUseDebugLoop);
2310 //AssertReleaseFailed();
2311 return false;
2312}
2313
2314
2315DECLHIDDEN(bool) nemR3NativeNotifyDebugEventChangedPerCpu(PVM pVM, PVMCPU pVCpu, bool fUseDebugLoop)
2316{
2317 RT_NOREF(pVM, pVCpu, fUseDebugLoop);
2318 return fUseDebugLoop;
2319}
2320
2321
2322VMMR3_INT_DECL(int) NEMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvR3,
2323 uint8_t *pu2State, uint32_t *puNemRange)
2324{
2325 RT_NOREF(pVM, puNemRange);
2326
2327 Log5(("NEMR3NotifyPhysRamRegister: %RGp LB %RGp, pvR3=%p\n", GCPhys, cb, pvR3));
2328#if defined(VBOX_WITH_PGM_NEM_MODE)
2329 if (pvR3)
2330 {
2331 int rc = nemR3DarwinMap(pVM, GCPhys, pvR3, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
2332 if (RT_FAILURE(rc))
2333 {
2334 LogRel(("NEMR3NotifyPhysRamRegister: GCPhys=%RGp LB %RGp pvR3=%p rc=%Rrc\n", GCPhys, cb, pvR3, rc));
2335 return VERR_NEM_MAP_PAGES_FAILED;
2336 }
2337 }
2338 return VINF_SUCCESS;
2339#else
2340 RT_NOREF(pVM, GCPhys, cb, pvR3);
2341 return VERR_NEM_MAP_PAGES_FAILED;
2342#endif
2343}
2344
2345
2346VMMR3_INT_DECL(bool) NEMR3IsMmio2DirtyPageTrackingSupported(PVM pVM)
2347{
2348 RT_NOREF(pVM);
2349 return true;
2350}
2351
2352
2353VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
2354 void *pvRam, void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
2355{
2356 RT_NOREF(pvRam);
2357
2358 Log5(("NEMR3NotifyPhysMmioExMapEarly: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p (%d)\n",
2359 GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, *pu2State));
2360
2361#if defined(VBOX_WITH_PGM_NEM_MODE)
2362 /*
2363 * Unmap the RAM we're replacing.
2364 */
2365 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
2366 {
2367 int rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
2368 if (RT_SUCCESS(rc))
2369 { /* likely */ }
2370 else if (pvMmio2)
2371 LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rc(ignored)\n",
2372 GCPhys, cb, fFlags, rc));
2373 else
2374 {
2375 LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc\n",
2376 GCPhys, cb, fFlags, rc));
2377 return VERR_NEM_UNMAP_PAGES_FAILED;
2378 }
2379 }
2380
2381 /*
2382 * Map MMIO2 if any.
2383 */
2384 if (pvMmio2)
2385 {
2386 Assert(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2);
2387
2388 /* We need to set up our own dirty tracking because Hypervisor.framework only works on host page size aligned regions. */
2389 uint32_t fProt = NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE;
2390 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES)
2391 {
2392 /* Find a slot for dirty tracking. */
2393 PNEMHVMMIO2REGION pMmio2Region = NULL;
2394 uint32_t idSlot;
2395 for (idSlot = 0; idSlot < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking); idSlot++)
2396 {
2397 if ( pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysStart == 0
2398 && pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysLast == 0)
2399 {
2400 pMmio2Region = &pVM->nem.s.aMmio2DirtyTracking[idSlot];
2401 break;
2402 }
2403 }
2404
2405 if (!pMmio2Region)
2406 {
2407 LogRel(("NEMR3NotifyPhysMmioExMapEarly: Out of dirty tracking structures -> VERR_NEM_MAP_PAGES_FAILED\n"));
2408 return VERR_NEM_MAP_PAGES_FAILED;
2409 }
2410
2411 pMmio2Region->GCPhysStart = GCPhys;
2412 pMmio2Region->GCPhysLast = GCPhys + cb - 1;
2413 pMmio2Region->fDirty = false;
2414 *puNemRange = idSlot;
2415 }
2416 else
2417 fProt |= NEM_PAGE_PROT_WRITE;
2418
2419 int rc = nemR3DarwinMap(pVM, GCPhys, pvMmio2, cb, fProt, pu2State);
2420 if (RT_FAILURE(rc))
2421 {
2422 LogRel(("NEMR3NotifyPhysMmioExMapEarly: GCPhys=%RGp LB %RGp fFlags=%#x pvMmio2=%p: Map -> rc=%Rrc\n",
2423 GCPhys, cb, fFlags, pvMmio2, rc));
2424 return VERR_NEM_MAP_PAGES_FAILED;
2425 }
2426 }
2427 else
2428 Assert(!(fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2));
2429
2430#else
2431 RT_NOREF(pVM, GCPhys, cb, pvRam, pvMmio2);
2432 *pu2State = (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE) ? UINT8_MAX : NEM_DARWIN_PAGE_STATE_UNMAPPED;
2433#endif
2434 return VINF_SUCCESS;
2435}
2436
2437
2438VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExMapLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags,
2439 void *pvRam, void *pvMmio2, uint32_t *puNemRange)
2440{
2441 RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, puNemRange);
2442 return VINF_SUCCESS;
2443}
2444
2445
2446VMMR3_INT_DECL(int) NEMR3NotifyPhysMmioExUnmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, void *pvRam,
2447 void *pvMmio2, uint8_t *pu2State, uint32_t *puNemRange)
2448{
2449 RT_NOREF(pVM, puNemRange);
2450
2451 Log5(("NEMR3NotifyPhysMmioExUnmap: %RGp LB %RGp fFlags=%#x pvRam=%p pvMmio2=%p pu2State=%p uNemRange=%#x (%#x)\n",
2452 GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State, puNemRange, *puNemRange));
2453
2454 int rc = VINF_SUCCESS;
2455#if defined(VBOX_WITH_PGM_NEM_MODE)
2456 /*
2457 * Unmap the MMIO2 pages.
2458 */
2459 /** @todo If we implement aliasing (MMIO2 page aliased into MMIO range),
2460 * we may have more stuff to unmap even in case of pure MMIO... */
2461 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_MMIO2)
2462 {
2463 rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
2464 if (RT_FAILURE(rc))
2465 {
2466 LogRel2(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp fFlags=%#x: Unmap -> rc=%Rrc\n",
2467 GCPhys, cb, fFlags, rc));
2468 rc = VERR_NEM_UNMAP_PAGES_FAILED;
2469 }
2470
2471 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_TRACK_DIRTY_PAGES)
2472 {
2473 /* Reset tracking structure. */
2474 uint32_t idSlot = *puNemRange;
2475 *puNemRange = UINT32_MAX;
2476
2477 Assert(idSlot < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking));
2478 pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysStart = 0;
2479 pVM->nem.s.aMmio2DirtyTracking[idSlot].GCPhysLast = 0;
2480 pVM->nem.s.aMmio2DirtyTracking[idSlot].fDirty = false;
2481 }
2482 }
2483
2484 /* Ensure the page is marked as unmapped if relevant. */
2485 Assert(!pu2State || *pu2State == NEM_DARWIN_PAGE_STATE_UNMAPPED);
2486
2487 /*
2488 * Restore the RAM we replaced.
2489 */
2490 if (fFlags & NEM_NOTIFY_PHYS_MMIO_EX_F_REPLACE)
2491 {
2492 AssertPtr(pvRam);
2493 rc = nemR3DarwinMap(pVM, GCPhys, pvRam, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
2494 if (RT_SUCCESS(rc))
2495 { /* likely */ }
2496 else
2497 {
2498 LogRel(("NEMR3NotifyPhysMmioExUnmap: GCPhys=%RGp LB %RGp pvMmio2=%p rc=%Rrc\n", GCPhys, cb, pvMmio2, rc));
2499 rc = VERR_NEM_MAP_PAGES_FAILED;
2500 }
2501 }
2502
2503 RT_NOREF(pvMmio2);
2504#else
2505 RT_NOREF(pVM, GCPhys, cb, fFlags, pvRam, pvMmio2, pu2State);
2506 if (pu2State)
2507 *pu2State = UINT8_MAX;
2508 rc = VERR_NEM_UNMAP_PAGES_FAILED;
2509#endif
2510 return rc;
2511}
2512
2513
2514VMMR3_INT_DECL(int) NEMR3PhysMmio2QueryAndResetDirtyBitmap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t uNemRange,
2515 void *pvBitmap, size_t cbBitmap)
2516{
2517 LogFlowFunc(("NEMR3PhysMmio2QueryAndResetDirtyBitmap: %RGp LB %RGp UnemRange=%u\n", GCPhys, cb, uNemRange));
2518 Assert(uNemRange < RT_ELEMENTS(pVM->nem.s.aMmio2DirtyTracking));
2519
2520 /* Keep it simple for now and mark everything as dirty if it is. */
2521 int rc = VINF_SUCCESS;
2522 if (pVM->nem.s.aMmio2DirtyTracking[uNemRange].fDirty)
2523 {
2524 ASMBitSetRange(pvBitmap, 0, cbBitmap * 8);
2525
2526 pVM->nem.s.aMmio2DirtyTracking[uNemRange].fDirty = false;
2527 /* Restore as RX only. */
2528 uint8_t u2State;
2529 rc = nemR3DarwinProtect(GCPhys, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE, &u2State);
2530 }
2531 else
2532 ASMBitClearRange(pvBitmap, 0, cbBitmap * 8);
2533
2534 return rc;
2535}
2536
2537
2538VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterEarly(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages, uint32_t fFlags,
2539 uint8_t *pu2State, uint32_t *puNemRange)
2540{
2541 RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);
2542
2543 Log5(("NEMR3NotifyPhysRomRegisterEarly: %RGp LB %RGp pvPages=%p fFlags=%#x\n", GCPhys, cb, pvPages, fFlags));
2544 *pu2State = UINT8_MAX;
2545 *puNemRange = 0;
2546 return VINF_SUCCESS;
2547}
2548
2549
2550VMMR3_INT_DECL(int) NEMR3NotifyPhysRomRegisterLate(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, void *pvPages,
2551 uint32_t fFlags, uint8_t *pu2State, uint32_t *puNemRange)
2552{
2553 Log5(("NEMR3NotifyPhysRomRegisterLate: %RGp LB %RGp pvPages=%p fFlags=%#x pu2State=%p (%d) puNemRange=%p (%#x)\n",
2554 GCPhys, cb, pvPages, fFlags, pu2State, *pu2State, puNemRange, *puNemRange));
2555 *pu2State = UINT8_MAX;
2556
2557#if defined(VBOX_WITH_PGM_NEM_MODE)
2558 /*
2559 * (Re-)map readonly.
2560 */
2561 AssertPtrReturn(pvPages, VERR_INVALID_POINTER);
2562
2563 int rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
2564 AssertRC(rc);
2565
2566 rc = nemR3DarwinMap(pVM, GCPhys, pvPages, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE, pu2State);
2567 if (RT_FAILURE(rc))
2568 {
2569 LogRel(("nemR3NativeNotifyPhysRomRegisterLate: GCPhys=%RGp LB %RGp pvPages=%p fFlags=%#x rc=%Rrc\n",
2570 GCPhys, cb, pvPages, fFlags, rc));
2571 return VERR_NEM_MAP_PAGES_FAILED;
2572 }
2573 RT_NOREF(fFlags, puNemRange);
2574 return VINF_SUCCESS;
2575#else
2576 RT_NOREF(pVM, GCPhys, cb, pvPages, fFlags, puNemRange);
2577 return VERR_NEM_MAP_PAGES_FAILED;
2578#endif
2579}
2580
2581
2582VMM_INT_DECL(void) NEMHCNotifyHandlerPhysicalDeregister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
2583 RTR3PTR pvMemR3, uint8_t *pu2State)
2584{
2585 Log5(("NEMHCNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d pvMemR3=%p pu2State=%p (%d)\n",
2586 GCPhys, cb, enmKind, pvMemR3, pu2State, *pu2State));
2587
2588 *pu2State = UINT8_MAX;
2589#if defined(VBOX_WITH_PGM_NEM_MODE)
2590 if (pvMemR3)
2591 {
2592 /* Unregister what was there before. */
2593 int rc = nemR3DarwinUnmap(pVM, GCPhys, cb, pu2State);
2594 AssertRC(rc);
2595
2596 rc = nemR3DarwinMap(pVM, GCPhys, pvMemR3, cb, NEM_PAGE_PROT_READ | NEM_PAGE_PROT_WRITE | NEM_PAGE_PROT_EXECUTE, pu2State);
2597 AssertLogRelMsgRC(rc, ("NEMHCNotifyHandlerPhysicalDeregister: nemR3DarwinMap(,%p,%RGp,%RGp,) -> %Rrc\n",
2598 pvMemR3, GCPhys, cb, rc));
2599 }
2600 RT_NOREF(enmKind);
2601#else
2602 RT_NOREF(pVM, enmKind, GCPhys, cb, pvMemR3);
2603 AssertFailed();
2604#endif
2605}
2606
2607
2608VMMR3_INT_DECL(void) NEMR3NotifySetA20(PVMCPU pVCpu, bool fEnabled)
2609{
2610 Log(("NEMR3NotifySetA20: fEnabled=%RTbool\n", fEnabled));
2611 RT_NOREF(pVCpu, fEnabled);
2612}
2613
2614
2615void nemHCNativeNotifyHandlerPhysicalRegister(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
2616{
2617 Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
2618 NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb);
2619}
2620
2621
2622void nemHCNativeNotifyHandlerPhysicalModify(PVMCC pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
2623 RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
2624{
2625 Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
2626 GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
2627 NOREF(pVM); NOREF(enmKind); NOREF(GCPhysOld); NOREF(GCPhysNew); NOREF(cb); NOREF(fRestoreAsRAM);
2628}
2629
2630
2631int nemHCNativeNotifyPhysPageAllocated(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
2632 PGMPAGETYPE enmType, uint8_t *pu2State)
2633{
2634 Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
2635 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
2636 RT_NOREF(pVM, GCPhys, HCPhys, fPageProt, enmType, pu2State);
2637
2638 AssertFailed();
2639 return VINF_SUCCESS;
2640}
2641
2642
2643VMM_INT_DECL(void) NEMHCNotifyPhysPageProtChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, RTR3PTR pvR3, uint32_t fPageProt,
2644 PGMPAGETYPE enmType, uint8_t *pu2State)
2645{
2646 Log5(("NEMHCNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
2647 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
2648 RT_NOREF(pVM, GCPhys, HCPhys, pvR3, fPageProt, enmType, pu2State);
2649}
2650
2651
2652VMM_INT_DECL(void) NEMHCNotifyPhysPageChanged(PVMCC pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
2653 RTR3PTR pvNewR3, uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
2654{
2655 Log5(("NEMHCNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
2656 GCPhys, HCPhysPrev, HCPhysNew, fPageProt, enmType, *pu2State));
2657 RT_NOREF(pVM, GCPhys, HCPhysPrev, HCPhysNew, pvNewR3, fPageProt, enmType, pu2State);
2658
2659 AssertFailed();
2660}
2661
2662
2663/**
2664 * Interface for importing state on demand (used by IEM).
2665 *
2666 * @returns VBox status code.
2667 * @param pVCpu The cross context CPU structure.
2668 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
2669 */
2670VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPUCC pVCpu, uint64_t fWhat)
2671{
2672 LogFlowFunc(("pVCpu=%p fWhat=%RX64\n", pVCpu, fWhat));
2673 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);
2674
2675 return nemR3DarwinCopyStateFromHv(pVCpu->pVMR3, pVCpu, fWhat);
2676}
2677
2678
2679/**
2680 * Query the CPU tick counter and optionally the TSC_AUX MSR value.
2681 *
2682 * @returns VBox status code.
2683 * @param pVCpu The cross context CPU structure.
2684 * @param pcTicks Where to return the CPU tick count.
2685 * @param puAux Where to return the TSC_AUX register value.
2686 */
2687VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPUCC pVCpu, uint64_t *pcTicks, uint32_t *puAux)
2688{
2689 LogFlowFunc(("pVCpu=%p pcTicks=%RX64 puAux=%RX32\n", pVCpu, pcTicks, puAux));
2690 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatQueryCpuTick);
2691
2692 if (puAux)
2693 *puAux = 0;
2694 *pcTicks = mach_absolute_time() - pVCpu->pVMR3->nem.s.u64VTimerOff; /* This is the host timer minus the offset. */
2695 return VINF_SUCCESS;
2696}
2697
2698
2699/**
2700 * Resumes CPU clock (TSC) on all virtual CPUs.
2701 *
2702 * This is called by TM when the VM is started, restored, resumed or similar.
2703 *
2704 * @returns VBox status code.
2705 * @param pVM The cross context VM structure.
2706 * @param pVCpu The cross context CPU structure of the calling EMT.
2707 * @param uPausedTscValue The TSC value at the time of pausing.
2708 */
2709VMM_INT_DECL(int) NEMHCResumeCpuTickOnAll(PVMCC pVM, PVMCPUCC pVCpu, uint64_t uPausedTscValue)
2710{
2711 LogFlowFunc(("pVM=%p pVCpu=%p uPausedTscValue=%RX64\n", pVM, pVCpu, uPausedTscValue));
2712 VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
2713 AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);
2714
2715 /*
2716 * Calculate the new offset: first get the current TSC value using the old vTimer offset, then adjust
2717 * the offset so the guest doesn't notice the pause.
2718 */
2719 uint64_t u64TscNew = mach_absolute_time() - pVCpu->pVMR3->nem.s.u64VTimerOff;
2720 Assert(u64TscNew >= uPausedTscValue);
2721 LogFlowFunc(("u64VTimerOffOld=%#RX64 u64TscNew=%#RX64 u64VTimerValuePaused=%#RX64 -> u64VTimerOff=%#RX64\n",
2722 pVM->nem.s.u64VTimerOff, u64TscNew, uPausedTscValue,
2723 pVM->nem.s.u64VTimerOff + (u64TscNew - uPausedTscValue)));
2724
2725 pVM->nem.s.u64VTimerOff += u64TscNew - uPausedTscValue;
2726
2727 /*
2728 * Set the flag to update the vTimer offset when the vCPU resumes for the first time
2729 * (needs to be done on the actual EMT).
2730 */
2731 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
2732 {
2733 PVMCPUCC pVCpuDst = pVM->apCpusR3[idCpu];
2734 pVCpuDst->nem.s.fVTimerOffUpdate = true;
2735 }
2736
2737 return VINF_SUCCESS;
2738}
2739
2740
2741/**
2742 * Returns features supported by the NEM backend.
2743 *
2744 * @returns Flags of features supported by the native NEM backend.
2745 * @param pVM The cross context VM structure.
2746 */
2747VMM_INT_DECL(uint32_t) NEMHCGetFeatures(PVMCC pVM)
2748{
2749 RT_NOREF(pVM);
2750 /*
2751 * Apple's Hypervisor.framework is not available unless the CPU supports nested paging
2752 * and unrestricted guest execution, so we can always safely return these flags here.
2753 */
2754 return NEM_FEAT_F_NESTED_PAGING | NEM_FEAT_F_FULL_GST_EXEC | NEM_FEAT_F_XSAVE_XRSTOR;
2755}
2756
2757
2758/** @page pg_nem_darwin NEM/darwin - Native Execution Manager, macOS.
2759 *
2760 * @todo Add notes as the implementation progresses...
2761 */
2762