VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/CPUM.cpp@54737

Last change on this file since 54737 was 54737, checked in by vboxsync, 10 years ago

VMM,REM: CPUID revamp - almost there now.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 89.7 KB
 
/* $Id: CPUM.cpp 54737 2015-03-12 21:02:21Z vboxsync $ */
/** @file
 * CPUM - CPU Monitor / Manager.
 */

/*
 * Copyright (C) 2006-2015 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.alldomusa.eu.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/** @page pg_cpum CPUM - CPU Monitor / Manager
 *
 * The CPU Monitor / Manager keeps track of all the CPU registers. It is
 * also responsible for lazy FPU handling and some of the context loading
 * in raw mode.
 *
 * There are three CPU contexts, the most important one is the guest one (GC).
 * When running in raw-mode (RC) there is a special hyper context for the VMM
 * part that floats around inside the guest address space. When running in
 * raw-mode, CPUM also maintains a host context for saving and restoring
 * registers across world switches. This latter is done in cooperation with the
 * world switcher (@see pg_vmm).
 *
 * @see grp_cpum
 */
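
/*
 * Illustrative sketch (not part of the original file): ring-3 code reaches the
 * guest context described above through CPUMQueryGuestCtxPtr(), which is also
 * used further down in this file (see CPUMR3Term).  The wrapper function and
 * its name are hypothetical; the block is disabled so the listing's behaviour
 * is unchanged.
 */
#if 0 /* example only */
static uint64_t exampleReadGuestRip(PVMCPU pVCpu)
{
    PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);   /* the guest (GC) context */
    return pCtx->rip;                              /* e.g. inspect the guest RIP */
}
#endif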

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_CPUM
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/cpumdis.h>
#include <VBox/vmm/cpumctx-v1_6.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/pdmapi.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/patm.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/ssm.h>
#include "CPUMInternal.h"
#include <VBox/vmm/vm.h>

#include <VBox/param.h>
#include <VBox/dis.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <iprt/asm-amd64-x86.h>
#include <iprt/assert.h>
#include <iprt/cpuset.h>
#include <iprt/mem.h>
#include <iprt/mp.h>
#include <iprt/string.h>
#include "internal/pgm.h"


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/**
 * This was used in the saved state up to the early life of version 14.
 *
 * It indicates that we may have some out-of-sync hidden segment registers.
 * It is only relevant for raw-mode.
 */
#define CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID    RT_BIT(12)
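
/*
 * Illustrative sketch (not from the original source): how a legacy saved-state
 * flag like the one above is consumed -- tested against CPUMCPU::fChanged and
 * later cleared, as cpumR3LoadExec() does further down in this file.  The
 * helper and its name are hypothetical; the block is disabled.
 */
#if 0 /* example only */
static bool exampleHiddenSelRegsInvalid(PVMCPU pVCpu)
{
    return RT_BOOL(pVCpu->cpum.s.fChanged & CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID);
}
#endif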


/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/

/**
 * What kind of cpu info dump to perform.
 */
typedef enum CPUMDUMPTYPE
{
    CPUMDUMPTYPE_TERSE,
    CPUMDUMPTYPE_DEFAULT,
    CPUMDUMPTYPE_VERBOSE
} CPUMDUMPTYPE;
/** Pointer to a cpu info dump type. */
typedef CPUMDUMPTYPE *PCPUMDUMPTYPE;


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static DECLCALLBACK(int)  cpumR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass);
static DECLCALLBACK(int)  cpumR3SaveExec(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int)  cpumR3LoadPrep(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int)  cpumR3LoadExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
static DECLCALLBACK(int)  cpumR3LoadDone(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(void) cpumR3InfoAll(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
static DECLCALLBACK(void) cpumR3InfoGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
static DECLCALLBACK(void) cpumR3InfoGuestInstr(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
static DECLCALLBACK(void) cpumR3InfoHyper(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
static DECLCALLBACK(void) cpumR3InfoHost(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/** Saved state field descriptors for CPUMCTX. */
static const SSMFIELD g_aCpumCtxFields[] =
{
    SSMFIELD_ENTRY( CPUMCTX, fpu.FCW),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FSW),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FTW),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FOP),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FPUIP),
    SSMFIELD_ENTRY( CPUMCTX, fpu.CS),
    SSMFIELD_ENTRY( CPUMCTX, fpu.Rsrvd1),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FPUDP),
    SSMFIELD_ENTRY( CPUMCTX, fpu.DS),
    SSMFIELD_ENTRY( CPUMCTX, fpu.Rsrvd2),
    SSMFIELD_ENTRY( CPUMCTX, fpu.MXCSR),
    SSMFIELD_ENTRY( CPUMCTX, fpu.MXCSR_MASK),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[0]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[1]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[2]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[3]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[4]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[5]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[6]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[7]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[0]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[1]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[2]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[3]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[4]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[5]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[6]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[7]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[8]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[9]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[10]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[11]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[12]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[13]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[14]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[15]),
    SSMFIELD_ENTRY( CPUMCTX, rdi),
    SSMFIELD_ENTRY( CPUMCTX, rsi),
    SSMFIELD_ENTRY( CPUMCTX, rbp),
    SSMFIELD_ENTRY( CPUMCTX, rax),
    SSMFIELD_ENTRY( CPUMCTX, rbx),
    SSMFIELD_ENTRY( CPUMCTX, rdx),
    SSMFIELD_ENTRY( CPUMCTX, rcx),
    SSMFIELD_ENTRY( CPUMCTX, rsp),
    SSMFIELD_ENTRY( CPUMCTX, rflags),
    SSMFIELD_ENTRY( CPUMCTX, rip),
    SSMFIELD_ENTRY( CPUMCTX, r8),
    SSMFIELD_ENTRY( CPUMCTX, r9),
    SSMFIELD_ENTRY( CPUMCTX, r10),
    SSMFIELD_ENTRY( CPUMCTX, r11),
    SSMFIELD_ENTRY( CPUMCTX, r12),
    SSMFIELD_ENTRY( CPUMCTX, r13),
    SSMFIELD_ENTRY( CPUMCTX, r14),
    SSMFIELD_ENTRY( CPUMCTX, r15),
    SSMFIELD_ENTRY( CPUMCTX, es.Sel),
    SSMFIELD_ENTRY( CPUMCTX, es.ValidSel),
    SSMFIELD_ENTRY( CPUMCTX, es.fFlags),
    SSMFIELD_ENTRY( CPUMCTX, es.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, es.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, es.Attr),
    SSMFIELD_ENTRY( CPUMCTX, cs.Sel),
    SSMFIELD_ENTRY( CPUMCTX, cs.ValidSel),
    SSMFIELD_ENTRY( CPUMCTX, cs.fFlags),
    SSMFIELD_ENTRY( CPUMCTX, cs.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, cs.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, cs.Attr),
    SSMFIELD_ENTRY( CPUMCTX, ss.Sel),
    SSMFIELD_ENTRY( CPUMCTX, ss.ValidSel),
    SSMFIELD_ENTRY( CPUMCTX, ss.fFlags),
    SSMFIELD_ENTRY( CPUMCTX, ss.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, ss.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, ss.Attr),
    SSMFIELD_ENTRY( CPUMCTX, ds.Sel),
    SSMFIELD_ENTRY( CPUMCTX, ds.ValidSel),
    SSMFIELD_ENTRY( CPUMCTX, ds.fFlags),
    SSMFIELD_ENTRY( CPUMCTX, ds.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, ds.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, ds.Attr),
    SSMFIELD_ENTRY( CPUMCTX, fs.Sel),
    SSMFIELD_ENTRY( CPUMCTX, fs.ValidSel),
    SSMFIELD_ENTRY( CPUMCTX, fs.fFlags),
    SSMFIELD_ENTRY( CPUMCTX, fs.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, fs.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, fs.Attr),
    SSMFIELD_ENTRY( CPUMCTX, gs.Sel),
    SSMFIELD_ENTRY( CPUMCTX, gs.ValidSel),
    SSMFIELD_ENTRY( CPUMCTX, gs.fFlags),
    SSMFIELD_ENTRY( CPUMCTX, gs.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, gs.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, gs.Attr),
    SSMFIELD_ENTRY( CPUMCTX, cr0),
    SSMFIELD_ENTRY( CPUMCTX, cr2),
    SSMFIELD_ENTRY( CPUMCTX, cr3),
    SSMFIELD_ENTRY( CPUMCTX, cr4),
    SSMFIELD_ENTRY( CPUMCTX, dr[0]),
    SSMFIELD_ENTRY( CPUMCTX, dr[1]),
    SSMFIELD_ENTRY( CPUMCTX, dr[2]),
    SSMFIELD_ENTRY( CPUMCTX, dr[3]),
    SSMFIELD_ENTRY( CPUMCTX, dr[6]),
    SSMFIELD_ENTRY( CPUMCTX, dr[7]),
    SSMFIELD_ENTRY( CPUMCTX, gdtr.cbGdt),
    SSMFIELD_ENTRY( CPUMCTX, gdtr.pGdt),
    SSMFIELD_ENTRY( CPUMCTX, idtr.cbIdt),
    SSMFIELD_ENTRY( CPUMCTX, idtr.pIdt),
    SSMFIELD_ENTRY( CPUMCTX, SysEnter.cs),
    SSMFIELD_ENTRY( CPUMCTX, SysEnter.eip),
    SSMFIELD_ENTRY( CPUMCTX, SysEnter.esp),
    SSMFIELD_ENTRY( CPUMCTX, msrEFER),
    SSMFIELD_ENTRY( CPUMCTX, msrSTAR),
    SSMFIELD_ENTRY( CPUMCTX, msrPAT),
    SSMFIELD_ENTRY( CPUMCTX, msrLSTAR),
    SSMFIELD_ENTRY( CPUMCTX, msrCSTAR),
    SSMFIELD_ENTRY( CPUMCTX, msrSFMASK),
    SSMFIELD_ENTRY( CPUMCTX, msrKERNELGSBASE),
    /* msrApicBase is not included here, it resides in the APIC device state. */
    SSMFIELD_ENTRY( CPUMCTX, ldtr.Sel),
    SSMFIELD_ENTRY( CPUMCTX, ldtr.ValidSel),
    SSMFIELD_ENTRY( CPUMCTX, ldtr.fFlags),
    SSMFIELD_ENTRY( CPUMCTX, ldtr.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, ldtr.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, ldtr.Attr),
    SSMFIELD_ENTRY( CPUMCTX, tr.Sel),
    SSMFIELD_ENTRY( CPUMCTX, tr.ValidSel),
    SSMFIELD_ENTRY( CPUMCTX, tr.fFlags),
    SSMFIELD_ENTRY( CPUMCTX, tr.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, tr.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, tr.Attr),
    SSMFIELD_ENTRY_TERM()
};

/** Saved state field descriptors for CPUMCTX in V4.1 before the hidden selector
 *  registers changed. */
static const SSMFIELD g_aCpumCtxFieldsMem[] =
{
    SSMFIELD_ENTRY( CPUMCTX, fpu.FCW),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FSW),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FTW),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FOP),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FPUIP),
    SSMFIELD_ENTRY( CPUMCTX, fpu.CS),
    SSMFIELD_ENTRY( CPUMCTX, fpu.Rsrvd1),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FPUDP),
    SSMFIELD_ENTRY( CPUMCTX, fpu.DS),
    SSMFIELD_ENTRY( CPUMCTX, fpu.Rsrvd2),
    SSMFIELD_ENTRY( CPUMCTX, fpu.MXCSR),
    SSMFIELD_ENTRY( CPUMCTX, fpu.MXCSR_MASK),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[0]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[1]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[2]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[3]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[4]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[5]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[6]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[7]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[0]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[1]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[2]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[3]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[4]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[5]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[6]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[7]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[8]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[9]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[10]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[11]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[12]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[13]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[14]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[15]),
    SSMFIELD_ENTRY_IGNORE( CPUMCTX, fpu.au32RsrvdRest),
    SSMFIELD_ENTRY( CPUMCTX, rdi),
    SSMFIELD_ENTRY( CPUMCTX, rsi),
    SSMFIELD_ENTRY( CPUMCTX, rbp),
    SSMFIELD_ENTRY( CPUMCTX, rax),
    SSMFIELD_ENTRY( CPUMCTX, rbx),
    SSMFIELD_ENTRY( CPUMCTX, rdx),
    SSMFIELD_ENTRY( CPUMCTX, rcx),
    SSMFIELD_ENTRY( CPUMCTX, rsp),
    SSMFIELD_ENTRY_OLD( lss_esp, sizeof(uint32_t)),
    SSMFIELD_ENTRY( CPUMCTX, ss.Sel),
    SSMFIELD_ENTRY_OLD( ssPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, gs.Sel),
    SSMFIELD_ENTRY_OLD( gsPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, fs.Sel),
    SSMFIELD_ENTRY_OLD( fsPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, es.Sel),
    SSMFIELD_ENTRY_OLD( esPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, ds.Sel),
    SSMFIELD_ENTRY_OLD( dsPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, cs.Sel),
    SSMFIELD_ENTRY_OLD( csPadding, sizeof(uint16_t)*3),
    SSMFIELD_ENTRY( CPUMCTX, rflags),
    SSMFIELD_ENTRY( CPUMCTX, rip),
    SSMFIELD_ENTRY( CPUMCTX, r8),
    SSMFIELD_ENTRY( CPUMCTX, r9),
    SSMFIELD_ENTRY( CPUMCTX, r10),
    SSMFIELD_ENTRY( CPUMCTX, r11),
    SSMFIELD_ENTRY( CPUMCTX, r12),
    SSMFIELD_ENTRY( CPUMCTX, r13),
    SSMFIELD_ENTRY( CPUMCTX, r14),
    SSMFIELD_ENTRY( CPUMCTX, r15),
    SSMFIELD_ENTRY( CPUMCTX, es.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, es.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, es.Attr),
    SSMFIELD_ENTRY( CPUMCTX, cs.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, cs.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, cs.Attr),
    SSMFIELD_ENTRY( CPUMCTX, ss.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, ss.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, ss.Attr),
    SSMFIELD_ENTRY( CPUMCTX, ds.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, ds.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, ds.Attr),
    SSMFIELD_ENTRY( CPUMCTX, fs.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, fs.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, fs.Attr),
    SSMFIELD_ENTRY( CPUMCTX, gs.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, gs.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, gs.Attr),
    SSMFIELD_ENTRY( CPUMCTX, cr0),
    SSMFIELD_ENTRY( CPUMCTX, cr2),
    SSMFIELD_ENTRY( CPUMCTX, cr3),
    SSMFIELD_ENTRY( CPUMCTX, cr4),
    SSMFIELD_ENTRY( CPUMCTX, dr[0]),
    SSMFIELD_ENTRY( CPUMCTX, dr[1]),
    SSMFIELD_ENTRY( CPUMCTX, dr[2]),
    SSMFIELD_ENTRY( CPUMCTX, dr[3]),
    SSMFIELD_ENTRY_OLD( dr[4], sizeof(uint64_t)),
    SSMFIELD_ENTRY_OLD( dr[5], sizeof(uint64_t)),
    SSMFIELD_ENTRY( CPUMCTX, dr[6]),
    SSMFIELD_ENTRY( CPUMCTX, dr[7]),
    SSMFIELD_ENTRY( CPUMCTX, gdtr.cbGdt),
    SSMFIELD_ENTRY( CPUMCTX, gdtr.pGdt),
    SSMFIELD_ENTRY_OLD( gdtrPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, idtr.cbIdt),
    SSMFIELD_ENTRY( CPUMCTX, idtr.pIdt),
    SSMFIELD_ENTRY_OLD( idtrPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, ldtr.Sel),
    SSMFIELD_ENTRY_OLD( ldtrPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, tr.Sel),
    SSMFIELD_ENTRY_OLD( trPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, SysEnter.cs),
    SSMFIELD_ENTRY( CPUMCTX, SysEnter.eip),
    SSMFIELD_ENTRY( CPUMCTX, SysEnter.esp),
    SSMFIELD_ENTRY( CPUMCTX, msrEFER),
    SSMFIELD_ENTRY( CPUMCTX, msrSTAR),
    SSMFIELD_ENTRY( CPUMCTX, msrPAT),
    SSMFIELD_ENTRY( CPUMCTX, msrLSTAR),
    SSMFIELD_ENTRY( CPUMCTX, msrCSTAR),
    SSMFIELD_ENTRY( CPUMCTX, msrSFMASK),
    SSMFIELD_ENTRY( CPUMCTX, msrKERNELGSBASE),
    SSMFIELD_ENTRY( CPUMCTX, ldtr.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, ldtr.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, ldtr.Attr),
    SSMFIELD_ENTRY( CPUMCTX, tr.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, tr.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, tr.Attr),
    SSMFIELD_ENTRY_TERM()
};

/** Saved state field descriptors for CPUMCTX_VER1_6. */
static const SSMFIELD g_aCpumCtxFieldsV16[] =
{
    SSMFIELD_ENTRY( CPUMCTX, fpu.FCW),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FSW),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FTW),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FOP),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FPUIP),
    SSMFIELD_ENTRY( CPUMCTX, fpu.CS),
    SSMFIELD_ENTRY( CPUMCTX, fpu.Rsrvd1),
    SSMFIELD_ENTRY( CPUMCTX, fpu.FPUDP),
    SSMFIELD_ENTRY( CPUMCTX, fpu.DS),
    SSMFIELD_ENTRY( CPUMCTX, fpu.Rsrvd2),
    SSMFIELD_ENTRY( CPUMCTX, fpu.MXCSR),
    SSMFIELD_ENTRY( CPUMCTX, fpu.MXCSR_MASK),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[0]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[1]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[2]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[3]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[4]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[5]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[6]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aRegs[7]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[0]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[1]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[2]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[3]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[4]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[5]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[6]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[7]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[8]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[9]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[10]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[11]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[12]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[13]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[14]),
    SSMFIELD_ENTRY( CPUMCTX, fpu.aXMM[15]),
    SSMFIELD_ENTRY_IGNORE( CPUMCTX, fpu.au32RsrvdRest),
    SSMFIELD_ENTRY( CPUMCTX, rdi),
    SSMFIELD_ENTRY( CPUMCTX, rsi),
    SSMFIELD_ENTRY( CPUMCTX, rbp),
    SSMFIELD_ENTRY( CPUMCTX, rax),
    SSMFIELD_ENTRY( CPUMCTX, rbx),
    SSMFIELD_ENTRY( CPUMCTX, rdx),
    SSMFIELD_ENTRY( CPUMCTX, rcx),
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, rsp),
    SSMFIELD_ENTRY( CPUMCTX, ss.Sel),
    SSMFIELD_ENTRY_OLD( ssPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY_OLD( CPUMCTX, sizeof(uint64_t) /*rsp_notused*/),
    SSMFIELD_ENTRY( CPUMCTX, gs.Sel),
    SSMFIELD_ENTRY_OLD( gsPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, fs.Sel),
    SSMFIELD_ENTRY_OLD( fsPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, es.Sel),
    SSMFIELD_ENTRY_OLD( esPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, ds.Sel),
    SSMFIELD_ENTRY_OLD( dsPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, cs.Sel),
    SSMFIELD_ENTRY_OLD( csPadding, sizeof(uint16_t)*3),
    SSMFIELD_ENTRY( CPUMCTX, rflags),
    SSMFIELD_ENTRY( CPUMCTX, rip),
    SSMFIELD_ENTRY( CPUMCTX, r8),
    SSMFIELD_ENTRY( CPUMCTX, r9),
    SSMFIELD_ENTRY( CPUMCTX, r10),
    SSMFIELD_ENTRY( CPUMCTX, r11),
    SSMFIELD_ENTRY( CPUMCTX, r12),
    SSMFIELD_ENTRY( CPUMCTX, r13),
    SSMFIELD_ENTRY( CPUMCTX, r14),
    SSMFIELD_ENTRY( CPUMCTX, r15),
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, es.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, es.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, es.Attr),
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, cs.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, cs.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, cs.Attr),
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, ss.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, ss.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, ss.Attr),
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, ds.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, ds.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, ds.Attr),
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, fs.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, fs.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, fs.Attr),
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, gs.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, gs.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, gs.Attr),
    SSMFIELD_ENTRY( CPUMCTX, cr0),
    SSMFIELD_ENTRY( CPUMCTX, cr2),
    SSMFIELD_ENTRY( CPUMCTX, cr3),
    SSMFIELD_ENTRY( CPUMCTX, cr4),
    SSMFIELD_ENTRY_OLD( cr8, sizeof(uint64_t)),
    SSMFIELD_ENTRY( CPUMCTX, dr[0]),
    SSMFIELD_ENTRY( CPUMCTX, dr[1]),
    SSMFIELD_ENTRY( CPUMCTX, dr[2]),
    SSMFIELD_ENTRY( CPUMCTX, dr[3]),
    SSMFIELD_ENTRY_OLD( dr[4], sizeof(uint64_t)),
    SSMFIELD_ENTRY_OLD( dr[5], sizeof(uint64_t)),
    SSMFIELD_ENTRY( CPUMCTX, dr[6]),
    SSMFIELD_ENTRY( CPUMCTX, dr[7]),
    SSMFIELD_ENTRY( CPUMCTX, gdtr.cbGdt),
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, gdtr.pGdt),
    SSMFIELD_ENTRY_OLD( gdtrPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY_OLD( gdtrPadding64, sizeof(uint64_t)),
    SSMFIELD_ENTRY( CPUMCTX, idtr.cbIdt),
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, idtr.pIdt),
    SSMFIELD_ENTRY_OLD( idtrPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY_OLD( idtrPadding64, sizeof(uint64_t)),
    SSMFIELD_ENTRY( CPUMCTX, ldtr.Sel),
    SSMFIELD_ENTRY_OLD( ldtrPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, tr.Sel),
    SSMFIELD_ENTRY_OLD( trPadding, sizeof(uint16_t)),
    SSMFIELD_ENTRY( CPUMCTX, SysEnter.cs),
    SSMFIELD_ENTRY( CPUMCTX, SysEnter.eip),
    SSMFIELD_ENTRY( CPUMCTX, SysEnter.esp),
    SSMFIELD_ENTRY( CPUMCTX, msrEFER),
    SSMFIELD_ENTRY( CPUMCTX, msrSTAR),
    SSMFIELD_ENTRY( CPUMCTX, msrPAT),
    SSMFIELD_ENTRY( CPUMCTX, msrLSTAR),
    SSMFIELD_ENTRY( CPUMCTX, msrCSTAR),
    SSMFIELD_ENTRY( CPUMCTX, msrSFMASK),
    SSMFIELD_ENTRY_OLD( msrFSBASE, sizeof(uint64_t)),
    SSMFIELD_ENTRY_OLD( msrGSBASE, sizeof(uint64_t)),
    SSMFIELD_ENTRY( CPUMCTX, msrKERNELGSBASE),
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, ldtr.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, ldtr.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, ldtr.Attr),
    SSMFIELD_ENTRY_U32_ZX_U64( CPUMCTX, tr.u64Base),
    SSMFIELD_ENTRY( CPUMCTX, tr.u32Limit),
    SSMFIELD_ENTRY( CPUMCTX, tr.Attr),
    SSMFIELD_ENTRY_OLD( padding, sizeof(uint32_t)*2),
    SSMFIELD_ENTRY_TERM()
};
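
/*
 * Illustrative sketch (not from the original source): how a field descriptor
 * table like the three above pairs with SSMR3PutStructEx/SSMR3GetStructEx,
 * which this file uses in cpumR3SaveExec/cpumR3LoadExec.  EXAMPLESTATE,
 * g_aExampleFields and exampleSave are hypothetical; the block is disabled.
 */
#if 0 /* example only */
typedef struct EXAMPLESTATE
{
    uint32_t u32Version;
    uint64_t u64Counter;
} EXAMPLESTATE;

static const SSMFIELD g_aExampleFields[] =
{
    SSMFIELD_ENTRY( EXAMPLESTATE, u32Version),
    SSMFIELD_ENTRY_OLD( u32Legacy, sizeof(uint32_t)),  /* legacy field no longer in the struct */
    SSMFIELD_ENTRY( EXAMPLESTATE, u64Counter),
    SSMFIELD_ENTRY_TERM()
};

static int exampleSave(PSSMHANDLE pSSM, EXAMPLESTATE *pState)
{
    /* Loading would use SSMR3GetStructEx with the same field table. */
    return SSMR3PutStructEx(pSSM, pState, sizeof(*pState), 0 /*fFlags*/, &g_aExampleFields[0], NULL /*pvUser*/);
}
#endif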


/**
 * Checks for partial/leaky FXSAVE/FXRSTOR handling on AMD CPUs.
 *
 * AMD K7, K8 and newer AMD CPUs do not save/restore the x87 error
 * pointers (last instruction pointer, last data pointer, last opcode)
 * except when the ES bit (Exception Summary) in x87 FSW (FPU Status
 * Word) is set.  Thus, if we don't clear these registers, there is a
 * potential for leaking FPU state from one process using the FPU to
 * another.
 *
 * See AMD Instruction Reference for FXSAVE, FXRSTOR.
 *
 * @param   pVM     Pointer to the VM.
 */
static void cpumR3CheckLeakyFpu(PVM pVM)
{
    uint32_t u32CpuVersion = ASMCpuId_EAX(1);
    uint32_t const u32Family = u32CpuVersion >> 8;
    if (   u32Family >= 6      /* K7 and higher */
        && ASMIsAmdCpu())
    {
        uint32_t cExt = ASMCpuId_EAX(0x80000000);
        if (ASMIsValidExtRange(cExt))
        {
            uint32_t fExtFeaturesEDX = ASMCpuId_EDX(0x80000001);
            if (fExtFeaturesEDX & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
            {
                for (VMCPUID i = 0; i < pVM->cCpus; i++)
                    pVM->aCpus[i].cpum.s.fUseFlags |= CPUM_USE_FFXSR_LEAKY;
                Log(("CPUMR3Init: host CPU has leaky fxsave/fxrstor behaviour\n"));
            }
        }
    }
}
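
/*
 * Illustrative sketch (hypothetical, not the actual ring-0 consumer): code
 * honouring CPUM_USE_FFXSR_LEAKY would scrub the stale x87 error pointers so
 * they cannot leak between contexts on the affected AMD CPUs.  The helper and
 * its name are assumptions; the block is disabled.
 */
#if 0 /* example only */
static void exampleScrubX87Pointers(PVMCPU pVCpu)
{
    if (pVCpu->cpum.s.fUseFlags & CPUM_USE_FFXSR_LEAKY)
    {
        pVCpu->cpum.s.Guest.fpu.FPUIP = 0;  /* last instruction pointer */
        pVCpu->cpum.s.Guest.fpu.FPUDP = 0;  /* last data pointer */
        pVCpu->cpum.s.Guest.fpu.FOP   = 0;  /* last opcode */
    }
}
#endif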


/**
 * Initializes the CPUM.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 */
VMMR3DECL(int) CPUMR3Init(PVM pVM)
{
    LogFlow(("CPUMR3Init\n"));

    /*
     * Assert alignment, sizes and tables.
     */
    AssertCompileMemberAlignment(VM, cpum.s, 32);
    AssertCompile(sizeof(pVM->cpum.s) <= sizeof(pVM->cpum.padding));
    AssertCompileSizeAlignment(CPUMCTX, 64);
    AssertCompileSizeAlignment(CPUMCTXMSRS, 64);
    AssertCompileSizeAlignment(CPUMHOSTCTX, 64);
    AssertCompileMemberAlignment(VM, cpum, 64);
    AssertCompileMemberAlignment(VM, aCpus, 64);
    AssertCompileMemberAlignment(VMCPU, cpum.s, 64);
    AssertCompileMemberSizeAlignment(VM, aCpus[0].cpum.s, 64);
#ifdef VBOX_STRICT
    int rc2 = cpumR3MsrStrictInitChecks();
    AssertRCReturn(rc2, rc2);
#endif

    /* Calculate the offset from CPUM to CPUMCPU for the first CPU. */
    pVM->cpum.s.offCPUMCPU0 = RT_OFFSETOF(VM, aCpus[0].cpum) - RT_OFFSETOF(VM, cpum);
    Assert((uintptr_t)&pVM->cpum + pVM->cpum.s.offCPUMCPU0 == (uintptr_t)&pVM->aCpus[0].cpum);


    /* Calculate the offset from CPUMCPU to CPUM. */
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        pVCpu->cpum.s.offCPUM = RT_OFFSETOF(VM, aCpus[i].cpum) - RT_OFFSETOF(VM, cpum);
        Assert((uintptr_t)&pVCpu->cpum - pVCpu->cpum.s.offCPUM == (uintptr_t)&pVM->cpum);
    }

    /*
     * Check that the CPU supports the minimum features we require.
     */
    if (!ASMHasCpuId())
    {
        Log(("The CPU doesn't support CPUID!\n"));
        return VERR_UNSUPPORTED_CPU;
    }
    ASMCpuId_ECX_EDX(1, &pVM->cpum.s.CPUFeatures.ecx, &pVM->cpum.s.CPUFeatures.edx);
    ASMCpuId_ECX_EDX(0x80000001, &pVM->cpum.s.CPUFeaturesExt.ecx, &pVM->cpum.s.CPUFeaturesExt.edx);

    /* Setup the CR4 AND and OR masks used in the switcher. */
    /* Depends on the presence of FXSAVE(SSE) support on the host CPU. */
    if (!pVM->cpum.s.CPUFeatures.edx.u1FXSR)
    {
        Log(("The CPU doesn't support FXSAVE/FXRSTOR!\n"));
        /* No FXSAVE implies no SSE */
        pVM->cpum.s.CR4.AndMask = X86_CR4_PVI | X86_CR4_VME;
        pVM->cpum.s.CR4.OrMask  = 0;
    }
    else
    {
        pVM->cpum.s.CR4.AndMask = X86_CR4_OSXMMEEXCPT | X86_CR4_PVI | X86_CR4_VME;
        pVM->cpum.s.CR4.OrMask  = X86_CR4_OSFSXR;
    }

    if (!pVM->cpum.s.CPUFeatures.edx.u1MMX)
    {
        Log(("The CPU doesn't support MMX!\n"));
        return VERR_UNSUPPORTED_CPU;
    }
    if (!pVM->cpum.s.CPUFeatures.edx.u1TSC)
    {
        Log(("The CPU doesn't support TSC!\n"));
        return VERR_UNSUPPORTED_CPU;
    }
    /* Bogus on AMD? */
    if (!pVM->cpum.s.CPUFeatures.edx.u1SEP)
        Log(("The CPU doesn't support SYSENTER/SYSEXIT!\n"));

    /*
     * Gather info about the host CPU.
     */
    PCPUMCPUIDLEAF paLeaves;
    uint32_t cLeaves;
    int rc = CPUMR3CpuIdCollectLeaves(&paLeaves, &cLeaves);
    AssertLogRelRCReturn(rc, rc);

    rc = cpumR3CpuIdExplodeFeatures(paLeaves, cLeaves, &pVM->cpum.s.HostFeatures);
    RTMemFree(paLeaves);
    AssertLogRelRCReturn(rc, rc);
    pVM->cpum.s.GuestFeatures.enmCpuVendor = pVM->cpum.s.HostFeatures.enmCpuVendor;

    /*
     * Setup hypervisor startup values.
     */

    /*
     * Register saved state data item.
     */
    rc = SSMR3RegisterInternal(pVM, "cpum", 1, CPUM_SAVED_STATE_VERSION, sizeof(CPUM),
                               NULL, cpumR3LiveExec, NULL,
                               NULL, cpumR3SaveExec, NULL,
                               cpumR3LoadPrep, cpumR3LoadExec, cpumR3LoadDone);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Register info handlers and registers with the debugger facility.
     */
    DBGFR3InfoRegisterInternal(pVM, "cpum",           "Displays all the cpu states.",            &cpumR3InfoAll);
    DBGFR3InfoRegisterInternal(pVM, "cpumguest",      "Displays the guest cpu state.",           &cpumR3InfoGuest);
    DBGFR3InfoRegisterInternal(pVM, "cpumhyper",      "Displays the hypervisor cpu state.",      &cpumR3InfoHyper);
    DBGFR3InfoRegisterInternal(pVM, "cpumhost",       "Displays the host cpu state.",            &cpumR3InfoHost);
    DBGFR3InfoRegisterInternal(pVM, "cpuid",          "Displays the guest cpuid leaves.",        &cpumR3CpuIdInfo);
    DBGFR3InfoRegisterInternal(pVM, "cpumguestinstr", "Displays the current guest instruction.", &cpumR3InfoGuestInstr);

    rc = cpumR3DbgInit(pVM);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Check if we need to work around partial/leaky FPU handling.
     */
    cpumR3CheckLeakyFpu(pVM);

    /*
     * Initialize the Guest CPUID and MSR states.
     */
    rc = cpumR3InitCpuIdAndMsrs(pVM);
    if (RT_FAILURE(rc))
        return rc;
    CPUMR3Reset(pVM);
    return VINF_SUCCESS;
}


/**
 * Applies relocations to data and code managed by this
 * component.  This function will be called at init and
 * whenever the VMM needs to relocate itself inside the GC.
 *
 * The CPUM will update the addresses used by the switcher.
 *
 * @param   pVM     The VM.
 */
VMMR3DECL(void) CPUMR3Relocate(PVM pVM)
{
    LogFlow(("CPUMR3Relocate\n"));

    pVM->cpum.s.GuestInfo.paMsrRangesRC   = MMHyperR3ToRC(pVM, pVM->cpum.s.GuestInfo.paMsrRangesR3);
    pVM->cpum.s.GuestInfo.paCpuIdLeavesRC = MMHyperR3ToRC(pVM, pVM->cpum.s.GuestInfo.paCpuIdLeavesR3);

    /* Recheck the guest DRx values in raw-mode. */
    for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
        CPUMRecalcHyperDRx(&pVM->aCpus[iCpu], UINT8_MAX, false);
}


/**
 * Apply late CPUM property changes based on the fHWVirtEx setting.
 *
 * @param   pVM                 Pointer to the VM.
 * @param   fHWVirtExEnabled    HWVirtEx enabled/disabled
 */
VMMR3DECL(void) CPUMR3SetHWVirtEx(PVM pVM, bool fHWVirtExEnabled)
{
    /*
     * Workaround for missing cpuid(0) patches when leaf 4 returns GuestInfo.DefCpuId:
     * If we fail to patch cpuid(0).eax, then Linux tries to determine the number
     * of processors from (cpuid(4).eax >> 26) + 1.
     *
     * Note: this code is obsolete, but let's keep it here for reference.
     *       Purpose is valid when we artificially cap the max std id to less than 4.
     */
    if (!fHWVirtExEnabled)
    {
        Assert(   pVM->cpum.s.aGuestCpuIdPatmStd[4].uEax == 0
               || pVM->cpum.s.aGuestCpuIdPatmStd[0].uEax < 0x4);
        pVM->cpum.s.aGuestCpuIdPatmStd[4].uEax = 0;
    }
}
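
/*
 * Worked example of the Linux heuristic mentioned above (illustrative value):
 * if a stale host leaf 4 leaked through with EAX = 0x1c004143, the guest would
 * compute (0x1c004143 >> 26) + 1 = 7 + 1 = 8 logical processors, regardless of
 * how many VCPUs the VM actually has.  Zeroing aGuestCpuIdPatmStd[4].uEax
 * avoids that.
 */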

/**
 * Terminates the CPUM.
 *
 * Termination means cleaning up and freeing all resources,
 * the VM itself is at this point powered off or suspended.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 */
VMMR3DECL(int) CPUMR3Term(PVM pVM)
{
#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU   pVCpu = &pVM->aCpus[i];
        PCPUMCTX pCtx  = CPUMQueryGuestCtxPtr(pVCpu);

        memset(pVCpu->cpum.s.aMagic, 0, sizeof(pVCpu->cpum.s.aMagic));
        pVCpu->cpum.s.uMagic = 0;
        pCtx->dr[5] = 0;
    }
#else
    NOREF(pVM);
#endif
    return VINF_SUCCESS;
}


/**
 * Resets a virtual CPU.
 *
 * Used by CPUMR3Reset and CPU hot plugging.
 *
 * @param   pVM     Pointer to the cross context VM structure.
 * @param   pVCpu   Pointer to the cross context virtual CPU structure of
 *                  the CPU that is being reset.  This may differ from the
 *                  current EMT.
 */
VMMR3DECL(void) CPUMR3ResetCpu(PVM pVM, PVMCPU pVCpu)
{
    /** @todo anything different for VCPU > 0? */
    PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;

    /*
     * Initialize everything to ZERO first.
     */
    uint32_t fUseFlags = pVCpu->cpum.s.fUseFlags & ~CPUM_USED_FPU_SINCE_REM;
    memset(pCtx, 0, sizeof(*pCtx));
    pVCpu->cpum.s.fUseFlags = fUseFlags;

    pCtx->cr0 = X86_CR0_CD | X86_CR0_NW | X86_CR0_ET;  //0x60000010
    pCtx->eip = 0x0000fff0;
    pCtx->edx = 0x00000600;  /* P6 processor */
    pCtx->eflags.Bits.u1Reserved0 = 1;

    pCtx->cs.Sel      = 0xf000;
    pCtx->cs.ValidSel = 0xf000;
    pCtx->cs.fFlags   = CPUMSELREG_FLAGS_VALID;
    pCtx->cs.u64Base  = UINT64_C(0xffff0000);
    pCtx->cs.u32Limit = 0x0000ffff;
    pCtx->cs.Attr.n.u1DescType = 1; /* code/data segment */
    pCtx->cs.Attr.n.u1Present  = 1;
    pCtx->cs.Attr.n.u4Type     = X86_SEL_TYPE_ER_ACC;

    pCtx->ds.fFlags   = CPUMSELREG_FLAGS_VALID;
    pCtx->ds.u32Limit = 0x0000ffff;
    pCtx->ds.Attr.n.u1DescType = 1; /* code/data segment */
    pCtx->ds.Attr.n.u1Present  = 1;
    pCtx->ds.Attr.n.u4Type     = X86_SEL_TYPE_RW_ACC;

    pCtx->es.fFlags   = CPUMSELREG_FLAGS_VALID;
    pCtx->es.u32Limit = 0x0000ffff;
    pCtx->es.Attr.n.u1DescType = 1; /* code/data segment */
    pCtx->es.Attr.n.u1Present  = 1;
    pCtx->es.Attr.n.u4Type     = X86_SEL_TYPE_RW_ACC;

    pCtx->fs.fFlags   = CPUMSELREG_FLAGS_VALID;
    pCtx->fs.u32Limit = 0x0000ffff;
    pCtx->fs.Attr.n.u1DescType = 1; /* code/data segment */
    pCtx->fs.Attr.n.u1Present  = 1;
    pCtx->fs.Attr.n.u4Type     = X86_SEL_TYPE_RW_ACC;

    pCtx->gs.fFlags   = CPUMSELREG_FLAGS_VALID;
    pCtx->gs.u32Limit = 0x0000ffff;
    pCtx->gs.Attr.n.u1DescType = 1; /* code/data segment */
    pCtx->gs.Attr.n.u1Present  = 1;
    pCtx->gs.Attr.n.u4Type     = X86_SEL_TYPE_RW_ACC;

    pCtx->ss.fFlags   = CPUMSELREG_FLAGS_VALID;
    pCtx->ss.u32Limit = 0x0000ffff;
    pCtx->ss.Attr.n.u1Present  = 1;
    pCtx->ss.Attr.n.u1DescType = 1; /* code/data segment */
    pCtx->ss.Attr.n.u4Type     = X86_SEL_TYPE_RW_ACC;

    pCtx->idtr.cbIdt = 0xffff;
    pCtx->gdtr.cbGdt = 0xffff;

    pCtx->ldtr.fFlags   = CPUMSELREG_FLAGS_VALID;
    pCtx->ldtr.u32Limit = 0xffff;
    pCtx->ldtr.Attr.n.u1Present = 1;
    pCtx->ldtr.Attr.n.u4Type    = X86_SEL_TYPE_SYS_LDT;

    pCtx->tr.fFlags   = CPUMSELREG_FLAGS_VALID;
    pCtx->tr.u32Limit = 0xffff;
    pCtx->tr.Attr.n.u1Present = 1;
    pCtx->tr.Attr.n.u4Type    = X86_SEL_TYPE_SYS_386_TSS_BUSY; /* Deduction, not properly documented by Intel. */

    pCtx->dr[6] = X86_DR6_INIT_VAL;
    pCtx->dr[7] = X86_DR7_INIT_VAL;

    pCtx->fpu.FTW = 0x00; /* All empty (abridged tag reg edition). */
    pCtx->fpu.FCW = 0x37f;

    /* Intel 64 and IA-32 Architectures Software Developer's Manual Volume 3A, Table 8-1.
       IA-32 Processor States Following Power-up, Reset, or INIT */
    pCtx->fpu.MXCSR      = 0x1F80;
    pCtx->fpu.MXCSR_MASK = 0xffff; /** @todo REM always changed this for us. Should probably check if the HW really
                                       supports all bits, since a zero value here should be read as 0xffbf. */

    /*
     * MSRs.
     */
    /* Init PAT MSR */
    pCtx->msrPAT = UINT64_C(0x0007040600070406); /** @todo correct? */

    /* EFER MBZ; see AMD64 Architecture Programmer's Manual Volume 2: Table 14-1. Initial Processor State.
     * The Intel docs don't mention it. */
    Assert(!pCtx->msrEFER);

    /* IA32_MISC_ENABLE - not entirely sure what the init/reset state really
       is supposed to be here, just trying to provide useful/sensible values. */
    PCPUMMSRRANGE pRange = cpumLookupMsrRange(pVM, MSR_IA32_MISC_ENABLE);
    if (pRange)
    {
        pVCpu->cpum.s.GuestMsrs.msr.MiscEnable = MSR_IA32_MISC_ENABLE_BTS_UNAVAIL
                                               | MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL
                                               | (pVM->cpum.s.GuestFeatures.fMonitorMWait ? MSR_IA32_MISC_ENABLE_MONITOR : 0)
                                               | MSR_IA32_MISC_ENABLE_FAST_STRINGS;
        pRange->fWrIgnMask |= MSR_IA32_MISC_ENABLE_BTS_UNAVAIL
                            | MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL;
        pRange->fWrGpMask  &= ~pVCpu->cpum.s.GuestMsrs.msr.MiscEnable;
    }

    /** @todo Wire IA32_MISC_ENABLE bit 22 to our NT 4 CPUID trick. */

    /** @todo r=ramshankar: Currently broken for SMP as TMCpuTickSet() expects to be
     *        called from each EMT while we're getting called by CPUMR3Reset()
     *        iteratively on the same thread. Fix later. */
#if 0 /** @todo r=bird: This we will do in TM, not here. */
    /* TSC must be 0. Intel spec. Table 9-1. "IA-32 Processor States Following Power-up, Reset, or INIT." */
    CPUMSetGuestMsr(pVCpu, MSR_IA32_TSC, 0);
#endif


    /* C-state control. Guesses. */
    pVCpu->cpum.s.GuestMsrs.msr.PkgCStateCfgCtrl = 1 /*C1*/ | RT_BIT_32(25) | RT_BIT_32(26) | RT_BIT_32(27) | RT_BIT_32(28);


    /*
     * Get the APIC base MSR from the APIC device. For historical reasons (saved state), the APIC base
     * continues to reside in the APIC device and we cache it here in the VCPU for all further accesses.
     */
    PDMApicGetBase(pVCpu, &pCtx->msrApicBase);
}
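
/*
 * Worked example (informational): the reset values above place the first
 * instruction fetch at the architectural reset vector, since the linear
 * address is cs.u64Base + eip = 0xffff0000 + 0x0000fff0 = 0xfffffff0,
 * i.e. 16 bytes below the top of the 32-bit address space.
 */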


/**
 * Resets the CPU.
 *
 * @param   pVM     Pointer to the VM.
 */
VMMR3DECL(void) CPUMR3Reset(PVM pVM)
{
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        CPUMR3ResetCpu(pVM, &pVM->aCpus[i]);

#ifdef VBOX_WITH_CRASHDUMP_MAGIC
        PCPUMCTX pCtx = &pVM->aCpus[i].cpum.s.Guest;

        /* Magic marker for searching in crash dumps. */
        strcpy((char *)pVM->aCpus[i].cpum.s.aMagic, "CPUMCPU Magic");
        pVM->aCpus[i].cpum.s.uMagic = UINT64_C(0xDEADBEEFDEADBEEF);
        pCtx->dr[5] = UINT64_C(0xDEADBEEFDEADBEEF);
#endif
    }
}




/**
 * Pass 0 live exec callback.
 *
 * @returns VINF_SSM_DONT_CALL_AGAIN.
 * @param   pVM     Pointer to the VM.
 * @param   pSSM    The saved state handle.
 * @param   uPass   The pass (0).
 */
static DECLCALLBACK(int) cpumR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
{
    AssertReturn(uPass == 0, VERR_SSM_UNEXPECTED_PASS);
    cpumR3SaveCpuId(pVM, pSSM);
    return VINF_SSM_DONT_CALL_AGAIN;
}


/**
 * Execute state save operation.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) cpumR3SaveExec(PVM pVM, PSSMHANDLE pSSM)
{
    /*
     * Save.
     */
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
        SSMR3PutStructEx(pSSM, &pVCpu->cpum.s.Hyper, sizeof(pVCpu->cpum.s.Hyper), 0, g_aCpumCtxFields, NULL);
    }

    SSMR3PutU32(pSSM, pVM->cCpus);
    SSMR3PutU32(pSSM, sizeof(pVM->aCpus[0].cpum.s.GuestMsrs.msr));
    for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
    {
        PVMCPU pVCpu = &pVM->aCpus[iCpu];

        SSMR3PutStructEx(pSSM, &pVCpu->cpum.s.Guest, sizeof(pVCpu->cpum.s.Guest), 0, g_aCpumCtxFields, NULL);
        SSMR3PutU32(pSSM, pVCpu->cpum.s.fUseFlags);
        SSMR3PutU32(pSSM, pVCpu->cpum.s.fChanged);
        AssertCompileSizeAlignment(pVCpu->cpum.s.GuestMsrs.msr, sizeof(uint64_t));
        SSMR3PutMem(pSSM, &pVCpu->cpum.s.GuestMsrs, sizeof(pVCpu->cpum.s.GuestMsrs.msr));
    }

    cpumR3SaveCpuId(pVM, pSSM);
    return VINF_SUCCESS;
}


/**
 * @copydoc FNSSMINTLOADPREP
 */
static DECLCALLBACK(int) cpumR3LoadPrep(PVM pVM, PSSMHANDLE pSSM)
{
    NOREF(pSSM);
    pVM->cpum.s.fPendingRestore = true;
    return VINF_SUCCESS;
}


/**
 * @copydoc FNSSMINTLOADEXEC
 */
static DECLCALLBACK(int) cpumR3LoadExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    /*
     * Validate version.
     */
    if (    uVersion != CPUM_SAVED_STATE_VERSION
        &&  uVersion != CPUM_SAVED_STATE_VERSION_PUT_STRUCT
        &&  uVersion != CPUM_SAVED_STATE_VERSION_MEM
        &&  uVersion != CPUM_SAVED_STATE_VERSION_NO_MSR_SIZE
        &&  uVersion != CPUM_SAVED_STATE_VERSION_VER3_2
        &&  uVersion != CPUM_SAVED_STATE_VERSION_VER3_0
        &&  uVersion != CPUM_SAVED_STATE_VERSION_VER2_1_NOMSR
        &&  uVersion != CPUM_SAVED_STATE_VERSION_VER2_0
        &&  uVersion != CPUM_SAVED_STATE_VERSION_VER1_6)
    {
        AssertMsgFailed(("cpumR3LoadExec: Invalid version uVersion=%d!\n", uVersion));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    if (uPass == SSM_PASS_FINAL)
    {
        /*
         * Set the size of RTGCPTR for SSMR3GetGCPtr. (Only necessary for
         * really old SSM file versions.)
         */
        if (uVersion == CPUM_SAVED_STATE_VERSION_VER1_6)
            SSMR3HandleSetGCPtrSize(pSSM, sizeof(RTGCPTR32));
        else if (uVersion <= CPUM_SAVED_STATE_VERSION_VER3_0)
            SSMR3HandleSetGCPtrSize(pSSM, HC_ARCH_BITS == 32 ? sizeof(RTGCPTR32) : sizeof(RTGCPTR));

        uint32_t const fLoad = uVersion > CPUM_SAVED_STATE_VERSION_MEM ? 0 : SSMSTRUCT_FLAGS_MEM_BAND_AID_RELAXED;
        PCSSMFIELD paCpumCtxFields = g_aCpumCtxFields;
        if (uVersion == CPUM_SAVED_STATE_VERSION_VER1_6)
            paCpumCtxFields = g_aCpumCtxFieldsV16;
        else if (uVersion <= CPUM_SAVED_STATE_VERSION_MEM)
            paCpumCtxFields = g_aCpumCtxFieldsMem;

        /*
         * Restore.
         */
        for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
        {
            PVMCPU   pVCpu = &pVM->aCpus[iCpu];
            uint64_t uCR3  = pVCpu->cpum.s.Hyper.cr3;
            uint64_t uRSP  = pVCpu->cpum.s.Hyper.rsp; /* see VMMR3Relocate(). */
            SSMR3GetStructEx(pSSM, &pVCpu->cpum.s.Hyper, sizeof(pVCpu->cpum.s.Hyper), fLoad, paCpumCtxFields, NULL);
            pVCpu->cpum.s.Hyper.cr3 = uCR3;
            pVCpu->cpum.s.Hyper.rsp = uRSP;
        }

        if (uVersion >= CPUM_SAVED_STATE_VERSION_VER2_1_NOMSR)
        {
            uint32_t cCpus;
            int rc = SSMR3GetU32(pSSM, &cCpus); AssertRCReturn(rc, rc);
            AssertLogRelMsgReturn(cCpus == pVM->cCpus, ("Mismatching CPU counts: saved: %u; configured: %u \n", cCpus, pVM->cCpus),
                                  VERR_SSM_UNEXPECTED_DATA);
        }
        AssertLogRelMsgReturn(   uVersion > CPUM_SAVED_STATE_VERSION_VER2_0
                              || pVM->cCpus == 1,
                              ("cCpus=%u\n", pVM->cCpus),
                              VERR_SSM_UNEXPECTED_DATA);

        uint32_t cbMsrs = 0;
        if (uVersion > CPUM_SAVED_STATE_VERSION_NO_MSR_SIZE)
        {
            int rc = SSMR3GetU32(pSSM, &cbMsrs); AssertRCReturn(rc, rc);
            AssertLogRelMsgReturn(RT_ALIGN(cbMsrs, sizeof(uint64_t)) == cbMsrs, ("Size of MSRs is misaligned: %#x\n", cbMsrs),
                                  VERR_SSM_UNEXPECTED_DATA);
            AssertLogRelMsgReturn(cbMsrs <= sizeof(CPUMCTXMSRS) && cbMsrs > 0, ("Size of MSRs is out of range: %#x\n", cbMsrs),
                                  VERR_SSM_UNEXPECTED_DATA);
        }

        for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
        {
            PVMCPU pVCpu = &pVM->aCpus[iCpu];
            SSMR3GetStructEx(pSSM, &pVCpu->cpum.s.Guest, sizeof(pVCpu->cpum.s.Guest), fLoad,
                             paCpumCtxFields, NULL);
            SSMR3GetU32(pSSM, &pVCpu->cpum.s.fUseFlags);
            SSMR3GetU32(pSSM, &pVCpu->cpum.s.fChanged);
            if (uVersion > CPUM_SAVED_STATE_VERSION_NO_MSR_SIZE)
                SSMR3GetMem(pSSM, &pVCpu->cpum.s.GuestMsrs.au64[0], cbMsrs);
            else if (uVersion >= CPUM_SAVED_STATE_VERSION_VER3_0)
            {
                SSMR3GetMem(pSSM, &pVCpu->cpum.s.GuestMsrs.au64[0], 2 * sizeof(uint64_t)); /* Restore two MSRs. */
                SSMR3Skip(pSSM, 62 * sizeof(uint64_t));
            }

            /* REM and other may have cleared must-be-one fields in DR6 and
               DR7, fix these. */
            pVCpu->cpum.s.Guest.dr[6] &= ~(X86_DR6_RAZ_MASK | X86_DR6_MBZ_MASK);
            pVCpu->cpum.s.Guest.dr[6] |= X86_DR6_RA1_MASK;
            pVCpu->cpum.s.Guest.dr[7] &= ~(X86_DR7_RAZ_MASK | X86_DR7_MBZ_MASK);
            pVCpu->cpum.s.Guest.dr[7] |= X86_DR7_RA1_MASK;
        }

        /* Older states do not have the internal selector register flags
           and valid selector values.  Supply those. */
        if (uVersion <= CPUM_SAVED_STATE_VERSION_MEM)
        {
            for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
            {
                PVMCPU pVCpu = &pVM->aCpus[iCpu];
                bool const fValid = HMIsEnabled(pVM)
                                 || (   uVersion > CPUM_SAVED_STATE_VERSION_VER3_2
                                     && !(pVCpu->cpum.s.fChanged & CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID));
                PCPUMSELREG paSelReg = CPUMCTX_FIRST_SREG(&pVCpu->cpum.s.Guest);
                if (fValid)
                {
                    for (uint32_t iSelReg = 0; iSelReg < X86_SREG_COUNT; iSelReg++)
                    {
                        paSelReg[iSelReg].fFlags   = CPUMSELREG_FLAGS_VALID;
                        paSelReg[iSelReg].ValidSel = paSelReg[iSelReg].Sel;
                    }

                    pVCpu->cpum.s.Guest.ldtr.fFlags   = CPUMSELREG_FLAGS_VALID;
                    pVCpu->cpum.s.Guest.ldtr.ValidSel = pVCpu->cpum.s.Guest.ldtr.Sel;
                }
                else
                {
                    for (uint32_t iSelReg = 0; iSelReg < X86_SREG_COUNT; iSelReg++)
                    {
                        paSelReg[iSelReg].fFlags   = 0;
                        paSelReg[iSelReg].ValidSel = 0;
                    }

                    /* This might not be 104% correct, but I think it's close
                       enough for all practical purposes...  (REM always loaded
                       LDTR registers.) */
                    pVCpu->cpum.s.Guest.ldtr.fFlags   = CPUMSELREG_FLAGS_VALID;
                    pVCpu->cpum.s.Guest.ldtr.ValidSel = pVCpu->cpum.s.Guest.ldtr.Sel;
                }
                pVCpu->cpum.s.Guest.tr.fFlags   = CPUMSELREG_FLAGS_VALID;
                pVCpu->cpum.s.Guest.tr.ValidSel = pVCpu->cpum.s.Guest.tr.Sel;
            }
        }

        /* Clear CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID. */
        if (   uVersion >  CPUM_SAVED_STATE_VERSION_VER3_2
            && uVersion <= CPUM_SAVED_STATE_VERSION_MEM)
            for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
                pVM->aCpus[iCpu].cpum.s.fChanged &= ~CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID;

        /*
         * A quick sanity check.
         */
        for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
        {
            PVMCPU pVCpu = &pVM->aCpus[iCpu];
            AssertLogRelReturn(!(pVCpu->cpum.s.Guest.es.fFlags & ~CPUMSELREG_FLAGS_VALID_MASK), VERR_SSM_UNEXPECTED_DATA);
            AssertLogRelReturn(!(pVCpu->cpum.s.Guest.cs.fFlags & ~CPUMSELREG_FLAGS_VALID_MASK), VERR_SSM_UNEXPECTED_DATA);
            AssertLogRelReturn(!(pVCpu->cpum.s.Guest.ss.fFlags & ~CPUMSELREG_FLAGS_VALID_MASK), VERR_SSM_UNEXPECTED_DATA);
            AssertLogRelReturn(!(pVCpu->cpum.s.Guest.ds.fFlags & ~CPUMSELREG_FLAGS_VALID_MASK), VERR_SSM_UNEXPECTED_DATA);
            AssertLogRelReturn(!(pVCpu->cpum.s.Guest.fs.fFlags & ~CPUMSELREG_FLAGS_VALID_MASK), VERR_SSM_UNEXPECTED_DATA);
            AssertLogRelReturn(!(pVCpu->cpum.s.Guest.gs.fFlags & ~CPUMSELREG_FLAGS_VALID_MASK), VERR_SSM_UNEXPECTED_DATA);
        }
    }

    pVM->cpum.s.fPendingRestore = false;

    /*
     * Guest CPUIDs.
     */
    if (uVersion > CPUM_SAVED_STATE_VERSION_VER3_0)
        return cpumR3LoadCpuId(pVM, pSSM, uVersion);

    /** @todo Merge the code below into cpumR3LoadCpuId when we've found out what is
     *        actually required. */

    /*
     * Restore the CPUID leaves.
     *
     * Note that we support restoring less than the current amount of standard
     * leaves because we've been allowed more in newer versions of VBox.
     */
    uint32_t cElements;
    int rc = SSMR3GetU32(pSSM, &cElements); AssertRCReturn(rc, rc);
    if (cElements > RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdPatmStd))
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    SSMR3GetMem(pSSM, &pVM->cpum.s.aGuestCpuIdPatmStd[0], cElements*sizeof(pVM->cpum.s.aGuestCpuIdPatmStd[0]));

    rc = SSMR3GetU32(pSSM, &cElements); AssertRCReturn(rc, rc);
    if (cElements != RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdPatmExt))
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    SSMR3GetMem(pSSM, &pVM->cpum.s.aGuestCpuIdPatmExt[0], sizeof(pVM->cpum.s.aGuestCpuIdPatmExt));

    rc = SSMR3GetU32(pSSM, &cElements); AssertRCReturn(rc, rc);
    if (cElements != RT_ELEMENTS(pVM->cpum.s.aGuestCpuIdPatmCentaur))
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    SSMR3GetMem(pSSM, &pVM->cpum.s.aGuestCpuIdPatmCentaur[0], sizeof(pVM->cpum.s.aGuestCpuIdPatmCentaur));

    SSMR3GetMem(pSSM, &pVM->cpum.s.GuestInfo.DefCpuId, sizeof(pVM->cpum.s.GuestInfo.DefCpuId));

    /*
     * Check that the basic cpuid id information is unchanged.
     */
    /** @todo we should check the 64 bits capabilities too! */
    uint32_t au32CpuId[8] = {0,0,0,0, 0,0,0,0};
    ASMCpuIdExSlow(0, 0, 0, 0, &au32CpuId[0], &au32CpuId[1], &au32CpuId[2], &au32CpuId[3]);
    ASMCpuIdExSlow(1, 0, 0, 0, &au32CpuId[4], &au32CpuId[5], &au32CpuId[6], &au32CpuId[7]);
    uint32_t au32CpuIdSaved[8];
    rc = SSMR3GetMem(pSSM, &au32CpuIdSaved[0], sizeof(au32CpuIdSaved));
    if (RT_SUCCESS(rc))
    {
        /* Ignore CPU stepping. */
        au32CpuId[4]      &= 0xfffffff0;
        au32CpuIdSaved[4] &= 0xfffffff0;

        /* Ignore APIC ID (AMD specs). */
        au32CpuId[5]      &= ~0xff000000;
        au32CpuIdSaved[5] &= ~0xff000000;

        /* Ignore the number of Logical CPUs (AMD specs). */
        au32CpuId[5]      &= ~0x00ff0000;
        au32CpuIdSaved[5] &= ~0x00ff0000;

        /* Ignore some advanced capability bits, that we don't expose to the guest. */
        au32CpuId[6]      &= ~(   X86_CPUID_FEATURE_ECX_DTES64
                               |  X86_CPUID_FEATURE_ECX_VMX
                               |  X86_CPUID_FEATURE_ECX_SMX
                               |  X86_CPUID_FEATURE_ECX_EST
                               |  X86_CPUID_FEATURE_ECX_TM2
                               |  X86_CPUID_FEATURE_ECX_CNTXID
                               |  X86_CPUID_FEATURE_ECX_TPRUPDATE
                               |  X86_CPUID_FEATURE_ECX_PDCM
                               |  X86_CPUID_FEATURE_ECX_DCA
                               |  X86_CPUID_FEATURE_ECX_X2APIC
                              );
        au32CpuIdSaved[6] &= ~(   X86_CPUID_FEATURE_ECX_DTES64
                               |  X86_CPUID_FEATURE_ECX_VMX
                               |  X86_CPUID_FEATURE_ECX_SMX
                               |  X86_CPUID_FEATURE_ECX_EST
                               |  X86_CPUID_FEATURE_ECX_TM2
                               |  X86_CPUID_FEATURE_ECX_CNTXID
                               |  X86_CPUID_FEATURE_ECX_TPRUPDATE
                               |  X86_CPUID_FEATURE_ECX_PDCM
                               |  X86_CPUID_FEATURE_ECX_DCA
                               |  X86_CPUID_FEATURE_ECX_X2APIC
                              );

        /* Make sure we don't forget to update the masks when enabling
         * features in the future.
         */
        AssertRelease(!(pVM->cpum.s.aGuestCpuIdPatmStd[1].uEcx &
                        (   X86_CPUID_FEATURE_ECX_DTES64
                         |  X86_CPUID_FEATURE_ECX_VMX
                         |  X86_CPUID_FEATURE_ECX_SMX
                         |  X86_CPUID_FEATURE_ECX_EST
                         |  X86_CPUID_FEATURE_ECX_TM2
                         |  X86_CPUID_FEATURE_ECX_CNTXID
                         |  X86_CPUID_FEATURE_ECX_TPRUPDATE
                         |  X86_CPUID_FEATURE_ECX_PDCM
                         |  X86_CPUID_FEATURE_ECX_DCA
                         |  X86_CPUID_FEATURE_ECX_X2APIC
                        )));
        /* do the compare */
        if (memcmp(au32CpuIdSaved, au32CpuId, sizeof(au32CpuIdSaved)))
        {
            if (SSMR3HandleGetAfter(pSSM) == SSMAFTER_DEBUG_IT)
                LogRel(("cpumR3LoadExec: CpuId mismatch! (ignored due to SSMAFTER_DEBUG_IT)\n"
                        "Saved=%.*Rhxs\n"
                        "Real =%.*Rhxs\n",
                        sizeof(au32CpuIdSaved), au32CpuIdSaved,
                        sizeof(au32CpuId), au32CpuId));
            else
            {
                LogRel(("cpumR3LoadExec: CpuId mismatch!\n"
                        "Saved=%.*Rhxs\n"
                        "Real =%.*Rhxs\n",
                        sizeof(au32CpuIdSaved), au32CpuIdSaved,
                        sizeof(au32CpuId), au32CpuId));
                rc = VERR_SSM_LOAD_CPUID_MISMATCH;
            }
        }
    }

    return rc;
}


/**
 * @copydoc FNSSMINTLOADDONE
 */
static DECLCALLBACK(int) cpumR3LoadDone(PVM pVM, PSSMHANDLE pSSM)
{
    if (RT_FAILURE(SSMR3HandleGetStatus(pSSM)))
        return VINF_SUCCESS;

    /* just check this since we can. */ /** @todo Add a SSM unit flag for indicating that it's mandatory during a restore. */
    if (pVM->cpum.s.fPendingRestore)
    {
        LogRel(("CPUM: Missing state!\n"));
        return VERR_INTERNAL_ERROR_2;
    }

    bool const fSupportsLongMode = VMR3IsLongModeAllowed(pVM);
    for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
    {
        PVMCPU pVCpu = &pVM->aCpus[iCpu];

        /* Notify PGM of the NXE states in case they've changed. */
        PGMNotifyNxeChanged(pVCpu, RT_BOOL(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_NXE));

        /* Cache the local APIC base from the APIC device. During init. this is done in CPUMR3ResetCpu(). */
        PDMApicGetBase(pVCpu, &pVCpu->cpum.s.Guest.msrApicBase);

        /* During init. this is done in CPUMR3InitCompleted(). */
        if (fSupportsLongMode)
            pVCpu->cpum.s.fUseFlags |= CPUM_USE_SUPPORTS_LONGMODE;
    }
    return VINF_SUCCESS;
}


/**
 * Checks if the CPUM state restore is still pending.
 *
 * @returns true / false.
 * @param   pVM     Pointer to the VM.
 */
VMMDECL(bool) CPUMR3IsStateRestorePending(PVM pVM)
{
    return pVM->cpum.s.fPendingRestore;
}


/**
 * Formats the EFLAGS value into mnemonics.
 *
 * @param   pszEFlags   Where to write the mnemonics. (Assumes sufficient buffer space.)
 * @param   efl         The EFLAGS value.
 */
static void cpumR3InfoFormatFlags(char *pszEFlags, uint32_t efl)
{
    /*
     * Format the flags.
     */
    static const struct
    {
        const char *pszSet; const char *pszClear; uint32_t fFlag;
    } s_aFlags[] =
    {
        { "vip",NULL, X86_EFL_VIP },
        { "vif",NULL, X86_EFL_VIF },
        { "ac", NULL, X86_EFL_AC },
        { "vm", NULL, X86_EFL_VM },
        { "rf", NULL, X86_EFL_RF },
        { "nt", NULL, X86_EFL_NT },
        { "ov", "nv", X86_EFL_OF },
        { "dn", "up", X86_EFL_DF },
        { "ei", "di", X86_EFL_IF },
        { "tf", NULL, X86_EFL_TF },
        { "nt", "pl", X86_EFL_SF },
        { "nz", "zr", X86_EFL_ZF },
        { "ac", "na", X86_EFL_AF },
        { "po", "pe", X86_EFL_PF },
        { "cy", "nc", X86_EFL_CF },
    };
    char *psz = pszEFlags;
    for (unsigned i = 0; i < RT_ELEMENTS(s_aFlags); i++)
    {
        const char *pszAdd = s_aFlags[i].fFlag & efl ? s_aFlags[i].pszSet : s_aFlags[i].pszClear;
        if (pszAdd)
        {
            strcpy(psz, pszAdd);
            psz += strlen(pszAdd);
            *psz++ = ' ';
        }
    }
    psz[-1] = '\0';
}
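
/*
 * Example usage (illustrative, mirrors the callers below): for an EFLAGS value
 * of 0x00000202 (IF set, all status flags clear) the buffer ends up holding
 * "nv up ei pl zr na pe nc".  The block is disabled.
 */
#if 0 /* example only */
    char szEFlagsExample[80];
    cpumR3InfoFormatFlags(&szEFlagsExample[0], 0x00000202);
#endif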


/**
 * Formats a full register dump.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pCtx        The context to format.
 * @param   pCtxCore    The context core to format.
 * @param   pHlp        Output functions.
 * @param   enmType     The dump type.
 * @param   pszPrefix   Register name prefix.
 */
static void cpumR3InfoOne(PVM pVM, PCPUMCTX pCtx, PCCPUMCTXCORE pCtxCore, PCDBGFINFOHLP pHlp, CPUMDUMPTYPE enmType,
                          const char *pszPrefix)
{
    NOREF(pVM);

    /*
     * Format the EFLAGS.
     */
    uint32_t efl = pCtxCore->eflags.u32;
    char szEFlags[80];
    cpumR3InfoFormatFlags(&szEFlags[0], efl);

    /*
     * Format the registers.
     */
    switch (enmType)
    {
        case CPUMDUMPTYPE_TERSE:
            if (CPUMIsGuestIn64BitCodeEx(pCtx))
                pHlp->pfnPrintf(pHlp,
                    "%srax=%016RX64 %srbx=%016RX64 %srcx=%016RX64 %srdx=%016RX64\n"
                    "%srsi=%016RX64 %srdi=%016RX64 %sr8 =%016RX64 %sr9 =%016RX64\n"
                    "%sr10=%016RX64 %sr11=%016RX64 %sr12=%016RX64 %sr13=%016RX64\n"
                    "%sr14=%016RX64 %sr15=%016RX64\n"
                    "%srip=%016RX64 %srsp=%016RX64 %srbp=%016RX64 %siopl=%d %*s\n"
                    "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %seflags=%08x\n",
                    pszPrefix, pCtxCore->rax, pszPrefix, pCtxCore->rbx, pszPrefix, pCtxCore->rcx, pszPrefix, pCtxCore->rdx, pszPrefix, pCtxCore->rsi, pszPrefix, pCtxCore->rdi,
                    pszPrefix, pCtxCore->r8, pszPrefix, pCtxCore->r9, pszPrefix, pCtxCore->r10, pszPrefix, pCtxCore->r11, pszPrefix, pCtxCore->r12, pszPrefix, pCtxCore->r13,
                    pszPrefix, pCtxCore->r14, pszPrefix, pCtxCore->r15,
                    pszPrefix, pCtxCore->rip, pszPrefix, pCtxCore->rsp, pszPrefix, pCtxCore->rbp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
                    pszPrefix, pCtxCore->cs.Sel, pszPrefix, pCtxCore->ss.Sel, pszPrefix, pCtxCore->ds.Sel, pszPrefix, pCtxCore->es.Sel,
                    pszPrefix, pCtxCore->fs.Sel, pszPrefix, pCtxCore->gs.Sel, pszPrefix, efl);
            else
                pHlp->pfnPrintf(pHlp,
                    "%seax=%08x %sebx=%08x %secx=%08x %sedx=%08x %sesi=%08x %sedi=%08x\n"
                    "%seip=%08x %sesp=%08x %sebp=%08x %siopl=%d %*s\n"
                    "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %seflags=%08x\n",
                    pszPrefix, pCtxCore->eax, pszPrefix, pCtxCore->ebx, pszPrefix, pCtxCore->ecx, pszPrefix, pCtxCore->edx, pszPrefix, pCtxCore->esi, pszPrefix, pCtxCore->edi,
                    pszPrefix, pCtxCore->eip, pszPrefix, pCtxCore->esp, pszPrefix, pCtxCore->ebp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
                    pszPrefix, pCtxCore->cs.Sel, pszPrefix, pCtxCore->ss.Sel, pszPrefix, pCtxCore->ds.Sel, pszPrefix, pCtxCore->es.Sel,
                    pszPrefix, pCtxCore->fs.Sel, pszPrefix, pCtxCore->gs.Sel, pszPrefix, efl);
            break;

        case CPUMDUMPTYPE_DEFAULT:
            if (CPUMIsGuestIn64BitCodeEx(pCtx))
                pHlp->pfnPrintf(pHlp,
                    "%srax=%016RX64 %srbx=%016RX64 %srcx=%016RX64 %srdx=%016RX64\n"
                    "%srsi=%016RX64 %srdi=%016RX64 %sr8 =%016RX64 %sr9 =%016RX64\n"
                    "%sr10=%016RX64 %sr11=%016RX64 %sr12=%016RX64 %sr13=%016RX64\n"
                    "%sr14=%016RX64 %sr15=%016RX64\n"
                    "%srip=%016RX64 %srsp=%016RX64 %srbp=%016RX64 %siopl=%d %*s\n"
                    "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %str=%04x %seflags=%08x\n"
                    "%scr0=%08RX64 %scr2=%08RX64 %scr3=%08RX64 %scr4=%08RX64 %sgdtr=%016RX64:%04x %sldtr=%04x\n"
                    ,
                    pszPrefix, pCtxCore->rax, pszPrefix, pCtxCore->rbx, pszPrefix, pCtxCore->rcx, pszPrefix, pCtxCore->rdx, pszPrefix, pCtxCore->rsi, pszPrefix, pCtxCore->rdi,
                    pszPrefix, pCtxCore->r8, pszPrefix, pCtxCore->r9, pszPrefix, pCtxCore->r10, pszPrefix, pCtxCore->r11, pszPrefix, pCtxCore->r12, pszPrefix, pCtxCore->r13,
                    pszPrefix, pCtxCore->r14, pszPrefix, pCtxCore->r15,
                    pszPrefix, pCtxCore->rip, pszPrefix, pCtxCore->rsp, pszPrefix, pCtxCore->rbp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
                    pszPrefix, pCtxCore->cs.Sel, pszPrefix, pCtxCore->ss.Sel, pszPrefix, pCtxCore->ds.Sel, pszPrefix, pCtxCore->es.Sel,
                    pszPrefix, pCtxCore->fs.Sel, pszPrefix, pCtxCore->gs.Sel, pszPrefix, pCtx->tr.Sel, pszPrefix, efl,
                    pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
                    pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, pCtx->ldtr.Sel);
            else
                pHlp->pfnPrintf(pHlp,
                    "%seax=%08x %sebx=%08x %secx=%08x %sedx=%08x %sesi=%08x %sedi=%08x\n"
                    "%seip=%08x %sesp=%08x %sebp=%08x %siopl=%d %*s\n"
                    "%scs=%04x %sss=%04x %sds=%04x %ses=%04x %sfs=%04x %sgs=%04x %str=%04x %seflags=%08x\n"
                    "%scr0=%08RX64 %scr2=%08RX64 %scr3=%08RX64 %scr4=%08RX64 %sgdtr=%08RX64:%04x %sldtr=%04x\n"
                    ,
                    pszPrefix, pCtxCore->eax, pszPrefix, pCtxCore->ebx, pszPrefix, pCtxCore->ecx, pszPrefix, pCtxCore->edx, pszPrefix, pCtxCore->esi, pszPrefix, pCtxCore->edi,
                    pszPrefix, pCtxCore->eip, pszPrefix, pCtxCore->esp, pszPrefix, pCtxCore->ebp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
                    pszPrefix, pCtxCore->cs.Sel, pszPrefix, pCtxCore->ss.Sel, pszPrefix, pCtxCore->ds.Sel, pszPrefix, pCtxCore->es.Sel,
                    pszPrefix, pCtxCore->fs.Sel, pszPrefix, pCtxCore->gs.Sel, pszPrefix, pCtx->tr.Sel, pszPrefix, efl,
                    pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
                    pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, pCtx->ldtr.Sel);
            break;

        case CPUMDUMPTYPE_VERBOSE:
            if (CPUMIsGuestIn64BitCodeEx(pCtx))
                pHlp->pfnPrintf(pHlp,
                    "%srax=%016RX64 %srbx=%016RX64 %srcx=%016RX64 %srdx=%016RX64\n"
                    "%srsi=%016RX64 %srdi=%016RX64 %sr8 =%016RX64 %sr9 =%016RX64\n"
                    "%sr10=%016RX64 %sr11=%016RX64 %sr12=%016RX64 %sr13=%016RX64\n"
                    "%sr14=%016RX64 %sr15=%016RX64\n"
                    "%srip=%016RX64 %srsp=%016RX64 %srbp=%016RX64 %siopl=%d %*s\n"
                    "%scs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
                    "%sds={%04x base=%016RX64 limit=%08x flags=%08x}\n"
                    "%ses={%04x base=%016RX64 limit=%08x flags=%08x}\n"
                    "%sfs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
                    "%sgs={%04x base=%016RX64 limit=%08x flags=%08x}\n"
                    "%sss={%04x base=%016RX64 limit=%08x flags=%08x}\n"
                    "%scr0=%016RX64 %scr2=%016RX64 %scr3=%016RX64 %scr4=%016RX64\n"
                    "%sdr0=%016RX64 %sdr1=%016RX64 %sdr2=%016RX64 %sdr3=%016RX64\n"
                    "%sdr4=%016RX64 %sdr5=%016RX64 %sdr6=%016RX64 %sdr7=%016RX64\n"
                    "%sgdtr=%016RX64:%04x %sidtr=%016RX64:%04x %seflags=%08x\n"
                    "%sldtr={%04x base=%08RX64 limit=%08x flags=%08x}\n"
                    "%str ={%04x base=%08RX64 limit=%08x flags=%08x}\n"
                    "%sSysEnter={cs=%04llx eip=%016RX64 esp=%016RX64}\n"
                    ,
                    pszPrefix, pCtxCore->rax, pszPrefix, pCtxCore->rbx, pszPrefix, pCtxCore->rcx, pszPrefix, pCtxCore->rdx, pszPrefix, pCtxCore->rsi, pszPrefix, pCtxCore->rdi,
                    pszPrefix, pCtxCore->r8, pszPrefix, pCtxCore->r9, pszPrefix, pCtxCore->r10, pszPrefix, pCtxCore->r11, pszPrefix, pCtxCore->r12, pszPrefix, pCtxCore->r13,
                    pszPrefix, pCtxCore->r14, pszPrefix, pCtxCore->r15,
                    pszPrefix, pCtxCore->rip, pszPrefix, pCtxCore->rsp, pszPrefix, pCtxCore->rbp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
                    pszPrefix, pCtxCore->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit, pCtx->cs.Attr.u,
                    pszPrefix, pCtxCore->ds.Sel, pCtx->ds.u64Base, pCtx->ds.u32Limit, pCtx->ds.Attr.u,
                    pszPrefix, pCtxCore->es.Sel, pCtx->es.u64Base, pCtx->es.u32Limit, pCtx->es.Attr.u,
                    pszPrefix, pCtxCore->fs.Sel, pCtx->fs.u64Base, pCtx->fs.u32Limit, pCtx->fs.Attr.u,
                    pszPrefix, pCtxCore->gs.Sel, pCtx->gs.u64Base, pCtx->gs.u32Limit, pCtx->gs.Attr.u,
                    pszPrefix, pCtxCore->ss.Sel, pCtx->ss.u64Base, pCtx->ss.u32Limit, pCtx->ss.Attr.u,
                    pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
                    pszPrefix, pCtx->dr[0], pszPrefix, pCtx->dr[1], pszPrefix, pCtx->dr[2], pszPrefix, pCtx->dr[3],
                    pszPrefix, pCtx->dr[4], pszPrefix, pCtx->dr[5], pszPrefix, pCtx->dr[6], pszPrefix, pCtx->dr[7],
                    pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, pszPrefix, efl,
                    pszPrefix, pCtx->ldtr.Sel, pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit, pCtx->ldtr.Attr.u,
                    pszPrefix, pCtx->tr.Sel, pCtx->tr.u64Base, pCtx->tr.u32Limit, pCtx->tr.Attr.u,
                    pszPrefix, pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp);
            else
                pHlp->pfnPrintf(pHlp,
                    "%seax=%08x %sebx=%08x %secx=%08x %sedx=%08x %sesi=%08x %sedi=%08x\n"
                    "%seip=%08x %sesp=%08x %sebp=%08x %siopl=%d %*s\n"
                    "%scs={%04x base=%016RX64 limit=%08x flags=%08x} %sdr0=%08RX64 %sdr1=%08RX64\n"
                    "%sds={%04x base=%016RX64 limit=%08x flags=%08x} %sdr2=%08RX64 %sdr3=%08RX64\n"
                    "%ses={%04x base=%016RX64 limit=%08x flags=%08x} %sdr4=%08RX64 %sdr5=%08RX64\n"
                    "%sfs={%04x base=%016RX64 limit=%08x flags=%08x} %sdr6=%08RX64 %sdr7=%08RX64\n"
                    "%sgs={%04x base=%016RX64 limit=%08x flags=%08x} %scr0=%08RX64 %scr2=%08RX64\n"
                    "%sss={%04x base=%016RX64 limit=%08x flags=%08x} %scr3=%08RX64 %scr4=%08RX64\n"
                    "%sgdtr=%016RX64:%04x %sidtr=%016RX64:%04x %seflags=%08x\n"
                    "%sldtr={%04x base=%08RX64 limit=%08x flags=%08x}\n"
                    "%str ={%04x base=%08RX64 limit=%08x flags=%08x}\n"
                    "%sSysEnter={cs=%04llx eip=%08llx esp=%08llx}\n"
                    ,
                    pszPrefix, pCtxCore->eax, pszPrefix, pCtxCore->ebx, pszPrefix, pCtxCore->ecx, pszPrefix, pCtxCore->edx, pszPrefix, pCtxCore->esi, pszPrefix, pCtxCore->edi,
                    pszPrefix, pCtxCore->eip, pszPrefix, pCtxCore->esp, pszPrefix, pCtxCore->ebp, pszPrefix, X86_EFL_GET_IOPL(efl), *pszPrefix ? 33 : 31, szEFlags,
                    pszPrefix, pCtxCore->cs.Sel, pCtx->cs.u64Base, pCtx->cs.u32Limit, pCtx->cs.Attr.u, pszPrefix, pCtx->dr[0], pszPrefix, pCtx->dr[1],
                    pszPrefix, pCtxCore->ds.Sel, pCtx->ds.u64Base, pCtx->ds.u32Limit, pCtx->ds.Attr.u, pszPrefix, pCtx->dr[2], pszPrefix, pCtx->dr[3],
1509 pszPrefix, pCtxCore->es.Sel, pCtx->es.u64Base, pCtx->es.u32Limit, pCtx->es.Attr.u, pszPrefix, pCtx->dr[4], pszPrefix, pCtx->dr[5],
1510 pszPrefix, pCtxCore->fs.Sel, pCtx->fs.u64Base, pCtx->fs.u32Limit, pCtx->fs.Attr.u, pszPrefix, pCtx->dr[6], pszPrefix, pCtx->dr[7],
1511 pszPrefix, pCtxCore->gs.Sel, pCtx->gs.u64Base, pCtx->gs.u32Limit, pCtx->gs.Attr.u, pszPrefix, pCtx->cr0, pszPrefix, pCtx->cr2,
1512 pszPrefix, pCtxCore->ss.Sel, pCtx->ss.u64Base, pCtx->ss.u32Limit, pCtx->ss.Attr.u, pszPrefix, pCtx->cr3, pszPrefix, pCtx->cr4,
1513 pszPrefix, pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, pszPrefix, pCtx->idtr.pIdt, pCtx->idtr.cbIdt, pszPrefix, efl,
1514 pszPrefix, pCtx->ldtr.Sel, pCtx->ldtr.u64Base, pCtx->ldtr.u32Limit, pCtx->ldtr.Attr.u,
1515 pszPrefix, pCtx->tr.Sel, pCtx->tr.u64Base, pCtx->tr.u32Limit, pCtx->tr.Attr.u,
1516 pszPrefix, pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp);
1517
1518 pHlp->pfnPrintf(pHlp,
1519 "%sFCW=%04x %sFSW=%04x %sFTW=%04x %sFOP=%04x %sMXCSR=%08x %sMXCSR_MASK=%08x\n"
1520 "%sFPUIP=%08x %sCS=%04x %sRsrvd1=%04x %sFPUDP=%08x %sDS=%04x %sRsvrd2=%04x\n"
1521 ,
1522 pszPrefix, pCtx->fpu.FCW, pszPrefix, pCtx->fpu.FSW, pszPrefix, pCtx->fpu.FTW, pszPrefix, pCtx->fpu.FOP,
1523 pszPrefix, pCtx->fpu.MXCSR, pszPrefix, pCtx->fpu.MXCSR_MASK,
1524 pszPrefix, pCtx->fpu.FPUIP, pszPrefix, pCtx->fpu.CS, pszPrefix, pCtx->fpu.Rsrvd1,
1525 pszPrefix, pCtx->fpu.FPUDP, pszPrefix, pCtx->fpu.DS, pszPrefix, pCtx->fpu.Rsrvd2
1526 );
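/* The x87 FPU status word keeps the top-of-stack index in FSW[13:11]; ST(i)
   maps to the physical register FPR((TOP + i) % 8). E.g. with TOP=6 the loop
   below prints ST(0)=FPR6, ST(1)=FPR7, ST(2)=FPR0, and so on. */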
1527 unsigned iShift = (pCtx->fpu.FSW >> 11) & 7;
1528 for (unsigned iST = 0; iST < RT_ELEMENTS(pCtx->fpu.aRegs); iST++)
1529 {
1530 unsigned iFPR = (iST + iShift) % RT_ELEMENTS(pCtx->fpu.aRegs);
1531 unsigned uTag = pCtx->fpu.FTW & (1 << iFPR) ? 1 : 0;
1532 char chSign = pCtx->fpu.aRegs[iFPR].au16[4] & 0x8000 ? '-' : '+';
1533 unsigned iInteger = (unsigned)(pCtx->fpu.aRegs[iFPR].au64[0] >> 63);
1534 uint64_t u64Fraction = pCtx->fpu.aRegs[iFPR].au64[0] & UINT64_C(0x7fffffffffffffff);
1535 unsigned uExponent = pCtx->fpu.aRegs[iFPR].au16[4] & 0x7fff;
1536 /** @todo This isn't entirely correct and needs more work! */
1537 pHlp->pfnPrintf(pHlp,
1538 "%sST(%u)=%sFPR%u={%04RX16'%08RX32'%08RX32} t%d %c%u.%022llu ^ %u",
1539 pszPrefix, iST, pszPrefix, iFPR,
1540 pCtx->fpu.aRegs[iFPR].au16[4], pCtx->fpu.aRegs[iFPR].au32[1], pCtx->fpu.aRegs[iFPR].au32[0],
1541 uTag, chSign, iInteger, u64Fraction, uExponent);
1542 if (pCtx->fpu.aRegs[iFPR].au16[5] || pCtx->fpu.aRegs[iFPR].au16[6] || pCtx->fpu.aRegs[iFPR].au16[7])
1543 pHlp->pfnPrintf(pHlp, " res={%04RX16,%04RX16,%04RX16}\n",
1544 pCtx->fpu.aRegs[iFPR].au16[5], pCtx->fpu.aRegs[iFPR].au16[6], pCtx->fpu.aRegs[iFPR].au16[7]);
1545 else
1546 pHlp->pfnPrintf(pHlp, "\n");
1547 }
1548 for (unsigned iXMM = 0; iXMM < RT_ELEMENTS(pCtx->fpu.aXMM); iXMM++)
1549 pHlp->pfnPrintf(pHlp,
1550 iXMM & 1
1551 ? "%sXMM%u%s=%08RX32'%08RX32'%08RX32'%08RX32\n"
1552 : "%sXMM%u%s=%08RX32'%08RX32'%08RX32'%08RX32 ",
1553 pszPrefix, iXMM, iXMM < 10 ? " " : "",
1554 pCtx->fpu.aXMM[iXMM].au32[3],
1555 pCtx->fpu.aXMM[iXMM].au32[2],
1556 pCtx->fpu.aXMM[iXMM].au32[1],
1557 pCtx->fpu.aXMM[iXMM].au32[0]);
1558 for (unsigned i = 0; i < RT_ELEMENTS(pCtx->fpu.au32RsrvdRest); i++)
1559 if (pCtx->fpu.au32RsrvdRest[i])
1560 pHlp->pfnPrintf(pHlp, "%sRsrvdRest[i]=%RX32 (offset=%#x)\n",
1561 pszPrefix, i, pCtx->fpu.au32RsrvdRest[i], RT_OFFSETOF(X86FXSTATE, au32RsrvdRest[i]) );
1562
1563 pHlp->pfnPrintf(pHlp,
1564 "%sEFER =%016RX64\n"
1565 "%sPAT =%016RX64\n"
1566 "%sSTAR =%016RX64\n"
1567 "%sCSTAR =%016RX64\n"
1568 "%sLSTAR =%016RX64\n"
1569 "%sSFMASK =%016RX64\n"
1570 "%sKERNELGSBASE =%016RX64\n",
1571 pszPrefix, pCtx->msrEFER,
1572 pszPrefix, pCtx->msrPAT,
1573 pszPrefix, pCtx->msrSTAR,
1574 pszPrefix, pCtx->msrCSTAR,
1575 pszPrefix, pCtx->msrLSTAR,
1576 pszPrefix, pCtx->msrSFMASK,
1577 pszPrefix, pCtx->msrKERNELGSBASE);
1578 break;
1579 }
1580}
1581
1582
1583/**
1584 * Display all cpu states and any other cpum info.
1585 *
1586 * @param pVM Pointer to the VM.
1587 * @param pHlp The info helper functions.
1588 * @param pszArgs Arguments, passed on to the individual info handlers.
1589 */
1590static DECLCALLBACK(void) cpumR3InfoAll(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1591{
1592 cpumR3InfoGuest(pVM, pHlp, pszArgs);
1593 cpumR3InfoGuestInstr(pVM, pHlp, pszArgs);
1594 cpumR3InfoHyper(pVM, pHlp, pszArgs);
1595 cpumR3InfoHost(pVM, pHlp, pszArgs);
1596}
1597
1598
1599/**
1600 * Parses the info argument.
1601 *
1602 * The argument starts with 'verbose', 'terse' or 'default' and then
1603 * continues with the comment string.
1604 *
1605 * @param pszArgs The pointer to the argument string.
1606 * @param penmType Where to store the dump type request.
1607 * @param ppszComment Where to store the pointer to the comment string.
1608 */
1609static void cpumR3InfoParseArg(const char *pszArgs, CPUMDUMPTYPE *penmType, const char **ppszComment)
1610{
1611 if (!pszArgs)
1612 {
1613 *penmType = CPUMDUMPTYPE_DEFAULT;
1614 *ppszComment = "";
1615 }
1616 else
1617 {
1618 if (!strncmp(pszArgs, RT_STR_TUPLE("verbose")))
1619 {
1620 pszArgs += 7;
1621 *penmType = CPUMDUMPTYPE_VERBOSE;
1622 }
1623 else if (!strncmp(pszArgs, RT_STR_TUPLE("terse")))
1624 {
1625 pszArgs += 5;
1626 *penmType = CPUMDUMPTYPE_TERSE;
1627 }
1628 else if (!strncmp(pszArgs, RT_STR_TUPLE("default")))
1629 {
1630 pszArgs += 7;
1631 *penmType = CPUMDUMPTYPE_DEFAULT;
1632 }
1633 else
1634 *penmType = CPUMDUMPTYPE_DEFAULT;
1635 *ppszComment = RTStrStripL(pszArgs);
1636 }
1637}
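/* A quick sketch of how the parser above splits an info argument; the input
   strings are illustrative only:

       CPUMDUMPTYPE enmType;
       const char  *pszComment;
       cpumR3InfoParseArg("terse after #PF", &enmType, &pszComment);
       // enmType == CPUMDUMPTYPE_TERSE, pszComment == "after #PF"
       cpumR3InfoParseArg(NULL, &enmType, &pszComment);
       // enmType == CPUMDUMPTYPE_DEFAULT, pszComment == ""
*/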
1638
1639
1640/**
1641 * Display the guest cpu state.
1642 *
1643 * @param pVM Pointer to the VM.
1644 * @param pHlp The info helper functions.
1645 * @param pszArgs Arguments: 'terse', 'default' or 'verbose', optionally followed by a comment string.
1646 */
1647static DECLCALLBACK(void) cpumR3InfoGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1648{
1649 CPUMDUMPTYPE enmType;
1650 const char *pszComment;
1651 cpumR3InfoParseArg(pszArgs, &enmType, &pszComment);
1652
1653 /** @todo SMP support! */
1654 PVMCPU pVCpu = VMMGetCpu(pVM);
1655 if (!pVCpu)
1656 pVCpu = &pVM->aCpus[0];
1657
1658 pHlp->pfnPrintf(pHlp, "Guest CPUM (VCPU %d) state: %s\n", pVCpu->idCpu, pszComment);
1659
1660 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest;
1661 cpumR3InfoOne(pVM, pCtx, CPUMCTX2CORE(pCtx), pHlp, enmType, "");
1662}
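/* This handler is registered with DBGF under the name "cpumguest" during CPUM
   initialization, so the dump can be requested from the VM debugger, e.g.:

       info cpumguest verbose
*/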
1663
1664
1665/**
1666 * Display the current guest instruction
1667 *
1668 * @param pVM Pointer to the VM.
1669 * @param pHlp The info helper functions.
1670 * @param pszArgs Arguments, ignored.
1671 */
1672static DECLCALLBACK(void) cpumR3InfoGuestInstr(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1673{
1674 NOREF(pszArgs);
1675
1676 /** @todo SMP support! */
1677 PVMCPU pVCpu = VMMGetCpu(pVM);
1678 if (!pVCpu)
1679 pVCpu = &pVM->aCpus[0];
1680
1681 char szInstruction[256];
1682 szInstruction[0] = '\0';
1683 DBGFR3DisasInstrCurrent(pVCpu, szInstruction, sizeof(szInstruction));
1684 pHlp->pfnPrintf(pHlp, "\nCPUM: %s\n\n", szInstruction);
1685}
1686
1687
1688/**
1689 * Display the hypervisor cpu state.
1690 *
1691 * @param pVM Pointer to the VM.
1692 * @param pHlp The info helper functions.
1693 * @param pszArgs Arguments: 'terse', 'default' or 'verbose', optionally followed by a comment string.
1694 */
1695static DECLCALLBACK(void) cpumR3InfoHyper(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1696{
1697 CPUMDUMPTYPE enmType;
1698 const char *pszComment;
1699 /** @todo SMP */
1700 PVMCPU pVCpu = &pVM->aCpus[0];
1701
1702 cpumR3InfoParseArg(pszArgs, &enmType, &pszComment);
1703 pHlp->pfnPrintf(pHlp, "Hypervisor CPUM state: %s\n", pszComment);
1704 cpumR3InfoOne(pVM, &pVCpu->cpum.s.Hyper, CPUMCTX2CORE(&pVCpu->cpum.s.Hyper), pHlp, enmType, ".");
1705 pHlp->pfnPrintf(pHlp, "CR4OrMask=%#x CR4AndMask=%#x\n", pVM->cpum.s.CR4.OrMask, pVM->cpum.s.CR4.AndMask);
1706}
1707
1708
1709/**
1710 * Display the host cpu state.
1711 *
1712 * @param pVM Pointer to the VM.
1713 * @param pHlp The info helper functions.
1714 * @param pszArgs Arguments: 'terse', 'default' or 'verbose', optionally followed by a comment string.
1715 */
1716static DECLCALLBACK(void) cpumR3InfoHost(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1717{
1718 CPUMDUMPTYPE enmType;
1719 const char *pszComment;
1720 cpumR3InfoParseArg(pszArgs, &enmType, &pszComment);
1721 pHlp->pfnPrintf(pHlp, "Host CPUM state: %s\n", pszComment);
1722
1723 /*
1724 * Format the EFLAGS.
1725 */
1726 /** @todo SMP */
1727 PCPUMHOSTCTX pCtx = &pVM->aCpus[0].cpum.s.Host;
1728#if HC_ARCH_BITS == 32
1729 uint32_t efl = pCtx->eflags.u32;
1730#else
1731 uint64_t efl = pCtx->rflags;
1732#endif
1733 char szEFlags[80];
1734 cpumR3InfoFormatFlags(&szEFlags[0], efl);
1735
1736 /*
1737 * Format the registers.
1738 */
1739#if HC_ARCH_BITS == 32
1740# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
1741 if (!(pCtx->efer & MSR_K6_EFER_LMA))
1742# endif
1743 {
1744 pHlp->pfnPrintf(pHlp,
1745 "eax=xxxxxxxx ebx=%08x ecx=xxxxxxxx edx=xxxxxxxx esi=%08x edi=%08x\n"
1746 "eip=xxxxxxxx esp=%08x ebp=%08x iopl=%d %31s\n"
1747 "cs=%04x ds=%04x es=%04x fs=%04x gs=%04x eflags=%08x\n"
1748 "cr0=%08RX64 cr2=xxxxxxxx cr3=%08RX64 cr4=%08RX64 gdtr=%08x:%04x ldtr=%04x\n"
1749 "dr[0]=%08RX64 dr[1]=%08RX64x dr[2]=%08RX64 dr[3]=%08RX64x dr[6]=%08RX64 dr[7]=%08RX64\n"
1750 "SysEnter={cs=%04x eip=%08x esp=%08x}\n"
1751 ,
1752 /*pCtx->eax,*/ pCtx->ebx, /*pCtx->ecx, pCtx->edx,*/ pCtx->esi, pCtx->edi,
1753 /*pCtx->eip,*/ pCtx->esp, pCtx->ebp, X86_EFL_GET_IOPL(efl), szEFlags,
1754 pCtx->cs, pCtx->ds, pCtx->es, pCtx->fs, pCtx->gs, efl,
1755 pCtx->cr0, /*pCtx->cr2,*/ pCtx->cr3, pCtx->cr4,
1756 pCtx->dr0, pCtx->dr1, pCtx->dr2, pCtx->dr3, pCtx->dr6, pCtx->dr7,
1757 (uint32_t)pCtx->gdtr.uAddr, pCtx->gdtr.cb, pCtx->ldtr,
1758 pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp);
1759 }
1760# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
1761 else
1762# endif
1763#endif
1764#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1765 {
1766 pHlp->pfnPrintf(pHlp,
1767 "rax=xxxxxxxxxxxxxxxx rbx=%016RX64 rcx=xxxxxxxxxxxxxxxx\n"
1768 "rdx=xxxxxxxxxxxxxxxx rsi=%016RX64 rdi=%016RX64\n"
1769 "rip=xxxxxxxxxxxxxxxx rsp=%016RX64 rbp=%016RX64\n"
1770 " r8=xxxxxxxxxxxxxxxx r9=xxxxxxxxxxxxxxxx r10=%016RX64\n"
1771 "r11=%016RX64 r12=%016RX64 r13=%016RX64\n"
1772 "r14=%016RX64 r15=%016RX64\n"
1773 "iopl=%d %31s\n"
1774 "cs=%04x ds=%04x es=%04x fs=%04x gs=%04x eflags=%08RX64\n"
1775 "cr0=%016RX64 cr2=xxxxxxxxxxxxxxxx cr3=%016RX64\n"
1776 "cr4=%016RX64 ldtr=%04x tr=%04x\n"
1777 "dr[0]=%016RX64 dr[1]=%016RX64 dr[2]=%016RX64\n"
1778 "dr[3]=%016RX64 dr[6]=%016RX64 dr[7]=%016RX64\n"
1779 "gdtr=%016RX64:%04x idtr=%016RX64:%04x\n"
1780 "SysEnter={cs=%04x eip=%08x esp=%08x}\n"
1781 "FSbase=%016RX64 GSbase=%016RX64 efer=%08RX64\n"
1782 ,
1783 /*pCtx->rax,*/ pCtx->rbx, /*pCtx->rcx,
1784 pCtx->rdx,*/ pCtx->rsi, pCtx->rdi,
1785 /*pCtx->rip,*/ pCtx->rsp, pCtx->rbp,
1786 /*pCtx->r8, pCtx->r9,*/ pCtx->r10,
1787 pCtx->r11, pCtx->r12, pCtx->r13,
1788 pCtx->r14, pCtx->r15,
1789 X86_EFL_GET_IOPL(efl), szEFlags,
1790 pCtx->cs, pCtx->ds, pCtx->es, pCtx->fs, pCtx->gs, efl,
1791 pCtx->cr0, /*pCtx->cr2,*/ pCtx->cr3,
1792 pCtx->cr4, pCtx->ldtr, pCtx->tr,
1793 pCtx->dr0, pCtx->dr1, pCtx->dr2,
1794 pCtx->dr3, pCtx->dr6, pCtx->dr7,
1795 pCtx->gdtr.uAddr, pCtx->gdtr.cb, pCtx->idtr.uAddr, pCtx->idtr.cb,
1796 pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp,
1797 pCtx->FSbase, pCtx->GSbase, pCtx->efer);
1798 }
1799#endif
1800}
1801
1802/**
1803 * Structure used when disassembling instructions in DBGF.
1804 * This is used so the reader function can get the stuff it needs.
1805 */
1806typedef struct CPUMDISASSTATE
1807{
1808 /** Pointer to the CPU structure. */
1809 PDISCPUSTATE pCpu;
1810 /** Pointer to the VM. */
1811 PVM pVM;
1812 /** Pointer to the VMCPU. */
1813 PVMCPU pVCpu;
1814 /** Pointer to the first byte in the segment. */
1815 RTGCUINTPTR GCPtrSegBase;
1816 /** Pointer to the byte after the end of the segment. (might have wrapped!) */
1817 RTGCUINTPTR GCPtrSegEnd;
1818 /** The size of the segment minus 1. */
1819 RTGCUINTPTR cbSegLimit;
1820 /** Pointer to the current page - R3 Ptr. */
1821 void const *pvPageR3;
1822 /** Pointer to the current page - GC Ptr. */
1823 RTGCPTR pvPageGC;
1824 /** The lock information that PGMPhysReleasePageMappingLock needs. */
1825 PGMPAGEMAPLOCK PageMapLock;
1826 /** Whether the PageMapLock is valid or not. */
1827 bool fLocked;
1828 /** 64 bits mode or not. */
1829 bool f64Bits;
1830} CPUMDISASSTATE, *PCPUMDISASSTATE;
1831
1832
1833/**
1834 * @callback_method_impl{FNDISREADBYTES}
1835 */
1836static DECLCALLBACK(int) cpumR3DisasInstrRead(PDISCPUSTATE pDis, uint8_t offInstr, uint8_t cbMinRead, uint8_t cbMaxRead)
1837{
1838 PCPUMDISASSTATE pState = (PCPUMDISASSTATE)pDis->pvUser;
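 /* Loop so that a read crossing a page boundary is satisfied piecewise;
    each iteration (re-)translates the page the current byte lives in. */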
1839 for (;;)
1840 {
1841 RTGCUINTPTR GCPtr = pDis->uInstrAddr + offInstr + pState->GCPtrSegBase;
1842
1843 /*
1844 * Need to update the page translation?
1845 */
1846 if ( !pState->pvPageR3
1847 || (GCPtr >> PAGE_SHIFT) != (pState->pvPageGC >> PAGE_SHIFT))
1848 {
1849 int rc = VINF_SUCCESS;
1850
1851 /* translate the address */
1852 pState->pvPageGC = GCPtr & PAGE_BASE_GC_MASK;
1853 if ( !HMIsEnabled(pState->pVM)
1854 && MMHyperIsInsideArea(pState->pVM, pState->pvPageGC))
1855 {
1856 pState->pvPageR3 = MMHyperRCToR3(pState->pVM, (RTRCPTR)pState->pvPageGC);
1857 if (!pState->pvPageR3)
1858 rc = VERR_INVALID_POINTER;
1859 }
1860 else
1861 {
1862 /* Release mapping lock previously acquired. */
1863 if (pState->fLocked)
1864 PGMPhysReleasePageMappingLock(pState->pVM, &pState->PageMapLock);
1865 rc = PGMPhysGCPtr2CCPtrReadOnly(pState->pVCpu, pState->pvPageGC, &pState->pvPageR3, &pState->PageMapLock);
1866 pState->fLocked = RT_SUCCESS_NP(rc);
1867 }
1868 if (RT_FAILURE(rc))
1869 {
1870 pState->pvPageR3 = NULL;
1871 return rc;
1872 }
1873 }
1874
1875 /*
1876 * Check the segment limit.
1877 */
1878 if (!pState->f64Bits && pDis->uInstrAddr + offInstr > pState->cbSegLimit)
1879 return VERR_OUT_OF_SELECTOR_BOUNDS;
1880
1881 /*
1882 * Calc how much we can read.
1883 */
1884 uint32_t cb = PAGE_SIZE - (GCPtr & PAGE_OFFSET_MASK);
1885 if (!pState->f64Bits)
1886 {
1887 RTGCUINTPTR cbSeg = pState->GCPtrSegEnd - GCPtr;
1888 if (cb > cbSeg && cbSeg)
1889 cb = cbSeg;
1890 }
1891 if (cb > cbMaxRead)
1892 cb = cbMaxRead;
1893
1894 /*
1895 * Read and advance or exit.
1896 */
1897 memcpy(&pDis->abInstr[offInstr], (uint8_t *)pState->pvPageR3 + (GCPtr & PAGE_OFFSET_MASK), cb);
1898 offInstr += (uint8_t)cb;
1899 if (cb >= cbMinRead)
1900 {
1901 pDis->cbCachedInstr = offInstr;
1902 return VINF_SUCCESS;
1903 }
1904 cbMinRead -= (uint8_t)cb;
1905 cbMaxRead -= (uint8_t)cb;
1906 }
1907}
1908
1909
1910/**
1911 * Disassemble an instruction and return the information in the provided structure.
1912 *
1913 * @returns VBox status code.
1914 * @param pVM Pointer to the VM.
1915 * @param pVCpu Pointer to the VMCPU.
1916 * @param pCtx Pointer to the guest CPU context.
1917 * @param GCPtrPC Program counter (relative to CS) to disassemble from.
1918 * @param pCpu Disassembly state.
1919 * @param pszPrefix String prefix for logging (debug only).
1920 *
1921 */
1922VMMR3DECL(int) CPUMR3DisasmInstrCPU(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTGCPTR GCPtrPC, PDISCPUSTATE pCpu, const char *pszPrefix)
1923{
1924 CPUMDISASSTATE State;
1925 int rc;
1926
1927 const PGMMODE enmMode = PGMGetGuestMode(pVCpu);
1928 State.pCpu = pCpu;
1929 State.pvPageGC = 0;
1930 State.pvPageR3 = NULL;
1931 State.pVM = pVM;
1932 State.pVCpu = pVCpu;
1933 State.fLocked = false;
1934 State.f64Bits = false;
1935
1936 /*
1937 * Get selector information.
1938 */
1939 DISCPUMODE enmDisCpuMode;
1940 if ( (pCtx->cr0 & X86_CR0_PE)
1941 && pCtx->eflags.Bits.u1VM == 0)
1942 {
1943 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs))
1944 {
1945# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1946 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, &pCtx->cs);
1947# endif
1948 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs))
1949 return VERR_CPUM_HIDDEN_CS_LOAD_ERROR;
1950 }
1951 State.f64Bits = enmMode >= PGMMODE_AMD64 && pCtx->cs.Attr.n.u1Long;
1952 State.GCPtrSegBase = pCtx->cs.u64Base;
1953 State.GCPtrSegEnd = pCtx->cs.u32Limit + 1 + (RTGCUINTPTR)pCtx->cs.u64Base;
1954 State.cbSegLimit = pCtx->cs.u32Limit;
1955 enmDisCpuMode = (State.f64Bits)
1956 ? DISCPUMODE_64BIT
1957 : pCtx->cs.Attr.n.u1DefBig
1958 ? DISCPUMODE_32BIT
1959 : DISCPUMODE_16BIT;
1960 }
1961 else
1962 {
1963 /* real or V86 mode */
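 /* Segment base = selector * 16. Note that the 64KiB real-mode limit is not
    enforced here; a flat 4GB segment end and limit are used instead. */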
1964 enmDisCpuMode = DISCPUMODE_16BIT;
1965 State.GCPtrSegBase = pCtx->cs.Sel * 16;
1966 State.GCPtrSegEnd = 0xFFFFFFFF;
1967 State.cbSegLimit = 0xFFFFFFFF;
1968 }
1969
1970 /*
1971 * Disassemble the instruction.
1972 */
1973 uint32_t cbInstr;
1974#ifndef LOG_ENABLED
1975 rc = DISInstrWithReader(GCPtrPC, enmDisCpuMode, cpumR3DisasInstrRead, &State, pCpu, &cbInstr);
1976 if (RT_SUCCESS(rc))
1977 {
1978#else
1979 char szOutput[160];
1980 rc = DISInstrToStrWithReader(GCPtrPC, enmDisCpuMode, cpumR3DisasInstrRead, &State,
1981 pCpu, &cbInstr, szOutput, sizeof(szOutput));
1982 if (RT_SUCCESS(rc))
1983 {
1984 /* log it */
1985 if (pszPrefix)
1986 Log(("%s-CPU%d: %s", pszPrefix, pVCpu->idCpu, szOutput));
1987 else
1988 Log(("%s", szOutput));
1989#endif
1990 rc = VINF_SUCCESS;
1991 }
1992 else
1993 Log(("CPUMR3DisasmInstrCPU: DISInstr failed for %04X:%RGv rc=%Rrc\n", pCtx->cs.Sel, GCPtrPC, rc));
1994
1995 /* Release mapping lock acquired in cpumR3DisasInstrRead. */
1996 if (State.fLocked)
1997 PGMPhysReleasePageMappingLock(pVM, &State.PageMapLock);
1998
1999 return rc;
2000}
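/* Usage sketch (illustrative only): disassemble the instruction at the current
   guest RIP and, in logging builds, have it logged with an "EM" prefix:

       DISCPUSTATE Cpu;
       PCPUMCTX    pCtx = CPUMQueryGuestCtxPtr(pVCpu);
       int rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &Cpu, "EM");
       if (RT_SUCCESS(rc))
           ;   // Cpu now describes the opcode, operands and instruction length.
*/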
2001
2002
2003
2004/**
2005 * API for controlling a few of the CPU features found in CR4.
2006 *
2007 * Currently only X86_CR4_TSD is accepted as input.
2008 *
2009 * @returns VBox status code.
2010 *
2011 * @param pVM Pointer to the VM.
2012 * @param fOr The CR4 OR mask.
2013 * @param fAnd The CR4 AND mask.
2014 */
2015VMMR3DECL(int) CPUMR3SetCR4Feature(PVM pVM, RTHCUINTREG fOr, RTHCUINTREG fAnd)
2016{
2017 AssertMsgReturn(!(fOr & ~(X86_CR4_TSD)), ("%#x\n", fOr), VERR_INVALID_PARAMETER);
2018 AssertMsgReturn((fAnd & ~(X86_CR4_TSD)) == ~(X86_CR4_TSD), ("%#x\n", fAnd), VERR_INVALID_PARAMETER);
2019
2020 pVM->cpum.s.CR4.OrMask &= fAnd;
2021 pVM->cpum.s.CR4.OrMask |= fOr;
2022
2023 return VINF_SUCCESS;
2024}
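/* Example (illustrative): setting and later clearing the TSD bit, e.g. so that
   guest RDTSC traps and can be virtualized:

       CPUMR3SetCR4Feature(pVM, X86_CR4_TSD, ~(RTHCUINTREG)0);        // set TSD
       CPUMR3SetCR4Feature(pVM, 0, ~(RTHCUINTREG)X86_CR4_TSD);        // clear TSD
*/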
2025
2026
2027/**
2028 * Enters REM, gets and resets the changed flags (CPUM_CHANGED_*).
2029 *
2030 * Only REM should ever call this function!
2031 *
2032 * @returns The changed flags.
2033 * @param pVCpu Pointer to the VMCPU.
2034 * @param puCpl Where to return the current privilege level (CPL).
2035 */
2036VMMR3DECL(uint32_t) CPUMR3RemEnter(PVMCPU pVCpu, uint32_t *puCpl)
2037{
2038 Assert(!pVCpu->cpum.s.fRawEntered);
2039 Assert(!pVCpu->cpum.s.fRemEntered);
2040
2041 /*
2042 * Get the CPL first.
2043 */
2044 *puCpl = CPUMGetGuestCPL(pVCpu);
2045
2046 /*
2047 * Get and reset the flags.
2048 */
2049 uint32_t fFlags = pVCpu->cpum.s.fChanged;
2050 pVCpu->cpum.s.fChanged = 0;
2051
2052 /** @todo change the switcher to use the fChanged flags. */
2053 if (pVCpu->cpum.s.fUseFlags & CPUM_USED_FPU_SINCE_REM)
2054 {
2055 fFlags |= CPUM_CHANGED_FPU_REM;
2056 pVCpu->cpum.s.fUseFlags &= ~CPUM_USED_FPU_SINCE_REM;
2057 }
2058
2059 pVCpu->cpum.s.fRemEntered = true;
2060 return fFlags;
2061}
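/* Sketch of the expected call pairing (illustrative):

       uint32_t uCpl;
       uint32_t fChanged = CPUMR3RemEnter(pVCpu, &uCpl);
       // ... sync the state indicated by fChanged into REM and execute ...
       CPUMR3RemLeave(pVCpu, true); // no out-of-sync selector registers
*/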
2062
2063
2064/**
2065 * Leaves REM.
2066 *
2067 * @param pVCpu Pointer to the VMCPU.
2068 * @param fNoOutOfSyncSels Pass @c false if there are out-of-sync selector
2069 * registers, @c true otherwise.
2070 */
2071VMMR3DECL(void) CPUMR3RemLeave(PVMCPU pVCpu, bool fNoOutOfSyncSels)
2072{
2073 Assert(!pVCpu->cpum.s.fRawEntered);
2074 Assert(pVCpu->cpum.s.fRemEntered);
2075
2076 pVCpu->cpum.s.fRemEntered = false;
2077}
2078
2079
2080/**
2081 * Called when the ring-3 init phase completes.
2082 *
2083 * @returns VBox status code.
2084 * @param pVM Pointer to the VM.
2085 */
2086VMMR3DECL(int) CPUMR3InitCompleted(PVM pVM)
2087{
2088 /*
2089 * Figure out if the guest uses 32-bit or 64-bit FPU state at runtime for 64-bit capable VMs.
2090 * Only applicable/used on 64-bit hosts; see CPUMR0A.asm and @bugref{7138}.
2091 */
2092 bool const fSupportsLongMode = VMR3IsLongModeAllowed(pVM);
2093 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2094 {
2095 PVMCPU pVCpu = &pVM->aCpus[i];
2096
2097 /* Cache the APIC base (from the APIC device) once it has been initialized. */
2098 PDMApicGetBase(pVCpu, &pVCpu->cpum.s.Guest.msrApicBase);
2099 Log(("CPUMR3InitCompleted pVM=%p APIC base[%u]=%RX64\n", pVM, (unsigned)i, pVCpu->cpum.s.Guest.msrApicBase));
2100
2101 /* While loading a saved state we fix it up in cpumR3LoadDone(). */
2102 if (fSupportsLongMode)
2103 pVCpu->cpum.s.fUseFlags |= CPUM_USE_SUPPORTS_LONGMODE;
2104 }
2105 return VINF_SUCCESS;
2106}
2107
2108
2109/**
2110 * Called when the ring-0 init phases have completed.
2111 *
2112 * @param pVM Pointer to the VM.
2113 */
2114VMMR3DECL(void) CPUMR3LogCpuIds(PVM pVM)
2115{
2116 /*
2117 * Log the cpuid.
2118 */
2119 bool fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/);
2120 RTCPUSET OnlineSet;
2121 LogRel(("Logical host processors: %u present, %u max, %u online, online mask: %016RX64\n",
2122 (unsigned)RTMpGetPresentCount(), (unsigned)RTMpGetCount(), (unsigned)RTMpGetOnlineCount(),
2123 RTCpuSetToU64(RTMpGetOnlineSet(&OnlineSet)) ));
2124 RTCPUID cCores = RTMpGetCoreCount();
2125 if (cCores)
2126 LogRel(("Physical host cores: %u\n", (unsigned)cCores));
2127 LogRel(("************************* CPUID dump ************************\n"));
2128 DBGFR3Info(pVM->pUVM, "cpuid", "verbose", DBGFR3InfoLogRelHlp());
2129 LogRel(("\n"));
2130 DBGFR3_INFO_LOG(pVM, "cpuid", "verbose"); /* macro */
2131 RTLogRelSetBuffering(fOldBuffered);
2132 LogRel(("******************** End of CPUID dump **********************\n"));
2133}
2134