VirtualBox

source: vbox/trunk/src/VBox/VMM/include/CPUMInternal.mac@ 97231

自修訂版 97231 以來,此檔案的最後變更為修訂版 97213,由 vboxsync 於 2 年前提交

VMM,VBox/types.h: Removed the CPUMCTXCORE type.

  • 屬性 svn:eol-style 設為 native
  • 屬性 svn:keywords 設為 Id Revision
檔案大小: 24.1 KB
 
1; $Id: CPUMInternal.mac 97213 2022-10-18 15:00:16Z vboxsync $
2;; @file
3; CPUM - Internal header file (asm).
4;
5
6;
7; Copyright (C) 2006-2022 Oracle and/or its affiliates.
8;
9; This file is part of VirtualBox base platform packages, as
10; available from https://www.virtualbox.org.
11;
12; This program is free software; you can redistribute it and/or
13; modify it under the terms of the GNU General Public License
14; as published by the Free Software Foundation, in version 3 of the
15; License.
16;
17; This program is distributed in the hope that it will be useful, but
18; WITHOUT ANY WARRANTY; without even the implied warranty of
19; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20; General Public License for more details.
21;
22; You should have received a copy of the GNU General Public License
23; along with this program; if not, see <https://www.gnu.org/licenses>.
24;
25; SPDX-License-Identifier: GPL-3.0-only
26;
27
28%include "VBox/asmdefs.mac"
29%include "VBox/vmm/cpum.mac"
30
;; Check sanity.
; VBOX_WITH_KERNEL_USING_XMM implies ring-0-only code paths; fail the build
; early if it leaks into other contexts.
%ifdef VBOX_WITH_KERNEL_USING_XMM
 %ifndef IN_RING0
  %error "What? We've got code assuming VBOX_WITH_KERNEL_USING_XMM is only defined in ring-0!"
 %endif
%endif

;; For numeric expressions
; CPUM_IS_AMD64 is 1 on AMD64 targets and 0 otherwise, so it can be used
; inside %if arithmetic (a bare %ifdef cannot appear in a numeric expression).
%ifdef RT_ARCH_AMD64
 %define CPUM_IS_AMD64 1
%else
 %define CPUM_IS_AMD64 0
%endif
44
45
;;
; CPU info
; NOTE(review): this asm mirror must stay byte-for-byte in sync with the C
; CPUMINFO structure - verify against CPUMInternal.h when changing anything.
struc CPUMINFO
    .cMsrRanges             resd 1                  ; uint32_t - number of entries in the MSR range table
    .fMsrMask               resd 1                  ; uint32_t - mask applied to MSR indexes
    .fMxCsrMask             resd 1                  ; uint32_t - valid MXCSR bits
    .cCpuIdLeaves           resd 1                  ; uint32_t - number of CPUID leaves
    .iFirstExtCpuIdLeaf     resd 1                  ; uint32_t - index of the first extended CPUID leaf
    .enmUnknownCpuIdMethod  resd 1                  ; CPUMUNKNOWNCPUID - how to answer unknown CPUID leaves
    .DefCpuId               resb CPUMCPUID_size     ; CPUMCPUID - default CPUID result
    .uScalableBusFreq       resq 1                  ; uint64_t - scalable bus frequency
    .paMsrRangesR3          RTR3PTR_RES 1           ; R3PTRTYPE(PCPUMMSRRANGE)
    .paCpuIdLeavesR3        RTR3PTR_RES 1           ; R3PTRTYPE(PCPUMCPUIDLEAF)
    .aCpuIdLeaves           resb 256*32             ; embedded CPUID leaf storage (256*32 bytes)
    .aMsrRanges             resb 8192*128           ; embedded MSR range storage (8192*128 bytes)
endstruc
62
63
;; @name CPUM use/used flag bits (CPUM.fHostUseFlags / CPUMCPU.fUseFlags).
; NOTE(review): these presumably mirror the CPUM_* defines in CPUMInternal.h -
; keep the bit numbers in sync when changing either side.
%define CPUM_USED_FPU_HOST              RT_BIT(0)
%define CPUM_USED_FPU_GUEST             RT_BIT(10)  ; Note: bit 10, listed here next to its FPU siblings despite being out of numeric order.
%define CPUM_USED_FPU_SINCE_REM         RT_BIT(1)
%define CPUM_USED_MANUAL_XMM_RESTORE    RT_BIT(2)
%define CPUM_USE_SYSENTER               RT_BIT(3)
%define CPUM_USE_SYSCALL                RT_BIT(4)
%define CPUM_USE_DEBUG_REGS_HOST        RT_BIT(5)
%define CPUM_USED_DEBUG_REGS_HOST       RT_BIT(6)
%define CPUM_USE_DEBUG_REGS_HYPER       RT_BIT(7)
%define CPUM_USED_DEBUG_REGS_HYPER      RT_BIT(8)
%define CPUM_USED_DEBUG_REGS_GUEST      RT_BIT(9)
%define CPUM_USE_FFXSR_LEAKY            RT_BIT(19)  ; AMD FXSAVE/FXRSTOR leaky-state workaround; see CLEANFPU below.
%define CPUM_USE_SUPPORTS_LONGMODE      RT_BIT(20)  ; Tested by SAVE_32_OR_64_FPU / RESTORE_32_OR_64_FPU.
77
78
; NOTE(review): asm mirror of the C CPUM structure - offsets must match
; CPUMInternal.h exactly; only comments may differ.
struc CPUM
    ;... (leading members elided in this view - do not add fields here)
    .fHostUseFlags              resd 1      ; CPUM_USE_*/CPUM_USED_* flags for the host.

    ; CR4 masks
    .CR4.AndMask                resd 1      ; Bits forcibly cleared from guest CR4.
    .CR4.OrMask                 resd 1      ; Bits forcibly set in guest CR4.
    .u8PortableCpuIdLevel       resb 1      ; Portable CPUID level - TODO confirm semantics against CPUMInternal.h.
    .fPendingRestore            resb 1

    alignb 8
    .fXStateGuestMask           resq 1      ; XSAVE component mask for the guest.
    .fXStateHostMask            resq 1      ; XSAVE component mask for the host.

    alignb 64
    .HostFeatures               resb 48     ; 48 bytes - presumably CPUMFEATURES of the host; verify size.
    .GuestFeatures              resb 48     ; 48 bytes - presumably CPUMFEATURES of the guest; verify size.
    .GuestInfo                  resb CPUMINFO_size

    ; Patch manager saved state compatibility CPUID leaf arrays
    .aGuestCpuIdPatmStd         resb 16*6
    .aGuestCpuIdPatmExt         resb 16*10
    .aGuestCpuIdPatmCentaur     resb 16*4

    alignb 8
    ; MSR access statistics.
    .cMsrWrites                 resq 1
    .cMsrWritesToIgnoredBits    resq 1
    .cMsrWritesRaiseGp          resq 1
    .cMsrWritesUnknown          resq 1
    .cMsrReads                  resq 1
    .cMsrReadsRaiseGp           resq 1
    .cMsrReadsUnknown           resq 1
endstruc
112
; NOTE(review): asm mirror of the per-VCPU C CPUMCPU structure. Offsets must
; match the C side exactly; this file only adds/changes comments.
struc CPUMCPU
    ;
    ; Guest context state (CPUMCTX mirror).
    ;
    .Guest                  resq 0          ; Zero-size marker: offset of the guest context.
    .Guest.eax              resq 1
    .Guest.ecx              resq 1
    .Guest.edx              resq 1
    .Guest.ebx              resq 1
    .Guest.esp              resq 1
    .Guest.ebp              resq 1
    .Guest.esi              resq 1
    .Guest.edi              resq 1
    .Guest.r8               resq 1
    .Guest.r9               resq 1
    .Guest.r10              resq 1
    .Guest.r11              resq 1
    .Guest.r12              resq 1
    .Guest.r13              resq 1
    .Guest.r14              resq 1
    .Guest.r15              resq 1
    ; Segment registers: visible selector + padding, validation selector,
    ; flags, then the hidden base/limit/attribute state.
    .Guest.es.Sel           resw 1
    .Guest.es.PaddingSel    resw 1
    .Guest.es.ValidSel      resw 1
    .Guest.es.fFlags        resw 1
    .Guest.es.u64Base       resq 1
    .Guest.es.u32Limit      resd 1
    .Guest.es.Attr          resd 1
    .Guest.cs.Sel           resw 1
    .Guest.cs.PaddingSel    resw 1
    .Guest.cs.ValidSel      resw 1
    .Guest.cs.fFlags        resw 1
    .Guest.cs.u64Base       resq 1
    .Guest.cs.u32Limit      resd 1
    .Guest.cs.Attr          resd 1
    .Guest.ss.Sel           resw 1
    .Guest.ss.PaddingSel    resw 1
    .Guest.ss.ValidSel      resw 1
    .Guest.ss.fFlags        resw 1
    .Guest.ss.u64Base       resq 1
    .Guest.ss.u32Limit      resd 1
    .Guest.ss.Attr          resd 1
    .Guest.ds.Sel           resw 1
    .Guest.ds.PaddingSel    resw 1
    .Guest.ds.ValidSel      resw 1
    .Guest.ds.fFlags        resw 1
    .Guest.ds.u64Base       resq 1
    .Guest.ds.u32Limit      resd 1
    .Guest.ds.Attr          resd 1
    .Guest.fs.Sel           resw 1
    .Guest.fs.PaddingSel    resw 1
    .Guest.fs.ValidSel      resw 1
    .Guest.fs.fFlags        resw 1
    .Guest.fs.u64Base       resq 1
    .Guest.fs.u32Limit      resd 1
    .Guest.fs.Attr          resd 1
    .Guest.gs.Sel           resw 1
    .Guest.gs.PaddingSel    resw 1
    .Guest.gs.ValidSel      resw 1
    .Guest.gs.fFlags        resw 1
    .Guest.gs.u64Base       resq 1
    .Guest.gs.u32Limit      resd 1
    .Guest.gs.Attr          resd 1
    .Guest.ldtr.Sel         resw 1
    .Guest.ldtr.PaddingSel  resw 1
    .Guest.ldtr.ValidSel    resw 1
    .Guest.ldtr.fFlags      resw 1
    .Guest.ldtr.u64Base     resq 1
    .Guest.ldtr.u32Limit    resd 1
    .Guest.ldtr.Attr        resd 1
    .Guest.tr.Sel           resw 1
    .Guest.tr.PaddingSel    resw 1
    .Guest.tr.ValidSel      resw 1
    .Guest.tr.fFlags        resw 1
    .Guest.tr.u64Base       resq 1
    .Guest.tr.u32Limit      resd 1
    .Guest.tr.Attr          resd 1
    .Guest.eip              resq 1
    .Guest.eflags           resq 1
    .Guest.fInhibit         resb 1          ; Interrupt/NMI inhibit flag(s).
    alignb 8
    .Guest.uRipInhibitInt   resq 1          ; RIP at which interrupt inhibition applies.
    .Guest.cr0              resq 1
    .Guest.cr2              resq 1
    .Guest.cr3              resq 1
    .Guest.cr4              resq 1
    .Guest.dr               resq 8          ; DR0..DR7 as one array.
    ; GDTR/IDTR: 3 words of padding so the 16-bit limit + 64-bit base pack
    ; like the C layout.
    .Guest.gdtrPadding      resw 3
    .Guest.gdtr             resw 0          ; Zero-size marker for the GDTR pair.
    .Guest.gdtr.cbGdt       resw 1
    .Guest.gdtr.pGdt        resq 1
    .Guest.idtrPadding      resw 3
    .Guest.idtr             resw 0          ; Zero-size marker for the IDTR pair.
    .Guest.idtr.cbIdt       resw 1
    .Guest.idtr.pIdt        resq 1
    .Guest.SysEnter.cs      resb 8
    .Guest.SysEnter.eip     resb 8
    .Guest.SysEnter.esp     resb 8
    .Guest.msrEFER          resb 8
    .Guest.msrSTAR          resb 8
    .Guest.msrPAT           resb 8
    .Guest.msrLSTAR         resb 8
    .Guest.msrCSTAR         resb 8
    .Guest.msrSFMASK        resb 8
    .Guest.msrKERNELGSBASE  resb 8

    alignb 8
    .Guest.fExtrn           resq 1          ; Mask of state not yet imported from HM/NEM.

    alignb 32
    .Guest.aPaePdpes        resq 4          ; PAE PDPTEs.

    alignb 8
    .Guest.aXcr             resq 2          ; XCR0 and XCR1.
    .Guest.fXStateMask      resq 1          ; Enabled XSAVE component mask for the guest.
    .Guest.fUsedFpuGuest    resb 1
    alignb 8
    .Guest.aoffXState       resw 64         ; Per-component offsets into abXState.
    alignb 256
    .Guest.abXState         resb 0x4000-0x300   ; Extended (XSAVE/FXSAVE) state area.
    .Guest.XState           EQU .Guest.abXState ; Alias used by the save/load macros below.

;;
    ; Hardware virtualization state. SVM and VMX share the same storage; the
    ; SVM members are laid out as an EQU overlay so its page-sized pieces stay
    ; page-aligned (the area itself starts on a 4K boundary).
    alignb 4096
    .Guest.hwvirt           resb 0
    .Guest.hwvirt.svm       resb 0
    .Guest.hwvirt.vmx       resb 0

    .Guest.hwvirt.svm.Vmcb                  EQU .Guest.hwvirt.svm
    .Guest.hwvirt.svm.abMsrBitmap           EQU (.Guest.hwvirt.svm.Vmcb + 0x1000)
    .Guest.hwvirt.svm.abIoBitmap            EQU (.Guest.hwvirt.svm.abMsrBitmap + 0x2000)
    .Guest.hwvirt.svm.uMsrHSavePa           EQU (.Guest.hwvirt.svm.abIoBitmap + 0x3000)  ; resq 1
    .Guest.hwvirt.svm.GCPhysVmcb            EQU (.Guest.hwvirt.svm.uMsrHSavePa + 8)      ; resq 1
    alignb 8
    .Guest.hwvirt.svm.HostState             EQU (.Guest.hwvirt.svm.GCPhysVmcb + 8)       ; resb 184
    .Guest.hwvirt.svm.uPrevPauseTick        EQU (.Guest.hwvirt.svm.HostState + 184)      ; resq 1
    .Guest.hwvirt.svm.cPauseFilter          EQU (.Guest.hwvirt.svm.uPrevPauseTick + 8)   ; resw 1
    .Guest.hwvirt.svm.cPauseFilterThreshold EQU (.Guest.hwvirt.svm.cPauseFilter + 2)     ; resw 1
    .Guest.hwvirt.svm.fInterceptEvents      EQU (.Guest.hwvirt.svm.cPauseFilterThreshold + 2) ; resb 1

    ; VMX nested-virtualization state (reserves the actual storage).
    .Guest.hwvirt.vmx.Vmcs                  resb 0x1000
    .Guest.hwvirt.vmx.ShadowVmcs            resb 0x1000
    .Guest.hwvirt.vmx.abVmreadBitmap        resb 0x1000
    .Guest.hwvirt.vmx.abVmwriteBitmap       resb 0x1000
    .Guest.hwvirt.vmx.aEntryMsrLoadArea     resb 0x2000
    .Guest.hwvirt.vmx.aExitMsrStoreArea     resb 0x2000
    .Guest.hwvirt.vmx.aExitMsrLoadArea      resb 0x2000
    .Guest.hwvirt.vmx.abMsrBitmap           resb 0x1000
    .Guest.hwvirt.vmx.abIoBitmap            resb 0x1000+0x1000
    alignb 8
    .Guest.hwvirt.vmx.GCPhysVmxon           resq 1
    .Guest.hwvirt.vmx.GCPhysVmcs            resq 1
    .Guest.hwvirt.vmx.GCPhysShadowVmcs      resq 1
    .Guest.hwvirt.vmx.enmDiag               resd 1
    .Guest.hwvirt.vmx.enmAbort              resd 1
    .Guest.hwvirt.vmx.uDiagAux              resq 1
    .Guest.hwvirt.vmx.uAbortAux             resd 1
    .Guest.hwvirt.vmx.fInVmxRootMode        resb 1
    .Guest.hwvirt.vmx.fInVmxNonRootMode     resb 1
    .Guest.hwvirt.vmx.fInterceptEvents      resb 1
    .Guest.hwvirt.vmx.fNmiUnblockingIret    resb 1
    .Guest.hwvirt.vmx.uFirstPauseLoopTick   resq 1
    .Guest.hwvirt.vmx.uPrevPauseTick        resq 1
    .Guest.hwvirt.vmx.uEntryTick            resq 1
    .Guest.hwvirt.vmx.offVirtApicWrite      resw 1
    .Guest.hwvirt.vmx.fVirtNmiBlocking      resb 1
    alignb 8
    .Guest.hwvirt.vmx.Msrs                  resb 224

    alignb 8
    .Guest.hwvirt.enmHwvirt     resd 1      ; Which of SVM/VMX (if any) is in use.
    .Guest.hwvirt.fGif          resb 1      ; Global interrupt flag.
    alignb 4
    .Guest.hwvirt.fSavedInhibit resd 1
    alignb 64

    .GuestMsrs              resq 0          ; Zero-size marker for the guest MSR array.
    .GuestMsrs.au64         resq 64

    ;
    ; Other stuff.
    ;
    .hNestedVmxPreemptTimer resq 1

    .fUseFlags              resd 1          ; CPUM_USE_*/CPUM_USED_* flags (see defines above).
    .fChanged               resd 1
    .u32RetCode             resd 1
    .fCpuIdApicFeatureVisible resb 1

    ;
    ; Host context state. Registers marked "scratch" in the comments below are
    ; deliberately not stored (volatile across the world switch).
    ;
    alignb 64
    .Host                   resb 0          ; Zero-size marker: offset of the host context.
    .Host.abXState          resb 0x4000-0x300   ; Host extended (XSAVE/FXSAVE) state area.
    .Host.XState            EQU .Host.abXState  ; Alias used by the save/load macros below.
    ;.Host.rax              resq 1 - scratch
    .Host.rbx               resq 1
    ;.Host.rcx              resq 1 - scratch
    ;.Host.rdx              resq 1 - scratch
    .Host.rdi               resq 1
    .Host.rsi               resq 1
    .Host.rbp               resq 1
    .Host.rsp               resq 1
    ;.Host.r8               resq 1 - scratch
    ;.Host.r9               resq 1 - scratch
    .Host.r10               resq 1
    .Host.r11               resq 1
    .Host.r12               resq 1
    .Host.r13               resq 1
    .Host.r14               resq 1
    .Host.r15               resq 1
    ;.Host.rip              resd 1 - scratch
    .Host.rflags            resq 1
    .Host.ss                resw 1
    .Host.ssPadding         resw 1
    .Host.gs                resw 1
    .Host.gsPadding         resw 1
    .Host.fs                resw 1
    .Host.fsPadding         resw 1
    .Host.es                resw 1
    .Host.esPadding         resw 1
    .Host.ds                resw 1
    .Host.dsPadding         resw 1
    .Host.cs                resw 1
    .Host.csPadding         resw 1

    .Host.cr0Fpu:                           ; Alias of .Host.cr0 used by the CR0.TS/EM save/restore macros.
    .Host.cr0               resq 1
    ;.Host.cr2              resq 1 - scratch
    .Host.cr3               resq 1
    .Host.cr4               resq 1
    .Host.cr8               resq 1

    .Host.dr0               resq 1
    .Host.dr1               resq 1
    .Host.dr2               resq 1
    .Host.dr3               resq 1
    .Host.dr6               resq 1
    .Host.dr7               resq 1

    .Host.gdtr              resb 10         ; GDT limit + linear address
    .Host.gdtrPadding       resw 1
    .Host.idtr              resb 10         ; IDT limit + linear address
    .Host.idtrPadding       resw 1
    .Host.ldtr              resw 1
    .Host.ldtrPadding       resw 1
    .Host.tr                resw 1
    .Host.trPadding         resw 1

    .Host.SysEnter.cs       resq 1
    .Host.SysEnter.eip      resq 1
    .Host.SysEnter.esp      resq 1
    .Host.FSbase            resq 1
    .Host.GSbase            resq 1
    .Host.efer              resq 1
    alignb 8
    .Host.xcr0              resq 1
    .Host.fXStateMask       resq 1          ; Host XSAVE component mask; 0 = use FXSAVE/FXRSTOR.

    ;
    ; Hypervisor Context.
    ;
    alignb 64
    .Hyper                  resq 0          ; Zero-size marker: offset of the hypervisor context.
    .Hyper.dr               resq 8          ; Hypervisor DR0..DR7.
    .Hyper.cr3              resq 1
    alignb 64

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    .aMagic                 resb 56         ; Crash-dump marker (debug builds only).
    .uMagic                 resq 1
%endif
endstruc
387
388
389
%if 0 ; Currently not used anywhere.
;;
; Macro for FXSAVE/FXRSTOR leaky behaviour on AMD CPUs, see cpumR3CheckLeakyFpu().
;
; Cleans the FPU state, if necessary, before restoring the FPU.
;
; This macro ASSUMES CR0.TS is not set!
;
; @param xDX Pointer to CPUMCPU.
; @uses xAX, EFLAGS
;
; Changes here should also be reflected in CPUMRCA.asm's copy!
;
; NOTE(review): the jump targets use plain local labels (.clean_fpu etc.)
; rather than macro-local %%labels, so expanding this macro more than once
; under one global label would clash - fix before re-enabling.
;
%macro CLEANFPU 0
        test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USE_FFXSR_LEAKY
        jz      .nothing_to_clean           ; Not a leaky CPU - nothing to do.

        xor     eax, eax
        fnstsw  ax                          ; FSW -> AX.
        test    eax, RT_BIT(7)              ; If FSW.ES (bit 7) is set, clear it to not cause FPU exceptions
                                            ; while clearing & loading the FPU bits in 'clean_fpu' below.
        jz      .clean_fpu
        fnclex

.clean_fpu:
        ffree   st7                         ; Clear FPU stack register(7)'s tag entry to prevent overflow if a wraparound occurs
                                            ; for the upcoming push (load).
        fild    dword [g_r32_Zero xWrtRIP]  ; Explicit FPU load to overwrite FIP, FOP, FDP registers in the FPU.
.nothing_to_clean:
%endmacro
%endif ; Unused.
421
422
;;
; Makes sure we don't trap (#NM) accessing the FPU.
;
; In ring-0 this is a bit of work since we may have to try convince the host
; kernel to do the work for us, also, we must report any CR0 changes back to
; HMR0VMX via the VINF_CPUM_HOST_CR0_MODIFIED status code.
;
; If we end up clearing CR0.TS/EM ourselves in ring-0, we'll save the original
; value in CPUMCPU.Host.cr0Fpu. If we don't, we'll store zero there. (See also
; CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET.)
;
; In raw-mode we will always have to clear TS and it will be recalculated
; elsewhere and thus needs no saving.
;
; @param %1 Register to return the return status code in.
; @param %2 Temporary scratch register.
; @param %3 Ring-0 only, register pointing to the CPUMCPU structure
;           of the EMT we're on.
; @uses EFLAGS, CR0, %1, %2
;
%macro CPUMRZ_TOUCH_FPU_CLEAR_CR0_FPU_TRAPS_SET_RC 3
        ;
        ; ring-0 - slightly complicated (than old raw-mode).
        ;
        xor     %1, %1                          ; 0 / VINF_SUCCESS. Wishing for no CR0 changes.
        mov     [%3 + CPUMCPU.Host.cr0Fpu], %1  ; Default: nothing saved, nothing to restore later.

        mov     %2, cr0
        test    %2, X86_CR0_TS | X86_CR0_EM     ; Make sure its safe to access the FPU state.
        jz      %%no_cr0_change                 ; Neither flag set - already safe, rc stays VINF_SUCCESS.

 %ifdef VMM_R0_TOUCH_FPU
        ; Touch the state and check that the kernel updated CR0 for us.
        movdqa  xmm0, xmm0                      ; Harmless SSE touch that may trigger the kernel's lazy FPU handling.
        mov     %2, cr0
        test    %2, X86_CR0_TS | X86_CR0_EM
        jz      %%cr0_changed                   ; Kernel cleared the flags: report modified, but keep cr0Fpu = 0.
 %endif

        ; Save CR0 and clear them flags ourselves.
        mov     [%3 + CPUMCPU.Host.cr0Fpu], %2  ; Remember the original CR0 for the restore macro.
        and     %2, ~(X86_CR0_TS | X86_CR0_EM)
        mov     cr0, %2

%%cr0_changed:
        mov     %1, VINF_CPUM_HOST_CR0_MODIFIED
%%no_cr0_change:
%endmacro
471
472
;;
; Puts the original CR0 value back, but only when it had TS or EM set.
;
; Counterpart of CPUMRZ_TOUCH_FPU_CLEAR_CR0_FPU_TRAPS_SET_RC: a saved value of
; zero (or one without TS/EM) means nothing was changed, so CR0 is left alone.
;
; @param %1 The original state to restore (or zero).
;
%macro CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET 1
        test    %1, X86_CR0_TS | X86_CR0_EM     ; Anything worth undoing?
        jz      %%nothing_to_restore            ; No TS/EM in the saved value - skip the CR0 write.
        mov     cr0, %1                         ; Reinstate the saved host CR0.
%%nothing_to_restore:
%endmacro
484
485
;;
; Saves the host's extended CPU state using XSAVE when a component mask is
; configured, falling back to FXSAVE otherwise.
;
; @uses rax, rdx
; @param pCpumCpu Define for the register containing the CPUMCPU pointer.
; @param pXState Define for the register containing the extended state pointer.
;
%macro CPUMR0_SAVE_HOST 0
        ;
        ; pXState = &pCpumCpu->Host.XState; eax = low dword of the host XSAVE mask.
        ;
        lea     pXState, [pCpumCpu + CPUMCPU.Host.XState]
        mov     eax, [pCpumCpu + CPUMCPU.Host.fXStateMask]

        ;
        ; A zero mask means XSAVE is not in use - take the FXSAVE path.
        ;
        or      eax, eax
        jz      %%fxsave

        ; XSAVE path: edx:eax = fXStateMask, as the instruction requires.
        mov     edx, [pCpumCpu + CPUMCPU.Host.fXStateMask + 4]
 %ifdef RT_ARCH_AMD64
        o64 xsave [pXState]
 %else
        xsave   [pXState]
 %endif
        jmp     %%done

        ; FXSAVE path:
%%fxsave:
 %ifdef RT_ARCH_AMD64
        o64 fxsave [pXState]                ; Use explicit REX prefix. See @bugref{6398}.
 %else
        fxsave  [pXState]
 %endif

%%done:
%endmacro ; CPUMR0_SAVE_HOST
525
526
;;
; Loads the host's extended CPU state back, using XRSTOR when a component mask
; is configured and FXRSTOR otherwise (the exact mirror of CPUMR0_SAVE_HOST).
;
; @uses rax, rdx
; @param pCpumCpu Define for the register containing the CPUMCPU pointer.
; @param pXState Define for the register containing the extended state pointer.
;
%macro CPUMR0_LOAD_HOST 0
        ;
        ; pXState = &pCpumCpu->Host.XState; eax = low dword of the host XSAVE mask.
        ;
        lea     pXState, [pCpumCpu + CPUMCPU.Host.XState]
        mov     eax, [pCpumCpu + CPUMCPU.Host.fXStateMask]

        ;
        ; A zero mask means XSAVE is not in use - take the FXRSTOR path.
        ;
        or      eax, eax
        jz      %%fxrstor

        ; XRSTOR path: edx:eax = fXStateMask, as the instruction requires.
        mov     edx, [pCpumCpu + CPUMCPU.Host.fXStateMask + 4]
 %ifdef RT_ARCH_AMD64
        o64 xrstor [pXState]
 %else
        xrstor  [pXState]
 %endif
        jmp     %%done

        ; FXRSTOR path:
%%fxrstor:
 %ifdef RT_ARCH_AMD64
        o64 fxrstor [pXState]               ; Use explicit REX prefix. See @bugref{6398}.
 %else
        fxrstor [pXState]
 %endif

%%done:
%endmacro ; CPUMR0_LOAD_HOST
566
567
568
;; Macro for XSAVE/FXSAVE for the guest FPU but tries to figure out whether to
; save the 32-bit FPU state or 64-bit FPU state.
;
; NOTE(review): the body references the pCpumCpu/pXState defines rather than
; %1/%2, so the first two parameters are effectively documentation only.
;
; @param %1 Pointer to CPUMCPU.
; @param %2 Pointer to XState.
; @param %3 Force AMD64
; @param %4 The instruction to use (xsave or fxsave)
; @uses xAX, xDX, EFLAGS, 20h of stack.
;
%macro SAVE_32_OR_64_FPU 4
%if CPUM_IS_AMD64 || %3
        ; Save the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.
        test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
        jnz     short %%save_long_mode_guest
%endif
        %4      [pXState]                   ; Plain (no REX) save for the 32-bit case.
%if CPUM_IS_AMD64 || %3
        jmp     %%save_done_32bit_cs_ds

%%save_long_mode_guest:
        o64 %4  [pXState]                   ; REX.W form - stores the full 64-bit FPUIP/FPUDP.

        xor     edx, edx                    ; edx = 0 marks a 64-bit save in the reserved field (stored at %%save_done).
        cmp     dword [pXState + X86FXSTATE.FPUCS], 0
        jne     short %%save_done

        ; FPUCS came out zero - fetch the real CS/DS selectors via FNSTENV
        ; (32-bit protected-mode environment layout) and patch them in.
        sub     rsp, 20h                    ; Only need 1ch bytes but keep stack aligned otherwise we #GP(0).
        fnstenv [rsp]
        movzx   eax, word [rsp + 10h]       ; FPU CS selector from the FNSTENV image.
        mov     [pXState + X86FXSTATE.FPUCS], eax
        movzx   eax, word [rsp + 18h]       ; FPU DS selector from the FNSTENV image.
        add     rsp, 20h
        mov     [pXState + X86FXSTATE.FPUDS], eax
%endif
%%save_done_32bit_cs_ds:
        mov     edx, X86_FXSTATE_RSVD_32BIT_MAGIC   ; Tag the image as a 32-bit save for RESTORE_32_OR_64_FPU.
%%save_done:
        mov     dword [pXState + X86_OFF_FXSTATE_RSVD], edx
%endmacro ; SAVE_32_OR_64_FPU
608
609
;;
; Save the guest state.
;
; @uses rax, rdx
; @param pCpumCpu Define for the register containing the CPUMCPU pointer.
; @param pXState Define for the register containing the extended state pointer.
;
%macro CPUMR0_SAVE_GUEST 0
        ;
        ; Load a couple of registers we'll use later in all branches.
        ;
 %ifdef IN_RING0
        lea     pXState, [pCpumCpu + CPUMCPU.Guest.XState]
 %else
  %error "Unsupported context!"
 %endif
        mov     eax, [pCpumCpu + CPUMCPU.Guest.fXStateMask]

        ;
        ; XSAVE or FXSAVE? (A zero mask means XSAVE is not in use.)
        ;
        or      eax, eax
        jz      %%guest_fxsave

        ; XSAVE - edx:eax = fXStateMask for the instruction.
        mov     edx, [pCpumCpu + CPUMCPU.Guest.fXStateMask + 4]
 %ifdef VBOX_WITH_KERNEL_USING_XMM
        and     eax, ~CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS ; Already saved in HMR0A.asm.
 %endif
        SAVE_32_OR_64_FPU pCpumCpu, pXState, 0, xsave
        jmp     %%guest_done

        ; FXSAVE
%%guest_fxsave:
        SAVE_32_OR_64_FPU pCpumCpu, pXState, 0, fxsave

%%guest_done:
%endmacro ; CPUMR0_SAVE_GUEST
648
649
;;
; Wrapper for selecting 32-bit or 64-bit XRSTOR/FXRSTOR according to what SAVE_32_OR_64_FPU did.
;
; The saved image's reserved field (X86_OFF_FXSTATE_RSVD) carries
; X86_FXSTATE_RSVD_32BIT_MAGIC for a 32-bit save and 0 for a 64-bit save.
;
; NOTE(review): like SAVE_32_OR_64_FPU, the body uses the pCpumCpu/pXState
; defines, not %1/%2.
;
; @param %1 Pointer to CPUMCPU.
; @param %2 Pointer to XState.
; @param %3 Force AMD64.
; @param %4 The instruction to use (xrstor or fxrstor).
; @uses xAX, xDX, EFLAGS
;
%macro RESTORE_32_OR_64_FPU 4
%if CPUM_IS_AMD64 || %3
        ; Restore the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.
        test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
        jz      %%restore_32bit_fpu
        cmp     dword [pXState + X86_OFF_FXSTATE_RSVD], X86_FXSTATE_RSVD_32BIT_MAGIC
        jne     short %%restore_64bit_fpu
%%restore_32bit_fpu:
%endif
        %4      [pXState]                   ; Plain (no REX) restore for the 32-bit case.
%if CPUM_IS_AMD64 || %3
        ; TODO: Restore XMM8-XMM15!
        jmp     short %%restore_fpu_done
%%restore_64bit_fpu:
        o64 %4  [pXState]                   ; REX.W form - restores the full 64-bit FPUIP/FPUDP.
%%restore_fpu_done:
%endif
%endmacro ; RESTORE_32_OR_64_FPU
677
678
;;
; Loads the guest state.
;
; @uses rax, rdx
; @param pCpumCpu Define for the register containing the CPUMCPU pointer.
; @param pXState Define for the register containing the extended state pointer.
;
%macro CPUMR0_LOAD_GUEST 0
        ;
        ; Load a couple of registers we'll use later in all branches.
        ;
        lea     pXState, [pCpumCpu + CPUMCPU.Guest.XState]
        mov     eax, [pCpumCpu + CPUMCPU.Guest.fXStateMask]

        ;
        ; XRSTOR or FXRSTOR? (A zero mask means XSAVE is not in use.)
        ;
        or      eax, eax
        jz      %%guest_fxrstor

        ; XRSTOR - edx:eax = fXStateMask for the instruction.
        mov     edx, [pCpumCpu + CPUMCPU.Guest.fXStateMask + 4]
 %ifdef VBOX_WITH_KERNEL_USING_XMM
        and     eax, ~CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS ; Will be loaded by HMR0A.asm.
 %endif
        RESTORE_32_OR_64_FPU pCpumCpu, pXState, 0, xrstor
        jmp     %%guest_done

        ; FXRSTOR
%%guest_fxrstor:
        RESTORE_32_OR_64_FPU pCpumCpu, pXState, 0, fxrstor

%%guest_done:
%endmacro ; CPUMR0_LOAD_GUEST
713
注意: 瀏覽 TracBrowser 來幫助您使用儲存庫瀏覽器

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette