VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMR0A.asm@ 47115

Last change on this file since 47115 was 46942, checked in by vboxsync, 12 years ago

VMM: Fix LDTR restoration to be done dynamically. This is required for 64-bit Darwin hosts.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 44.0 KB
 
1; $Id: HMR0A.asm 46942 2013-07-03 14:53:47Z vboxsync $
2;; @file
3; HM - R0 VMX & SVM helpers
4;
5
6;
7; Copyright (C) 2006-2013 Oracle Corporation
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17
18;*******************************************************************************
19;* Header Files *
20;*******************************************************************************
21%include "VBox/asmdefs.mac"
22%include "VBox/err.mac"
23%include "VBox/vmm/hm_vmx.mac"
24%include "VBox/vmm/cpum.mac"
25%include "iprt/x86.mac"
26%include "HMInternal.mac"
27
28%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
29 %macro vmwrite 2,
30 int3
31 %endmacro
32 %define vmlaunch int3
33 %define vmresume int3
34 %define vmsave int3
35 %define vmload int3
36 %define vmrun int3
37 %define clgi int3
38 %define stgi int3
39 %macro invlpga 2,
40 int3
41 %endmacro
42%endif
43
44;*******************************************************************************
45;* Defined Constants And Macros *
46;*******************************************************************************
47%ifdef RT_ARCH_AMD64
48 %define MAYBE_64_BIT
49%endif
50%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
51 %define MAYBE_64_BIT
52%else
53 %ifdef RT_OS_DARWIN
54 %ifdef RT_ARCH_AMD64
55 ;;
56 ; Load the NULL selector into DS, ES, FS and GS on 64-bit darwin so we don't
57 ; risk loading a stale LDT value or something invalid.
58 %define HM_64_BIT_USE_NULL_SEL
59 %endif
60 %endif
61%endif
62
63%ifndef VBOX_WITH_OLD_VTX_CODE
64 %ifdef RT_ARCH_AMD64
65 %define VBOX_SKIP_RESTORE_SEG
66 %endif
67%endif
68
69;; The offset of the XMM registers in X86FXSTATE.
70; Use define because I'm too lazy to convert the struct.
71%define XMM_OFF_IN_X86FXSTATE 160
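; Note: 160 is where XMM0 starts in the FXSAVE/FXRSTOR image: 32 bytes of control and
; status fields (FCW, FSW, FTW, FOP, FIP, FDP, MXCSR, MXCSR_MASK) followed by
; 8 x 16 bytes of ST/MM registers.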
72
73;; @def MYPUSHAD
74; Macro generating an equivalent to pushad
75
76;; @def MYPOPAD
77; Macro generating an equivalent to popad
78
79;; @def MYPUSHSEGS
80; Macro saving all segment registers on the stack.
81; @param 1 full width register name
82; @param 2 16-bit register name for \a 1.
83
84;; @def MYPOPSEGS
85; Macro restoring all segment registers from the stack.
86; @param 1 full width register name
87; @param 2 16-bit register name for \a 1.
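; A typical invocation pairs a full-width register with its 16-bit alias, for example:
;     MYPUSHSEGS xAX, ax
;     ...
;     MYPOPSEGS  xAX, ax
; (illustrative only; the intended consumers are the world-switch routines pulled in
; from HMR0Mixed.mac further down.)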
88
89%ifdef MAYBE_64_BIT
90 ; Save a host and load the corresponding guest MSR (trashes rdx & rcx)
91 %macro LOADGUESTMSR 2
92 mov rcx, %1
93 rdmsr
94 push rdx
95 push rax
96 mov edx, dword [xSI + %2 + 4]
97 mov eax, dword [xSI + %2]
98 wrmsr
99 %endmacro
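; Illustrative use of LOADGUESTMSR (hypothetical MSR/offset pair, not taken from this file):
;     LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
; reads the host MSR into edx:eax and pushes it (rdx, then rax), then wrmsr's the guest
; value found at [xSI + offset]; xSI is expected to point at the guest context the
; offsets are relative to.  LOADHOSTMSREX / LOADHOSTMSR below pop in the reverse order,
; so save/restore pairs must be strictly nested.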
100
101 ; Save a guest and load the corresponding host MSR (trashes rdx & rcx)
102 ; Only really useful for gs kernel base as that one can be changed behind our back (swapgs)
103 %macro LOADHOSTMSREX 2
104 mov rcx, %1
105 rdmsr
106 mov dword [xSI + %2], eax
107 mov dword [xSI + %2 + 4], edx
108 pop rax
109 pop rdx
110 wrmsr
111 %endmacro
112
113 ; Load the corresponding host MSR (trashes rdx & rcx)
114 %macro LOADHOSTMSR 1
115 mov rcx, %1
116 pop rax
117 pop rdx
118 wrmsr
119 %endmacro
120%endif
121
122%ifdef ASM_CALL64_GCC
123 %macro MYPUSHAD64 0
124 push r15
125 push r14
126 push r13
127 push r12
128 push rbx
129 %endmacro
130 %macro MYPOPAD64 0
131 pop rbx
132 pop r12
133 pop r13
134 pop r14
135 pop r15
136 %endmacro
137
138%else ; ASM_CALL64_MSC
139 %macro MYPUSHAD64 0
140 push r15
141 push r14
142 push r13
143 push r12
144 push rbx
145 push rsi
146 push rdi
147 %endmacro
148 %macro MYPOPAD64 0
149 pop rdi
150 pop rsi
151 pop rbx
152 pop r12
153 pop r13
154 pop r14
155 pop r15
156 %endmacro
157%endif
158
159%ifdef VBOX_SKIP_RESTORE_SEG
160%macro MYPUSHSEGS64 2
161%endmacro
162
163%macro MYPOPSEGS64 2
164%endmacro
165%else ; !VBOX_SKIP_RESTORE_SEG
166; trashes rax, rdx & rcx
167%macro MYPUSHSEGS64 2
168 %ifndef HM_64_BIT_USE_NULL_SEL
169 mov %2, es
170 push %1
171 mov %2, ds
172 push %1
173 %endif
174
175 ; Special case for FS: Windows and Linux either don't use it or restore it when leaving kernel mode; Solaris OTOH doesn't, so we must save it.
176 mov ecx, MSR_K8_FS_BASE
177 rdmsr
178 push rdx
179 push rax
180 %ifndef HM_64_BIT_USE_NULL_SEL
181 push fs
182 %endif
183
184 ; Special case for GS: OSes typically use swapgs to reset the hidden base register for GS on entry into the kernel; the same happens on exit.
185 mov ecx, MSR_K8_GS_BASE
186 rdmsr
187 push rdx
188 push rax
189 %ifndef HM_64_BIT_USE_NULL_SEL
190 push gs
191 %endif
192%endmacro
193
194; trashes rax, rdx & rcx
195%macro MYPOPSEGS64 2
196 ; Note: do not step through this code with a debugger!
197 %ifndef HM_64_BIT_USE_NULL_SEL
198 xor eax, eax
199 mov ds, ax
200 mov es, ax
201 mov fs, ax
202 mov gs, ax
203 %endif
204
205 %ifndef HM_64_BIT_USE_NULL_SEL
206 pop gs
207 %endif
208 pop rax
209 pop rdx
210 mov ecx, MSR_K8_GS_BASE
211 wrmsr
212
213 %ifndef HM_64_BIT_USE_NULL_SEL
214 pop fs
215 %endif
216 pop rax
217 pop rdx
218 mov ecx, MSR_K8_FS_BASE
219 wrmsr
220 ; Now it's safe to step again
221
222 %ifndef HM_64_BIT_USE_NULL_SEL
223 pop %1
224 mov ds, %2
225 pop %1
226 mov es, %2
227 %endif
228%endmacro
229%endif ; VBOX_SKIP_RESTORE_SEG
230
231%macro MYPUSHAD32 0
232 pushad
233%endmacro
234%macro MYPOPAD32 0
235 popad
236%endmacro
237
238%macro MYPUSHSEGS32 2
239 push ds
240 push es
241 push fs
242 push gs
243%endmacro
244%macro MYPOPSEGS32 2
245 pop gs
246 pop fs
247 pop es
248 pop ds
249%endmacro
250
251
252;*******************************************************************************
253;* External Symbols *
254;*******************************************************************************
255%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
256extern NAME(SUPR0AbsIs64bit)
257extern NAME(SUPR0Abs64bitKernelCS)
258extern NAME(SUPR0Abs64bitKernelSS)
259extern NAME(SUPR0Abs64bitKernelDS)
260extern NAME(SUPR0AbsKernelCS)
261%endif
262%ifdef VBOX_WITH_KERNEL_USING_XMM
263extern NAME(CPUMIsGuestFPUStateActive)
264%endif
265
266
267;*******************************************************************************
268;* Global Variables *
269;*******************************************************************************
270%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
271BEGINDATA
272;;
273; Store the SUPR0AbsIs64bit absolute value here so we can cmp/test without
274; needing to clobber a register. (This trick doesn't quite work for PE btw.
275; but that's not relevant atm.)
276GLOBALNAME g_fVMXIs64bitHost
277 dd NAME(SUPR0AbsIs64bit)
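; How this is used below: the 32-bit routines do
;     cmp byte [NAME(g_fVMXIs64bitHost)], 0
; and, when the host kernel is 64-bit, hand-assemble a direct far jump
; (db 0xea; dd offset, selector - opcode 0EAh is jmp ptr16:32) into the 64-bit kernel
; code segment SUPR0Abs64bitKernelCS.  The way back is always an indirect far jump
; (jmp far [.fpret wrt rip]) because direct far jumps do not exist in 64-bit mode.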
278%endif
279
280
281BEGINCODE
282
283
284;/**
285; * Restores host-state fields.
286; *
287; * @returns VBox status code
288; * @param u32RestoreHostFlags x86: [ebp + 08h] msc: rcx gcc: rdi u32RestoreHost - RestoreHost flags.
289; * @param pRestoreHost x86: [ebp + 0ch] msc: rdx gcc: rsi pRestoreHost - Pointer to the RestoreHost struct.
290; */
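; Notes: u32RestoreHostFlags is a mask of VMX_RESTORE_HOST_* bits telling us which host
; registers VT-x did not restore for us; pRestoreHost points to a VMXRESTOREHOST struct
; holding the saved selectors, GDTR/IDTR and the FS/GS base MSR values.
; Hypothetical C-side call (the field names are an assumption, not taken from this file):
;     VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);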
291ALIGNCODE(16)
292BEGINPROC VMXRestoreHostState
293%ifdef RT_ARCH_AMD64
294 %ifndef ASM_CALL64_GCC
295 ; On msc R10, R11 are scratch, RDI and RSI are not. So we must save and restore them!
296 mov r10, rdi
297 mov r11, rsi
298 ; Switch to common register usage (i.e. gcc's in this function)
299 mov rdi, rcx
300 mov rsi, rdx
301 %endif
302
303 test edi, VMX_RESTORE_HOST_GDTR
304 jz near .test_idtr
305 lgdt [rsi + VMXRESTOREHOST.HostGdtr]
306
307.test_idtr:
308 test edi, VMX_RESTORE_HOST_IDTR
309 jz near .test_ds
310 lidt [rsi + VMXRESTOREHOST.HostIdtr]
311
312.test_ds:
313 test edi, VMX_RESTORE_HOST_SEL_DS
314 jz near .test_es
315 mov ax, word [rsi + VMXRESTOREHOST.uHostSelDS]
316 mov ds, ax
317
318.test_es:
319 test edi, VMX_RESTORE_HOST_SEL_ES
320 jz near .test_tr
321 mov ax, word [rsi + VMXRESTOREHOST.uHostSelES]
322 mov es, ax
323
324.test_tr:
325 test edi, VMX_RESTORE_HOST_SEL_TR
326 jz near .test_fs
327 mov dx, word [rsi + VMXRESTOREHOST.uHostSelTR]
328 xor xAX, xAX
329 mov ax, dx
330 and al, ~(X86_SEL_LDT | X86_SEL_RPL) ; Mask away TI and RPL bits leaving only the descriptor offset.
331 add xAX, qword [rsi + VMXRESTOREHOST.HostGdtr + 2] ; xAX <- descriptor offset + GDTR.pGdt.
332 and dword [ss:xAX + 4], ~RT_BIT(9) ; Clear the busy flag in TSS (bits 0-7=base, bit 9=busy bit).
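 ; The clear above is needed because ltr only accepts an available (non-busy) TSS; the
 ; descriptor being re-loaded was in use before the VM-run and thus still has its busy
 ; bit set in the second dword, so ltr would raise #GP without it.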
333 ltr dx
334
335.test_fs:
336 ; We're only restoring the selector. The base is valid and restored by VT-x. If we get an interrupt in between FS & GS
337 ; below, we are fine as the base is what is relevant in 64-bit mode. We need to disable interrupts only during
338 ; writing of the selector as that zaps (trashes) the upper-part of the base until we wrmsr the full 64-bit base.
339
340 test edi, VMX_RESTORE_HOST_SEL_FS
341 jz near .test_gs
342 mov ax, word [rsi + VMXRESTOREHOST.uHostSelFS]
343 cli ; Disable interrupts as mov fs, ax will zap the upper part of the base
344 mov fs, ax
345 mov eax, dword [rsi + VMXRESTOREHOST.uHostFSBase] ; uHostFSBase - Lo
346 mov edx, dword [rsi + VMXRESTOREHOST.uHostFSBase + 4h] ; uHostFSBase - Hi
347 mov ecx, MSR_K8_FS_BASE
348 wrmsr
349 sti ; Re-enable interrupts as fsbase is consistent now
350
351.test_gs:
352 test edi, VMX_RESTORE_HOST_SEL_GS
353 jz near .restore_success
354 mov ax, word [rsi + VMXRESTOREHOST.uHostSelGS]
355 cli ; Disable interrupts as mov gs, ax will zap the upper part of the base
356 mov gs, ax
357 mov eax, dword [rsi + VMXRESTOREHOST.uHostGSBase] ; uHostGSBase - Lo
358 mov edx, dword [rsi + VMXRESTOREHOST.uHostGSBase + 4h] ; uHostGSBase - Hi
359 mov ecx, MSR_K8_GS_BASE
360 wrmsr
361 sti ; Re-enable interrupts as gsbase is consistent now
362
363.restore_success:
364 mov eax, VINF_SUCCESS
365 %ifndef ASM_CALL64_GCC
366 ; Restore RDI and RSI on MSC.
367 mov rdi, r10
368 mov rsi, r11
369 %endif
370%else ; RT_ARCH_X86
371 mov eax, VERR_NOT_IMPLEMENTED
372%endif
373 ret
374ENDPROC VMXRestoreHostState
375
376
377;/**
378; * Executes VMWRITE, 64-bit value.
379; *
380; * @returns VBox status code
381; * @param idxField x86: [ebp + 08h] msc: rcx gcc: rdi VMCS index
382; * @param u64Data x86: [ebp + 0ch] msc: rdx gcc: rsi VM field value
383; */
384ALIGNCODE(16)
385BEGINPROC VMXWriteVmcs64
386%ifdef RT_ARCH_AMD64
387 %ifdef ASM_CALL64_GCC
388 and edi, 0ffffffffh
389 xor rax, rax
390 vmwrite rdi, rsi
391 %else
392 and ecx, 0ffffffffh
393 xor rax, rax
394 vmwrite rcx, rdx
395 %endif
396%else ; RT_ARCH_X86
397 mov ecx, [esp + 4] ; idxField
398 lea edx, [esp + 8] ; &u64Data
399 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
400 cmp byte [NAME(g_fVMXIs64bitHost)], 0
401 jz .legacy_mode
402 db 0xea ; jmp far .sixtyfourbit_mode
403 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
404.legacy_mode:
405 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
406 vmwrite ecx, [edx] ; low dword
407 jz .done
408 jc .done
409 inc ecx
410 xor eax, eax
411 vmwrite ecx, [edx + 4] ; high dword
412.done:
413%endif ; RT_ARCH_X86
414 jnc .valid_vmcs
415 mov eax, VERR_VMX_INVALID_VMCS_PTR
416 ret
417.valid_vmcs:
418 jnz .the_end
419 mov eax, VERR_VMX_INVALID_VMCS_FIELD
420.the_end:
421 ret
422
423%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
424ALIGNCODE(16)
425BITS 64
426.sixtyfourbit_mode:
427 and edx, 0ffffffffh
428 and ecx, 0ffffffffh
429 xor eax, eax
430 vmwrite rcx, [rdx]
431 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
432 cmovz eax, r8d
433 mov r9d, VERR_VMX_INVALID_VMCS_PTR
434 cmovc eax, r9d
435 jmp far [.fpret wrt rip]
436.fpret: ; 16:32 Pointer to .the_end.
437 dd .the_end, NAME(SUPR0AbsKernelCS)
438BITS 32
439%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
440ENDPROC VMXWriteVmcs64
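; Status convention used by this and the other VMX wrappers in this file: VT-x reports
; errors through RFLAGS - VMfailInvalid sets CF (no/invalid current VMCS, mapped to
; VERR_VMX_INVALID_VMCS_PTR), VMfailValid sets ZF (bad field/operand, mapped to
; VERR_VMX_INVALID_VMCS_FIELD), and success clears both.  That is what the jnc/jnz
; sequence above and the cmovc/cmovz pairs in the 64-bit thunks test.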
441
442
443;/**
444; * Executes VMREAD, 64-bit value
445; *
446; * @returns VBox status code
447; * @param idxField VMCS index
448; * @param pData Ptr to store VM field value
449; */
450;DECLASM(int) VMXReadVmcs64(uint32_t idxField, uint64_t *pData);
451ALIGNCODE(16)
452BEGINPROC VMXReadVmcs64
453%ifdef RT_ARCH_AMD64
454 %ifdef ASM_CALL64_GCC
455 and edi, 0ffffffffh
456 xor rax, rax
457 vmread [rsi], rdi
458 %else
459 and ecx, 0ffffffffh
460 xor rax, rax
461 vmread [rdx], rcx
462 %endif
463%else ; RT_ARCH_X86
464 mov ecx, [esp + 4] ; idxField
465 mov edx, [esp + 8] ; pData
466 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
467 cmp byte [NAME(g_fVMXIs64bitHost)], 0
468 jz .legacy_mode
469 db 0xea ; jmp far .sixtyfourbit_mode
470 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
471.legacy_mode:
472 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
473 vmread [edx], ecx ; low dword
474 jz .done
475 jc .done
476 inc ecx
477 xor eax, eax
478 vmread [edx + 4], ecx ; high dword
479.done:
480%endif ; RT_ARCH_X86
481 jnc .valid_vmcs
482 mov eax, VERR_VMX_INVALID_VMCS_PTR
483 ret
484.valid_vmcs:
485 jnz .the_end
486 mov eax, VERR_VMX_INVALID_VMCS_FIELD
487.the_end:
488 ret
489
490%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
491ALIGNCODE(16)
492BITS 64
493.sixtyfourbit_mode:
494 and edx, 0ffffffffh
495 and ecx, 0ffffffffh
496 xor eax, eax
497 vmread [rdx], rcx
498 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
499 cmovz eax, r8d
500 mov r9d, VERR_VMX_INVALID_VMCS_PTR
501 cmovc eax, r9d
502 jmp far [.fpret wrt rip]
503.fpret: ; 16:32 Pointer to .the_end.
504 dd .the_end, NAME(SUPR0AbsKernelCS)
505BITS 32
506%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
507ENDPROC VMXReadVmcs64
508
509
510;/**
511; * Executes VMREAD, 32-bit value.
512; *
513; * @returns VBox status code
514; * @param idxField VMCS index
515; * @param pu32Data Ptr to store VM field value
516; */
517;DECLASM(int) VMXReadVmcs32(uint32_t idxField, uint32_t *pu32Data);
518ALIGNCODE(16)
519BEGINPROC VMXReadVmcs32
520%ifdef RT_ARCH_AMD64
521 %ifdef ASM_CALL64_GCC
522 and edi, 0ffffffffh
523 xor rax, rax
524 vmread r10, rdi
525 mov [rsi], r10d
526 %else
527 and ecx, 0ffffffffh
528 xor rax, rax
529 vmread r10, rcx
530 mov [rdx], r10d
531 %endif
532%else ; RT_ARCH_X86
533 mov ecx, [esp + 4] ; idxField
534 mov edx, [esp + 8] ; pu32Data
535 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
536 cmp byte [NAME(g_fVMXIs64bitHost)], 0
537 jz .legacy_mode
538 db 0xea ; jmp far .sixtyfourbit_mode
539 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
540.legacy_mode:
541 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
542 xor eax, eax
543 vmread [edx], ecx
544%endif ; RT_ARCH_X86
545 jnc .valid_vmcs
546 mov eax, VERR_VMX_INVALID_VMCS_PTR
547 ret
548.valid_vmcs:
549 jnz .the_end
550 mov eax, VERR_VMX_INVALID_VMCS_FIELD
551.the_end:
552 ret
553
554%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
555ALIGNCODE(16)
556BITS 64
557.sixtyfourbit_mode:
558 and edx, 0ffffffffh
559 and ecx, 0ffffffffh
560 xor eax, eax
561 vmread r10, rcx
562 mov [rdx], r10d
563 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
564 cmovz eax, r8d
565 mov r9d, VERR_VMX_INVALID_VMCS_PTR
566 cmovc eax, r9d
567 jmp far [.fpret wrt rip]
568.fpret: ; 16:32 Pointer to .the_end.
569 dd .the_end, NAME(SUPR0AbsKernelCS)
570BITS 32
571%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
572ENDPROC VMXReadVmcs32
573
574
575;/**
576; * Executes VMWRITE, 32-bit value.
577; *
578; * @returns VBox status code
579; * @param idxField VMCS index
580; * @param u32Data VM field value
581; */
582;DECLASM(int) VMXWriteVmcs32(uint32_t idxField, uint32_t u32Data);
583ALIGNCODE(16)
584BEGINPROC VMXWriteVmcs32
585%ifdef RT_ARCH_AMD64
586 %ifdef ASM_CALL64_GCC
587 and edi, 0ffffffffh
588 and esi, 0ffffffffh
589 xor rax, rax
590 vmwrite rdi, rsi
591 %else
592 and ecx, 0ffffffffh
593 and edx, 0ffffffffh
594 xor rax, rax
595 vmwrite rcx, rdx
596 %endif
597%else ; RT_ARCH_X86
598 mov ecx, [esp + 4] ; idxField
599 mov edx, [esp + 8] ; u32Data
600 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
601 cmp byte [NAME(g_fVMXIs64bitHost)], 0
602 jz .legacy_mode
603 db 0xea ; jmp far .sixtyfourbit_mode
604 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
605.legacy_mode:
606 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
607 xor eax, eax
608 vmwrite ecx, edx
609%endif ; RT_ARCH_X86
610 jnc .valid_vmcs
611 mov eax, VERR_VMX_INVALID_VMCS_PTR
612 ret
613.valid_vmcs:
614 jnz .the_end
615 mov eax, VERR_VMX_INVALID_VMCS_FIELD
616.the_end:
617 ret
618
619%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
620ALIGNCODE(16)
621BITS 64
622.sixtyfourbit_mode:
623 and edx, 0ffffffffh
624 and ecx, 0ffffffffh
625 xor eax, eax
626 vmwrite rcx, rdx
627 mov r8d, VERR_VMX_INVALID_VMCS_FIELD
628 cmovz eax, r8d
629 mov r9d, VERR_VMX_INVALID_VMCS_PTR
630 cmovc eax, r9d
631 jmp far [.fpret wrt rip]
632.fpret: ; 16:32 Pointer to .the_end.
633 dd .the_end, NAME(SUPR0AbsKernelCS)
634BITS 32
635%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
636ENDPROC VMXWriteVmcs32
637
638
639;/**
640; * Executes VMXON
641; *
642; * @returns VBox status code
643; * @param HCPhysVMXOn Physical address of VMXON structure
644; */
645;DECLASM(int) VMXEnable(RTHCPHYS HCPhysVMXOn);
646BEGINPROC VMXEnable
647%ifdef RT_ARCH_AMD64
648 xor rax, rax
649 %ifdef ASM_CALL64_GCC
650 push rdi
651 %else
652 push rcx
653 %endif
654 vmxon [rsp]
655%else ; RT_ARCH_X86
656 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
657 cmp byte [NAME(g_fVMXIs64bitHost)], 0
658 jz .legacy_mode
659 db 0xea ; jmp far .sixtyfourbit_mode
660 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
661.legacy_mode:
662 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
663 xor eax, eax
664 vmxon [esp + 4]
665%endif ; RT_ARCH_X86
666 jnc .good
667 mov eax, VERR_VMX_INVALID_VMXON_PTR
668 jmp .the_end
669
670.good:
671 jnz .the_end
672 mov eax, VERR_VMX_VMXON_FAILED
673
674.the_end:
675%ifdef RT_ARCH_AMD64
676 add rsp, 8
677%endif
678 ret
679
680%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
681ALIGNCODE(16)
682BITS 64
683.sixtyfourbit_mode:
684 lea rdx, [rsp + 4] ; &HCPhysVMXOn.
685 and edx, 0ffffffffh
686 xor eax, eax
687 vmxon [rdx]
688 mov r8d, VERR_VMX_VMXON_FAILED
689 cmovz eax, r8d
690 mov r9d, VERR_VMX_INVALID_VMXON_PTR
691 cmovc eax, r9d
692 jmp far [.fpret wrt rip]
693.fpret: ; 16:32 Pointer to .the_end.
694 dd .the_end, NAME(SUPR0AbsKernelCS)
695BITS 32
696%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
697ENDPROC VMXEnable
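; Note on the stack gymnastics above: VMXON takes an m64 operand holding the physical
; address of the (4K-aligned) VMXON region, so the 64-bit path pushes the RTHCPHYS
; argument and executes vmxon [rsp] (cleaned up by the add rsp, 8 at .the_end), while
; the 32-bit path reads the 8-byte argument directly at [esp + 4].  VMXClearVMCS and
; VMXActivateVMCS below use the same pattern for VMCLEAR/VMPTRLD.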
698
699
700;/**
701; * Executes VMXOFF
702; */
703;DECLASM(void) VMXDisable(void);
704BEGINPROC VMXDisable
705%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
706 cmp byte [NAME(g_fVMXIs64bitHost)], 0
707 jz .legacy_mode
708 db 0xea ; jmp far .sixtyfourbit_mode
709 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
710.legacy_mode:
711%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
712 vmxoff
713.the_end:
714 ret
715
716%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
717ALIGNCODE(16)
718BITS 64
719.sixtyfourbit_mode:
720 vmxoff
721 jmp far [.fpret wrt rip]
722.fpret: ; 16:32 Pointer to .the_end.
723 dd .the_end, NAME(SUPR0AbsKernelCS)
724BITS 32
725%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
726ENDPROC VMXDisable
727
728
729;/**
730; * Executes VMCLEAR
731; *
732; * @returns VBox status code
733; * @param HCPhysVmcs Physical address of VM control structure
734; */
735;DECLASM(int) VMXClearVMCS(RTHCPHYS HCPhysVmcs);
736ALIGNCODE(16)
737BEGINPROC VMXClearVMCS
738%ifdef RT_ARCH_AMD64
739 xor rax, rax
740 %ifdef ASM_CALL64_GCC
741 push rdi
742 %else
743 push rcx
744 %endif
745 vmclear [rsp]
746%else ; RT_ARCH_X86
747 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
748 cmp byte [NAME(g_fVMXIs64bitHost)], 0
749 jz .legacy_mode
750 db 0xea ; jmp far .sixtyfourbit_mode
751 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
752.legacy_mode:
753 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
754 xor eax, eax
755 vmclear [esp + 4]
756%endif ; RT_ARCH_X86
757 jnc .the_end
758 mov eax, VERR_VMX_INVALID_VMCS_PTR
759.the_end:
760%ifdef RT_ARCH_AMD64
761 add rsp, 8
762%endif
763 ret
764
765%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
766ALIGNCODE(16)
767BITS 64
768.sixtyfourbit_mode:
769 lea rdx, [rsp + 4] ; &HCPhysVmcs
770 and edx, 0ffffffffh
771 xor eax, eax
772 vmclear [rdx]
773 mov r9d, VERR_VMX_INVALID_VMCS_PTR
774 cmovc eax, r9d
775 jmp far [.fpret wrt rip]
776.fpret: ; 16:32 Pointer to .the_end.
777 dd .the_end, NAME(SUPR0AbsKernelCS)
778BITS 32
779%endif
780ENDPROC VMXClearVMCS
781
782
783;/**
784; * Executes VMPTRLD
785; *
786; * @returns VBox status code
787; * @param HCPhysVmcs Physical address of VMCS structure
788; */
789;DECLASM(int) VMXActivateVMCS(RTHCPHYS HCPhysVmcs);
790ALIGNCODE(16)
791BEGINPROC VMXActivateVMCS
792%ifdef RT_ARCH_AMD64
793 xor rax, rax
794 %ifdef ASM_CALL64_GCC
795 push rdi
796 %else
797 push rcx
798 %endif
799 vmptrld [rsp]
800%else
801 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
802 cmp byte [NAME(g_fVMXIs64bitHost)], 0
803 jz .legacy_mode
804 db 0xea ; jmp far .sixtyfourbit_mode
805 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
806.legacy_mode:
807 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
808 xor eax, eax
809 vmptrld [esp + 4]
810%endif
811 jnc .the_end
812 mov eax, VERR_VMX_INVALID_VMCS_PTR
813.the_end:
814%ifdef RT_ARCH_AMD64
815 add rsp, 8
816%endif
817 ret
818
819%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
820ALIGNCODE(16)
821BITS 64
822.sixtyfourbit_mode:
823 lea rdx, [rsp + 4] ; &HCPhysVmcs
824 and edx, 0ffffffffh
825 xor eax, eax
826 vmptrld [rdx]
827 mov r9d, VERR_VMX_INVALID_VMCS_PTR
828 cmovc eax, r9d
829 jmp far [.fpret wrt rip]
830.fpret: ; 16:32 Pointer to .the_end.
831 dd .the_end, NAME(SUPR0AbsKernelCS)
832BITS 32
833%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
834ENDPROC VMXActivateVMCS
835
836
837;/**
838; * Executes VMPTRST
839; *
840; * @returns VBox status code
841; * @param [esp + 04h] gcc:rdi msc:rcx Address that will receive the current VMCS pointer
842; */
843;DECLASM(int) VMXGetActivateVMCS(RTHCPHYS *pVMCS);
844BEGINPROC VMXGetActivateVMCS
845%ifdef RT_OS_OS2
846 mov eax, VERR_NOT_SUPPORTED
847 ret
848%else
849 %ifdef RT_ARCH_AMD64
850 %ifdef ASM_CALL64_GCC
851 vmptrst qword [rdi]
852 %else
853 vmptrst qword [rcx]
854 %endif
855 %else
856 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
857 cmp byte [NAME(g_fVMXIs64bitHost)], 0
858 jz .legacy_mode
859 db 0xea ; jmp far .sixtyfourbit_mode
860 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
861.legacy_mode:
862 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
863 vmptrst qword [esp+04h]
864 %endif
865 xor eax, eax
866.the_end:
867 ret
868
869 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
870ALIGNCODE(16)
871BITS 64
872.sixtyfourbit_mode:
873 lea rdx, [rsp + 4] ; &HCPhysVmcs
874 and edx, 0ffffffffh
875 vmptrst qword [rdx]
876 xor eax, eax
877 jmp far [.fpret wrt rip]
878.fpret: ; 16:32 Pointer to .the_end.
879 dd .the_end, NAME(SUPR0AbsKernelCS)
880BITS 32
881 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
882%endif
883ENDPROC VMXGetActivateVMCS
884
885;/**
886; * Invalidates EPT translations using INVEPT.
887; @param enmFlush msc:ecx gcc:edi x86:[esp+04] Type of flush
888; @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer
889; */
890;DECLASM(int) VMXR0InvEPT(VMX_FLUSH enmFlush, uint64_t *pDescriptor);
891BEGINPROC VMXR0InvEPT
892%ifdef RT_ARCH_AMD64
893 %ifdef ASM_CALL64_GCC
894 and edi, 0ffffffffh
895 xor rax, rax
896; invept rdi, qword [rsi]
897 DB 0x66, 0x0F, 0x38, 0x80, 0x3E
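; The DB lines here and in VMXR0InvVPID emit the instructions as raw bytes, presumably
; because the assemblers in use lacked the mnemonics: 66 0F 38 80 /r is INVEPT and
; 66 0F 38 81 /r is INVVPID.  The trailing byte is the ModRM: 3Eh encodes rdi, [rsi]
; and 0Ah encodes rcx, [rdx] (ecx, [edx] in 32-bit code).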
898 %else
899 and ecx, 0ffffffffh
900 xor rax, rax
901; invept rcx, qword [rdx]
902 DB 0x66, 0x0F, 0x38, 0x80, 0xA
903 %endif
904%else
905 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
906 cmp byte [NAME(g_fVMXIs64bitHost)], 0
907 jz .legacy_mode
908 db 0xea ; jmp far .sixtyfourbit_mode
909 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
910.legacy_mode:
911 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
912 mov ecx, [esp + 4]
913 mov edx, [esp + 8]
914 xor eax, eax
915; invept ecx, qword [edx]
916 DB 0x66, 0x0F, 0x38, 0x80, 0xA
917%endif
918 jnc .valid_vmcs
919 mov eax, VERR_VMX_INVALID_VMCS_PTR
920 ret
921.valid_vmcs:
922 jnz .the_end
923 mov eax, VERR_INVALID_PARAMETER
924.the_end:
925 ret
926
927%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
928ALIGNCODE(16)
929BITS 64
930.sixtyfourbit_mode:
931 and esp, 0ffffffffh
932 mov ecx, [rsp + 4] ; enmFlush
933 mov edx, [rsp + 8] ; pDescriptor
934 xor eax, eax
935; invept rcx, qword [rdx]
936 DB 0x66, 0x0F, 0x38, 0x80, 0xA
937 mov r8d, VERR_INVALID_PARAMETER
938 cmovz eax, r8d
939 mov r9d, VERR_VMX_INVALID_VMCS_PTR
940 cmovc eax, r9d
941 jmp far [.fpret wrt rip]
942.fpret: ; 16:32 Pointer to .the_end.
943 dd .the_end, NAME(SUPR0AbsKernelCS)
944BITS 32
945%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
946ENDPROC VMXR0InvEPT
947
948
949;/**
950; * Invalidates VPID-tagged TLB entries using INVVPID.
951; @param enmFlush msc:ecx gcc:edi x86:[esp+04] Type of flush
952; @param pDescriptor msc:edx gcc:esi x86:[esp+08] Descriptor pointer
953; */
954;DECLASM(int) VMXR0InvVPID(VMX_FLUSH enmFlush, uint64_t *pDescriptor);
955BEGINPROC VMXR0InvVPID
956%ifdef RT_ARCH_AMD64
957 %ifdef ASM_CALL64_GCC
958 and edi, 0ffffffffh
959 xor rax, rax
960; invvpid rdi, qword [rsi]
961 DB 0x66, 0x0F, 0x38, 0x81, 0x3E
962 %else
963 and ecx, 0ffffffffh
964 xor rax, rax
965; invvpid rcx, qword [rdx]
966 DB 0x66, 0x0F, 0x38, 0x81, 0xA
967 %endif
968%else
969 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
970 cmp byte [NAME(g_fVMXIs64bitHost)], 0
971 jz .legacy_mode
972 db 0xea ; jmp far .sixtyfourbit_mode
973 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
974.legacy_mode:
975 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
976 mov ecx, [esp + 4]
977 mov edx, [esp + 8]
978 xor eax, eax
979; invvpid ecx, qword [edx]
980 DB 0x66, 0x0F, 0x38, 0x81, 0xA
981%endif
982 jnc .valid_vmcs
983 mov eax, VERR_VMX_INVALID_VMCS_PTR
984 ret
985.valid_vmcs:
986 jnz .the_end
987 mov eax, VERR_INVALID_PARAMETER
988.the_end:
989 ret
990
991%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
992ALIGNCODE(16)
993BITS 64
994.sixtyfourbit_mode:
995 and esp, 0ffffffffh
996 mov ecx, [rsp + 4] ; enmFlush
997 mov edx, [rsp + 8] ; pDescriptor
998 xor eax, eax
999; invvpid rcx, qword [rdx]
1000 DB 0x66, 0x0F, 0x38, 0x81, 0xA
1001 mov r8d, VERR_INVALID_PARAMETER
1002 cmovz eax, r8d
1003 mov r9d, VERR_VMX_INVALID_VMCS_PTR
1004 cmovc eax, r9d
1005 jmp far [.fpret wrt rip]
1006.fpret: ; 16:32 Pointer to .the_end.
1007 dd .the_end, NAME(SUPR0AbsKernelCS)
1008BITS 32
1009%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
1010ENDPROC VMXR0InvVPID
1011
1012
1013%if GC_ARCH_BITS == 64
1014;;
1015; Executes INVLPGA
1016;
1017; @param pPageGC msc:rcx gcc:rdi x86:[esp+04] Virtual page to invalidate
1018; @param uASID msc:rdx gcc:rsi x86:[esp+0C] Tagged TLB id
1019;
1020;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
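; INVLPGA takes its operands in fixed registers - rAX holds the virtual address to
; invalidate and ECX the ASID - which is why both variants of this routine simply
; shuffle the arguments into xAX/ecx regardless of the calling convention.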
1021BEGINPROC SVMR0InvlpgA
1022%ifdef RT_ARCH_AMD64
1023 %ifdef ASM_CALL64_GCC
1024 mov rax, rdi
1025 mov rcx, rsi
1026 %else
1027 mov rax, rcx
1028 mov rcx, rdx
1029 %endif
1030%else
1031 mov eax, [esp + 4]
1032 mov ecx, [esp + 0Ch]
1033%endif
1034 invlpga [xAX], ecx
1035 ret
1036ENDPROC SVMR0InvlpgA
1037
1038%else ; GC_ARCH_BITS != 64
1039;;
1040; Executes INVLPGA
1041;
1042; @param pPageGC msc:ecx gcc:edi x86:[esp+04] Virtual page to invalidate
1043; @param uASID msc:edx gcc:esi x86:[esp+08] Tagged TLB id
1044;
1045;DECLASM(void) SVMR0InvlpgA(RTGCPTR pPageGC, uint32_t uASID);
1046BEGINPROC SVMR0InvlpgA
1047%ifdef RT_ARCH_AMD64
1048 %ifdef ASM_CALL64_GCC
1049 movzx rax, edi
1050 mov ecx, esi
1051 %else
1052 ; from http://www.cs.cmu.edu/~fp/courses/15213-s06/misc/asm64-handout.pdf:
1053 ; ``Perhaps unexpectedly, instructions that move or generate 32-bit register
1054 ; values also set the upper 32 bits of the register to zero. Consequently
1055 ; there is no need for an instruction movzlq.''
1056 mov eax, ecx
1057 mov ecx, edx
1058 %endif
1059%else
1060 mov eax, [esp + 4]
1061 mov ecx, [esp + 8]
1062%endif
1063 invlpga [xAX], ecx
1064 ret
1065ENDPROC SVMR0InvlpgA
1066
1067%endif ; GC_ARCH_BITS != 64
1068
1069%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
1070
1071;/**
1072; * Gets 64-bit GDTR and IDTR on darwin.
1073; * @param pGdtr Where to store the 64-bit GDTR.
1074; * @param pIdtr Where to store the 64-bit IDTR.
1075; */
1076;DECLASM(void) HMR0Get64bitGdtrAndIdtr(PX86XDTR64 pGdtr, PX86XDTR64 pIdtr);
1077ALIGNCODE(16)
1078BEGINPROC HMR0Get64bitGdtrAndIdtr
1079 db 0xea ; jmp far .sixtyfourbit_mode
1080 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
1081.the_end:
1082 ret
1083
1084ALIGNCODE(16)
1085BITS 64
1086.sixtyfourbit_mode:
1087 and esp, 0ffffffffh
1088 mov ecx, [rsp + 4] ; pGdtr
1089 mov edx, [rsp + 8] ; pIdtr
1090 sgdt [rcx]
1091 sidt [rdx]
1092 jmp far [.fpret wrt rip]
1093.fpret: ; 16:32 Pointer to .the_end.
1094 dd .the_end, NAME(SUPR0AbsKernelCS)
1095BITS 32
1096ENDPROC HMR0Get64bitGdtrAndIdtr
1097
1098
1099;/**
1100; * Gets 64-bit CR3 on darwin.
1101; * @returns CR3
1102; */
1103;DECLASM(uint64_t) HMR0Get64bitCR3(void);
1104ALIGNCODE(16)
1105BEGINPROC HMR0Get64bitCR3
1106 db 0xea ; jmp far .sixtyfourbit_mode
1107 dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
1108.the_end:
1109 ret
1110
1111ALIGNCODE(16)
1112BITS 64
1113.sixtyfourbit_mode:
1114 mov rax, cr3
1115 mov rdx, rax
1116 shr rdx, 32
1117 jmp far [.fpret wrt rip]
1118.fpret: ; 16:32 Pointer to .the_end.
1119 dd .the_end, NAME(SUPR0AbsKernelCS)
1120BITS 32
1121ENDPROC HMR0Get64bitCR3
1122
1123%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
1124
1125%ifdef VBOX_WITH_KERNEL_USING_XMM
1126
1127;;
1128; Wrapper around vmx.pfnStartVM that preserves host XMM registers and
1129; loads the guest ones when necessary.
1130;
1131; @cproto DECLASM(int) HMR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHMVMXSTARTVM pfnStartVM);
1132;
1133; @returns eax
1134;
1135; @param fResumeVM msc:rcx
1136; @param pCtx msc:rdx
1137; @param pVMCSCache msc:r8
1138; @param pVM msc:r9
1139; @param pVCpu msc:[rbp+30h]
1140; @param pfnStartVM msc:[rbp+38h]
1141;
1142; @remarks This is essentially the same code as HMR0SVMRunWrapXMM, only the parameters differ a little bit.
1143;
1144; ASSUMING 64-bit and windows for now.
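; Why only XMM6-XMM15 are saved: in the Windows x64 calling convention these are the
; non-volatile (callee-saved) XMM registers, while XMM0-XMM5 are volatile and need no
; preservation.  The mov [xSP + 020h], r10 below places pVCpu in the fifth-argument
; stack slot right above the 32-byte shadow space that same convention requires.
; The same reasoning applies to HMR0SVMRunWrapXMM further down.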
1145ALIGNCODE(16)
1146BEGINPROC HMR0VMXStartVMWrapXMM
1147 push xBP
1148 mov xBP, xSP
1149 sub xSP, 0a0h + 040h ; Don't bother optimizing the frame size.
1150
1151 ; spill input parameters.
1152 mov [xBP + 010h], rcx ; fResumeVM
1153 mov [xBP + 018h], rdx ; pCtx
1154 mov [xBP + 020h], r8 ; pVMCSCache
1155 mov [xBP + 028h], r9 ; pVM
1156
1157 ; Ask CPUM whether we've started using the FPU yet.
1158 mov rcx, [xBP + 30h] ; pVCpu
1159 call NAME(CPUMIsGuestFPUStateActive)
1160 test al, al
1161 jnz .guest_fpu_state_active
1162
1163 ; No need to mess with XMM registers just call the start routine and return.
1164 mov r11, [xBP + 38h] ; pfnStartVM
1165 mov r10, [xBP + 30h] ; pVCpu
1166 mov [xSP + 020h], r10
1167 mov rcx, [xBP + 010h] ; fResumeVM
1168 mov rdx, [xBP + 018h] ; pCtx
1169 mov r8, [xBP + 020h] ; pVMCSCache
1170 mov r9, [xBP + 028h] ; pVM
1171 call r11
1172
1173 leave
1174 ret
1175
1176ALIGNCODE(8)
1177.guest_fpu_state_active:
1178 ; Save the host XMM registers.
1179 movdqa [rsp + 040h + 000h], xmm6
1180 movdqa [rsp + 040h + 010h], xmm7
1181 movdqa [rsp + 040h + 020h], xmm8
1182 movdqa [rsp + 040h + 030h], xmm9
1183 movdqa [rsp + 040h + 040h], xmm10
1184 movdqa [rsp + 040h + 050h], xmm11
1185 movdqa [rsp + 040h + 060h], xmm12
1186 movdqa [rsp + 040h + 070h], xmm13
1187 movdqa [rsp + 040h + 080h], xmm14
1188 movdqa [rsp + 040h + 090h], xmm15
1189
1190 ; Load the full guest XMM register state.
1191 mov r10, [xBP + 018h] ; pCtx
1192 lea r10, [r10 + XMM_OFF_IN_X86FXSTATE]
1193 movdqa xmm0, [r10 + 000h]
1194 movdqa xmm1, [r10 + 010h]
1195 movdqa xmm2, [r10 + 020h]
1196 movdqa xmm3, [r10 + 030h]
1197 movdqa xmm4, [r10 + 040h]
1198 movdqa xmm5, [r10 + 050h]
1199 movdqa xmm6, [r10 + 060h]
1200 movdqa xmm7, [r10 + 070h]
1201 movdqa xmm8, [r10 + 080h]
1202 movdqa xmm9, [r10 + 090h]
1203 movdqa xmm10, [r10 + 0a0h]
1204 movdqa xmm11, [r10 + 0b0h]
1205 movdqa xmm12, [r10 + 0c0h]
1206 movdqa xmm13, [r10 + 0d0h]
1207 movdqa xmm14, [r10 + 0e0h]
1208 movdqa xmm15, [r10 + 0f0h]
1209
1210 ; Make the call (same as in the other case ).
1211 mov r11, [xBP + 38h] ; pfnStartVM
1212 mov r10, [xBP + 30h] ; pVCpu
1213 mov [xSP + 020h], r10
1214 mov rcx, [xBP + 010h] ; fResumeVM
1215 mov rdx, [xBP + 018h] ; pCtx
1216 mov r8, [xBP + 020h] ; pVMCSCache
1217 mov r9, [xBP + 028h] ; pVM
1218 call r11
1219
1220 ; Save the guest XMM registers.
1221 mov r10, [xBP + 018h] ; pCtx
1222 lea r10, [r10 + XMM_OFF_IN_X86FXSTATE]
1223 movdqa [r10 + 000h], xmm0
1224 movdqa [r10 + 010h], xmm1
1225 movdqa [r10 + 020h], xmm2
1226 movdqa [r10 + 030h], xmm3
1227 movdqa [r10 + 040h], xmm4
1228 movdqa [r10 + 050h], xmm5
1229 movdqa [r10 + 060h], xmm6
1230 movdqa [r10 + 070h], xmm7
1231 movdqa [r10 + 080h], xmm8
1232 movdqa [r10 + 090h], xmm9
1233 movdqa [r10 + 0a0h], xmm10
1234 movdqa [r10 + 0b0h], xmm11
1235 movdqa [r10 + 0c0h], xmm12
1236 movdqa [r10 + 0d0h], xmm13
1237 movdqa [r10 + 0e0h], xmm14
1238 movdqa [r10 + 0f0h], xmm15
1239
1240 ; Load the host XMM registers.
1241 movdqa xmm6, [rsp + 040h + 000h]
1242 movdqa xmm7, [rsp + 040h + 010h]
1243 movdqa xmm8, [rsp + 040h + 020h]
1244 movdqa xmm9, [rsp + 040h + 030h]
1245 movdqa xmm10, [rsp + 040h + 040h]
1246 movdqa xmm11, [rsp + 040h + 050h]
1247 movdqa xmm12, [rsp + 040h + 060h]
1248 movdqa xmm13, [rsp + 040h + 070h]
1249 movdqa xmm14, [rsp + 040h + 080h]
1250 movdqa xmm15, [rsp + 040h + 090h]
1251 leave
1252 ret
1253ENDPROC HMR0VMXStartVMWrapXMM
1254
1255;;
1256; Wrapper around svm.pfnVMRun that preserves host XMM registers and
1257; loads the guest ones when necessary.
1258;
1259; @cproto DECLASM(int) HMR0SVMRunWrapXMM(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHMSVMVMRUN pfnVMRun);
1260;
1261; @returns eax
1262;
1263; @param pVMCBHostPhys msc:rcx
1264; @param pVMCBPhys msc:rdx
1265; @param pCtx msc:r8
1266; @param pVM msc:r9
1267; @param pVCpu msc:[rbp+30h]
1268; @param pfnVMRun msc:[rbp+38h]
1269;
1270; @remarks This is essentially the same code as HMR0VMXStartVMWrapXMM, only the parameters differ a little bit.
1271;
1272; ASSUMING 64-bit and windows for now.
1273ALIGNCODE(16)
1274BEGINPROC HMR0SVMRunWrapXMM
1275 push xBP
1276 mov xBP, xSP
1277 sub xSP, 0a0h + 040h ; Don't bother optimizing the frame size.
1278
1279 ; spill input parameters.
1280 mov [xBP + 010h], rcx ; pVMCBHostPhys
1281 mov [xBP + 018h], rdx ; pVMCBPhys
1282 mov [xBP + 020h], r8 ; pCtx
1283 mov [xBP + 028h], r9 ; pVM
1284
1285 ; Ask CPUM whether we've started using the FPU yet.
1286 mov rcx, [xBP + 30h] ; pVCpu
1287 call NAME(CPUMIsGuestFPUStateActive)
1288 test al, al
1289 jnz .guest_fpu_state_active
1290
1291 ; No need to mess with XMM registers just call the start routine and return.
1292 mov r11, [xBP + 38h] ; pfnVMRun
1293 mov r10, [xBP + 30h] ; pVCpu
1294 mov [xSP + 020h], r10
1295 mov rcx, [xBP + 010h] ; pVMCBHostPhys
1296 mov rdx, [xBP + 018h] ; pVMCBPhys
1297 mov r8, [xBP + 020h] ; pCtx
1298 mov r9, [xBP + 028h] ; pVM
1299 call r11
1300
1301 leave
1302 ret
1303
1304ALIGNCODE(8)
1305.guest_fpu_state_active:
1306 ; Save the host XMM registers.
1307 movdqa [rsp + 040h + 000h], xmm6
1308 movdqa [rsp + 040h + 010h], xmm7
1309 movdqa [rsp + 040h + 020h], xmm8
1310 movdqa [rsp + 040h + 030h], xmm9
1311 movdqa [rsp + 040h + 040h], xmm10
1312 movdqa [rsp + 040h + 050h], xmm11
1313 movdqa [rsp + 040h + 060h], xmm12
1314 movdqa [rsp + 040h + 070h], xmm13
1315 movdqa [rsp + 040h + 080h], xmm14
1316 movdqa [rsp + 040h + 090h], xmm15
1317
1318 ; Load the full guest XMM register state.
1319 mov r10, [xBP + 020h] ; pCtx
1320 lea r10, [r10 + XMM_OFF_IN_X86FXSTATE]
1321 movdqa xmm0, [r10 + 000h]
1322 movdqa xmm1, [r10 + 010h]
1323 movdqa xmm2, [r10 + 020h]
1324 movdqa xmm3, [r10 + 030h]
1325 movdqa xmm4, [r10 + 040h]
1326 movdqa xmm5, [r10 + 050h]
1327 movdqa xmm6, [r10 + 060h]
1328 movdqa xmm7, [r10 + 070h]
1329 movdqa xmm8, [r10 + 080h]
1330 movdqa xmm9, [r10 + 090h]
1331 movdqa xmm10, [r10 + 0a0h]
1332 movdqa xmm11, [r10 + 0b0h]
1333 movdqa xmm12, [r10 + 0c0h]
1334 movdqa xmm13, [r10 + 0d0h]
1335 movdqa xmm14, [r10 + 0e0h]
1336 movdqa xmm15, [r10 + 0f0h]
1337
1338 ; Make the call (same as in the other case ).
1339 mov r11, [xBP + 38h] ; pfnVMRun
1340 mov r10, [xBP + 30h] ; pVCpu
1341 mov [xSP + 020h], r10
1342 mov rcx, [xBP + 010h] ; pVMCBHostPhys
1343 mov rdx, [xBP + 018h] ; pVMCBPhys
1344 mov r8, [xBP + 020h] ; pCtx
1345 mov r9, [xBP + 028h] ; pVM
1346 call r11
1347
1348 ; Save the guest XMM registers.
1349 mov r10, [xBP + 020h] ; pCtx
1350 lea r10, [r10 + XMM_OFF_IN_X86FXSTATE]
1351 movdqa [r10 + 000h], xmm0
1352 movdqa [r10 + 010h], xmm1
1353 movdqa [r10 + 020h], xmm2
1354 movdqa [r10 + 030h], xmm3
1355 movdqa [r10 + 040h], xmm4
1356 movdqa [r10 + 050h], xmm5
1357 movdqa [r10 + 060h], xmm6
1358 movdqa [r10 + 070h], xmm7
1359 movdqa [r10 + 080h], xmm8
1360 movdqa [r10 + 090h], xmm9
1361 movdqa [r10 + 0a0h], xmm10
1362 movdqa [r10 + 0b0h], xmm11
1363 movdqa [r10 + 0c0h], xmm12
1364 movdqa [r10 + 0d0h], xmm13
1365 movdqa [r10 + 0e0h], xmm14
1366 movdqa [r10 + 0f0h], xmm15
1367
1368 ; Load the host XMM registers.
1369 movdqa xmm6, [rsp + 040h + 000h]
1370 movdqa xmm7, [rsp + 040h + 010h]
1371 movdqa xmm8, [rsp + 040h + 020h]
1372 movdqa xmm9, [rsp + 040h + 030h]
1373 movdqa xmm10, [rsp + 040h + 040h]
1374 movdqa xmm11, [rsp + 040h + 050h]
1375 movdqa xmm12, [rsp + 040h + 060h]
1376 movdqa xmm13, [rsp + 040h + 070h]
1377 movdqa xmm14, [rsp + 040h + 080h]
1378 movdqa xmm15, [rsp + 040h + 090h]
1379 leave
1380 ret
1381ENDPROC HMR0SVMRunWrapXMM
1382
1383%endif ; VBOX_WITH_KERNEL_USING_XMM
1384
1385;
1386; The default setup of the StartVM routines.
1387;
1388%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
1389 %define MY_NAME(name) name %+ _32
1390%else
1391 %define MY_NAME(name) name
1392%endif
1393%ifdef RT_ARCH_AMD64
1394 %define MYPUSHAD MYPUSHAD64
1395 %define MYPOPAD MYPOPAD64
1396 %define MYPUSHSEGS MYPUSHSEGS64
1397 %define MYPOPSEGS MYPOPSEGS64
1398%else
1399 %define MYPUSHAD MYPUSHAD32
1400 %define MYPOPAD MYPOPAD32
1401 %define MYPUSHSEGS MYPUSHSEGS32
1402 %define MYPOPSEGS MYPOPSEGS32
1403%endif
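; Under VBOX_WITH_HYBRID_32BIT_KERNEL the include below is processed twice: once here
; with _32 suffixed names and 32-bit conventions, and once more at the very end of the
; file with _64 suffixes and 64-bit register/ABI defines, so that the wrapper thunks
; defined after the first include (VMXR0StartVM32/64, SVMR0VMRun/SVMR0VMRun64) can
; dispatch to whichever variant matches the host kernel mode.  Without the hybrid
; kernel, MY_NAME is a no-op and the file is included only once.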
1404
1405%include "HMR0Mixed.mac"
1406
1407
1408%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
1409 ;
1410 ; Write the wrapper procedures.
1411 ;
1412 ; These routines are probably being too paranoid about selector
1413 ; restoring, but better safe than sorry...
1414 ;
1415
1416; DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache /*, PVM pVM, PVMCPU pVCpu*/);
1417ALIGNCODE(16)
1418BEGINPROC VMXR0StartVM32
1419 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1420 je near NAME(VMXR0StartVM32_32)
1421
1422 ; stack frame
1423 push esi
1424 push edi
1425 push fs
1426 push gs
1427
1428 ; jmp far .thunk64
1429 db 0xea
1430 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1431
1432ALIGNCODE(16)
1433BITS 64
1434.thunk64:
1435 sub esp, 20h
1436 mov edi, [rsp + 20h + 14h] ; fResume
1437 mov esi, [rsp + 20h + 18h] ; pCtx
1438 mov edx, [rsp + 20h + 1Ch] ; pCache
1439 call NAME(VMXR0StartVM32_64)
1440 add esp, 20h
1441 jmp far [.fpthunk32 wrt rip]
1442.fpthunk32: ; 16:32 Pointer to .thunk32.
1443 dd .thunk32, NAME(SUPR0AbsKernelCS)
1444
1445BITS 32
1446ALIGNCODE(16)
1447.thunk32:
1448 pop gs
1449 pop fs
1450 pop edi
1451 pop esi
1452 ret
1453ENDPROC VMXR0StartVM32
1454
1455
1456; DECLASM(int) VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache /*, PVM pVM, PVMCPU pVCpu*/);
1457ALIGNCODE(16)
1458BEGINPROC VMXR0StartVM64
1459 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1460 je .not_in_long_mode
1461
1462 ; stack frame
1463 push esi
1464 push edi
1465 push fs
1466 push gs
1467
1468 ; jmp far .thunk64
1469 db 0xea
1470 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1471
1472ALIGNCODE(16)
1473BITS 64
1474.thunk64:
1475 sub esp, 20h
1476 mov edi, [rsp + 20h + 14h] ; fResume
1477 mov esi, [rsp + 20h + 18h] ; pCtx
1478 mov edx, [rsp + 20h + 1Ch] ; pCache
1479 call NAME(VMXR0StartVM64_64)
1480 add esp, 20h
1481 jmp far [.fpthunk32 wrt rip]
1482.fpthunk32: ; 16:32 Pointer to .thunk32.
1483 dd .thunk32, NAME(SUPR0AbsKernelCS)
1484
1485BITS 32
1486ALIGNCODE(16)
1487.thunk32:
1488 pop gs
1489 pop fs
1490 pop edi
1491 pop esi
1492 ret
1493
1494.not_in_long_mode:
1495 mov eax, VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE
1496 ret
1497ENDPROC VMXR0StartVM64
1498
1499;DECLASM(int) SVMR0VMRun(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx /*, PVM pVM, PVMCPU pVCpu*/);
1500ALIGNCODE(16)
1501BEGINPROC SVMR0VMRun
1502 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1503 je near NAME(SVMR0VMRun_32)
1504
1505 ; stack frame
1506 push esi
1507 push edi
1508 push fs
1509 push gs
1510
1511 ; jmp far .thunk64
1512 db 0xea
1513 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1514
1515ALIGNCODE(16)
1516BITS 64
1517.thunk64:
1518 sub esp, 20h
1519 mov rdi, [rsp + 20h + 14h] ; pVMCBHostPhys
1520 mov rsi, [rsp + 20h + 1Ch] ; pVMCBPhys
1521 mov edx, [rsp + 20h + 24h] ; pCtx
1522 call NAME(SVMR0VMRun_64)
1523 add esp, 20h
1524 jmp far [.fpthunk32 wrt rip]
1525.fpthunk32: ; 16:32 Pointer to .thunk32.
1526 dd .thunk32, NAME(SUPR0AbsKernelCS)
1527
1528BITS 32
1529ALIGNCODE(16)
1530.thunk32:
1531 pop gs
1532 pop fs
1533 pop edi
1534 pop esi
1535 ret
1536ENDPROC SVMR0VMRun
1537
1538
1539; DECLASM(int) SVMR0VMRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx /*, PVM pVM, PVMCPU pVCpu*/);
1540ALIGNCODE(16)
1541BEGINPROC SVMR0VMRun64
1542 cmp byte [NAME(g_fVMXIs64bitHost)], 0
1543 je .not_in_long_mode
1544
1545 ; stack frame
1546 push esi
1547 push edi
1548 push fs
1549 push gs
1550
1551 ; jmp far .thunk64
1552 db 0xea
1553 dd .thunk64, NAME(SUPR0Abs64bitKernelCS)
1554
1555ALIGNCODE(16)
1556BITS 64
1557.thunk64:
1558 sub esp, 20h
1559 mov rdi, [rbp + 20h + 14h] ; pVMCBHostPhys
1560 mov rsi, [rbp + 20h + 1Ch] ; pVMCBPhys
1561 mov edx, [rbp + 20h + 24h] ; pCtx
1562 call NAME(SVMR0VMRun64_64)
1563 add esp, 20h
1564 jmp far [.fpthunk32 wrt rip]
1565.fpthunk32: ; 16:32 Pointer to .thunk32.
1566 dd .thunk32, NAME(SUPR0AbsKernelCS)
1567
1568BITS 32
1569ALIGNCODE(16)
1570.thunk32:
1571 pop gs
1572 pop fs
1573 pop edi
1574 pop esi
1575 ret
1576
1577.not_in_long_mode:
1578 mov eax, VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE
1579 ret
1580ENDPROC SVMR0VMRun64
1581
1582 ;
1583 ; Do it a second time pretending we're a 64-bit host.
1584 ;
1585 ; This *HAS* to be done at the very end of the file to avoid restoring
1586 ; macros. So, add new code *BEFORE* this mess.
1587 ;
1588 BITS 64
1589 %undef RT_ARCH_X86
1590 %define RT_ARCH_AMD64
1591 %undef ASM_CALL64_MSC
1592 %define ASM_CALL64_GCC
1593 %define xCB 8
1594 %define xSP rsp
1595 %define xBP rbp
1596 %define xAX rax
1597 %define xBX rbx
1598 %define xCX rcx
1599 %define xDX rdx
1600 %define xDI rdi
1601 %define xSI rsi
1602 %define MY_NAME(name) name %+ _64
1603 %define MYPUSHAD MYPUSHAD64
1604 %define MYPOPAD MYPOPAD64
1605 %define MYPUSHSEGS MYPUSHSEGS64
1606 %define MYPOPSEGS MYPOPSEGS64
1607
1608 %include "HMR0Mixed.mac"
1609%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL