VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMGC/HWACCMGCA.asm@ 22040

Last change on this file since 22040 was 22040, checked in by vboxsync, 16 years ago

VT-x: use MSR bitmaps and automatic load/store (risky change).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 15.5 KB
 
1; $Id: HWACCMGCA.asm 22040 2009-08-06 16:33:21Z vboxsync $
2;; @file
3; VMXM - GC vmx helpers
4;
5
6;
7; Copyright (C) 2006-2007 Sun Microsystems, Inc.
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18; Clara, CA 95054 USA or visit http://www.sun.com if you need
19; additional information or have any questions.
20;
21
22;*******************************************************************************
23;* Header Files *
24;*******************************************************************************
25%undef RT_ARCH_X86
26%define RT_ARCH_AMD64
27%include "VBox/asmdefs.mac"
28%include "VBox/err.mac"
29%include "VBox/hwacc_vmx.mac"
30%include "VBox/cpum.mac"
31%include "VBox/x86.mac"
32%include "../HWACCMInternal.mac"
33
%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
 ; NASM's OMF (OS/2) output cannot encode the VMX/SVM instructions used below,
 ; so they are stubbed out as int3 breakpoints just to keep the build going.
 ; None of these paths may ever actually execute on OS/2.
 %macro vmwrite 2,
    int3
 %endmacro
 %define vmlaunch int3
 %define vmresume int3
 %define vmsave int3
 %define vmload int3
 %define vmrun int3
 %define clgi int3
 %define stgi int3
 %macro invlpga 2,
    int3
 %endmacro
%endif
49
;; @def MYPUSHSEGS
; Macro saving segment registers on the stack.
; NOTE(review): only ES and DS are actually pushed here; FS/GS (bases) appear
; to be handled separately via MSRs — confirm against the 32-bit variant.
; @param 1 full width register name

;; @def MYPOPSEGS
; Macro restoring the segment registers saved by MYPUSHSEGS (DS, then ES,
; i.e. reverse push order).
; @param 1 full width register name

 ; Load the corresponding guest MSR (trashes rdx & rcx)
 ; %1 = MSR index, %2 = byte offset of the 64-bit value within CPUMCTX (rsi).
 ; wrmsr takes the value split as edx:eax, hence the two 32-bit loads.
 %macro LOADGUESTMSR 2
    mov     rcx, %1
    mov     edx, dword [rsi + %2 + 4]   ; high 32 bits
    mov     eax, dword [rsi + %2]       ; low 32 bits
    wrmsr
 %endmacro

 ; Save a guest MSR (trashes rdx & rcx)
 ; Only really useful for gs kernel base as that one can be changed behind our back (swapgs)
 ; %1 = MSR index, %2 = byte offset of the 64-bit value within CPUMCTX (rsi).
 %macro SAVEGUESTMSR 2
    mov     rcx, %1
    rdmsr                               ; result in edx:eax
    mov     dword [rsi + %2], eax       ; low 32 bits
    mov     dword [rsi + %2 + 4], edx   ; high 32 bits
 %endmacro

 ; Push ES and DS using the given scratch register (segment regs cannot be
 ; pushed directly in a uniform way across assemblers/modes).
 %macro MYPUSHSEGS 1
    mov     %1, es
    push    %1
    mov     %1, ds
    push    %1
 %endmacro

 ; Pop DS and ES back (reverse order of MYPUSHSEGS); trashes %1.
 %macro MYPOPSEGS 1
    pop     %1
    mov     ds, %1
    pop     %1
    mov     es, %1
 %endmacro
88
89BEGINCODE
90BITS 64
91
92
;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (64 bits guest mode)
; *
; * Enters VMX root mode, loads the VMCS, writes the host state fields,
; * restores the guest GPRs and launches the guest.  On VM-exit the CPU
; * resumes at .vmlaunch64_done (written into VMX_VMCS_HOST_RIP below).
; *
; * @returns VBox status code (VINF_SUCCESS, VERR_VMX_INVALID_VMXON_PTR,
; *          VERR_VMX_INVALID_VMCS_PTR, VERR_VMX_GENERIC,
; *          VERR_VMX_UNABLE_TO_START_VM)
; * @param pPageCpuPhys VMXON physical address [rsp+8]
; * @param pVMCSPhys VMCS physical address [rsp+16]
; * @param pCache VMCS cache [rsp+24]
; * @param pCtx Guest context (rsi)
; */
BEGINPROC VMXGCStartVM64
    push    rbp
    mov     rbp, rsp                    ; params are now at [rbp + 8 + 8*n]

    ; Make sure VT-x instructions are allowed
    mov     rax, cr4
    or      rax, X86_CR4_VMXE
    mov     cr4, rax

    ;/* Enter VMX Root Mode */
    ; vmxon sets CF on VMfailInvalid and ZF on VMfailValid; test both.
    vmxon   [rbp + 8 + 8]               ; pPageCpuPhys
    jnc     .vmxon_success
    mov     rax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .vmstart64_vmxon_failed     ; not in root mode -> skip vmxoff

.vmxon_success:
    jnz     .vmxon_success2             ; ZF set -> VMfailValid
    mov     rax, VERR_VMX_GENERIC
    jmp     .vmstart64_vmxon_failed

.vmxon_success2:
    ; Activate the VMCS pointer (same CF/ZF failure reporting as vmxon)
    vmptrld [rbp + 16 + 8]              ; pVMCSPhys
    jnc     .vmptrld_success
    mov     rax, VERR_VMX_INVALID_VMCS_PTR
    jmp     .vmstart64_vmxoff_end       ; in root mode, so vmxoff on the way out

.vmptrld_success:
    jnz     .vmptrld_success2
    mov     rax, VERR_VMX_GENERIC
    jmp     .vmstart64_vmxoff_end

.vmptrld_success2:

    ; Save the VMCS pointer on the stack (popped via vmclear at .vmstart64_end)
    push    qword [rbp + 16 + 8]

    ;/* Save segment registers (ES/DS) */
    MYPUSHSEGS rax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    ; Flush the VMCS write cache first (before any other vmreads/vmwrites!)
    mov     rbx, [rbp + 24 + 8]         ; pCache

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     qword [rbx + VMCSCACHE.uPos], 2     ; progress marker for crash dumps
%endif

%ifdef DEBUG
    ; Record the inputs so the ring-3 side can sanity-check them.
    mov     rax, [rbp + 8 + 8]          ; pPageCpuPhys
    mov     [rbx + VMCSCACHE.TestIn.pPageCpuPhys], rax
    mov     rax, [rbp + 16 + 8]         ; pVMCSPhys
    mov     [rbx + VMCSCACHE.TestIn.pVMCSPhys], rax
    mov     [rbx + VMCSCACHE.TestIn.pCache], rbx
    mov     [rbx + VMCSCACHE.TestIn.pCtx], rsi
%endif

    mov     ecx, [rbx + VMCSCACHE.Write.cValidEntries]
    cmp     ecx, 0
    je      .no_cached_writes
    mov     rdx, rcx                    ; rdx = entry count (loop bound)
    mov     rcx, 0                      ; rcx = loop index
    jmp     .cached_write

ALIGN(16)
.cached_write:
    ; vmwrite each cached (field, value) pair into the current VMCS.
    mov     eax, [rbx + VMCSCACHE.Write.aField + rcx*4]
    vmwrite rax, qword [rbx + VMCSCACHE.Write.aFieldVal + rcx*8]
    inc     rcx
    cmp     rcx, rdx
    jl      .cached_write

    mov     dword [rbx + VMCSCACHE.Write.cValidEntries], 0  ; cache flushed
.no_cached_writes:

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     qword [rbx + VMCSCACHE.uPos], 3
%endif
    ; Save the pCache pointer (popped after VM-exit / on the failure paths)
    push    xBX
%endif

    ; Save the host state that's relevant in the temporary 64 bits mode
    mov     rdx, cr0
    mov     eax, VMX_VMCS_HOST_CR0
    vmwrite rax, rdx

    mov     rdx, cr3
    mov     eax, VMX_VMCS_HOST_CR3
    vmwrite rax, rdx

    mov     rdx, cr4
    mov     eax, VMX_VMCS_HOST_CR4
    vmwrite rax, rdx

    mov     rdx, cs
    mov     eax, VMX_VMCS_HOST_FIELD_CS
    vmwrite rax, rdx

    mov     rdx, ss
    mov     eax, VMX_VMCS_HOST_FIELD_SS
    vmwrite rax, rdx

    ; sgdt stores 2 bytes of limit + 8 bytes of base; the base lives at +2.
    sub     rsp, 8*2
    sgdt    [rsp]
    mov     eax, VMX_VMCS_HOST_GDTR_BASE
    vmwrite rax, [rsp+2]
    add     rsp, 8*2

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     qword [rbx + VMCSCACHE.uPos], 4
%endif

    ; hopefully we can ignore TR (we restore it anyway on the way back to 32 bits mode)

    ;/* First we have to save some final CPU context registers. */
    lea     rdx, [.vmlaunch64_done wrt rip]
    mov     rax, VMX_VMCS_HOST_RIP      ;/* return address (too difficult to continue after VMLAUNCH?) */
    vmwrite rax, rdx
    ;/* Note: assumes success... */

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; * - EFLAGS (reset to RT_BIT(1); not relevant)
    ; *
    ; */
%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     qword [rbx + VMCSCACHE.uPos], 5
%endif

    ; Save the pCtx pointer (topmost stack slot across the guest run)
    push    rsi

    ; Restore CR2 (not handled by the VMCS; must be loaded manually)
    mov     rbx, qword [rsi + CPUMCTX.cr2]
    mov     cr2, rbx

    mov     eax, VMX_VMCS_HOST_RSP
    vmwrite rax, rsp
    ;/* Note: assumes success... */
    ;/* Don't mess with ESP anymore!! */

    ;/* Restore Guest's general purpose registers. */
    mov     rax, qword [rsi + CPUMCTX.eax]
    mov     rbx, qword [rsi + CPUMCTX.ebx]
    mov     rcx, qword [rsi + CPUMCTX.ecx]
    mov     rdx, qword [rsi + CPUMCTX.edx]
    mov     rbp, qword [rsi + CPUMCTX.ebp]
    mov     r8, qword [rsi + CPUMCTX.r8]
    mov     r9, qword [rsi + CPUMCTX.r9]
    mov     r10, qword [rsi + CPUMCTX.r10]
    mov     r11, qword [rsi + CPUMCTX.r11]
    mov     r12, qword [rsi + CPUMCTX.r12]
    mov     r13, qword [rsi + CPUMCTX.r13]
    mov     r14, qword [rsi + CPUMCTX.r14]
    mov     r15, qword [rsi + CPUMCTX.r15]

    ;/* Restore rdi & rsi (rsi last — it was the pCtx pointer). */
    mov     rdi, qword [rsi + CPUMCTX.edi]
    mov     rsi, qword [rsi + CPUMCTX.esi]

    vmlaunch
    jmp     .vmlaunch64_done;           ;/* here if vmlaunch detected a failure. */

ALIGNCODE(16)
.vmlaunch64_done:
    ; Reached either by VM-exit (host RIP points here) or by vmlaunch failure.
    ; CF = VMfailInvalid, ZF = VMfailValid (VM-instruction error available).
    jc      near .vmstart64_invalid_vmxon_ptr
    jz      near .vmstart64_start_failed

    ; Guest rdi is still live; park it so rdi can address pCtx.
    push    rdi
    mov     rdi, [rsp + 8]              ; pCtx (below the rdi we just pushed)

    mov     qword [rdi + CPUMCTX.eax], rax
    mov     qword [rdi + CPUMCTX.ebx], rbx
    mov     qword [rdi + CPUMCTX.ecx], rcx
    mov     qword [rdi + CPUMCTX.edx], rdx
    mov     qword [rdi + CPUMCTX.esi], rsi
    mov     qword [rdi + CPUMCTX.ebp], rbp
    mov     qword [rdi + CPUMCTX.r8], r8
    mov     qword [rdi + CPUMCTX.r9], r9
    mov     qword [rdi + CPUMCTX.r10], r10
    mov     qword [rdi + CPUMCTX.r11], r11
    mov     qword [rdi + CPUMCTX.r12], r12
    mov     qword [rdi + CPUMCTX.r13], r13
    mov     qword [rdi + CPUMCTX.r14], r14
    mov     qword [rdi + CPUMCTX.r15], r15

    pop     rax                         ; the guest edi we pushed above
    mov     qword [rdi + CPUMCTX.edi], rax

    pop     rsi                         ; pCtx (needed in rsi by the macros below)

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    pop     rdi                         ; saved pCache

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [rdi + VMCSCACHE.uPos], 7
%endif
%ifdef DEBUG
    mov     [rdi + VMCSCACHE.TestOut.pCache], rdi
    mov     [rdi + VMCSCACHE.TestOut.pCtx], rsi
    mov     rax, cr8
    mov     [rdi + VMCSCACHE.TestOut.cr8], rax
%endif

    ; Refill the VMCS read cache from the current VMCS.
    mov     ecx, [rdi + VMCSCACHE.Read.cValidEntries]
    cmp     ecx, 0                      ; can't happen
    je      .no_cached_reads
    jmp     .cached_read

ALIGN(16)
.cached_read:
    ; Walk the entries backwards (rcx = count .. 1 -> index count-1 .. 0).
    dec     rcx
    mov     eax, [rdi + VMCSCACHE.Read.aField + rcx*4]
    vmread  qword [rdi + VMCSCACHE.Read.aFieldVal + rcx*8], rax
    cmp     rcx, 0
    jnz     .cached_read
.no_cached_reads:

    ; Save CR2 for EPT (guest CR2 is not captured by the VMCS)
    mov     rax, cr2
    mov     [rdi + VMCSCACHE.cr2], rax
%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [rdi + VMCSCACHE.uPos], 8
%endif
%endif

    ; Restore segment registers
    MYPOPSEGS rax

    mov     eax, VINF_SUCCESS

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [rdi + VMCSCACHE.uPos], 9
%endif
.vmstart64_end:
    ; Common exit: stack top holds the pushed VMCS pointer, eax the status.

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
%ifdef DEBUG
    mov     rdx, [rsp]                  ; pVMCSPhys
    mov     [rdi + VMCSCACHE.TestOut.pVMCSPhys], rdx
%endif
%endif

    ; Write back the data and disable the VMCS
    vmclear qword [rsp]                 ; pushed pVMCS
    add     rsp, 8

.vmstart64_vmxoff_end:
    ; Disable VMX root mode
    vmxoff
.vmstart64_vmxon_failed:
%ifdef VMX_USE_CACHED_VMCS_ACCESSES
%ifdef DEBUG
    cmp     eax, VINF_SUCCESS
    jne     .skip_flags_save            ; rdi only holds pCache on the success path

    pushf
    pop     rdx
    mov     [rdi + VMCSCACHE.TestOut.eflags], rdx
%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [rdi + VMCSCACHE.uPos], 12
%endif
.skip_flags_save:
%endif
%endif
    pop     rbp
    ret


.vmstart64_invalid_vmxon_ptr:
    ; vmlaunch/vmresume reported VMfailInvalid after the guest-state pushes;
    ; unwind the same stack slots as the success path.
    pop     rsi                         ; pCtx (needed in rsi by the macros below)

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    pop     rdi                         ; pCache
%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [rdi + VMCSCACHE.uPos], 10
%endif

%ifdef DEBUG
    mov     [rdi + VMCSCACHE.TestOut.pCache], rdi
    mov     [rdi + VMCSCACHE.TestOut.pCtx], rsi
%endif

%endif

    ; Restore segment registers
    MYPOPSEGS rax

    ; Restore all general purpose host registers.
    mov     eax, VERR_VMX_INVALID_VMXON_PTR
    jmp     .vmstart64_end

.vmstart64_start_failed:
    ; vmlaunch/vmresume reported VMfailValid (ZF set); same unwinding.
    pop     rsi                         ; pCtx (needed in rsi by the macros below)

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    pop     rdi                         ; pCache

%ifdef DEBUG
    mov     [rdi + VMCSCACHE.TestOut.pCache], rdi
    mov     [rdi + VMCSCACHE.TestOut.pCtx], rsi
%endif
%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [rdi + VMCSCACHE.uPos], 11
%endif

%endif

    ; Restore segment registers
    MYPOPSEGS rax

    ; Restore all general purpose host registers.
    mov     eax, VERR_VMX_UNABLE_TO_START_VM
    jmp     .vmstart64_end
ENDPROC VMXGCStartVM64
424
425
;/**
; * Prepares for and executes VMRUN (64 bits guests)
; *
; * Saves the host state via vmsave, loads the guest state, runs the guest
; * with CLGI/STI around VMRUN so external interrupts force a #VMEXIT, then
; * restores the host state and writes the guest GPRs back into pCtx.
; *
; * @returns VBox status code (always VINF_SUCCESS on this path)
; * @param HCPhysVMCBHost Physical address of host VMCB (rsp+8)
; * @param HCPhysVMCB Physical address of guest VMCB (rsp+16)
; * @param pCtx Guest context (rsi)
; */
BEGINPROC SVMGCVMRun64
    push    rbp
    mov     rbp, rsp                    ; params now at [rbp + 8 + 8*n]
    pushf                               ; preserve host rflags (IF state)

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP, RAX
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; */

    ;/* Save the Guest CPU context pointer. */
    push    rsi                         ; push for saving the state at the end

    ; save host fs, gs, sysenter msr etc (vmsave operates on the VMCB in rax)
    mov     rax, [rbp + 8 + 8]          ; pVMCBHostPhys (64 bits physical address)
    push    rax                         ; save for the vmload after vmrun
    vmsave

    ; setup eax for VMLOAD
    mov     rax, [rbp + 8 + 8 + RTHCPHYS_CB] ; pVMCBPhys (64 bits physical address)

    ;/* Restore Guest's general purpose registers. */
    ;/* RAX is loaded from the VMCB by VMRUN */
    mov     rbx, qword [rsi + CPUMCTX.ebx]
    mov     rcx, qword [rsi + CPUMCTX.ecx]
    mov     rdx, qword [rsi + CPUMCTX.edx]
    mov     rdi, qword [rsi + CPUMCTX.edi]
    mov     rbp, qword [rsi + CPUMCTX.ebp]
    mov     r8, qword [rsi + CPUMCTX.r8]
    mov     r9, qword [rsi + CPUMCTX.r9]
    mov     r10, qword [rsi + CPUMCTX.r10]
    mov     r11, qword [rsi + CPUMCTX.r11]
    mov     r12, qword [rsi + CPUMCTX.r12]
    mov     r13, qword [rsi + CPUMCTX.r13]
    mov     r14, qword [rsi + CPUMCTX.r14]
    mov     r15, qword [rsi + CPUMCTX.r15]
    mov     rsi, qword [rsi + CPUMCTX.esi] ; rsi last: it was the pCtx pointer

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
    clgi
    sti

    ; load guest fs, gs, sysenter msr etc
    vmload
    ; run the VM
    vmrun

    ;/* RAX is in the VMCB already; we can use it here. */

    ; save guest fs, gs, sysenter msr etc
    vmsave

    ; load host fs, gs, sysenter msr etc
    pop     rax                         ; pushed above (pVMCBHostPhys)
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

    pop     rax                         ; pCtx

    mov     qword [rax + CPUMCTX.ebx], rbx
    mov     qword [rax + CPUMCTX.ecx], rcx
    mov     qword [rax + CPUMCTX.edx], rdx
    mov     qword [rax + CPUMCTX.esi], rsi
    mov     qword [rax + CPUMCTX.edi], rdi
    mov     qword [rax + CPUMCTX.ebp], rbp
    mov     qword [rax + CPUMCTX.r8], r8
    mov     qword [rax + CPUMCTX.r9], r9
    mov     qword [rax + CPUMCTX.r10], r10
    mov     qword [rax + CPUMCTX.r11], r11
    mov     qword [rax + CPUMCTX.r12], r12
    mov     qword [rax + CPUMCTX.r13], r13
    mov     qword [rax + CPUMCTX.r14], r14
    mov     qword [rax + CPUMCTX.r15], r15

    mov     eax, VINF_SUCCESS

    popf                                ; restore host rflags
    pop     rbp                         ; restore host frame pointer
    ret
ENDPROC SVMGCVMRun64
522
;/**
; * Saves the guest FPU context
; *
; * Temporarily clears CR0.TS and CR0.EM so that fxsave does not raise #NM,
; * then restores the original CR0.
; *
; * @returns VBox status code (always VINF_SUCCESS)
; * @param pCtx Guest context [rsi]
; */
BEGINPROC HWACCMSaveGuestFPU64
    mov     rax, cr0
    mov     rcx, rax                    ; save old CR0
    and     rax, ~(X86_CR0_TS | X86_CR0_EM) ; allow FPU access without faulting
    mov     cr0, rax

    fxsave  [rsi + CPUMCTX.fpu]         ; dump FPU/SSE state into the context

    mov     cr0, rcx                    ; and restore old CR0 again

    mov     eax, VINF_SUCCESS
    ret
ENDPROC HWACCMSaveGuestFPU64
542
;/**
; * Saves the guest debug context (DR0-3, DR6)
; *
; * The CPUMCTX.dr array is indexed by debug register number, hence the
; * 6*8 offset for DR6 (slots 4/5 are unused aliases).
; *
; * @returns VBox status code (always VINF_SUCCESS)
; * @param pCtx Guest context [rsi]
; */
BEGINPROC HWACCMSaveGuestDebug64
    mov     rax, dr0
    mov     qword [rsi + CPUMCTX.dr + 0*8], rax
    mov     rax, dr1
    mov     qword [rsi + CPUMCTX.dr + 1*8], rax
    mov     rax, dr2
    mov     qword [rsi + CPUMCTX.dr + 2*8], rax
    mov     rax, dr3
    mov     qword [rsi + CPUMCTX.dr + 3*8], rax
    mov     rax, dr6
    mov     qword [rsi + CPUMCTX.dr + 6*8], rax
    mov     eax, VINF_SUCCESS
    ret
ENDPROC HWACCMSaveGuestDebug64
563
;/**
; * Dummy callback handler
; *
; * Simply returns the first stack parameter; used to smoke-test the
; * 32->64-bit switcher's calling convention.
; * NOTE(review): the [rsp+12]/[rsp+20] offsets below suggest 4-byte stack
; * slots for params 2-5 — confirm against the switcher's push sequence.
; *
; * @returns VBox status code (param1)
; * @param param1 Parameter 1 [rsp+8]
; * @param param2 Parameter 2 [rsp+12]
; * @param param3 Parameter 3 [rsp+16]
; * @param param4 Parameter 4 [rsp+20]
; * @param param5 Parameter 5 [rsp+24]
; * @param pCtx Guest context [rsi]
; */
BEGINPROC HWACCMTestSwitcher64
    mov     eax, [rsp+8]                ; return param1 as the status code
    ret
ENDPROC HWACCMTestSwitcher64
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette