VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac @45875

Last change on this file since 45875 was 45875, checked in by vboxsync, 12 years ago

VMM/VMMR0: Distinguish better between invalid VMXON, VMCS pointers and VMCS pointers passed to VMLAUNCH/VMRESUME.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 36.5 KB
 
; $Id: LegacyandAMD64.mac 45875 2013-05-02 12:52:33Z vboxsync $
;; @file
; VMM - World Switchers, 32-bit to AMD64 intermediate context.
;
; This is used for running 64-bit guests on 32-bit hosts, not
; normal raw-mode. All the code involved is contained in this
; file.
;

;
; Copyright (C) 2006-2013 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;


;*******************************************************************************
;* Defined Constants And Macros *
;*******************************************************************************
;; @note These values are from the HM64ON32OP enum in hm.h.
%define HM64ON32OP_VMXRCStartVM64 1
%define HM64ON32OP_SVMRCVMRun64 2
%define HM64ON32OP_HMRCSaveGuestFPU64 3
%define HM64ON32OP_HMRCSaveGuestDebug64 4
%define HM64ON32OP_HMRCTestSwitcher64 5
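; These operation codes arrive in CPUMCPU.Hyper.eip; vmmR0ToRawModeAsm below loads that
; into r9 and compares it against these values to pick the 64-bit worker to call.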

;; Stubs for making OS/2 compile (though, not work).
%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
    %macro vmwrite 2,
    int3
    %endmacro
    %define vmlaunch int3
    %define vmresume int3
    %define vmsave int3
    %define vmload int3
    %define vmrun int3
    %define clgi int3
    %define stgi int3
    %macro invlpga 2,
    int3
    %endmacro
%endif

;; Debug options
;%define DEBUG_STUFF 1
;%define STRICT_IF 1


;*******************************************************************************
;* Header Files *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "iprt/x86.mac"
%include "VBox/err.mac"
%include "VBox/apic.mac"

%include "VBox/vmm/cpum.mac"
%include "VBox/vmm/stam.mac"
%include "VBox/vmm/vm.mac"
%include "VBox/vmm/hm_vmx.mac"
%include "CPUMInternal.mac"
%include "HMInternal.mac"
%include "VMMSwitcher.mac"


;
; Start the fixup records
; We collect the fixups in the .data section as we go along
; It is therefore VITAL that no-one is using the .data section
; for anything else between 'Start' and 'End'.
;
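; Each FIXUP invocation below emits a record into this data section and leaves a
; placeholder (typically 0ffffffffh) in the code; the Relocate callback referenced by
; the VMMSWITCHERDEF at the end of this file is what presumably patches those
; placeholders with the final addresses and offsets when the switcher is installed.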
BEGINDATA
GLOBALNAME Fixups



BEGINCODE
GLOBALNAME Start

BITS 32

;;
; The C interface.
; @param [esp + 04h] Param 1 - VM handle
; @param [esp + 08h] Param 2 - Offset from VM::CPUM to the CPUMCPU
; structure for the calling EMT.
;
BEGINPROC vmmR0ToRawMode
%ifdef DEBUG_STUFF
    COM32_S_NEWLINE
    COM32_S_CHAR '^'
%endif

%ifdef VBOX_WITH_STATISTICS
    ;
    ; Switcher stats.
    ;
    FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToGC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx
%endif

    push ebp
    mov ebp, [esp + 12] ; CPUMCPU offset

    ; turn off interrupts
    pushf
    cli

    ;
    ; Call worker.
    ;
    FIXUP FIX_HC_CPUM_OFF, 1, 0
    mov edx, 0ffffffffh
    push cs ; allow for far return and restore cs correctly.
    call NAME(vmmR0ToRawModeAsm)

%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
    CPUM_FROM_CPUMCPU(edx)
    ; Restore blocked Local APIC NMI vectors
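    ; fApicDisVectors holds one bit per LVT entry that was masked on the way in:
    ; bit 0 = LINT0, bit 1 = LINT1, bit 2 = performance counter, bit 3 = thermal.
    ; The shr/jnc pairs below unmask them again in that order.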
    mov ecx, [edx + CPUM.fApicDisVectors]
    mov edx, [edx + CPUM.pvApicBase]
    shr ecx, 1
    jnc gth_nolint0
    and dword [edx + APIC_REG_LVT_LINT0], ~APIC_REG_LVT_MASKED
gth_nolint0:
    shr ecx, 1
    jnc gth_nolint1
    and dword [edx + APIC_REG_LVT_LINT1], ~APIC_REG_LVT_MASKED
gth_nolint1:
    shr ecx, 1
    jnc gth_nopc
    and dword [edx + APIC_REG_LVT_PC], ~APIC_REG_LVT_MASKED
gth_nopc:
    shr ecx, 1
    jnc gth_notherm
    and dword [edx + APIC_REG_LVT_THMR], ~APIC_REG_LVT_MASKED
gth_notherm:
%endif

    ; restore original flags
    popf
    pop ebp

%ifdef VBOX_WITH_STATISTICS
    ;
    ; Switcher stats.
    ;
    FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToHC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx
%endif

    ret

ENDPROC vmmR0ToRawMode

; *****************************************************************************
; vmmR0ToRawModeAsm
;
; Phase one of the switch from host to guest context (host MMU context)
;
; INPUT:
; - edx virtual address of CPUM structure (valid in host context)
; - ebp offset of the CPUMCPU structure relative to CPUM.
;
; USES/DESTROYS:
; - eax, ecx, edx, esi
;
; ASSUMPTION:
; - current CS and DS selectors are wide open
;
; *****************************************************************************
ALIGNCODE(16)
BEGINPROC vmmR0ToRawModeAsm
    ;;
    ;; Save CPU host context
    ;; Skip eax, edx and ecx as these are not preserved over calls.
    ;;
    CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    ; phys address of scratch page
    mov eax, dword [edx + CPUMCPU.Guest.dr + 4*8]
    mov cr2, eax

    mov dword [edx + CPUMCPU.Guest.dr + 4*8], 1
%endif

    ; general registers.
    mov [edx + CPUMCPU.Host.ebx], ebx
    mov [edx + CPUMCPU.Host.edi], edi
    mov [edx + CPUMCPU.Host.esi], esi
    mov [edx + CPUMCPU.Host.esp], esp
    mov [edx + CPUMCPU.Host.ebp], ebp
    ; selectors.
    mov [edx + CPUMCPU.Host.ds], ds
    mov [edx + CPUMCPU.Host.es], es
    mov [edx + CPUMCPU.Host.fs], fs
    mov [edx + CPUMCPU.Host.gs], gs
    mov [edx + CPUMCPU.Host.ss], ss
    ; special registers.
    DEBUG32_S_CHAR('s')
    DEBUG32_S_CHAR(';')
    sldt [edx + CPUMCPU.Host.ldtr]
    sidt [edx + CPUMCPU.Host.idtr]
    sgdt [edx + CPUMCPU.Host.gdtr]
    str [edx + CPUMCPU.Host.tr]

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov dword [edx + CPUMCPU.Guest.dr + 4*8], 2
%endif

%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
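    ; Mask any local APIC LVT entries that are programmed to deliver NMIs so none can be
    ; delivered while we are in the 64-bit intermediate context; the entries we masked are
    ; recorded (via edi) in CPUM.fApicDisVectors and unmasked again in vmmR0ToRawMode.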
    DEBUG32_S_CHAR('f')
    DEBUG32_S_CHAR(';')
    CPUM_FROM_CPUMCPU_WITH_OFFSET edx, ebp
    mov ebx, [edx + CPUM.pvApicBase]
    or ebx, ebx
    jz htg_noapic
    mov eax, [ebx + APIC_REG_LVT_LINT0]
    mov ecx, eax
    and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp ecx, APIC_REG_LVT_MODE_NMI
    jne htg_nolint0
    or edi, 0x01
    or eax, APIC_REG_LVT_MASKED
    mov [ebx + APIC_REG_LVT_LINT0], eax
    mov eax, [ebx + APIC_REG_LVT_LINT0] ; write completion
htg_nolint0:
    mov eax, [ebx + APIC_REG_LVT_LINT1]
    mov ecx, eax
    and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp ecx, APIC_REG_LVT_MODE_NMI
    jne htg_nolint1
    or edi, 0x02
    or eax, APIC_REG_LVT_MASKED
    mov [ebx + APIC_REG_LVT_LINT1], eax
    mov eax, [ebx + APIC_REG_LVT_LINT1] ; write completion
htg_nolint1:
    mov eax, [ebx + APIC_REG_LVT_PC]
    mov ecx, eax
    and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp ecx, APIC_REG_LVT_MODE_NMI
    jne htg_nopc
    or edi, 0x04
    or eax, APIC_REG_LVT_MASKED
    mov [ebx + APIC_REG_LVT_PC], eax
    mov eax, [ebx + APIC_REG_LVT_PC] ; write completion
htg_nopc:
    mov eax, [ebx + APIC_REG_VERSION]
    shr eax, 16
    cmp al, 5
    jb htg_notherm
    mov eax, [ebx + APIC_REG_LVT_THMR]
    mov ecx, eax
    and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp ecx, APIC_REG_LVT_MODE_NMI
    jne htg_notherm
    or edi, 0x08
    or eax, APIC_REG_LVT_MASKED
    mov [ebx + APIC_REG_LVT_THMR], eax
    mov eax, [ebx + APIC_REG_LVT_THMR] ; write completion
htg_notherm:
    mov [edx + CPUM.fApicDisVectors], edi
htg_noapic:
    CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
%endif

    ; control registers.
    mov eax, cr0
    mov [edx + CPUMCPU.Host.cr0], eax
    ; Skip cr2; assume the host OS doesn't stuff things in cr2. (safe)
    mov eax, cr3
    mov [edx + CPUMCPU.Host.cr3], eax
    mov eax, cr4
    mov [edx + CPUMCPU.Host.cr4], eax
    DEBUG32_S_CHAR('c')
    DEBUG32_S_CHAR(';')

    ; save the host EFER msr
    mov ebx, edx
    mov ecx, MSR_K6_EFER
    rdmsr
    mov [ebx + CPUMCPU.Host.efer], eax
    mov [ebx + CPUMCPU.Host.efer + 4], edx
    mov edx, ebx
    DEBUG32_S_CHAR('e')
    DEBUG32_S_CHAR(';')

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov dword [edx + CPUMCPU.Guest.dr + 4*8], 3
%endif

    ; Load new gdt so we can do a far jump after going into 64 bits mode
    lgdt [edx + CPUMCPU.Hyper.gdtr]

    DEBUG32_S_CHAR('g')
    DEBUG32_S_CHAR('!')
%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov dword [edx + CPUMCPU.Guest.dr + 4*8], 4
%endif

    ;;
    ;; Load Intermediate memory context.
    ;;
    FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
    mov eax, 0ffffffffh
    mov cr3, eax
    DEBUG32_CHAR('?')

    ;;
    ;; Jump to identity mapped location
    ;;
    FIXUP FIX_HC_2_ID_NEAR_REL, 1, NAME(IDEnterTarget) - NAME(Start)
    jmp near NAME(IDEnterTarget)


    ; We're now on identity mapped pages!
ALIGNCODE(16)
GLOBALNAME IDEnterTarget
    DEBUG32_CHAR('1')
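    ; Standard long-mode entry sequence: paging off, CR4.PAE on, long-mode page tables into
    ; CR3, EFER.LME set, paging back on, then a far jump into a 64-bit code segment.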

    ; 1. Disable paging.
    mov ebx, cr0
    and ebx, ~X86_CR0_PG
    mov cr0, ebx
    DEBUG32_CHAR('2')

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov eax, cr2
    mov dword [eax], 3
%endif

    ; 2. Enable PAE.
    mov ecx, cr4
    or ecx, X86_CR4_PAE
    mov cr4, ecx

    ; 3. Load long mode intermediate CR3.
    FIXUP FIX_INTER_AMD64_CR3, 1
    mov ecx, 0ffffffffh
    mov cr3, ecx
    DEBUG32_CHAR('3')

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov eax, cr2
    mov dword [eax], 4
%endif

    ; 4. Enable long mode.
    mov esi, edx
    mov ecx, MSR_K6_EFER
    rdmsr
    FIXUP FIX_EFER_OR_MASK, 1
    or eax, 0ffffffffh
    and eax, ~(MSR_K6_EFER_FFXSR) ; turn off fast fxsave/fxrstor (skipping xmm regs)
    wrmsr
    mov edx, esi
    DEBUG32_CHAR('4')

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov eax, cr2
    mov dword [eax], 5
%endif

    ; 5. Enable paging.
    or ebx, X86_CR0_PG
    ; Disable ring 0 write protection too
    and ebx, ~X86_CR0_WRITE_PROTECT
    mov cr0, ebx
    DEBUG32_CHAR('5')

    ; Jump from compatibility mode to 64-bit mode.
    FIXUP FIX_ID_FAR32_TO_64BIT_MODE, 1, NAME(IDEnter64Mode) - NAME(Start)
    jmp 0ffffh:0fffffffeh

    ;
    ; We're in 64-bit mode (ds, ss, es, fs, gs are all bogus).
BITS 64
ALIGNCODE(16)
NAME(IDEnter64Mode):
    DEBUG64_CHAR('6')
    jmp [NAME(pICEnterTarget) wrt rip]

; 64-bit jump target
NAME(pICEnterTarget):
FIXUP FIX_HC_64BIT_NOCHECK, 0, NAME(ICEnterTarget) - NAME(Start)
dq 0ffffffffffffffffh

; 64-bit pCpum address.
NAME(pCpumIC):
FIXUP FIX_GC_64_BIT_CPUM_OFF, 0, 0
dq 0ffffffffffffffffh

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
NAME(pMarker):
db 'Switch_marker'
%endif

    ;
    ; When we arrive here we're in 64 bits mode in the intermediate context
    ;
ALIGNCODE(16)
GLOBALNAME ICEnterTarget
    ; Load CPUM pointer into rdx
    mov rdx, [NAME(pCpumIC) wrt rip]
    CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp

    mov rax, cs
    mov ds, rax
    mov es, rax

    ; Invalidate fs & gs
    mov rax, 0
    mov fs, rax
    mov gs, rax

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 5
%endif

    ; Setup stack.
    DEBUG64_CHAR('7')
    mov rsp, 0
    mov eax, [rdx + CPUMCPU.Hyper.ss.Sel]
    mov ss, ax
    mov esp, [rdx + CPUMCPU.Hyper.esp]

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 6
%endif


    ; load the hypervisor function address
    mov r9, [rdx + CPUMCPU.Hyper.eip]
    DEBUG64_S_CHAR('8')

    ; Check if we need to restore the guest FPU state
    mov esi, [rdx + CPUMCPU.fUseFlags] ; esi == use flags.
    test esi, CPUM_SYNC_FPU_STATE
    jz near gth_fpu_no

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 7
%endif

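    ; Clear CR0.TS and CR0.EM around the fxrstor below so it cannot raise #NM or #UD, then
    ; put the original CR0 back.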
    mov rax, cr0
    mov rcx, rax ; save old CR0
    and rax, ~(X86_CR0_TS | X86_CR0_EM)
    mov cr0, rax
    fxrstor [rdx + CPUMCPU.Guest.fpu]
    mov cr0, rcx ; and restore old CR0 again

    and dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_FPU_STATE

gth_fpu_no:
    ; Check if we need to restore the guest debug state
    test esi, CPUM_SYNC_DEBUG_STATE
    jz near gth_debug_no

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 8
%endif

    mov rax, qword [rdx + CPUMCPU.Guest.dr + 0*8]
    mov dr0, rax
    mov rax, qword [rdx + CPUMCPU.Guest.dr + 1*8]
    mov dr1, rax
    mov rax, qword [rdx + CPUMCPU.Guest.dr + 2*8]
    mov dr2, rax
    mov rax, qword [rdx + CPUMCPU.Guest.dr + 3*8]
    mov dr3, rax
    mov rax, qword [rdx + CPUMCPU.Guest.dr + 6*8]
    mov dr6, rax ; not required for AMD-V

    and dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_DEBUG_STATE

gth_debug_no:

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 9
%endif

    ; parameter for all helper functions (pCtx)
    DEBUG64_CHAR('9')
    lea rsi, [rdx + CPUMCPU.Guest.fpu]
    lea rax, [gth_return wrt rip]
    push rax ; return address

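    ; Dispatch on the operation code passed in via CPUMCPU.Hyper.eip (now in r9d); every
    ; worker returns through the address pushed above to gth_return with a VBox status code in eax.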
    cmp r9d, HM64ON32OP_VMXRCStartVM64
    jz NAME(VMXRCStartVM64)
    cmp r9d, HM64ON32OP_SVMRCVMRun64
    jz NAME(SVMRCVMRun64)
    cmp r9d, HM64ON32OP_HMRCSaveGuestFPU64
    jz NAME(HMRCSaveGuestFPU64)
    cmp r9d, HM64ON32OP_HMRCSaveGuestDebug64
    jz NAME(HMRCSaveGuestDebug64)
    cmp r9d, HM64ON32OP_HMRCTestSwitcher64
    jz NAME(HMRCTestSwitcher64)
    mov eax, VERR_HM_INVALID_HM64ON32OP
gth_return:
    DEBUG64_CHAR('r')

    ; Load CPUM pointer into rdx
    mov rdx, [NAME(pCpumIC) wrt rip]
    CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 10
%endif

    ; Save the return code
    mov dword [rdx + CPUMCPU.u32RetCode], eax

    ; now let's switch back
    jmp NAME(vmmRCToHostAsm) ; rax = returncode.

ENDPROC vmmR0ToRawModeAsm




;
;
; HM code (used to be HMRCA.asm at one point).
; HM code (used to be HMRCA.asm at one point).
; HM code (used to be HMRCA.asm at one point).
;
;



; Load the corresponding guest MSR (trashes rdx & rcx)
%macro LOADGUESTMSR 2
    mov rcx, %1
    mov edx, dword [rsi + %2 + 4]
    mov eax, dword [rsi + %2]
    wrmsr
%endmacro

; Save a guest MSR (trashes rdx & rcx)
; Only really useful for gs kernel base as that one can be changed behind our back (swapgs)
%macro SAVEGUESTMSR 2
    mov rcx, %1
    rdmsr
    mov dword [rsi + %2], eax
    mov dword [rsi + %2 + 4], edx
%endmacro
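; Both macros follow the rdmsr/wrmsr convention: ecx selects the MSR and edx:eax holds
; the 64-bit value, which is why each CPUMCTX field is accessed as two 32-bit halves.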

;; @def MYPUSHSEGS
; Macro saving all segment registers on the stack.
; @param 1 full width register name
%macro MYPUSHSEGS 1
    mov %1, es
    push %1
    mov %1, ds
    push %1
%endmacro

;; @def MYPOPSEGS
; Macro restoring all segment registers on the stack
; @param 1 full width register name
%macro MYPOPSEGS 1
    pop %1
    mov ds, %1
    pop %1
    mov es, %1
%endmacro


;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (64 bits guest mode)
; *
; * @returns VBox status code
; * @param HCPhysCpuPage VMXON physical address [rsp+8]
; * @param HCPhysVmcs VMCS physical address [rsp+16]
; * @param pCache VMCS cache [rsp+24]
; * @param pCtx Guest context (rsi)
; */
BEGINPROC VMXRCStartVM64
    push rbp
    mov rbp, rsp
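    ; After the push rbp above, [rbp + 8] holds the return address, so the first stack
    ; parameter (HCPhysCpuPage) lives at [rbp + 8 + 8] and HCPhysVmcs at [rbp + 16 + 8].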

    ; Make sure VT-x instructions are allowed
    mov rax, cr4
    or rax, X86_CR4_VMXE
    mov cr4, rax

    ;/* Enter VMX Root Mode */
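    ; Note: vmxon and vmptrld signal failure through RFLAGS: CF=1 means VMfailInvalid and
    ; ZF=1 means VMfailValid; both are clear on success, hence the jnc/jnz checks below.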
    vmxon [rbp + 8 + 8]
    jnc .vmxon_success
    mov rax, VERR_VMX_INVALID_VMXON_PTR
    jmp .vmstart64_vmxon_failed

.vmxon_success:
    jnz .vmxon_success2
    mov rax, VERR_VMX_VMXON_FAILED
    jmp .vmstart64_vmxon_failed

.vmxon_success2:
    ; Activate the VMCS pointer
    vmptrld [rbp + 16 + 8]
    jnc .vmptrld_success
    mov rax, VERR_VMX_INVALID_VMCS_PTR
    jmp .vmstart64_vmxoff_end

.vmptrld_success:
    jnz .vmptrld_success2
    mov rax, VERR_VMX_VMPTRLD_FAILED
    jmp .vmstart64_vmxoff_end

.vmptrld_success2:

    ; Save the VMCS pointer on the stack
    push qword [rbp + 16 + 8];

    ;/* Save segment registers */
    MYPUSHSEGS rax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    ; Flush the VMCS write cache first (before any other vmreads/vmwrites!)
    mov rbx, [rbp + 24 + 8] ; pCache

    %ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov qword [rbx + VMCSCACHE.uPos], 2
    %endif

    %ifdef DEBUG
    mov rax, [rbp + 8 + 8] ; HCPhysCpuPage
    mov [rbx + VMCSCACHE.TestIn.HCPhysCpuPage], rax
    mov rax, [rbp + 16 + 8] ; HCPhysVmcs
    mov [rbx + VMCSCACHE.TestIn.HCPhysVmcs], rax
    mov [rbx + VMCSCACHE.TestIn.pCache], rbx
    mov [rbx + VMCSCACHE.TestIn.pCtx], rsi
    %endif

    mov ecx, [rbx + VMCSCACHE.Write.cValidEntries]
    cmp ecx, 0
    je .no_cached_writes
    mov rdx, rcx
    mov rcx, 0
    jmp .cached_write

ALIGN(16)
.cached_write:
    mov eax, [rbx + VMCSCACHE.Write.aField + rcx*4]
    vmwrite rax, qword [rbx + VMCSCACHE.Write.aFieldVal + rcx*8]
    inc rcx
    cmp rcx, rdx
    jl .cached_write

    mov dword [rbx + VMCSCACHE.Write.cValidEntries], 0
.no_cached_writes:

    %ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov qword [rbx + VMCSCACHE.uPos], 3
    %endif
    ; Save the pCache pointer
    push rbx
%endif

    ; Save the host state that's relevant in the temporary 64 bits mode
    mov rdx, cr0
    mov eax, VMX_VMCS_HOST_CR0
    vmwrite rax, rdx

    mov rdx, cr3
    mov eax, VMX_VMCS_HOST_CR3
    vmwrite rax, rdx

    mov rdx, cr4
    mov eax, VMX_VMCS_HOST_CR4
    vmwrite rax, rdx

    mov rdx, cs
    mov eax, VMX_VMCS_HOST_FIELD_CS
    vmwrite rax, rdx

    mov rdx, ss
    mov eax, VMX_VMCS_HOST_FIELD_SS
    vmwrite rax, rdx

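    ; In 64-bit mode sgdt stores a 2-byte limit followed by the 8-byte base, so the GDTR
    ; base written to the VMCS below is read from [rsp + 2].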
    sub rsp, 8*2
    sgdt [rsp]
    mov eax, VMX_VMCS_HOST_GDTR_BASE
    vmwrite rax, [rsp+2]
    add rsp, 8*2

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov qword [rbx + VMCSCACHE.uPos], 4
%endif

    ; hopefully we can ignore TR (we restore it anyway on the way back to 32 bits mode)

    ;/* First we have to save some final CPU context registers. */
    lea rdx, [.vmlaunch64_done wrt rip]
    mov rax, VMX_VMCS_HOST_RIP ;/* return address (too difficult to continue after VMLAUNCH?) */
    vmwrite rax, rdx
    ;/* Note: assumes success... */

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; * - EFLAGS (reset to RT_BIT(1); not relevant)
    ; *
    ; */

%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    ; Load the guest LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
    LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
    LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
    LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
    LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
%else
%ifdef VBOX_WITH_OLD_VTX_CODE
    ; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
    LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
%endif
%endif

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov qword [rbx + VMCSCACHE.uPos], 5
%endif

    ; Save the pCtx pointer
    push rsi

    ; Restore CR2
    mov rbx, qword [rsi + CPUMCTX.cr2]
    mov rdx, cr2
    cmp rdx, rbx
    je .skipcr2write64
    mov cr2, rbx

.skipcr2write64:
    mov eax, VMX_VMCS_HOST_RSP
    vmwrite rax, rsp
    ;/* Note: assumes success... */
    ;/* Don't mess with ESP anymore!! */

    ;/* Restore Guest's general purpose registers. */
    mov rax, qword [rsi + CPUMCTX.eax]
    mov rbx, qword [rsi + CPUMCTX.ebx]
    mov rcx, qword [rsi + CPUMCTX.ecx]
    mov rdx, qword [rsi + CPUMCTX.edx]
    mov rbp, qword [rsi + CPUMCTX.ebp]
    mov r8, qword [rsi + CPUMCTX.r8]
    mov r9, qword [rsi + CPUMCTX.r9]
    mov r10, qword [rsi + CPUMCTX.r10]
    mov r11, qword [rsi + CPUMCTX.r11]
    mov r12, qword [rsi + CPUMCTX.r12]
    mov r13, qword [rsi + CPUMCTX.r13]
    mov r14, qword [rsi + CPUMCTX.r14]
    mov r15, qword [rsi + CPUMCTX.r15]

    ;/* Restore rdi & rsi. */
    mov rdi, qword [rsi + CPUMCTX.edi]
    mov rsi, qword [rsi + CPUMCTX.esi]

    vmlaunch
    jmp .vmlaunch64_done; ;/* here if vmlaunch detected a failure. */

ALIGNCODE(16)
.vmlaunch64_done:
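    ; We get here either through the VMCS HOST_RIP we programmed above (after a VM-exit) or
    ; by falling through when vmlaunch itself failed; CF/ZF distinguish an invalid VMCS
    ; pointer (VMfailInvalid) from a failed launch (VMfailValid).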
    jc near .vmstart64_invalid_vmcs_ptr
    jz near .vmstart64_start_failed

    push rdi
    mov rdi, [rsp + 8] ; pCtx

    mov qword [rdi + CPUMCTX.eax], rax
    mov qword [rdi + CPUMCTX.ebx], rbx
    mov qword [rdi + CPUMCTX.ecx], rcx
    mov qword [rdi + CPUMCTX.edx], rdx
    mov qword [rdi + CPUMCTX.esi], rsi
    mov qword [rdi + CPUMCTX.ebp], rbp
    mov qword [rdi + CPUMCTX.r8], r8
    mov qword [rdi + CPUMCTX.r9], r9
    mov qword [rdi + CPUMCTX.r10], r10
    mov qword [rdi + CPUMCTX.r11], r11
    mov qword [rdi + CPUMCTX.r12], r12
    mov qword [rdi + CPUMCTX.r13], r13
    mov qword [rdi + CPUMCTX.r14], r14
    mov qword [rdi + CPUMCTX.r15], r15
%ifndef VBOX_WITH_OLD_VTX_CODE
    mov rax, cr2
    mov qword [rdi + CPUMCTX.cr2], rax
%endif

    pop rax ; the guest edi we pushed above
    mov qword [rdi + CPUMCTX.edi], rax

    pop rsi ; pCtx (needed in rsi by the macros below)

%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    SAVEGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    SAVEGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
    SAVEGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
    SAVEGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
%else
%ifdef VBOX_WITH_OLD_VTX_CODE
    ; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
    SAVEGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
%endif
%endif

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    pop rdi ; saved pCache

    %ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov dword [rdi + VMCSCACHE.uPos], 7
    %endif
    %ifdef DEBUG
    mov [rdi + VMCSCACHE.TestOut.pCache], rdi
    mov [rdi + VMCSCACHE.TestOut.pCtx], rsi
    mov rax, cr8
    mov [rdi + VMCSCACHE.TestOut.cr8], rax
    %endif

    mov ecx, [rdi + VMCSCACHE.Read.cValidEntries]
    cmp ecx, 0 ; can't happen
    je .no_cached_reads
    jmp .cached_read

ALIGN(16)
.cached_read:
    dec rcx
    mov eax, [rdi + VMCSCACHE.Read.aField + rcx*4]
    vmread qword [rdi + VMCSCACHE.Read.aFieldVal + rcx*8], rax
    cmp rcx, 0
    jnz .cached_read
.no_cached_reads:

    %ifdef VBOX_WITH_OLD_VTX_CODE
    ; Save CR2 for EPT
    mov rax, cr2
    mov [rdi + VMCSCACHE.cr2], rax
    %endif
    %ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov dword [rdi + VMCSCACHE.uPos], 8
    %endif
%endif

    ; Restore segment registers
    MYPOPSEGS rax

    mov eax, VINF_SUCCESS

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov dword [rdi + VMCSCACHE.uPos], 9
%endif
.vmstart64_end:

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    %ifdef DEBUG
    mov rdx, [rsp] ; HCPhysVmcs
    mov [rdi + VMCSCACHE.TestOut.HCPhysVmcs], rdx
    %endif
%endif

    ; Write back the data and disable the VMCS
    vmclear qword [rsp] ;Pushed pVMCS
    add rsp, 8

.vmstart64_vmxoff_end:
    ; Disable VMX root mode
    vmxoff
.vmstart64_vmxon_failed:
%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    %ifdef DEBUG
    cmp eax, VINF_SUCCESS
    jne .skip_flags_save

    pushf
    pop rdx
    mov [rdi + VMCSCACHE.TestOut.eflags], rdx
    %ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov dword [rdi + VMCSCACHE.uPos], 12
    %endif
.skip_flags_save:
    %endif
%endif
    pop rbp
    ret


.vmstart64_invalid_vmcs_ptr:
    pop rsi ; pCtx (needed in rsi by the macros below)

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    pop rdi ; pCache
    %ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov dword [rdi + VMCSCACHE.uPos], 10
    %endif

    %ifdef DEBUG
    mov [rdi + VMCSCACHE.TestOut.pCache], rdi
    mov [rdi + VMCSCACHE.TestOut.pCtx], rsi
    %endif
%endif

    ; Restore segment registers
    MYPOPSEGS rax

    ; Restore all general purpose host registers.
    mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
    jmp .vmstart64_end

.vmstart64_start_failed:
    pop rsi ; pCtx (needed in rsi by the macros below)

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    pop rdi ; pCache

    %ifdef DEBUG
    mov [rdi + VMCSCACHE.TestOut.pCache], rdi
    mov [rdi + VMCSCACHE.TestOut.pCtx], rsi
    %endif
    %ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov dword [rdi + VMCSCACHE.uPos], 11
    %endif
%endif

    ; Restore segment registers
    MYPOPSEGS rax

    ; Restore all general purpose host registers.
    mov eax, VERR_VMX_UNABLE_TO_START_VM
    jmp .vmstart64_end
ENDPROC VMXRCStartVM64


;/**
; * Prepares for and executes VMRUN (64 bits guests)
; *
; * @returns VBox status code
; * @param HCPhysVMCBHost Physical address of host VMCB (rsp+8)
; * @param HCPhysVMCB Physical address of guest VMCB (rsp+16)
; * @param pCtx Guest context (rsi)
; */
BEGINPROC SVMRCVMRun64
    push rbp
    mov rbp, rsp
    pushf

    ;/* Manual save and restore:
    ; * - General purpose registers except RIP, RSP, RAX
    ; *
    ; * Trashed:
    ; * - CR2 (we don't care)
    ; * - LDTR (reset to 0)
    ; * - DRx (presumably not changed at all)
    ; * - DR7 (reset to 0x400)
    ; */

    ;/* Save the Guest CPU context pointer. */
    push rsi ; push for saving the state at the end

    ; save host fs, gs, sysenter msr etc
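    ; Note: vmsave, vmload and vmrun all take the physical address of a VMCB in rax, which
    ; is why the host and guest VMCB addresses are staged through rax around them.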
    mov rax, [rbp + 8 + 8] ; pVMCBHostPhys (64 bits physical address)
    push rax ; save for the vmload after vmrun
    vmsave

    ; setup eax for VMLOAD
    mov rax, [rbp + 8 + 8 + RTHCPHYS_CB] ; pVMCBPhys (64 bits physical address)

    ;/* Restore Guest's general purpose registers. */
    ;/* RAX is loaded from the VMCB by VMRUN */
    mov rbx, qword [rsi + CPUMCTX.ebx]
    mov rcx, qword [rsi + CPUMCTX.ecx]
    mov rdx, qword [rsi + CPUMCTX.edx]
    mov rdi, qword [rsi + CPUMCTX.edi]
    mov rbp, qword [rsi + CPUMCTX.ebp]
    mov r8, qword [rsi + CPUMCTX.r8]
    mov r9, qword [rsi + CPUMCTX.r9]
    mov r10, qword [rsi + CPUMCTX.r10]
    mov r11, qword [rsi + CPUMCTX.r11]
    mov r12, qword [rsi + CPUMCTX.r12]
    mov r13, qword [rsi + CPUMCTX.r13]
    mov r14, qword [rsi + CPUMCTX.r14]
    mov r15, qword [rsi + CPUMCTX.r15]
    mov rsi, qword [rsi + CPUMCTX.esi]

    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
    clgi
    sti

    ; load guest fs, gs, sysenter msr etc
    vmload
    ; run the VM
    vmrun

    ;/* RAX is in the VMCB already; we can use it here. */

    ; save guest fs, gs, sysenter msr etc
    vmsave

    ; load host fs, gs, sysenter msr etc
    pop rax ; pushed above
    vmload

    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
    cli
    stgi

    pop rax ; pCtx

    mov qword [rax + CPUMCTX.ebx], rbx
    mov qword [rax + CPUMCTX.ecx], rcx
    mov qword [rax + CPUMCTX.edx], rdx
    mov qword [rax + CPUMCTX.esi], rsi
    mov qword [rax + CPUMCTX.edi], rdi
    mov qword [rax + CPUMCTX.ebp], rbp
    mov qword [rax + CPUMCTX.r8], r8
    mov qword [rax + CPUMCTX.r9], r9
    mov qword [rax + CPUMCTX.r10], r10
    mov qword [rax + CPUMCTX.r11], r11
    mov qword [rax + CPUMCTX.r12], r12
    mov qword [rax + CPUMCTX.r13], r13
    mov qword [rax + CPUMCTX.r14], r14
    mov qword [rax + CPUMCTX.r15], r15

    mov eax, VINF_SUCCESS

    popf
    pop rbp
    ret
ENDPROC SVMRCVMRun64

;/**
; * Saves the guest FPU context
; *
; * @returns VBox status code
; * @param pCtx Guest context [rsi]
; */
BEGINPROC HMRCSaveGuestFPU64
    mov rax, cr0
    mov rcx, rax ; save old CR0
    and rax, ~(X86_CR0_TS | X86_CR0_EM)
    mov cr0, rax

    fxsave [rsi + CPUMCTX.fpu]

    mov cr0, rcx ; and restore old CR0 again

    mov eax, VINF_SUCCESS
    ret
ENDPROC HMRCSaveGuestFPU64

;/**
; * Saves the guest debug context (DR0-3, DR6)
; *
; * @returns VBox status code
; * @param pCtx Guest context [rsi]
; */
BEGINPROC HMRCSaveGuestDebug64
    mov rax, dr0
    mov qword [rsi + CPUMCTX.dr + 0*8], rax
    mov rax, dr1
    mov qword [rsi + CPUMCTX.dr + 1*8], rax
    mov rax, dr2
    mov qword [rsi + CPUMCTX.dr + 2*8], rax
    mov rax, dr3
    mov qword [rsi + CPUMCTX.dr + 3*8], rax
    mov rax, dr6
    mov qword [rsi + CPUMCTX.dr + 6*8], rax
    mov eax, VINF_SUCCESS
    ret
ENDPROC HMRCSaveGuestDebug64

;/**
; * Dummy callback handler
; *
; * @returns VBox status code
; * @param param1 Parameter 1 [rsp+8]
; * @param param2 Parameter 2 [rsp+12]
; * @param param3 Parameter 3 [rsp+16]
; * @param param4 Parameter 4 [rsp+20]
; * @param param5 Parameter 5 [rsp+24]
; * @param pCtx Guest context [rsi]
; */
BEGINPROC HMRCTestSwitcher64
    mov eax, [rsp+8]
    ret
ENDPROC HMRCTestSwitcher64




;
;
; Back to switcher code.
; Back to switcher code.
; Back to switcher code.
;
;



;;
; Trampoline for doing a call when starting the hypervisor execution.
;
; Push any arguments to the routine.
; Push the argument frame size (cArg * 4).
; Push the call target (_cdecl convention).
; Push the address of this routine.
;
;
BITS 64
ALIGNCODE(16)
BEGINPROC vmmRCCallTrampoline
%ifdef DEBUG_STUFF
    COM64_S_CHAR 'c'
    COM64_S_CHAR 't'
    COM64_S_CHAR '!'
%endif
    int3
ENDPROC vmmRCCallTrampoline


;;
; The C interface.
;
BITS 64
ALIGNCODE(16)
BEGINPROC vmmRCToHost
%ifdef DEBUG_STUFF
    push rsi
    COM_NEWLINE
    COM_CHAR 'b'
    COM_CHAR 'a'
    COM_CHAR 'c'
    COM_CHAR 'k'
    COM_CHAR '!'
    COM_NEWLINE
    pop rsi
%endif
    int3
ENDPROC vmmRCToHost

;;
; vmmRCToHostAsm
;
; This is an alternative entry point which we'll be using
; when we have saved the guest state already or we haven't
; been messing with the guest at all.
;
; @param eax Return code.
; @uses eax, edx, ecx (or it may use them in the future)
;
BITS 64
ALIGNCODE(16)
BEGINPROC vmmRCToHostAsm
NAME(vmmRCToHostAsmNoReturn):
    ;; We're still in the intermediate memory context!

    ;;
    ;; Switch to compatibility mode, placing ourselves in identity mapped code.
    ;;
    jmp far [NAME(fpIDEnterTarget) wrt rip]

; 16:32 Pointer to IDEnterTarget.
NAME(fpIDEnterTarget):
    FIXUP FIX_ID_32BIT, 0, NAME(IDExitTarget) - NAME(Start)
dd 0
    FIXUP FIX_HYPER_CS, 0
dd 0

    ; We're now on identity mapped pages!
ALIGNCODE(16)
GLOBALNAME IDExitTarget
BITS 32
    DEBUG32_CHAR('1')

    ; 1. Deactivate long mode by turning off paging.
    mov ebx, cr0
    and ebx, ~X86_CR0_PG
    mov cr0, ebx
    DEBUG32_CHAR('2')

    ; 2. Load intermediate page table.
    FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
    mov edx, 0ffffffffh
    mov cr3, edx
    DEBUG32_CHAR('3')

    ; 3. Disable long mode.
    mov ecx, MSR_K6_EFER
    rdmsr
    DEBUG32_CHAR('5')
    and eax, ~(MSR_K6_EFER_LME)
    wrmsr
    DEBUG32_CHAR('6')

%ifndef NEED_PAE_ON_HOST
    ; 3b. Disable PAE.
    mov eax, cr4
    and eax, ~X86_CR4_PAE
    mov cr4, eax
    DEBUG32_CHAR('7')
%endif

    ; 4. Enable paging.
    or ebx, X86_CR0_PG
    mov cr0, ebx
    jmp short just_a_jump
just_a_jump:
    DEBUG32_CHAR('8')

    ;;
    ;; 5. Jump to guest code mapping of the code and load the Hypervisor CS.
    ;;
    FIXUP FIX_ID_2_HC_NEAR_REL, 1, NAME(ICExitTarget) - NAME(Start)
    jmp near NAME(ICExitTarget)

    ;;
    ;; When we arrive at this label we're at the
    ;; intermediate mapping of the switching code.
    ;;
BITS 32
ALIGNCODE(16)
GLOBALNAME ICExitTarget
    DEBUG32_CHAR('8')

    ; load the hypervisor data selector into ds & es
    FIXUP FIX_HYPER_DS, 1
    mov eax, 0ffffh
    mov ds, eax
    mov es, eax

    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov edx, 0ffffffffh
    CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
    mov esi, [edx + CPUMCPU.Host.cr3]
    mov cr3, esi

    ;; now we're in host memory context, let's restore regs
    FIXUP FIX_HC_CPUM_OFF, 1, 0
    mov edx, 0ffffffffh
    CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp

    ; restore the host EFER
    mov ebx, edx
    mov ecx, MSR_K6_EFER
    mov eax, [ebx + CPUMCPU.Host.efer]
    mov edx, [ebx + CPUMCPU.Host.efer + 4]
    wrmsr
    mov edx, ebx

    ; activate host gdt and idt
    lgdt [edx + CPUMCPU.Host.gdtr]
    DEBUG32_CHAR('0')
    lidt [edx + CPUMCPU.Host.idtr]
    DEBUG32_CHAR('1')

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
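    ; ltr faults on a TSS descriptor that is already marked busy; the busy bit is bit 1 of
    ; the type field, i.e. bit 9 (0200h) of the descriptor's second dword, cleared below.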
    movzx eax, word [edx + CPUMCPU.Host.tr] ; eax <- TR
    and al, 0F8h ; mask away TI and RPL bits, get descriptor offset.
    add eax, [edx + CPUMCPU.Host.gdtr + 2] ; eax <- GDTR.address + descriptor offset.
    and dword [eax + 4], ~0200h ; clear busy flag (2nd type2 bit)
    ltr word [edx + CPUMCPU.Host.tr]

    ; activate ldt
    DEBUG32_CHAR('2')
    lldt [edx + CPUMCPU.Host.ldtr]

    ; Restore segment registers
    mov eax, [edx + CPUMCPU.Host.ds]
    mov ds, eax
    mov eax, [edx + CPUMCPU.Host.es]
    mov es, eax
    mov eax, [edx + CPUMCPU.Host.fs]
    mov fs, eax
    mov eax, [edx + CPUMCPU.Host.gs]
    mov gs, eax
    ; restore stack
    lss esp, [edx + CPUMCPU.Host.esp]

    ; Control registers.
    mov ecx, [edx + CPUMCPU.Host.cr4]
    mov cr4, ecx
    mov ecx, [edx + CPUMCPU.Host.cr0]
    mov cr0, ecx
    ;mov ecx, [edx + CPUMCPU.Host.cr2] ; assumes this is waste of time.
    ;mov cr2, ecx

    ; restore general registers.
    mov edi, [edx + CPUMCPU.Host.edi]
    mov esi, [edx + CPUMCPU.Host.esi]
    mov ebx, [edx + CPUMCPU.Host.ebx]
    mov ebp, [edx + CPUMCPU.Host.ebp]

    ; store the return code in eax
    mov eax, [edx + CPUMCPU.u32RetCode]
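    ; The far return below also pops the cs that vmmR0ToRawMode pushed just before calling
    ; vmmR0ToRawModeAsm, restoring the host code selector together with eip.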
    retf
ENDPROC vmmRCToHostAsm


GLOBALNAME End
;
; The description string (in the text section).
;
NAME(Description):
    db SWITCHER_DESCRIPTION
    db 0

extern NAME(Relocate)

;
; End the fixup records.
;
BEGINDATA
    db FIX_THE_END ; final entry.
GLOBALNAME FixupsEnd

;;
; The switcher definition structure.
ALIGNDATA(16)
GLOBALNAME Def
    istruc VMMSWITCHERDEF
        at VMMSWITCHERDEF.pvCode, RTCCPTR_DEF NAME(Start)
        at VMMSWITCHERDEF.pvFixups, RTCCPTR_DEF NAME(Fixups)
        at VMMSWITCHERDEF.pszDesc, RTCCPTR_DEF NAME(Description)
        at VMMSWITCHERDEF.pfnRelocate, RTCCPTR_DEF NAME(Relocate)
        at VMMSWITCHERDEF.enmType, dd SWITCHER_TYPE
        at VMMSWITCHERDEF.cbCode, dd NAME(End) - NAME(Start)
        at VMMSWITCHERDEF.offR0ToRawMode, dd NAME(vmmR0ToRawMode) - NAME(Start)
        at VMMSWITCHERDEF.offRCToHost, dd NAME(vmmRCToHost) - NAME(Start)
        at VMMSWITCHERDEF.offRCCallTrampoline, dd NAME(vmmRCCallTrampoline) - NAME(Start)
        at VMMSWITCHERDEF.offRCToHostAsm, dd NAME(vmmRCToHostAsm) - NAME(Start)
        at VMMSWITCHERDEF.offRCToHostAsmNoReturn, dd NAME(vmmRCToHostAsmNoReturn) - NAME(Start)
        ; disasm help
        at VMMSWITCHERDEF.offHCCode0, dd 0
        at VMMSWITCHERDEF.cbHCCode0, dd NAME(IDEnterTarget) - NAME(Start)
        at VMMSWITCHERDEF.offHCCode1, dd NAME(ICExitTarget) - NAME(Start)
        at VMMSWITCHERDEF.cbHCCode1, dd NAME(End) - NAME(ICExitTarget)
        at VMMSWITCHERDEF.offIDCode0, dd NAME(IDEnterTarget) - NAME(Start)
        at VMMSWITCHERDEF.cbIDCode0, dd NAME(ICEnterTarget) - NAME(IDEnterTarget)
        at VMMSWITCHERDEF.offIDCode1, dd NAME(IDExitTarget) - NAME(Start)
        at VMMSWITCHERDEF.cbIDCode1, dd NAME(ICExitTarget) - NAME(Start)
        at VMMSWITCHERDEF.offGCCode, dd 0
        at VMMSWITCHERDEF.cbGCCode, dd 0

    iend
