VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac@ 47652

Last change on this file since 47652 was 47652, checked in by vboxsync, 12 years ago

VMM: Removed all VBOX_WITH_OLD_[VTX|AMDV]_CODE bits.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 35.9 KB
 
1; $Id: LegacyandAMD64.mac 47652 2013-08-09 14:56:17Z vboxsync $
2;; @file
3; VMM - World Switchers, 32-bit to AMD64 intermediate context.
4;
5; This is used for running 64-bit guests on 32-bit hosts, not for
6; normal raw-mode. All the code involved is contained in this
7; file.
8;
9
10;
11; Copyright (C) 2006-2013 Oracle Corporation
12;
13; This file is part of VirtualBox Open Source Edition (OSE), as
14; available from http://www.virtualbox.org. This file is free software;
15; you can redistribute it and/or modify it under the terms of the GNU
16; General Public License (GPL) as published by the Free Software
17; Foundation, in version 2 as it comes in the "COPYING" file of the
18; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
19; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
20;
21
22
23;*******************************************************************************
24;* Defined Constants And Macros *
25;*******************************************************************************
26;; @note These values are from the HM64ON32OP enum in hm.h.
27%define HM64ON32OP_VMXRCStartVM64 1
28%define HM64ON32OP_SVMRCVMRun64 2
29%define HM64ON32OP_HMRCSaveGuestFPU64 3
30%define HM64ON32OP_HMRCSaveGuestDebug64 4
31%define HM64ON32OP_HMRCTestSwitcher64 5
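;
; These operation codes arrive in CPUMCPU.Hyper.eip and are compared against
; r9d by the dispatch sequence at the end of vmmR0ToRawModeAsm to pick the
; 64-bit worker to run: VMXRCStartVM64, SVMRCVMRun64, HMRCSaveGuestFPU64,
; HMRCSaveGuestDebug64 or HMRCTestSwitcher64. An unknown value yields
; VERR_HM_INVALID_HM64ON32OP.
;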
32
33;; Stubs for making OS/2 compile (though, not work).
34%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
35 %macro vmwrite 2,
36 int3
37 %endmacro
38 %define vmlaunch int3
39 %define vmresume int3
40 %define vmsave int3
41 %define vmload int3
42 %define vmrun int3
43 %define clgi int3
44 %define stgi int3
45 %macro invlpga 2,
46 int3
47 %endmacro
48%endif
49
50;; Debug options
51;%define DEBUG_STUFF 1
52;%define STRICT_IF 1
53
54
55;*******************************************************************************
56;* Header Files *
57;*******************************************************************************
58%include "VBox/asmdefs.mac"
59%include "iprt/x86.mac"
60%include "VBox/err.mac"
61%include "VBox/apic.mac"
62
63%include "VBox/vmm/cpum.mac"
64%include "VBox/vmm/stam.mac"
65%include "VBox/vmm/vm.mac"
66%include "VBox/vmm/hm_vmx.mac"
67%include "CPUMInternal.mac"
68%include "HMInternal.mac"
69%include "VMMSwitcher.mac"
70
71
72;
73; Start the fixup records
74; We collect the fixups in the .data section as we go along
75; It is therefore VITAL that no-one is using the .data section
76; for anything else between 'Start' and 'End'.
77;
78BEGINDATA
79GLOBALNAME Fixups
80
81
82
83BEGINCODE
84GLOBALNAME Start
85
86BITS 32
87
88;;
89; The C interface.
90; @param [esp + 04h] Param 1 - VM handle
91; @param [esp + 08h] Param 2 - Offset from VM::CPUM to the CPUMCPU
92; structure for the calling EMT.
93;
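;
; Summary of the flow below: interrupts are disabled, cs is pushed so the
; switcher can come back with a far return, and vmmR0ToRawModeAsm does the
; actual world switch. On the way back, any local APIC LVT entries that were
; masked on entry (NMI-mode LINT0/LINT1, perf counter, thermal) are unmasked
; again, the saved flags are restored and the switcher profiling sample is
; stopped.
;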
94BEGINPROC vmmR0ToRawMode
95%ifdef DEBUG_STUFF
96 COM32_S_NEWLINE
97 COM32_S_CHAR '^'
98%endif
99
100%ifdef VBOX_WITH_STATISTICS
101 ;
102 ; Switcher stats.
103 ;
104 FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToGC
105 mov edx, 0ffffffffh
106 STAM_PROFILE_ADV_START edx
107%endif
108
109 push ebp
110 mov ebp, [esp + 12] ; CPUMCPU offset
111
112 ; turn off interrupts
113 pushf
114 cli
115
116 ;
117 ; Call worker.
118 ;
119 FIXUP FIX_HC_CPUM_OFF, 1, 0
120 mov edx, 0ffffffffh
121 push cs ; allow for far return and restore cs correctly.
122 call NAME(vmmR0ToRawModeAsm)
123
124%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
125 CPUM_FROM_CPUMCPU(edx)
126 ; Restore blocked Local APIC NMI vectors
127 mov ecx, [edx + CPUM.fApicDisVectors]
128 mov edx, [edx + CPUM.pvApicBase]
129 shr ecx, 1
130 jnc gth_nolint0
131 and dword [edx + APIC_REG_LVT_LINT0], ~APIC_REG_LVT_MASKED
132gth_nolint0:
133 shr ecx, 1
134 jnc gth_nolint1
135 and dword [edx + APIC_REG_LVT_LINT1], ~APIC_REG_LVT_MASKED
136gth_nolint1:
137 shr ecx, 1
138 jnc gth_nopc
139 and dword [edx + APIC_REG_LVT_PC], ~APIC_REG_LVT_MASKED
140gth_nopc:
141 shr ecx, 1
142 jnc gth_notherm
143 and dword [edx + APIC_REG_LVT_THMR], ~APIC_REG_LVT_MASKED
144gth_notherm:
145%endif
146
147 ; restore original flags
148 popf
149 pop ebp
150
151%ifdef VBOX_WITH_STATISTICS
152 ;
153 ; Switcher stats.
154 ;
155 FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToHC
156 mov edx, 0ffffffffh
157 STAM_PROFILE_ADV_STOP edx
158%endif
159
160 ret
161
162ENDPROC vmmR0ToRawMode
163
164; *****************************************************************************
165; vmmR0ToRawModeAsm
166;
167; Phase one of the switch from host to guest context (host MMU context)
168;
169; INPUT:
170; - edx virtual address of CPUM structure (valid in host context)
171; - ebp offset of the CPUMCPU structure relative to CPUM.
172;
173; USES/DESTROYS:
174; - eax, ecx, edx, esi
175;
176; ASSUMPTION:
177; - current CS and DS selectors are wide open
178;
179; *****************************************************************************
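;
; Rough outline of what follows:
;   1. Save the host context (general registers, segment registers,
;      ldtr/idtr/gdtr/tr, cr0/cr3/cr4 and EFER) into CPUMCPU.Host.
;   2. Optionally mask local APIC LVT entries programmed for NMI delivery,
;      recording which ones in CPUM.fApicDisVectors.
;   3. Load the hypervisor GDT and the intermediate CR3, then jump to the
;      identity-mapped IDEnterTarget.
;   4. There: disable paging, enable PAE, load the long-mode intermediate CR3,
;      set EFER.LME, re-enable paging and far-jump into 64-bit mode.
;   5. At ICEnterTarget (64-bit, intermediate context): reload the segment
;      registers, switch to the hypervisor stack, optionally restore the guest
;      FPU/debug state, and dispatch to the requested HM64ON32OP worker; its
;      return code is stored in CPUMCPU.u32RetCode before switching back via
;      vmmRCToHostAsm.
;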
180ALIGNCODE(16)
181BEGINPROC vmmR0ToRawModeAsm
182 ;;
183 ;; Save CPU host context
184 ;; Skip eax, edx and ecx as these are not preserved over calls.
185 ;;
186 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
187%ifdef VBOX_WITH_CRASHDUMP_MAGIC
188 ; phys address of scratch page
189 mov eax, dword [edx + CPUMCPU.Guest.dr + 4*8]
190 mov cr2, eax
191
192 mov dword [edx + CPUMCPU.Guest.dr + 4*8], 1
193%endif
194
195 ; general registers.
196 mov [edx + CPUMCPU.Host.ebx], ebx
197 mov [edx + CPUMCPU.Host.edi], edi
198 mov [edx + CPUMCPU.Host.esi], esi
199 mov [edx + CPUMCPU.Host.esp], esp
200 mov [edx + CPUMCPU.Host.ebp], ebp
201 ; selectors.
202 mov [edx + CPUMCPU.Host.ds], ds
203 mov [edx + CPUMCPU.Host.es], es
204 mov [edx + CPUMCPU.Host.fs], fs
205 mov [edx + CPUMCPU.Host.gs], gs
206 mov [edx + CPUMCPU.Host.ss], ss
207 ; special registers.
208 DEBUG32_S_CHAR('s')
209 DEBUG32_S_CHAR(';')
210 sldt [edx + CPUMCPU.Host.ldtr]
211 sidt [edx + CPUMCPU.Host.idtr]
212 sgdt [edx + CPUMCPU.Host.gdtr]
213 str [edx + CPUMCPU.Host.tr]
214
215%ifdef VBOX_WITH_CRASHDUMP_MAGIC
216 mov dword [edx + CPUMCPU.Guest.dr + 4*8], 2
217%endif
218
219%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
220 DEBUG32_S_CHAR('f')
221 DEBUG32_S_CHAR(';')
222 CPUM_FROM_CPUMCPU_WITH_OFFSET edx, ebp
223 mov ebx, [edx + CPUM.pvApicBase]
224 or ebx, ebx
225 jz htg_noapic
226 mov eax, [ebx + APIC_REG_LVT_LINT0]
227 mov ecx, eax
228 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
229 cmp ecx, APIC_REG_LVT_MODE_NMI
230 jne htg_nolint0
231 or edi, 0x01
232 or eax, APIC_REG_LVT_MASKED
233 mov [ebx + APIC_REG_LVT_LINT0], eax
234 mov eax, [ebx + APIC_REG_LVT_LINT0] ; write completion
235htg_nolint0:
236 mov eax, [ebx + APIC_REG_LVT_LINT1]
237 mov ecx, eax
238 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
239 cmp ecx, APIC_REG_LVT_MODE_NMI
240 jne htg_nolint1
241 or edi, 0x02
242 or eax, APIC_REG_LVT_MASKED
243 mov [ebx + APIC_REG_LVT_LINT1], eax
244 mov eax, [ebx + APIC_REG_LVT_LINT1] ; write completion
245htg_nolint1:
246 mov eax, [ebx + APIC_REG_LVT_PC]
247 mov ecx, eax
248 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
249 cmp ecx, APIC_REG_LVT_MODE_NMI
250 jne htg_nopc
251 or edi, 0x04
252 or eax, APIC_REG_LVT_MASKED
253 mov [ebx + APIC_REG_LVT_PC], eax
254 mov eax, [ebx + APIC_REG_LVT_PC] ; write completion
255htg_nopc:
256 mov eax, [ebx + APIC_REG_VERSION]
257 shr eax, 16
258 cmp al, 5
259 jb htg_notherm
260 mov eax, [ebx + APIC_REG_LVT_THMR]
261 mov ecx, eax
262 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
263 cmp ecx, APIC_REG_LVT_MODE_NMI
264 jne htg_notherm
265 or edi, 0x08
266 or eax, APIC_REG_LVT_MASKED
267 mov [ebx + APIC_REG_LVT_THMR], eax
268 mov eax, [ebx + APIC_REG_LVT_THMR] ; write completion
269htg_notherm:
270 mov [edx + CPUM.fApicDisVectors], edi
271htg_noapic:
272 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
273%endif
274
275 ; control registers.
276 mov eax, cr0
277 mov [edx + CPUMCPU.Host.cr0], eax
278 ; Skip cr2; assume the host OS doesn't stuff things in cr2. (safe)
279 mov eax, cr3
280 mov [edx + CPUMCPU.Host.cr3], eax
281 mov eax, cr4
282 mov [edx + CPUMCPU.Host.cr4], eax
283 DEBUG32_S_CHAR('c')
284 DEBUG32_S_CHAR(';')
285
286 ; save the host EFER msr
287 mov ebx, edx
288 mov ecx, MSR_K6_EFER
289 rdmsr
290 mov [ebx + CPUMCPU.Host.efer], eax
291 mov [ebx + CPUMCPU.Host.efer + 4], edx
292 mov edx, ebx
293 DEBUG32_S_CHAR('e')
294 DEBUG32_S_CHAR(';')
295
296%ifdef VBOX_WITH_CRASHDUMP_MAGIC
297 mov dword [edx + CPUMCPU.Guest.dr + 4*8], 3
298%endif
299
300 ; Load new gdt so we can do a far jump after going into 64 bits mode
301 lgdt [edx + CPUMCPU.Hyper.gdtr]
302
303 DEBUG32_S_CHAR('g')
304 DEBUG32_S_CHAR('!')
305%ifdef VBOX_WITH_CRASHDUMP_MAGIC
306 mov dword [edx + CPUMCPU.Guest.dr + 4*8], 4
307%endif
308
309 ;;
310 ;; Load Intermediate memory context.
311 ;;
312 FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
313 mov eax, 0ffffffffh
314 mov cr3, eax
315 DEBUG32_CHAR('?')
316
317 ;;
318 ;; Jump to identity mapped location
319 ;;
320 FIXUP FIX_HC_2_ID_NEAR_REL, 1, NAME(IDEnterTarget) - NAME(Start)
321 jmp near NAME(IDEnterTarget)
322
323
324 ; We're now on identity mapped pages!
325ALIGNCODE(16)
326GLOBALNAME IDEnterTarget
327 DEBUG32_CHAR('1')
328
329 ; 1. Disable paging.
330 mov ebx, cr0
331 and ebx, ~X86_CR0_PG
332 mov cr0, ebx
333 DEBUG32_CHAR('2')
334
335%ifdef VBOX_WITH_CRASHDUMP_MAGIC
336 mov eax, cr2
337 mov dword [eax], 3
338%endif
339
340 ; 2. Enable PAE.
341 mov ecx, cr4
342 or ecx, X86_CR4_PAE
343 mov cr4, ecx
344
345 ; 3. Load long mode intermediate CR3.
346 FIXUP FIX_INTER_AMD64_CR3, 1
347 mov ecx, 0ffffffffh
348 mov cr3, ecx
349 DEBUG32_CHAR('3')
350
351%ifdef VBOX_WITH_CRASHDUMP_MAGIC
352 mov eax, cr2
353 mov dword [eax], 4
354%endif
355
356 ; 4. Enable long mode.
357 mov esi, edx
358 mov ecx, MSR_K6_EFER
359 rdmsr
360 FIXUP FIX_EFER_OR_MASK, 1
361 or eax, 0ffffffffh
362 and eax, ~(MSR_K6_EFER_FFXSR) ; turn off fast fxsave/fxrstor (skipping xmm regs)
363 wrmsr
364 mov edx, esi
365 DEBUG32_CHAR('4')
366
367%ifdef VBOX_WITH_CRASHDUMP_MAGIC
368 mov eax, cr2
369 mov dword [eax], 5
370%endif
371
372 ; 5. Enable paging.
373 or ebx, X86_CR0_PG
374 ; Disable ring 0 write protection too
375 and ebx, ~X86_CR0_WRITE_PROTECT
376 mov cr0, ebx
377 DEBUG32_CHAR('5')
378
379 ; Jump from compatibility mode to 64-bit mode.
380 FIXUP FIX_ID_FAR32_TO_64BIT_MODE, 1, NAME(IDEnter64Mode) - NAME(Start)
381 jmp 0ffffh:0fffffffeh
382
383 ;
384 ; We're in 64-bit mode (ds, ss, es, fs, gs are all bogus).
385BITS 64
386ALIGNCODE(16)
387NAME(IDEnter64Mode):
388 DEBUG64_CHAR('6')
389 jmp [NAME(pICEnterTarget) wrt rip]
390
391; 64-bit jump target
392NAME(pICEnterTarget):
393FIXUP FIX_HC_64BIT_NOCHECK, 0, NAME(ICEnterTarget) - NAME(Start)
394dq 0ffffffffffffffffh
395
396; 64-bit pCpum address.
397NAME(pCpumIC):
398FIXUP FIX_GC_64_BIT_CPUM_OFF, 0, 0
399dq 0ffffffffffffffffh
400
401%ifdef VBOX_WITH_CRASHDUMP_MAGIC
402NAME(pMarker):
403db 'Switch_marker'
404%endif
405
406 ;
407 ; When we arrive here we're in 64 bits mode in the intermediate context
408 ;
409ALIGNCODE(16)
410GLOBALNAME ICEnterTarget
411 ; Load CPUM pointer into rdx
412 mov rdx, [NAME(pCpumIC) wrt rip]
413 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
414
415 mov rax, cs
416 mov ds, rax
417 mov es, rax
418
419 ; Invalidate fs & gs
420 mov rax, 0
421 mov fs, rax
422 mov gs, rax
423
424%ifdef VBOX_WITH_CRASHDUMP_MAGIC
425 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 5
426%endif
427
428 ; Setup stack.
429 DEBUG64_CHAR('7')
430 mov rsp, 0
431 mov eax, [rdx + CPUMCPU.Hyper.ss.Sel]
432 mov ss, ax
433 mov esp, [rdx + CPUMCPU.Hyper.esp]
434
435%ifdef VBOX_WITH_CRASHDUMP_MAGIC
436 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 6
437%endif
438
439
440 ; load the hypervisor function address
441 mov r9, [rdx + CPUMCPU.Hyper.eip]
442 DEBUG64_S_CHAR('8')
443
444 ; Check if we need to restore the guest FPU state
445 mov esi, [rdx + CPUMCPU.fUseFlags] ; esi == use flags.
446 test esi, CPUM_SYNC_FPU_STATE
447 jz near gth_fpu_no
448
449%ifdef VBOX_WITH_CRASHDUMP_MAGIC
450 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 7
451%endif
452
453 mov rax, cr0
454 mov rcx, rax ; save old CR0
455 and rax, ~(X86_CR0_TS | X86_CR0_EM)
456 mov cr0, rax
457 fxrstor [rdx + CPUMCPU.Guest.fpu]
458 mov cr0, rcx ; and restore old CR0 again
459
460 and dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_FPU_STATE
461
462gth_fpu_no:
463 ; Check if we need to restore the guest debug state
464 test esi, CPUM_SYNC_DEBUG_STATE
465 jz near gth_debug_no
466
467%ifdef VBOX_WITH_CRASHDUMP_MAGIC
468 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 8
469%endif
470
471 mov rax, qword [rdx + CPUMCPU.Guest.dr + 0*8]
472 mov dr0, rax
473 mov rax, qword [rdx + CPUMCPU.Guest.dr + 1*8]
474 mov dr1, rax
475 mov rax, qword [rdx + CPUMCPU.Guest.dr + 2*8]
476 mov dr2, rax
477 mov rax, qword [rdx + CPUMCPU.Guest.dr + 3*8]
478 mov dr3, rax
479 mov rax, qword [rdx + CPUMCPU.Guest.dr + 6*8]
480 mov dr6, rax ; not required for AMD-V
481
482 and dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_DEBUG_STATE
483
484gth_debug_no:
485
486%ifdef VBOX_WITH_CRASHDUMP_MAGIC
487 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 9
488%endif
489
490 ; parameter for all helper functions (pCtx)
491 DEBUG64_CHAR('9')
492 lea rsi, [rdx + CPUMCPU.Guest.fpu]
493 lea rax, [gth_return wrt rip]
494 push rax ; return address
495
496 cmp r9d, HM64ON32OP_VMXRCStartVM64
497 jz NAME(VMXRCStartVM64)
498 cmp r9d, HM64ON32OP_SVMRCVMRun64
499 jz NAME(SVMRCVMRun64)
500 cmp r9d, HM64ON32OP_HMRCSaveGuestFPU64
501 jz NAME(HMRCSaveGuestFPU64)
502 cmp r9d, HM64ON32OP_HMRCSaveGuestDebug64
503 jz NAME(HMRCSaveGuestDebug64)
504 cmp r9d, HM64ON32OP_HMRCTestSwitcher64
505 jz NAME(HMRCTestSwitcher64)
506 mov eax, VERR_HM_INVALID_HM64ON32OP
507gth_return:
508 DEBUG64_CHAR('r')
509
510 ; Load CPUM pointer into rdx
511 mov rdx, [NAME(pCpumIC) wrt rip]
512 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
513
514%ifdef VBOX_WITH_CRASHDUMP_MAGIC
515 mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 10
516%endif
517
518 ; Save the return code
519 mov dword [rdx + CPUMCPU.u32RetCode], eax
520
521 ; now let's switch back
522 jmp NAME(vmmRCToHostAsm) ; rax = returncode.
523
524ENDPROC vmmR0ToRawModeAsm
525
526
527
528
529;
530;
531; HM code (used to be HMRCA.asm at one point).
532; HM code (used to be HMRCA.asm at one point).
533; HM code (used to be HMRCA.asm at one point).
534;
535;
536
537
538
539; Load the corresponding guest MSR (trashes rdx & rcx)
540%macro LOADGUESTMSR 2
541 mov rcx, %1
542 mov edx, dword [rsi + %2 + 4]
543 mov eax, dword [rsi + %2]
544 wrmsr
545%endmacro
546
547; Save a guest MSR (trashes rdx & rcx)
548; Only really useful for gs kernel base as that one can be changed behind our back (swapgs)
549%macro SAVEGUESTMSR 2
550 mov rcx, %1
551 rdmsr
552 mov dword [rsi + %2], eax
553 mov dword [rsi + %2 + 4], edx
554%endmacro
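;
; For illustration, the invocation LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
; used further down expands to roughly:
;       mov     rcx, MSR_K8_LSTAR
;       mov     edx, dword [rsi + CPUMCTX.msrLSTAR + 4]
;       mov     eax, dword [rsi + CPUMCTX.msrLSTAR]
;       wrmsr
; i.e. rsi must point at the guest CPUMCTX and the edx:eax pair holds the
; 64-bit MSR value. SAVEGUESTMSR is the mirror image using rdmsr.
;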
555
556;; @def MYPUSHSEGS
557; Macro saving all segment registers on the stack.
558; @param 1 full width register name
559%macro MYPUSHSEGS 1
560 mov %1, es
561 push %1
562 mov %1, ds
563 push %1
564%endmacro
565
566;; @def MYPOPSEGS
567; Macro restoring all segment registers on the stack
568; @param 1 full width register name
569%macro MYPOPSEGS 1
570 pop %1
571 mov ds, %1
572 pop %1
573 mov es, %1
574%endmacro
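;
; Note that only es and ds are pushed/popped here, presumably because fs and
; gs were already invalidated at ICEnterTarget and the base that matters
; (KERNEL_GS_BASE) is handled through the LOADGUESTMSR/SAVEGUESTMSR
; invocations in VMXRCStartVM64.
;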
575
576
577;/**
578; * Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode)
579; *
580; * @returns VBox status code
581; * @param HCPhysCpuPage VMXON physical address [rsp+8]
582; * @param HCPhysVmcs VMCS physical address [rsp+16]
583; * @param pCache VMCS cache [rsp+24]
584; * @param pCtx Guest context (rsi)
585; */
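;
; Sequence below, in short: enable CR4.VMXE and enter VMX root mode (vmxon),
; activate the VMCS (vmptrld), flush any cached VMCS writes, write the host
; state fields (CR0/CR3/CR4, CS/SS, GDTR base, HOST_RIP/HOST_RSP), optionally
; load the guest LSTAR/STAR/SFMASK/KERNEL_GSBASE MSRs, load the guest general
; purpose registers and CR2, and vmlaunch. On VM-exit the guest registers and
; CR2 are written back to pCtx, the read cache is refilled via vmread, the
; guest MSRs are saved, and the VMCS is vmclear'ed before leaving VMX root
; mode with vmxoff.
;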
586BEGINPROC VMXRCStartVM64
587 push rbp
588 mov rbp, rsp
589
590 ; Make sure VT-x instructions are allowed.
591 mov rax, cr4
592 or rax, X86_CR4_VMXE
593 mov cr4, rax
594
595 ; Enter VMX Root Mode.
596 vmxon [rbp + 8 + 8]
597 jnc .vmxon_success
598 mov rax, VERR_VMX_INVALID_VMXON_PTR
599 jmp .vmstart64_vmxon_failed
600
601.vmxon_success:
602 jnz .vmxon_success2
603 mov rax, VERR_VMX_VMXON_FAILED
604 jmp .vmstart64_vmxon_failed
605
606.vmxon_success2:
607 ; Activate the VMCS pointer
608 vmptrld [rbp + 16 + 8]
609 jnc .vmptrld_success
610 mov rax, VERR_VMX_INVALID_VMCS_PTR
611 jmp .vmstart64_vmxoff_end
612
613.vmptrld_success:
614 jnz .vmptrld_success2
615 mov rax, VERR_VMX_VMPTRLD_FAILED
616 jmp .vmstart64_vmxoff_end
617
618.vmptrld_success2:
619
620 ; Save the VMCS pointer on the stack
621 push qword [rbp + 16 + 8];
622
623 ; Save segment registers.
624 MYPUSHSEGS rax
625
626%ifdef VMX_USE_CACHED_VMCS_ACCESSES
627 ; Flush the VMCS write cache first (before any other vmreads/vmwrites!).
628 mov rbx, [rbp + 24 + 8] ; pCache
629
630 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
631 mov qword [rbx + VMCSCACHE.uPos], 2
632 %endif
633
634 %ifdef DEBUG
635 mov rax, [rbp + 8 + 8] ; HCPhysCpuPage
636 mov [rbx + VMCSCACHE.TestIn.HCPhysCpuPage], rax
637 mov rax, [rbp + 16 + 8] ; HCPhysVmcs
638 mov [rbx + VMCSCACHE.TestIn.HCPhysVmcs], rax
639 mov [rbx + VMCSCACHE.TestIn.pCache], rbx
640 mov [rbx + VMCSCACHE.TestIn.pCtx], rsi
641 %endif
642
643 mov ecx, [rbx + VMCSCACHE.Write.cValidEntries]
644 cmp ecx, 0
645 je .no_cached_writes
646 mov rdx, rcx
647 mov rcx, 0
648 jmp .cached_write
649
650ALIGN(16)
651.cached_write:
652 mov eax, [rbx + VMCSCACHE.Write.aField + rcx*4]
653 vmwrite rax, qword [rbx + VMCSCACHE.Write.aFieldVal + rcx*8]
654 inc rcx
655 cmp rcx, rdx
656 jl .cached_write
657
658 mov dword [rbx + VMCSCACHE.Write.cValidEntries], 0
659.no_cached_writes:
660
661 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
662 mov qword [rbx + VMCSCACHE.uPos], 3
663 %endif
664 ; Save the pCache pointer.
665 push rbx
666%endif
667
668 ; Save the host state that's relevant in the temporary 64-bit mode.
669 mov rdx, cr0
670 mov eax, VMX_VMCS_HOST_CR0
671 vmwrite rax, rdx
672
673 mov rdx, cr3
674 mov eax, VMX_VMCS_HOST_CR3
675 vmwrite rax, rdx
676
677 mov rdx, cr4
678 mov eax, VMX_VMCS_HOST_CR4
679 vmwrite rax, rdx
680
681 mov rdx, cs
682 mov eax, VMX_VMCS_HOST_FIELD_CS
683 vmwrite rax, rdx
684
685 mov rdx, ss
686 mov eax, VMX_VMCS_HOST_FIELD_SS
687 vmwrite rax, rdx
688
689 sub rsp, 8*2
690 sgdt [rsp]
691 mov eax, VMX_VMCS_HOST_GDTR_BASE
692 vmwrite rax, [rsp+2]
693 add rsp, 8*2
694
695%ifdef VBOX_WITH_CRASHDUMP_MAGIC
696 mov qword [rbx + VMCSCACHE.uPos], 4
697%endif
698
699 ; Hopefully we can ignore TR (we restore it anyway on the way back to 32-bit mode).
700
701 ; First we have to save some final CPU context registers.
702 lea rdx, [.vmlaunch64_done wrt rip]
703 mov rax, VMX_VMCS_HOST_RIP ; Return address (too difficult to continue after VMLAUNCH?).
704 vmwrite rax, rdx
705 ; Note: assumes success!
706
707 ; Manual save and restore:
708 ; - General purpose registers except RIP, RSP
709 ;
710 ; Trashed:
711 ; - CR2 (we don't care)
712 ; - LDTR (reset to 0)
713 ; - DRx (presumably not changed at all)
714 ; - DR7 (reset to 0x400)
715 ; - EFLAGS (reset to RT_BIT(1); not relevant)
716
717%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
718 ; Load the guest LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs.
719 LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
720 LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
721 LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
722 LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
723%endif
724
725%ifdef VBOX_WITH_CRASHDUMP_MAGIC
726 mov qword [rbx + VMCSCACHE.uPos], 5
727%endif
728
729 ; Save the pCtx pointer
730 push rsi
731
732 ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
733 mov rbx, qword [rsi + CPUMCTX.cr2]
734 mov rdx, cr2
735 cmp rdx, rbx
736 je .skipcr2write64
737 mov cr2, rbx
738
739.skipcr2write64:
740 mov eax, VMX_VMCS_HOST_RSP
741 vmwrite rax, rsp
742 ; Note: assumes success!
743 ; Don't mess with ESP anymore!!!
744
745 ; Save Guest's general purpose registers.
746 mov rax, qword [rsi + CPUMCTX.eax]
747 mov rbx, qword [rsi + CPUMCTX.ebx]
748 mov rcx, qword [rsi + CPUMCTX.ecx]
749 mov rdx, qword [rsi + CPUMCTX.edx]
750 mov rbp, qword [rsi + CPUMCTX.ebp]
751 mov r8, qword [rsi + CPUMCTX.r8]
752 mov r9, qword [rsi + CPUMCTX.r9]
753 mov r10, qword [rsi + CPUMCTX.r10]
754 mov r11, qword [rsi + CPUMCTX.r11]
755 mov r12, qword [rsi + CPUMCTX.r12]
756 mov r13, qword [rsi + CPUMCTX.r13]
757 mov r14, qword [rsi + CPUMCTX.r14]
758 mov r15, qword [rsi + CPUMCTX.r15]
759
760 ; Save rdi & rsi.
761 mov rdi, qword [rsi + CPUMCTX.edi]
762 mov rsi, qword [rsi + CPUMCTX.esi]
763
764 vmlaunch
765 jmp .vmlaunch64_done; ; Here if vmlaunch detected a failure.
766
767ALIGNCODE(16)
768.vmlaunch64_done:
769 jc near .vmstart64_invalid_vmcs_ptr
770 jz near .vmstart64_start_failed
771
772 push rdi
773 mov rdi, [rsp + 8] ; pCtx
774
775 mov qword [rdi + CPUMCTX.eax], rax
776 mov qword [rdi + CPUMCTX.ebx], rbx
777 mov qword [rdi + CPUMCTX.ecx], rcx
778 mov qword [rdi + CPUMCTX.edx], rdx
779 mov qword [rdi + CPUMCTX.esi], rsi
780 mov qword [rdi + CPUMCTX.ebp], rbp
781 mov qword [rdi + CPUMCTX.r8], r8
782 mov qword [rdi + CPUMCTX.r9], r9
783 mov qword [rdi + CPUMCTX.r10], r10
784 mov qword [rdi + CPUMCTX.r11], r11
785 mov qword [rdi + CPUMCTX.r12], r12
786 mov qword [rdi + CPUMCTX.r13], r13
787 mov qword [rdi + CPUMCTX.r14], r14
788 mov qword [rdi + CPUMCTX.r15], r15
789 mov rax, cr2
790 mov qword [rdi + CPUMCTX.cr2], rax
791
792 pop rax ; The guest edi we pushed above
793 mov qword [rdi + CPUMCTX.edi], rax
794
795 pop rsi ; pCtx (needed in rsi by the macros below)
796
797%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
798 SAVEGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
799 SAVEGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
800 SAVEGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
801 SAVEGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
802%endif
803
804%ifdef VMX_USE_CACHED_VMCS_ACCESSES
805 pop rdi ; Saved pCache
806
807 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
808 mov dword [rdi + VMCSCACHE.uPos], 7
809 %endif
810 %ifdef DEBUG
811 mov [rdi + VMCSCACHE.TestOut.pCache], rdi
812 mov [rdi + VMCSCACHE.TestOut.pCtx], rsi
813 mov rax, cr8
814 mov [rdi + VMCSCACHE.TestOut.cr8], rax
815 %endif
816
817 mov ecx, [rdi + VMCSCACHE.Read.cValidEntries]
818 cmp ecx, 0 ; Can't happen
819 je .no_cached_reads
820 jmp .cached_read
821
822ALIGN(16)
823.cached_read:
824 dec rcx
825 mov eax, [rdi + VMCSCACHE.Read.aField + rcx*4]
826 vmread qword [rdi + VMCSCACHE.Read.aFieldVal + rcx*8], rax
827 cmp rcx, 0
828 jnz .cached_read
829.no_cached_reads:
830 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
831 mov dword [rdi + VMCSCACHE.uPos], 8
832 %endif
833%endif
834
835 ; Restore segment registers.
836 MYPOPSEGS rax
837
838 mov eax, VINF_SUCCESS
839
840%ifdef VBOX_WITH_CRASHDUMP_MAGIC
841 mov dword [rdi + VMCSCACHE.uPos], 9
842%endif
843.vmstart64_end:
844
845%ifdef VMX_USE_CACHED_VMCS_ACCESSES
846 %ifdef DEBUG
847 mov rdx, [rsp] ; HCPhysVmcs
848 mov [rdi + VMCSCACHE.TestOut.HCPhysVmcs], rdx
849 %endif
850%endif
851
852 ; Write back the data and disable the VMCS.
853 vmclear qword [rsp] ; Pushed pVMCS
854 add rsp, 8
855
856.vmstart64_vmxoff_end:
857 ; Disable VMX root mode.
858 vmxoff
859.vmstart64_vmxon_failed:
860%ifdef VMX_USE_CACHED_VMCS_ACCESSES
861 %ifdef DEBUG
862 cmp eax, VINF_SUCCESS
863 jne .skip_flags_save
864
865 pushf
866 pop rdx
867 mov [rdi + VMCSCACHE.TestOut.eflags], rdx
868 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
869 mov dword [rdi + VMCSCACHE.uPos], 12
870 %endif
871.skip_flags_save:
872 %endif
873%endif
874 pop rbp
875 ret
876
877
878.vmstart64_invalid_vmcs_ptr:
879 pop rsi ; pCtx (needed in rsi by the macros below)
880
881%ifdef VMX_USE_CACHED_VMCS_ACCESSES
882 pop rdi ; pCache
883 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
884 mov dword [rdi + VMCSCACHE.uPos], 10
885 %endif
886
887 %ifdef DEBUG
888 mov [rdi + VMCSCACHE.TestOut.pCache], rdi
889 mov [rdi + VMCSCACHE.TestOut.pCtx], rsi
890 %endif
891%endif
892
893 ; Restore segment registers.
894 MYPOPSEGS rax
895
896 ; Restore all general purpose host registers.
897 mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
898 jmp .vmstart64_end
899
900.vmstart64_start_failed:
901 pop rsi ; pCtx (needed in rsi by the macros below)
902
903%ifdef VMX_USE_CACHED_VMCS_ACCESSES
904 pop rdi ; pCache
905
906 %ifdef DEBUG
907 mov [rdi + VMCSCACHE.TestOut.pCache], rdi
908 mov [rdi + VMCSCACHE.TestOut.pCtx], rsi
909 %endif
910 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
911 mov dword [rdi + VMCSCACHE.uPos], 11
912 %endif
913%endif
914
915 ; Restore segment registers.
916 MYPOPSEGS rax
917
918 ; Restore all general purpose host registers.
919 mov eax, VERR_VMX_UNABLE_TO_START_VM
920 jmp .vmstart64_end
921ENDPROC VMXRCStartVM64
922
923
924;/**
925; * Prepares for and executes VMRUN (64-bit guests)
926; *
927; * @returns VBox status code
928; * @param pVMCBHostPhys Physical address of host VMCB (rsp+8)
929; * @param pVMCBPhys Physical address of guest VMCB (rsp+16)
930; * @param pCtx Guest context (rsi)
931; */
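;
; Sequence below, in short: save the host state to the host VMCB (vmsave),
; clear the global interrupt flag (clgi) and set IF, load the guest state from
; the guest VMCB (vmload), vmrun, save the updated guest state (vmsave),
; reload the host state (vmload), then cli + stgi and write the guest general
; purpose registers back into pCtx.
;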
932BEGINPROC SVMRCVMRun64
933 push rbp
934 mov rbp, rsp
935 pushf
936
937 ; Manual save and restore:
938 ; - General purpose registers except RIP, RSP, RAX
939 ;
940 ; Trashed:
941 ; - CR2 (we don't care)
942 ; - LDTR (reset to 0)
943 ; - DRx (presumably not changed at all)
944 ; - DR7 (reset to 0x400)
945
946 ; Save the Guest CPU context pointer.
947 push rsi ; Push for saving the state at the end
948
949 ; Save host fs, gs, sysenter msr etc
950 mov rax, [rbp + 8 + 8] ; pVMCBHostPhys (64 bits physical address)
951 push rax ; Save for the vmload after vmrun
952 vmsave
953
954 ; Setup eax for VMLOAD
955 mov rax, [rbp + 8 + 8 + RTHCPHYS_CB] ; pVMCBPhys (64 bits physical address)
956
957 ; Restore Guest's general purpose registers.
958 ; rax is loaded from the VMCB by VMRUN.
959 mov rbx, qword [rsi + CPUMCTX.ebx]
960 mov rcx, qword [rsi + CPUMCTX.ecx]
961 mov rdx, qword [rsi + CPUMCTX.edx]
962 mov rdi, qword [rsi + CPUMCTX.edi]
963 mov rbp, qword [rsi + CPUMCTX.ebp]
964 mov r8, qword [rsi + CPUMCTX.r8]
965 mov r9, qword [rsi + CPUMCTX.r9]
966 mov r10, qword [rsi + CPUMCTX.r10]
967 mov r11, qword [rsi + CPUMCTX.r11]
968 mov r12, qword [rsi + CPUMCTX.r12]
969 mov r13, qword [rsi + CPUMCTX.r13]
970 mov r14, qword [rsi + CPUMCTX.r14]
971 mov r15, qword [rsi + CPUMCTX.r15]
972 mov rsi, qword [rsi + CPUMCTX.esi]
973
974 ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
975 clgi
976 sti
977
978 ; Load guest fs, gs, sysenter msr etc
979 vmload
980 ; Run the VM
981 vmrun
982
983 ; rax is in the VMCB already; we can use it here.
984
985 ; Save guest fs, gs, sysenter msr etc.
986 vmsave
987
988 ; Load host fs, gs, sysenter msr etc.
989 pop rax ; Pushed above
990 vmload
991
992 ; Set the global interrupt flag again, but execute cli to make sure IF=0.
993 cli
994 stgi
995
996 pop rax ; pCtx
997
998 mov qword [rax + CPUMCTX.ebx], rbx
999 mov qword [rax + CPUMCTX.ecx], rcx
1000 mov qword [rax + CPUMCTX.edx], rdx
1001 mov qword [rax + CPUMCTX.esi], rsi
1002 mov qword [rax + CPUMCTX.edi], rdi
1003 mov qword [rax + CPUMCTX.ebp], rbp
1004 mov qword [rax + CPUMCTX.r8], r8
1005 mov qword [rax + CPUMCTX.r9], r9
1006 mov qword [rax + CPUMCTX.r10], r10
1007 mov qword [rax + CPUMCTX.r11], r11
1008 mov qword [rax + CPUMCTX.r12], r12
1009 mov qword [rax + CPUMCTX.r13], r13
1010 mov qword [rax + CPUMCTX.r14], r14
1011 mov qword [rax + CPUMCTX.r15], r15
1012
1013 mov eax, VINF_SUCCESS
1014
1015 popf
1016 pop rbp
1017 ret
1018ENDPROC SVMRCVMRun64
1019
1020;/**
1021; * Saves the guest FPU context
1022; *
1023; * @returns VBox status code
1024; * @param pCtx Guest context [rsi]
1025; */
1026BEGINPROC HMRCSaveGuestFPU64
1027 mov rax, cr0
1028 mov rcx, rax ; save old CR0
1029 and rax, ~(X86_CR0_TS | X86_CR0_EM)
1030 mov cr0, rax
1031
1032 fxsave [rsi + CPUMCTX.fpu]
1033
1034 mov cr0, rcx ; and restore old CR0 again
1035
1036 mov eax, VINF_SUCCESS
1037 ret
1038ENDPROC HMRCSaveGuestFPU64
1039
1040;/**
1041; * Saves the guest debug context (DR0-3, DR6)
1042; *
1043; * @returns VBox status code
1044; * @param pCtx Guest context [rsi]
1045; */
1046BEGINPROC HMRCSaveGuestDebug64
1047 mov rax, dr0
1048 mov qword [rsi + CPUMCTX.dr + 0*8], rax
1049 mov rax, dr1
1050 mov qword [rsi + CPUMCTX.dr + 1*8], rax
1051 mov rax, dr2
1052 mov qword [rsi + CPUMCTX.dr + 2*8], rax
1053 mov rax, dr3
1054 mov qword [rsi + CPUMCTX.dr + 3*8], rax
1055 mov rax, dr6
1056 mov qword [rsi + CPUMCTX.dr + 6*8], rax
1057 mov eax, VINF_SUCCESS
1058 ret
1059ENDPROC HMRCSaveGuestDebug64
1060
1061;/**
1062; * Dummy callback handler
1063; *
1064; * @returns VBox status code
1065; * @param param1 Parameter 1 [rsp+8]
1066; * @param param2 Parameter 2 [rsp+12]
1067; * @param param3 Parameter 3 [rsp+16]
1068; * @param param4 Parameter 4 [rsp+20]
1069; * @param param5 Parameter 5 [rsp+24]
1070; * @param pCtx Guest context [rsi]
1071; */
1072BEGINPROC HMRCTestSwitcher64
1073 mov eax, [rsp+8]
1074 ret
1075ENDPROC HMRCTestSwitcher64
1076
1077
1078
1079
1080;
1081;
1082; Back to switcher code.
1083; Back to switcher code.
1084; Back to switcher code.
1085;
1086;
1087
1088
1089
1090;;
1091; Trampoline for doing a call when starting the hypervisor execution.
1092;
1093; Push any arguments to the routine.
1094; Push the argument frame size (cArg * 4).
1095; Push the call target (_cdecl convention).
1096; Push the address of this routine.
1097;
1098;
1099BITS 64
1100ALIGNCODE(16)
1101BEGINPROC vmmRCCallTrampoline
1102%ifdef DEBUG_STUFF
1103 COM64_S_CHAR 'c'
1104 COM64_S_CHAR 't'
1105 COM64_S_CHAR '!'
1106%endif
1107 int3
1108ENDPROC vmmRCCallTrampoline
1109
1110
1111;;
1112; The C interface.
1113;
1114BITS 64
1115ALIGNCODE(16)
1116BEGINPROC vmmRCToHost
1117%ifdef DEBUG_STUFF
1118 push rsi
1119 COM_NEWLINE
1120 COM_CHAR 'b'
1121 COM_CHAR 'a'
1122 COM_CHAR 'c'
1123 COM_CHAR 'k'
1124 COM_CHAR '!'
1125 COM_NEWLINE
1126 pop rsi
1127%endif
1128 int3
1129ENDPROC vmmRCToHost
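;
; Note: in this switcher both vmmRCCallTrampoline and vmmRCToHost are int3
; stubs; the switch back to the host goes through vmmRCToHostAsm below,
; reached via the jmp at the end of vmmR0ToRawModeAsm.
;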
1130
1131;;
1132; vmmRCToHostAsm
1133;
1134; This is an alternative entry point which we'll be using
1135; when we have saved the guest state already or we haven't
1136; been messing with the guest at all.
1137;
1138; @param eax Return code.
1139; @uses eax, edx, ecx (or it may use them in the future)
1140;
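;
; Rough outline of the return path: switch to compatibility mode via the far
; jump below, disable paging, load the intermediate CR3, clear EFER.LME (and
; PAE unless the host needs it), re-enable paging, jump to ICExitTarget,
; restore the host CR3/EFER/GDT/IDT/TR/LDT, segment registers, stack and
; control registers, fetch CPUMCPU.u32RetCode into eax and return to the
; caller with a far return.
;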
1141BITS 64
1142ALIGNCODE(16)
1143BEGINPROC vmmRCToHostAsm
1144NAME(vmmRCToHostAsmNoReturn):
1145 ;; We're still in the intermediate memory context!
1146
1147 ;;
1148 ;; Switch to compatibility mode, placing ourselves in identity mapped code.
1149 ;;
1150 jmp far [NAME(fpIDEnterTarget) wrt rip]
1151
1152; 16:32 Pointer to IDEnterTarget.
1153NAME(fpIDEnterTarget):
1154 FIXUP FIX_ID_32BIT, 0, NAME(IDExitTarget) - NAME(Start)
1155dd 0
1156 FIXUP FIX_HYPER_CS, 0
1157dd 0
1158
1159 ; We're now on identity mapped pages!
1160ALIGNCODE(16)
1161GLOBALNAME IDExitTarget
1162BITS 32
1163 DEBUG32_CHAR('1')
1164
1165 ; 1. Deactivate long mode by turning off paging.
1166 mov ebx, cr0
1167 and ebx, ~X86_CR0_PG
1168 mov cr0, ebx
1169 DEBUG32_CHAR('2')
1170
1171 ; 2. Load intermediate page table.
1172 FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
1173 mov edx, 0ffffffffh
1174 mov cr3, edx
1175 DEBUG32_CHAR('3')
1176
1177 ; 3. Disable long mode.
1178 mov ecx, MSR_K6_EFER
1179 rdmsr
1180 DEBUG32_CHAR('5')
1181 and eax, ~(MSR_K6_EFER_LME)
1182 wrmsr
1183 DEBUG32_CHAR('6')
1184
1185%ifndef NEED_PAE_ON_HOST
1186 ; 3b. Disable PAE.
1187 mov eax, cr4
1188 and eax, ~X86_CR4_PAE
1189 mov cr4, eax
1190 DEBUG32_CHAR('7')
1191%endif
1192
1193 ; 4. Enable paging.
1194 or ebx, X86_CR0_PG
1195 mov cr0, ebx
1196 jmp short just_a_jump
1197just_a_jump:
1198 DEBUG32_CHAR('8')
1199
1200 ;;
1201 ;; 5. Jump to guest code mapping of the code and load the Hypervisor CS.
1202 ;;
1203 FIXUP FIX_ID_2_HC_NEAR_REL, 1, NAME(ICExitTarget) - NAME(Start)
1204 jmp near NAME(ICExitTarget)
1205
1206 ;;
1207 ;; When we arrive at this label we're at the
1208 ;; intermediate mapping of the switching code.
1209 ;;
1210BITS 32
1211ALIGNCODE(16)
1212GLOBALNAME ICExitTarget
1213 DEBUG32_CHAR('8')
1214
1215 ; load the hypervisor data selector into ds & es
1216 FIXUP FIX_HYPER_DS, 1
1217 mov eax, 0ffffh
1218 mov ds, eax
1219 mov es, eax
1220
1221 FIXUP FIX_GC_CPUM_OFF, 1, 0
1222 mov edx, 0ffffffffh
1223 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
1224 mov esi, [edx + CPUMCPU.Host.cr3]
1225 mov cr3, esi
1226
1227 ;; now we're in host memory context, let's restore regs
1228 FIXUP FIX_HC_CPUM_OFF, 1, 0
1229 mov edx, 0ffffffffh
1230 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
1231
1232 ; restore the host EFER
1233 mov ebx, edx
1234 mov ecx, MSR_K6_EFER
1235 mov eax, [ebx + CPUMCPU.Host.efer]
1236 mov edx, [ebx + CPUMCPU.Host.efer + 4]
1237 wrmsr
1238 mov edx, ebx
1239
1240 ; activate host gdt and idt
1241 lgdt [edx + CPUMCPU.Host.gdtr]
1242 DEBUG32_CHAR('0')
1243 lidt [edx + CPUMCPU.Host.idtr]
1244 DEBUG32_CHAR('1')
1245
1246 ; Restore TSS selector; must mark it as not busy before using ltr (!)
1247 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
1248 movzx eax, word [edx + CPUMCPU.Host.tr] ; eax <- TR
1249 and al, 0F8h ; mask away TI and RPL bits, get descriptor offset.
1250 add eax, [edx + CPUMCPU.Host.gdtr + 2] ; eax <- GDTR.address + descriptor offset.
1251 and dword [eax + 4], ~0200h ; clear busy flag (2nd type2 bit)
1252 ltr word [edx + CPUMCPU.Host.tr]
1253
1254 ; activate ldt
1255 DEBUG32_CHAR('2')
1256 lldt [edx + CPUMCPU.Host.ldtr]
1257
1258 ; Restore segment registers
1259 mov eax, [edx + CPUMCPU.Host.ds]
1260 mov ds, eax
1261 mov eax, [edx + CPUMCPU.Host.es]
1262 mov es, eax
1263 mov eax, [edx + CPUMCPU.Host.fs]
1264 mov fs, eax
1265 mov eax, [edx + CPUMCPU.Host.gs]
1266 mov gs, eax
1267 ; restore stack
1268 lss esp, [edx + CPUMCPU.Host.esp]
1269
1270 ; Control registers.
1271 mov ecx, [edx + CPUMCPU.Host.cr4]
1272 mov cr4, ecx
1273 mov ecx, [edx + CPUMCPU.Host.cr0]
1274 mov cr0, ecx
1275 ;mov ecx, [edx + CPUMCPU.Host.cr2] ; assumes this is waste of time.
1276 ;mov cr2, ecx
1277
1278 ; restore general registers.
1279 mov edi, [edx + CPUMCPU.Host.edi]
1280 mov esi, [edx + CPUMCPU.Host.esi]
1281 mov ebx, [edx + CPUMCPU.Host.ebx]
1282 mov ebp, [edx + CPUMCPU.Host.ebp]
1283
1284 ; store the return code in eax
1285 mov eax, [edx + CPUMCPU.u32RetCode]
1286 retf
1287ENDPROC vmmRCToHostAsm
1288
1289
1290GLOBALNAME End
1291;
1292; The description string (in the text section).
1293;
1294NAME(Description):
1295 db SWITCHER_DESCRIPTION
1296 db 0
1297
1298extern NAME(Relocate)
1299
1300;
1301; End the fixup records.
1302;
1303BEGINDATA
1304 db FIX_THE_END ; final entry.
1305GLOBALNAME FixupsEnd
1306
1307;;
1308; The switcher definition structure.
1309ALIGNDATA(16)
1310GLOBALNAME Def
1311 istruc VMMSWITCHERDEF
1312 at VMMSWITCHERDEF.pvCode, RTCCPTR_DEF NAME(Start)
1313 at VMMSWITCHERDEF.pvFixups, RTCCPTR_DEF NAME(Fixups)
1314 at VMMSWITCHERDEF.pszDesc, RTCCPTR_DEF NAME(Description)
1315 at VMMSWITCHERDEF.pfnRelocate, RTCCPTR_DEF NAME(Relocate)
1316 at VMMSWITCHERDEF.enmType, dd SWITCHER_TYPE
1317 at VMMSWITCHERDEF.cbCode, dd NAME(End) - NAME(Start)
1318 at VMMSWITCHERDEF.offR0ToRawMode, dd NAME(vmmR0ToRawMode) - NAME(Start)
1319 at VMMSWITCHERDEF.offRCToHost, dd NAME(vmmRCToHost) - NAME(Start)
1320 at VMMSWITCHERDEF.offRCCallTrampoline, dd NAME(vmmRCCallTrampoline) - NAME(Start)
1321 at VMMSWITCHERDEF.offRCToHostAsm, dd NAME(vmmRCToHostAsm) - NAME(Start)
1322 at VMMSWITCHERDEF.offRCToHostAsmNoReturn, dd NAME(vmmRCToHostAsmNoReturn) - NAME(Start)
1323 ; disasm help
1324 at VMMSWITCHERDEF.offHCCode0, dd 0
1325 at VMMSWITCHERDEF.cbHCCode0, dd NAME(IDEnterTarget) - NAME(Start)
1326 at VMMSWITCHERDEF.offHCCode1, dd NAME(ICExitTarget) - NAME(Start)
1327 at VMMSWITCHERDEF.cbHCCode1, dd NAME(End) - NAME(ICExitTarget)
1328 at VMMSWITCHERDEF.offIDCode0, dd NAME(IDEnterTarget) - NAME(Start)
1329 at VMMSWITCHERDEF.cbIDCode0, dd NAME(ICEnterTarget) - NAME(IDEnterTarget)
1330 at VMMSWITCHERDEF.offIDCode1, dd NAME(IDExitTarget) - NAME(Start)
1331 at VMMSWITCHERDEF.cbIDCode1, dd NAME(ICExitTarget) - NAME(Start)
1332 at VMMSWITCHERDEF.offGCCode, dd 0
1333 at VMMSWITCHERDEF.cbGCCode, dd 0
1334
1335 iend
1336
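;
; The VMMSWITCHERDEF above describes this switcher to the VMM:
; pvCode/pvFixups/pfnRelocate give the code blob, its fixup records and the
; relocation callback, the off* fields give the entry points relative to
; Start, and the offHCCode*/offIDCode*/cbGCCode fields appear to delimit the
; host-context, identity-mapped and guest-context byte ranges for the
; disassembly helpers.
;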