VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac @ 45750

Last change on this file since 45750 was 45745, checked in by vboxsync, 12 years ago

Doc updates regarding guest 64-bit on 32-bit host.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 19.5 KB
 
; $Id: LegacyandAMD64.mac 45745 2013-04-25 20:36:55Z vboxsync $
;; @file
; VMM - World Switchers, 32-bit to AMD64 intermediate context.
;
; This is used for running 64-bit guests on 32-bit hosts, not normal raw-mode.
;

;
; Copyright (C) 2006-2012 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;

;%define DEBUG_STUFF 1
;%define STRICT_IF 1

;*******************************************************************************
;*  Defined Constants And Macros                                               *
;*******************************************************************************


;*******************************************************************************
;*  Header Files                                                               *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VBox/apic.mac"
%include "iprt/x86.mac"
%include "VBox/vmm/cpum.mac"
%include "VBox/vmm/stam.mac"
%include "VBox/vmm/vm.mac"
%include "CPUMInternal.mac"
%include "VMMSwitcher.mac"


;
; Start the fixup records
;    We collect the fixups in the .data section as we go along
;    It is therefore VITAL that no-one is using the .data section
;    for anything else between 'Start' and 'End'.
;
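; How this fits together (a sketch; the FIXUP macro itself is defined in
; VMMSwitcher.mac, not here): each FIXUP invocation below emits a record into
; this .data section and leaves a placeholder such as 0ffffffffh in the
; instruction stream. At installation time NAME(Relocate) is assumed to walk
; the records from 'Fixups' to the FIX_THE_END entry and patch every
; placeholder with the address that is valid in the target context.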
BEGINDATA
GLOBALNAME Fixups



BEGINCODE
GLOBALNAME Start

BITS 32

;;
; The C interface.
; @param    [esp + 04h]  Param 1 - VM handle
; @param    [esp + 08h]  Param 2 - Offset from VM::CPUM to the CPUMCPU
;                                  structure for the calling EMT.
;
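;
; A sketch of the C-side prototype implied by the stack layout above (the
; typedef names here are assumptions, not taken from this file):
;     DECLASM(int) vmmR0ToRawMode(PVM pVM, uint32_t offCpumCpu);
; The return value is whatever the switched-to context stores in
; CPUMCPU.u32RetCode (see vmmRCToHostAsm below).
;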
BEGINPROC vmmR0ToRawMode
%ifdef DEBUG_STUFF
    COM32_S_NEWLINE
    COM32_S_CHAR '^'
%endif

%ifdef VBOX_WITH_STATISTICS
    ;
    ; Switcher stats.
    ;
    FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToGC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx
%endif

    push ebp
    mov ebp, [esp + 12]             ; CPUMCPU offset

    ; turn off interrupts
    pushf
    cli

    ;
    ; Call worker.
    ;
    FIXUP FIX_HC_CPUM_OFF, 1, 0
    mov edx, 0ffffffffh
    push cs                         ; allow for far return and restore cs correctly.
    call NAME(vmmR0ToRawModeAsm)
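    ; The 'push cs' + near call above builds a far-return frame: the callee
    ; eventually finishes with retf (see vmmRCToHostAsm), which pops eip and
    ; then cs, so execution resumes here with the original code selector.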

%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
    CPUM_FROM_CPUMCPU(edx)
    ; Restore blocked Local APIC NMI vectors
    mov ecx, [edx + CPUM.fApicDisVectors]
    mov edx, [edx + CPUM.pvApicBase]
    shr ecx, 1
    jnc gth_nolint0
    and dword [edx + APIC_REG_LVT_LINT0], ~APIC_REG_LVT_MASKED
gth_nolint0:
    shr ecx, 1
    jnc gth_nolint1
    and dword [edx + APIC_REG_LVT_LINT1], ~APIC_REG_LVT_MASKED
gth_nolint1:
    shr ecx, 1
    jnc gth_nopc
    and dword [edx + APIC_REG_LVT_PC], ~APIC_REG_LVT_MASKED
gth_nopc:
    shr ecx, 1
    jnc gth_notherm
    and dword [edx + APIC_REG_LVT_THMR], ~APIC_REG_LVT_MASKED
gth_notherm:
%endif
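    ; A worked example of the unmasking loop above: if fApicDisVectors is
    ; 0101b (LINT0 and the performance counter LVT were masked on entry),
    ; the first 'shr ecx, 1' moves bit 0 into CF so LINT0 is unmasked, the
    ; second leaves CF clear so LINT1 is skipped, and the third sets CF
    ; again so the performance counter entry is unmasked.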

    ; restore original flags
    popf
    pop ebp

%ifdef VBOX_WITH_STATISTICS
    ;
    ; Switcher stats.
    ;
    FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToHC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx
%endif

    ret

ENDPROC vmmR0ToRawMode

; *****************************************************************************
; vmmR0ToRawModeAsm
;
; Phase one of the switch from host to guest context (host MMU context)
;
; INPUT:
;       - edx       virtual address of CPUM structure (valid in host context)
;       - ebp       offset of the CPUMCPU structure relative to CPUM.
;
; USES/DESTROYS:
;       - eax, ecx, edx, esi
;
; ASSUMPTION:
;       - current CS and DS selectors are wide open
;
; *****************************************************************************
ALIGNCODE(16)
BEGINPROC vmmR0ToRawModeAsm
    ;;
    ;; Save CPU host context
    ;;     Skip eax, edx and ecx as these are not preserved over calls.
    ;;
    CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    ; phys address of scratch page
    mov eax, dword [edx + CPUMCPU.Guest.dr + 4*8]
    mov cr2, eax

    mov dword [edx + CPUMCPU.Guest.dr + 4*8], 1
%endif

    ; general registers.
    mov [edx + CPUMCPU.Host.ebx], ebx
    mov [edx + CPUMCPU.Host.edi], edi
    mov [edx + CPUMCPU.Host.esi], esi
    mov [edx + CPUMCPU.Host.esp], esp
    mov [edx + CPUMCPU.Host.ebp], ebp
    ; selectors.
    mov [edx + CPUMCPU.Host.ds], ds
    mov [edx + CPUMCPU.Host.es], es
    mov [edx + CPUMCPU.Host.fs], fs
    mov [edx + CPUMCPU.Host.gs], gs
    mov [edx + CPUMCPU.Host.ss], ss
    ; special registers.
    sldt [edx + CPUMCPU.Host.ldtr]
    sidt [edx + CPUMCPU.Host.idtr]
    sgdt [edx + CPUMCPU.Host.gdtr]
    str  [edx + CPUMCPU.Host.tr]

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov dword [edx + CPUMCPU.Guest.dr + 4*8], 2
%endif

%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
    CPUM_FROM_CPUMCPU_WITH_OFFSET edx, ebp
    mov ebx, [edx + CPUM.pvApicBase]
    or ebx, ebx
    jz htg_noapic
    xor edi, edi                        ; edi accumulates the disabled-vector
                                        ; mask below, so it must start out clear.
    mov eax, [ebx + APIC_REG_LVT_LINT0]
    mov ecx, eax
    and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp ecx, APIC_REG_LVT_MODE_NMI
    jne htg_nolint0
    or edi, 0x01
    or eax, APIC_REG_LVT_MASKED
    mov [ebx + APIC_REG_LVT_LINT0], eax
    mov eax, [ebx + APIC_REG_LVT_LINT0] ; write completion
htg_nolint0:
    mov eax, [ebx + APIC_REG_LVT_LINT1]
    mov ecx, eax
    and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp ecx, APIC_REG_LVT_MODE_NMI
    jne htg_nolint1
    or edi, 0x02
    or eax, APIC_REG_LVT_MASKED
    mov [ebx + APIC_REG_LVT_LINT1], eax
    mov eax, [ebx + APIC_REG_LVT_LINT1] ; write completion
htg_nolint1:
    mov eax, [ebx + APIC_REG_LVT_PC]
    mov ecx, eax
    and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp ecx, APIC_REG_LVT_MODE_NMI
    jne htg_nopc
    or edi, 0x04
    or eax, APIC_REG_LVT_MASKED
    mov [ebx + APIC_REG_LVT_PC], eax
    mov eax, [ebx + APIC_REG_LVT_PC]    ; write completion
htg_nopc:
    mov eax, [ebx + APIC_REG_VERSION]
    shr eax, 16                         ; bits 16-23 hold the Max LVT Entry;
    cmp al, 5                           ; the thermal LVT entry only exists
    jb htg_notherm                      ; when it is at least 5.
    mov eax, [ebx + APIC_REG_LVT_THMR]
    mov ecx, eax
    and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp ecx, APIC_REG_LVT_MODE_NMI
    jne htg_notherm
    or edi, 0x08
    or eax, APIC_REG_LVT_MASKED
    mov [ebx + APIC_REG_LVT_THMR], eax
    mov eax, [ebx + APIC_REG_LVT_THMR]  ; write completion
htg_notherm:
    mov [edx + CPUM.fApicDisVectors], edi
htg_noapic:
    CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
%endif

    ; control registers.
    mov eax, cr0
    mov [edx + CPUMCPU.Host.cr0], eax
    ; Skip cr2; assume the host OS doesn't stuff things in cr2. (safe)
    mov eax, cr3
    mov [edx + CPUMCPU.Host.cr3], eax
    mov eax, cr4
    mov [edx + CPUMCPU.Host.cr4], eax

    ; save the host EFER MSR
    mov ebx, edx
    mov ecx, MSR_K6_EFER
    rdmsr
    mov [ebx + CPUMCPU.Host.efer], eax
    mov [ebx + CPUMCPU.Host.efer + 4], edx
    mov edx, ebx

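    ; rdmsr returns the MSR value in edx:eax, clobbering our CPUMCPU pointer;
    ; that is why edx is parked in ebx around the read and restored afterwards.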
%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov dword [edx + CPUMCPU.Guest.dr + 4*8], 3
%endif

    ; Load the new GDT so we can do a far jump after switching into 64-bit mode.
    lgdt [edx + CPUMCPU.Hyper.gdtr]

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov dword [edx + CPUMCPU.Guest.dr + 4*8], 4
%endif

    ;;
    ;; Load Intermediate memory context.
    ;;
    FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
    mov eax, 0ffffffffh
    mov cr3, eax
    DEBUG_CHAR('?')

    ;;
    ;; Jump to identity mapped location
    ;;
    FIXUP FIX_HC_2_ID_NEAR_REL, 1, NAME(IDEnterTarget) - NAME(Start)
    jmp near NAME(IDEnterTarget)
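    ; Identity mapping matters here because the code below toggles paging:
    ; eip must refer to the same bytes whether translation is on or off, so
    ; this stretch of switcher code is mapped at a virtual address equal to
    ; its physical address.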


    ; We're now on identity mapped pages!
ALIGNCODE(16)
GLOBALNAME IDEnterTarget
    DEBUG_CHAR('2')

    ; 1. Disable paging.
    mov ebx, cr0
    and ebx, ~X86_CR0_PG
    mov cr0, ebx
    DEBUG_CHAR('2')

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov eax, cr2
    mov dword [eax], 3
%endif

    ; 2. Enable PAE.
    mov ecx, cr4
    or ecx, X86_CR4_PAE
    mov cr4, ecx

    ; 3. Load long mode intermediate CR3.
    FIXUP FIX_INTER_AMD64_CR3, 1
    mov ecx, 0ffffffffh
    mov cr3, ecx
    DEBUG_CHAR('3')

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov eax, cr2
    mov dword [eax], 4
%endif

    ; 4. Enable long mode.
    mov esi, edx
    mov ecx, MSR_K6_EFER
    rdmsr
    FIXUP FIX_EFER_OR_MASK, 1
    or eax, 0ffffffffh
    and eax, ~(MSR_K6_EFER_FFXSR)   ; turn off fast fxsave/fxrstor (skipping xmm regs)
    wrmsr
    mov edx, esi
    DEBUG_CHAR('4')
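    ; FIX_EFER_OR_MASK is assumed to supply at least MSR_K6_EFER_LME in the
    ; patched OR mask (long mode cannot be activated without it); FFXSR is
    ; cleared so that fxsave/fxrstor handle the full XMM state rather than
    ; the abbreviated fast form.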

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov eax, cr2
    mov dword [eax], 5
%endif

    ; 5. Enable paging.
    or ebx, X86_CR0_PG
    ; Disable ring 0 write protection too
    and ebx, ~X86_CR0_WRITE_PROTECT
    mov cr0, ebx
    DEBUG_CHAR('5')

    ; Jump from compatibility mode to 64-bit mode.
    FIXUP FIX_ID_FAR32_TO_64BIT_MODE, 1, NAME(IDEnter64Mode) - NAME(Start)
    jmp 0ffffh:0fffffffeh
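    ; The 0ffffh:0fffffffeh operand is only a placeholder; the fixup above
    ; patches in a 64-bit code selector and the address of IDEnter64Mode, and
    ; executing the far jump reloads CS, taking us from compatibility mode
    ; into 64-bit mode.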

    ;
    ; We're in 64-bit mode (ds, ss, es, fs, gs are all bogus).
BITS 64
ALIGNCODE(16)
NAME(IDEnter64Mode):
    DEBUG_CHAR('6')
    jmp [NAME(pICEnterTarget) wrt rip]

; 64-bit jump target
NAME(pICEnterTarget):
FIXUP FIX_HC_64BIT_NOCHECK, 0, NAME(ICEnterTarget) - NAME(Start)
dq 0ffffffffffffffffh

; 64-bit pCpum address.
NAME(pCpumIC):
FIXUP FIX_GC_64_BIT_CPUM_OFF, 0, 0
dq 0ffffffffffffffffh

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
NAME(pMarker):
db 'Switch_marker'
%endif

    ;
    ; When we arrive here we're in 64-bit mode in the intermediate context
    ;
ALIGNCODE(16)
GLOBALNAME ICEnterTarget
    ; Load CPUM pointer into rdx
    mov rdx, [NAME(pCpumIC) wrt rip]
    CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp

    mov rax, cs
    mov ds, rax
    mov es, rax

    ; Invalidate fs & gs
    mov rax, 0
    mov fs, rax
    mov gs, rax

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 5
%endif

    ; Setup stack.
    DEBUG_CHAR('7')
    mov rsp, 0
    mov eax, [rdx + CPUMCPU.Hyper.ss.Sel]
    mov ss, ax
    mov esp, [rdx + CPUMCPU.Hyper.esp]
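    ; Writing esp already zero-extends into rsp in 64-bit mode, so the
    ; explicit 'mov rsp, 0' beforehand is presumably just defensive,
    ; guaranteeing no stale upper bits survive.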

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 6
%endif


    ; load the hypervisor function address
    mov r9, [rdx + CPUMCPU.Hyper.eip]

    ; Check if we need to restore the guest FPU state
    mov esi, [rdx + CPUMCPU.fUseFlags] ; esi == use flags.
    test esi, CPUM_SYNC_FPU_STATE
    jz near gth_fpu_no

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 7
%endif

    mov rax, cr0
    mov rcx, rax                    ; save old CR0
    and rax, ~(X86_CR0_TS | X86_CR0_EM)
    mov cr0, rax
    fxrstor [rdx + CPUMCPU.Guest.fpu]
    mov cr0, rcx                    ; and restore old CR0 again

    and dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_FPU_STATE

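    ; A set CR0.TS would make fxrstor raise #NM and a set CR0.EM would make
    ; it raise #UD, so both are cleared around the restore and the original
    ; CR0 (kept in rcx) is put back afterwards, leaving any TS-based lazy
    ; FPU switching arrangement undisturbed.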
gth_fpu_no:
    ; Check if we need to restore the guest debug state
    test esi, CPUM_SYNC_DEBUG_STATE
    jz near gth_debug_no

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 8
%endif

    mov rax, qword [rdx + CPUMCPU.Guest.dr + 0*8]
    mov dr0, rax
    mov rax, qword [rdx + CPUMCPU.Guest.dr + 1*8]
    mov dr1, rax
    mov rax, qword [rdx + CPUMCPU.Guest.dr + 2*8]
    mov dr2, rax
    mov rax, qword [rdx + CPUMCPU.Guest.dr + 3*8]
    mov dr3, rax
    mov rax, qword [rdx + CPUMCPU.Guest.dr + 6*8]
    mov dr6, rax                    ; not required for AMD-V

    and dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_DEBUG_STATE

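    ; Note that only DR0-DR3 and DR6 are loaded here; DR4/DR5 are aliases,
    ; and DR7 is presumably activated later by the hardware-assisted
    ; execution code (an assumption based on the sync-flag handling above).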
gth_debug_no:

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 9
%endif

    ; parameter for all helper functions (pCtx)
    lea rsi, [rdx + CPUMCPU.Guest.fpu]
    call r9

    ; Load CPUM pointer into rdx
    mov rdx, [NAME(pCpumIC) wrt rip]
    CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 10
%endif

    ; Save the return code
    mov dword [rdx + CPUMCPU.u32RetCode], eax

    ; now let's switch back
    jmp NAME(vmmRCToHostAsm)        ; rax = returncode.

ENDPROC vmmR0ToRawModeAsm


;;
; Trampoline for doing a call when starting hypervisor execution.
;
; Push any arguments to the routine.
; Push the argument frame size (cArg * 4).
; Push the call target (_cdecl convention).
; Push the address of this routine.
;
;
BITS 64
ALIGNCODE(16)
BEGINPROC vmmRCCallTrampoline
%ifdef DEBUG_STUFF
    COM64_S_CHAR 'c'
    COM64_S_CHAR 't'
    COM64_S_CHAR '!'
%endif
    int3
ENDPROC vmmRCCallTrampoline


;;
; The C interface.
;
BITS 64
ALIGNCODE(16)
BEGINPROC vmmRCToHost
%ifdef DEBUG_STUFF
    push rsi
    COM_NEWLINE
    DEBUG_CHAR('b')
    DEBUG_CHAR('a')
    DEBUG_CHAR('c')
    DEBUG_CHAR('k')
    DEBUG_CHAR('!')
    COM_NEWLINE
    pop rsi
%endif
    int3
ENDPROC vmmRCToHost

;;
; vmmRCToHostAsm
;
; This is an alternative entry point which we'll be using
; when we have saved the guest state already or haven't
; been messing with the guest at all.
;
; @param    eax     Return code.
; @uses     eax, edx, ecx (or it may use them in the future)
;
BITS 64
ALIGNCODE(16)
BEGINPROC vmmRCToHostAsm
NAME(vmmRCToHostAsmNoReturn):
    ;; We're still in the intermediate memory context!

    ;;
    ;; Switch to compatibility mode, placing ourselves in identity mapped code.
    ;;
    jmp far [NAME(fpIDEnterTarget) wrt rip]

; 16:32 Pointer to IDExitTarget.
NAME(fpIDEnterTarget):
    FIXUP FIX_ID_32BIT, 0, NAME(IDExitTarget) - NAME(Start)
dd 0
    FIXUP FIX_HYPER_CS, 0
dd 0

    ; We're now on identity mapped pages!
ALIGNCODE(16)
GLOBALNAME IDExitTarget
BITS 32
    DEBUG_CHAR('1')

    ; 1. Deactivate long mode by turning off paging.
    mov ebx, cr0
    and ebx, ~X86_CR0_PG
    mov cr0, ebx
    DEBUG_CHAR('2')

    ; 2. Load intermediate page table.
    FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
    mov edx, 0ffffffffh
    mov cr3, edx
    DEBUG_CHAR('3')

    ; 3. Disable long mode.
    mov ecx, MSR_K6_EFER
    rdmsr
    DEBUG_CHAR('5')
    and eax, ~(MSR_K6_EFER_LME)
    wrmsr
    DEBUG_CHAR('6')

%ifndef NEED_PAE_ON_HOST
    ; 3b. Disable PAE.
    mov eax, cr4
    and eax, ~X86_CR4_PAE
    mov cr4, eax
    DEBUG_CHAR('7')
%endif

    ; 4. Enable paging.
    or ebx, X86_CR0_PG
    mov cr0, ebx
    jmp short just_a_jump
just_a_jump:
    DEBUG_CHAR('8')
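    ; The jump-to-next-instruction above is the classic serializing idiom
    ; after changing the paging mode: it flushes any instructions prefetched
    ; under the old translation before execution continues.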

    ;;
    ;; 5. Jump to guest code mapping of the code and load the Hypervisor CS.
    ;;
    FIXUP FIX_ID_2_HC_NEAR_REL, 1, NAME(ICExitTarget) - NAME(Start)
    jmp near NAME(ICExitTarget)

    ;;
    ;; When we arrive at this label we're at the
    ;; intermediate mapping of the switching code.
    ;;
BITS 32
ALIGNCODE(16)
GLOBALNAME ICExitTarget
    DEBUG_CHAR('8')

    ; load the hypervisor data selector into ds & es
    FIXUP FIX_HYPER_DS, 1
    mov eax, 0ffffh
    mov ds, eax
    mov es, eax

    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov edx, 0ffffffffh
    CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
    mov esi, [edx + CPUMCPU.Host.cr3]
    mov cr3, esi
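    ; From this point the host page tables are active again, which is why
    ; the CPUM address is re-fetched below with FIX_HC_CPUM_OFF: the
    ; FIX_GC_CPUM_OFF value used above was only valid under the previous
    ; mapping.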

    ;; now we're in host memory context, let's restore regs
    FIXUP FIX_HC_CPUM_OFF, 1, 0
    mov edx, 0ffffffffh
    CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp

    ; restore the host EFER
    mov ebx, edx
    mov ecx, MSR_K6_EFER
    mov eax, [ebx + CPUMCPU.Host.efer]
    mov edx, [ebx + CPUMCPU.Host.efer + 4]
    wrmsr
    mov edx, ebx

    ; activate host gdt and idt
    lgdt [edx + CPUMCPU.Host.gdtr]
    DEBUG_CHAR('0')
    lidt [edx + CPUMCPU.Host.idtr]
    DEBUG_CHAR('1')

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    movzx eax, word [edx + CPUMCPU.Host.tr]     ; eax <- TR
    and al, 0F8h                                ; mask away TI and RPL bits, get descriptor offset.
    add eax, [edx + CPUMCPU.Host.gdtr + 2]      ; eax <- GDTR.address + descriptor offset.
    and dword [eax + 4], ~0200h                 ; clear busy flag (bit 1 of the descriptor type field)
    ltr word [edx + CPUMCPU.Host.tr]
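    ; A worked example of the busy-bit fixup above, for a hypothetical
    ; TR = 0028h: masking with 0F8h gives descriptor offset 28h, and
    ; GDTR.address + 28h + 4 addresses the descriptor's second dword, whose
    ; type field occupies bits 8-11. An available 32-bit TSS has type 9
    ; (1001b) and a busy one type 11 (1011b), so clearing bit 9 (0200h) is
    ; exactly the busy/available distinction; ltr then marks it busy again.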

    ; activate ldt
    DEBUG_CHAR('2')
    lldt [edx + CPUMCPU.Host.ldtr]

    ; Restore segment registers
    mov eax, [edx + CPUMCPU.Host.ds]
    mov ds, eax
    mov eax, [edx + CPUMCPU.Host.es]
    mov es, eax
    mov eax, [edx + CPUMCPU.Host.fs]
    mov fs, eax
    mov eax, [edx + CPUMCPU.Host.gs]
    mov gs, eax
    ; restore stack
    lss esp, [edx + CPUMCPU.Host.esp]

    ; Control registers.
    mov ecx, [edx + CPUMCPU.Host.cr4]
    mov cr4, ecx
    mov ecx, [edx + CPUMCPU.Host.cr0]
    mov cr0, ecx
    ;mov ecx, [edx + CPUMCPU.Host.cr2]          ; assumed to be a waste of time.
    ;mov cr2, ecx

    ; restore general registers.
    mov edi, [edx + CPUMCPU.Host.edi]
    mov esi, [edx + CPUMCPU.Host.esi]
    mov ebx, [edx + CPUMCPU.Host.ebx]
    mov ebp, [edx + CPUMCPU.Host.ebp]

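    ; The retf below pairs with the 'push cs' + near call in vmmR0ToRawMode:
    ; it pops the return eip and then the saved cs, landing back in the
    ; host-context caller with its original code selector.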
    ; store the return code in eax
    mov eax, [edx + CPUMCPU.u32RetCode]
    retf
ENDPROC vmmRCToHostAsm


GLOBALNAME End
;
; The description string (in the text section).
;
NAME(Description):
    db SWITCHER_DESCRIPTION
    db 0

extern NAME(Relocate)

;
; End the fixup records.
;
BEGINDATA
    db FIX_THE_END                  ; final entry.
GLOBALNAME FixupsEnd

;;
; The switcher definition structure.
ALIGNDATA(16)
GLOBALNAME Def
    istruc VMMSWITCHERDEF
        at VMMSWITCHERDEF.pvCode,                   RTCCPTR_DEF NAME(Start)
        at VMMSWITCHERDEF.pvFixups,                 RTCCPTR_DEF NAME(Fixups)
        at VMMSWITCHERDEF.pszDesc,                  RTCCPTR_DEF NAME(Description)
        at VMMSWITCHERDEF.pfnRelocate,              RTCCPTR_DEF NAME(Relocate)
        at VMMSWITCHERDEF.enmType,                  dd SWITCHER_TYPE
        at VMMSWITCHERDEF.cbCode,                   dd NAME(End)                 - NAME(Start)
        at VMMSWITCHERDEF.offR0ToRawMode,           dd NAME(vmmR0ToRawMode)      - NAME(Start)
        at VMMSWITCHERDEF.offRCToHost,              dd NAME(vmmRCToHost)         - NAME(Start)
        at VMMSWITCHERDEF.offRCCallTrampoline,      dd NAME(vmmRCCallTrampoline) - NAME(Start)
        at VMMSWITCHERDEF.offRCToHostAsm,           dd NAME(vmmRCToHostAsm)      - NAME(Start)
        at VMMSWITCHERDEF.offRCToHostAsmNoReturn,   dd NAME(vmmRCToHostAsmNoReturn) - NAME(Start)
        ; disasm help
        at VMMSWITCHERDEF.offHCCode0,               dd 0
        at VMMSWITCHERDEF.cbHCCode0,                dd NAME(IDEnterTarget) - NAME(Start)
        at VMMSWITCHERDEF.offHCCode1,               dd NAME(ICExitTarget)  - NAME(Start)
        at VMMSWITCHERDEF.cbHCCode1,                dd NAME(End)           - NAME(ICExitTarget)
        at VMMSWITCHERDEF.offIDCode0,               dd NAME(IDEnterTarget) - NAME(Start)
        at VMMSWITCHERDEF.cbIDCode0,                dd NAME(ICEnterTarget) - NAME(IDEnterTarget)
        at VMMSWITCHERDEF.offIDCode1,               dd NAME(IDExitTarget)  - NAME(Start)
        at VMMSWITCHERDEF.cbIDCode1,                dd NAME(ICExitTarget)  - NAME(Start)
        at VMMSWITCHERDEF.offGCCode,                dd 0
        at VMMSWITCHERDEF.cbGCCode,                 dd 0

    iend
