VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac @ 45737

Last change on this file since 45737 was 45737, checked in by vboxsync, 12 years ago

doc corrections.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 19.4 KB
 
; VMM - World Switchers, 32Bit to AMD64.
;

;
; Copyright (C) 2006-2012 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;

;%define DEBUG_STUFF 1
;%define STRICT_IF 1

;*******************************************************************************
;*      Defined Constants And Macros                                           *
;*******************************************************************************


;*******************************************************************************
;*      Header Files                                                           *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VBox/apic.mac"
%include "iprt/x86.mac"
%include "VBox/vmm/cpum.mac"
%include "VBox/vmm/stam.mac"
%include "VBox/vmm/vm.mac"
%include "CPUMInternal.mac"
%include "VMMSwitcher.mac"


;
; Start the fixup records
; We collect the fixups in the .data section as we go along
; It is therefore VITAL that no-one is using the .data section
; for anything else between 'Start' and 'End'.
;
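; Note: each FIXUP in the code below emits a record into this blob; the
; relocation callback (see the extern NAME(Relocate) near the end of this
; file) is what patches the placeholder operands - the 0ffffffffh style
; immediates that follow the FIXUP lines - with real addresses and offsets
; at load time.
;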
BEGINDATA
GLOBALNAME Fixups



BEGINCODE
GLOBALNAME Start

BITS 32

;;
; The C interface.
; @param    [esp + 04h]  Param 1 - VM handle
; @param    [esp + 08h]  Param 2 - Offset from VM::CPUM to the CPUMCPU
;                        structure for the calling EMT.
;
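; A rough C-level view of this entry point, illustrative only (the parameter
; names are assumptions; the real declaration lives in the VMM headers and
; the switcher is reached through the VMMSWITCHERDEF table at the bottom):
;
;   DECLASM(int) vmmR0ToRawMode(PVM pVM, unsigned offCpumCpu);
;
; The status code returned in eax is taken from CPUMCPU.u32RetCode on the
; way back (see vmmRCToHostAsm).
;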
BEGINPROC vmmR0ToRawMode
%ifdef DEBUG_STUFF
    COM32_S_NEWLINE
    COM32_S_CHAR '^'
%endif

%ifdef VBOX_WITH_STATISTICS
    ;
    ; Switcher stats.
    ;
    FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToGC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx
%endif

    push ebp
    mov ebp, [esp + 12]         ; CPUMCPU offset

    ; turn off interrupts
    pushf
    cli

    ;
    ; Call worker.
    ;
    FIXUP FIX_HC_CPUM_OFF, 1, 0
    mov edx, 0ffffffffh
    push cs                     ; allow for far return and restore cs correctly.
    call NAME(vmmR0ToRawModeAsm)
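    ; (The cs pushed above pairs with the far return - retf - at the end of
    ; vmmRCToHostAsm, which pops eip and cs in one go.)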

%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
    CPUM_FROM_CPUMCPU(edx)
    ; Restore blocked Local APIC NMI vectors
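    ; (fApicDisVectors bit layout, as established by vmmR0ToRawModeAsm below:
    ; bit 0 = LINT0 masked, bit 1 = LINT1 masked, bit 2 = perf counter LVT
    ; masked, bit 3 = thermal LVT masked. Each shr/jnc pair below consumes
    ; one bit and unmasks the corresponding vector if it was set.)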
    mov ecx, [edx + CPUM.fApicDisVectors]
    mov edx, [edx + CPUM.pvApicBase]
    shr ecx, 1
    jnc gth_nolint0
    and dword [edx + APIC_REG_LVT_LINT0], ~APIC_REG_LVT_MASKED
gth_nolint0:
    shr ecx, 1
    jnc gth_nolint1
    and dword [edx + APIC_REG_LVT_LINT1], ~APIC_REG_LVT_MASKED
gth_nolint1:
    shr ecx, 1
    jnc gth_nopc
    and dword [edx + APIC_REG_LVT_PC], ~APIC_REG_LVT_MASKED
gth_nopc:
    shr ecx, 1
    jnc gth_notherm
    and dword [edx + APIC_REG_LVT_THMR], ~APIC_REG_LVT_MASKED
gth_notherm:
%endif

    ; restore original flags
    popf
    pop ebp

%ifdef VBOX_WITH_STATISTICS
    ;
    ; Switcher stats.
    ;
    FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToHC
    mov edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx
%endif

    ret

ENDPROC vmmR0ToRawMode

; *****************************************************************************
; vmmR0ToRawModeAsm
;
; Phase one of the switch from host to guest context (host MMU context)
;
; INPUT:
;       - edx       virtual address of CPUM structure (valid in host context)
;       - ebp       offset of the CPUMCPU structure relative to CPUM.
;
; USES/DESTROYS:
;       - eax, ecx, edx, esi
;
; ASSUMPTION:
;       - current CS and DS selectors are wide open
;
; *****************************************************************************
ALIGNCODE(16)
BEGINPROC vmmR0ToRawModeAsm
    ;;
    ;; Save CPU host context
    ;; Skip eax, edx and ecx as these are not preserved over calls.
    ;;
    CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    ; phys address of scratch page
    mov eax, dword [edx + CPUMCPU.Guest.dr + 4*8]
    mov cr2, eax

    mov dword [edx + CPUMCPU.Guest.dr + 4*8], 1
%endif

    ; general registers.
    mov [edx + CPUMCPU.Host.ebx], ebx
    mov [edx + CPUMCPU.Host.edi], edi
    mov [edx + CPUMCPU.Host.esi], esi
    mov [edx + CPUMCPU.Host.esp], esp
    mov [edx + CPUMCPU.Host.ebp], ebp
    ; selectors.
    mov [edx + CPUMCPU.Host.ds], ds
    mov [edx + CPUMCPU.Host.es], es
    mov [edx + CPUMCPU.Host.fs], fs
    mov [edx + CPUMCPU.Host.gs], gs
    mov [edx + CPUMCPU.Host.ss], ss
    ; special registers.
    sldt [edx + CPUMCPU.Host.ldtr]
    sidt [edx + CPUMCPU.Host.idtr]
    sgdt [edx + CPUMCPU.Host.gdtr]
    str  [edx + CPUMCPU.Host.tr]

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov dword [edx + CPUMCPU.Guest.dr + 4*8], 2
%endif

%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
    CPUM_FROM_CPUMCPU_WITH_OFFSET edx, ebp
    mov ebx, [edx + CPUM.pvApicBase]
    or ebx, ebx
    jz htg_noapic
    xor edi, edi                ; start with a clean mask of disabled vectors
    mov eax, [ebx + APIC_REG_LVT_LINT0]
    mov ecx, eax
    and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp ecx, APIC_REG_LVT_MODE_NMI
    jne htg_nolint0
    or edi, 0x01
    or eax, APIC_REG_LVT_MASKED
    mov [ebx + APIC_REG_LVT_LINT0], eax
    mov eax, [ebx + APIC_REG_LVT_LINT0] ; write completion
htg_nolint0:
    mov eax, [ebx + APIC_REG_LVT_LINT1]
    mov ecx, eax
    and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp ecx, APIC_REG_LVT_MODE_NMI
    jne htg_nolint1
    or edi, 0x02
    or eax, APIC_REG_LVT_MASKED
    mov [ebx + APIC_REG_LVT_LINT1], eax
    mov eax, [ebx + APIC_REG_LVT_LINT1] ; write completion
htg_nolint1:
    mov eax, [ebx + APIC_REG_LVT_PC]
    mov ecx, eax
    and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp ecx, APIC_REG_LVT_MODE_NMI
    jne htg_nopc
    or edi, 0x04
    or eax, APIC_REG_LVT_MASKED
    mov [ebx + APIC_REG_LVT_PC], eax
    mov eax, [ebx + APIC_REG_LVT_PC] ; write completion
htg_nopc:
    mov eax, [ebx + APIC_REG_VERSION]
    shr eax, 16
    cmp al, 5
    jb htg_notherm
    mov eax, [ebx + APIC_REG_LVT_THMR]
    mov ecx, eax
    and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
    cmp ecx, APIC_REG_LVT_MODE_NMI
    jne htg_notherm
    or edi, 0x08
    or eax, APIC_REG_LVT_MASKED
    mov [ebx + APIC_REG_LVT_THMR], eax
    mov eax, [ebx + APIC_REG_LVT_THMR] ; write completion
htg_notherm:
    mov [edx + CPUM.fApicDisVectors], edi
htg_noapic:
    CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
%endif

    ; control registers.
    mov eax, cr0
    mov [edx + CPUMCPU.Host.cr0], eax
    ; Skip cr2; assume the host OS doesn't stuff things in cr2. (safe)
    mov eax, cr3
    mov [edx + CPUMCPU.Host.cr3], eax
    mov eax, cr4
    mov [edx + CPUMCPU.Host.cr4], eax

    ; save the host EFER msr
    mov ebx, edx
    mov ecx, MSR_K6_EFER
    rdmsr
    mov [ebx + CPUMCPU.Host.efer], eax
    mov [ebx + CPUMCPU.Host.efer + 4], edx
    mov edx, ebx
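    ; (rdmsr returns the MSR value in edx:eax, which is why the CPUMCPU
    ; pointer is parked in ebx across the read; the restore path in
    ; ICExitTarget uses the same trick around wrmsr.)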

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov dword [edx + CPUMCPU.Guest.dr + 4*8], 3
%endif

    ; Load the new gdt so we can do a far jump after going into 64-bit mode
    lgdt [edx + CPUMCPU.Hyper.gdtr]

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov dword [edx + CPUMCPU.Guest.dr + 4*8], 4
%endif

    ;;
    ;; Load Intermediate memory context.
    ;;
    FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
    mov eax, 0ffffffffh
    mov cr3, eax
    DEBUG_CHAR('?')

    ;;
    ;; Jump to identity mapped location
    ;;
    FIXUP FIX_HC_2_ID_NEAR_REL, 1, NAME(IDEnterTarget) - NAME(Start)
    jmp near NAME(IDEnterTarget)


    ; We're now on identity mapped pages!
ALIGNCODE(16)
GLOBALNAME IDEnterTarget
    DEBUG_CHAR('2')

    ; 1. Disable paging.
    mov ebx, cr0
    and ebx, ~X86_CR0_PG
    mov cr0, ebx
    DEBUG_CHAR('2')

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov eax, cr2
    mov dword [eax], 3
%endif

    ; 2. Enable PAE.
    mov ecx, cr4
    or ecx, X86_CR4_PAE
    mov cr4, ecx

    ; 3. Load long mode intermediate CR3.
    FIXUP FIX_INTER_AMD64_CR3, 1
    mov ecx, 0ffffffffh
    mov cr3, ecx
    DEBUG_CHAR('3')

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov eax, cr2
    mov dword [eax], 4
%endif

    ; 4. Enable long mode.
    mov esi, edx
    mov ecx, MSR_K6_EFER
    rdmsr
    FIXUP FIX_EFER_OR_MASK, 1
    or eax, 0ffffffffh
    and eax, ~(MSR_K6_EFER_FFXSR) ; turn off fast fxsave/fxrstor (skipping xmm regs)
    wrmsr
    mov edx, esi
    DEBUG_CHAR('4')

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov eax, cr2
    mov dword [eax], 5
%endif

    ; 5. Enable paging.
    or ebx, X86_CR0_PG
    ; Disable ring 0 write protection too
    and ebx, ~X86_CR0_WRITE_PROTECT
    mov cr0, ebx
    DEBUG_CHAR('5')
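    ; (Steps 1-5 above are the canonical AMD64 long-mode entry sequence:
    ; CR0.PG off, CR4.PAE on, PML4-based CR3 loaded, EFER.LME set, CR0.PG on
    ; again. With LME and PG both set, EFER.LMA is 1 and the CPU executes in
    ; compatibility mode until a 64-bit code segment is loaded.)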

    ; Jump from compatibility mode to 64-bit mode.
    FIXUP FIX_ID_FAR32_TO_64BIT_MODE, 1, NAME(IDEnter64Mode) - NAME(Start)
    jmp 0ffffh:0fffffffeh
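    ; (The 0ffffh:0fffffffeh operand is only a placeholder; the fixup above
    ; patches in the real 64-bit CS:offset of IDEnter64Mode at load time.)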

    ;
    ; We're in 64-bit mode (ds, ss, es, fs, gs are all bogus).
BITS 64
ALIGNCODE(16)
NAME(IDEnter64Mode):
    DEBUG_CHAR('6')
    jmp [NAME(pICEnterTarget) wrt rip]

; 64-bit jump target
NAME(pICEnterTarget):
FIXUP FIX_HC_64BIT_NOCHECK, 0, NAME(ICEnterTarget) - NAME(Start)
dq 0ffffffffffffffffh

; 64-bit pCpum address.
NAME(pCpumIC):
FIXUP FIX_GC_64_BIT_CPUM_OFF, 0, 0
dq 0ffffffffffffffffh

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
NAME(pMarker):
db 'Switch_marker'
%endif

    ;
    ; When we arrive here we're in 64-bit mode in the intermediate context
    ;
ALIGNCODE(16)
GLOBALNAME ICEnterTarget
    ; Load CPUM pointer into rdx
    mov rdx, [NAME(pCpumIC) wrt rip]
    CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp

    mov rax, cs
    mov ds, rax
    mov es, rax

    ; Invalidate fs & gs
    mov rax, 0
    mov fs, rax
    mov gs, rax

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 5
%endif

    ; Set up the stack.
    DEBUG_CHAR('7')
    mov rsp, 0
    mov eax, [rdx + CPUMCPU.Hyper.ss.Sel]
    mov ss, ax
    mov esp, [rdx + CPUMCPU.Hyper.esp]

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 6
%endif


    ; load the hypervisor function address
    mov r9, [rdx + CPUMCPU.Hyper.eip]

    ; Check if we need to restore the guest FPU state
    mov esi, [rdx + CPUMCPU.fUseFlags] ; esi == use flags.
    test esi, CPUM_SYNC_FPU_STATE
    jz near gth_fpu_no

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 7
%endif

    mov rax, cr0
    mov rcx, rax                ; save old CR0
    and rax, ~(X86_CR0_TS | X86_CR0_EM)
    mov cr0, rax
    fxrstor [rdx + CPUMCPU.Guest.fpu]
    mov cr0, rcx                ; and restore old CR0 again

    and dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_FPU_STATE
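    ; (fxrstor would raise #NM with CR0.TS set and #UD with CR0.EM set,
    ; hence the save/clear/restore dance around it above.)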

gth_fpu_no:
    ; Check if we need to restore the guest debug state
    test esi, CPUM_SYNC_DEBUG_STATE
    jz near gth_debug_no

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 8
%endif

    mov rax, qword [rdx + CPUMCPU.Guest.dr + 0*8]
    mov dr0, rax
    mov rax, qword [rdx + CPUMCPU.Guest.dr + 1*8]
    mov dr1, rax
    mov rax, qword [rdx + CPUMCPU.Guest.dr + 2*8]
    mov dr2, rax
    mov rax, qword [rdx + CPUMCPU.Guest.dr + 3*8]
    mov dr3, rax
    mov rax, qword [rdx + CPUMCPU.Guest.dr + 6*8]
    mov dr6, rax                ; not required for AMD-V

    and dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_DEBUG_STATE

gth_debug_no:

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 9
%endif

    ; parameter for all helper functions (pCtx)
    lea rsi, [rdx + CPUMCPU.Guest.fpu]
    call r9
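    ; (pCtx is passed to the 64-bit helper in rsi; the lea above implies the
    ; FPU state sits at the very start of the guest context, i.e.
    ; &Guest.fpu == &Guest - an observation from this code, not a documented
    ; ABI guarantee.)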

    ; Load CPUM pointer into rdx
    mov rdx, [NAME(pCpumIC) wrt rip]
    CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 10
%endif

    ; Save the return code
    mov dword [rdx + CPUMCPU.u32RetCode], eax

    ; now let's switch back
    jmp NAME(vmmRCToHostAsm)    ; rax = returncode.

ENDPROC vmmR0ToRawModeAsm


;;
; Trampoline for doing a call when starting the hypervisor execution.
;
; Push any arguments to the routine.
; Push the argument frame size (cArg * 4).
; Push the call target (_cdecl convention).
; Push the address of this routine.
;
;
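; An illustrative picture of the stack the caller is expected to build,
; derived from the description above (the names are hypothetical):
;
;   push    <arguments>                 ; rightmost first, cdecl style
;   push    cArgs * 4                   ; the argument frame size
;   push    pfnCallTarget               ; the routine to invoke
;   push    NAME(vmmRCCallTrampoline)
;
; Note that in this switcher the trampoline body is just an int3, i.e. the
; mechanism is not actually implemented here.
;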
BITS 64
ALIGNCODE(16)
BEGINPROC vmmRCCallTrampoline
%ifdef DEBUG_STUFF
    COM64_S_CHAR 'c'
    COM64_S_CHAR 't'
    COM64_S_CHAR '!'
%endif
    int3
ENDPROC vmmRCCallTrampoline


;;
; The C interface.
;
BITS 64
ALIGNCODE(16)
BEGINPROC vmmRCToHost
%ifdef DEBUG_STUFF
    push rsi
    COM_NEWLINE
    DEBUG_CHAR('b')
    DEBUG_CHAR('a')
    DEBUG_CHAR('c')
    DEBUG_CHAR('k')
    DEBUG_CHAR('!')
    COM_NEWLINE
    pop rsi
%endif
    int3
ENDPROC vmmRCToHost

;;
; vmmRCToHostAsm
;
; This is an alternative entry point which we'll be using
; when we have already saved the guest state or haven't
; been messing with the guest at all.
;
; @param eax Return code.
; @uses eax, edx, ecx (or it may use them in the future)
;
BITS 64
ALIGNCODE(16)
BEGINPROC vmmRCToHostAsm
NAME(vmmRCToHostAsmNoReturn):
    ;; We're still in the intermediate memory context!

    ;;
    ;; Switch to compatibility mode, placing ourselves in identity mapped code.
    ;;
    jmp far [NAME(fpIDEnterTarget) wrt rip]

; 16:32 pointer to IDExitTarget.
NAME(fpIDEnterTarget):
    FIXUP FIX_ID_32BIT, 0, NAME(IDExitTarget) - NAME(Start)
dd 0
    FIXUP FIX_HYPER_CS, 0
dd 0
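    ; (The far jump above loads a CS whose L bit is clear, dropping the CPU
    ; from 64-bit mode into compatibility mode; long mode itself is then left
    ; in IDExitTarget below by disabling paging and clearing EFER.LME.)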

    ; We're now on identity mapped pages!
ALIGNCODE(16)
GLOBALNAME IDExitTarget
BITS 32
    DEBUG_CHAR('1')

    ; 1. Deactivate long mode by turning off paging.
    mov ebx, cr0
    and ebx, ~X86_CR0_PG
    mov cr0, ebx
    DEBUG_CHAR('2')

    ; 2. Load intermediate page table.
    FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
    mov edx, 0ffffffffh
    mov cr3, edx
    DEBUG_CHAR('3')

    ; 3. Disable long mode.
    mov ecx, MSR_K6_EFER
    rdmsr
    DEBUG_CHAR('5')
    and eax, ~(MSR_K6_EFER_LME)
    wrmsr
    DEBUG_CHAR('6')

%ifndef NEED_PAE_ON_HOST
    ; 3b. Disable PAE.
    mov eax, cr4
    and eax, ~X86_CR4_PAE
    mov cr4, eax
    DEBUG_CHAR('7')
%endif

    ; 4. Enable paging.
    or ebx, X86_CR0_PG
    mov cr0, ebx
    jmp short just_a_jump
just_a_jump:
    DEBUG_CHAR('8')
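    ; (The short jump above is the traditional serializing step after
    ; toggling CR0.PG, making sure no stale prefetched instructions are
    ; executed under the new paging mode.)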

    ;;
    ;; 5. Jump to guest code mapping of the code and load the Hypervisor CS.
    ;;
    FIXUP FIX_ID_2_HC_NEAR_REL, 1, NAME(ICExitTarget) - NAME(Start)
    jmp near NAME(ICExitTarget)

    ;;
    ;; When we arrive at this label we're at the
    ;; intermediate mapping of the switching code.
    ;;
BITS 32
ALIGNCODE(16)
GLOBALNAME ICExitTarget
    DEBUG_CHAR('8')

    ; load the hypervisor data selector into ds & es
    FIXUP FIX_HYPER_DS, 1
    mov eax, 0ffffh
    mov ds, eax
    mov es, eax

    FIXUP FIX_GC_CPUM_OFF, 1, 0
    mov edx, 0ffffffffh
    CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
    mov esi, [edx + CPUMCPU.Host.cr3]
    mov cr3, esi

    ;; now we're in host memory context, let's restore regs
    FIXUP FIX_HC_CPUM_OFF, 1, 0
    mov edx, 0ffffffffh
    CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp

    ; restore the host EFER
    mov ebx, edx
    mov ecx, MSR_K6_EFER
    mov eax, [ebx + CPUMCPU.Host.efer]
    mov edx, [ebx + CPUMCPU.Host.efer + 4]
    wrmsr
    mov edx, ebx

    ; activate host gdt and idt
    lgdt [edx + CPUMCPU.Host.gdtr]
    DEBUG_CHAR('0')
    lidt [edx + CPUMCPU.Host.idtr]
    DEBUG_CHAR('1')

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    movzx eax, word [edx + CPUMCPU.Host.tr]     ; eax <- TR
    and al, 0F8h                                ; mask away TI and RPL bits, get descriptor offset.
    add eax, [edx + CPUMCPU.Host.gdtr + 2]      ; eax <- GDTR.address + descriptor offset.
    and dword [eax + 4], ~0200h                 ; clear busy flag (2nd type2 bit)
    ltr word [edx + CPUMCPU.Host.tr]
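    ; (In the GDT an available 32-bit TSS descriptor has type 1001b and a
    ; busy one 1011b; the busy bit is bit 9 of the descriptor's second dword,
    ; hence the ~0200h mask. ltr raises #GP if the descriptor is already
    ; marked busy.)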

    ; activate ldt
    DEBUG_CHAR('2')
    lldt [edx + CPUMCPU.Host.ldtr]

    ; Restore segment registers
    mov eax, [edx + CPUMCPU.Host.ds]
    mov ds, eax
    mov eax, [edx + CPUMCPU.Host.es]
    mov es, eax
    mov eax, [edx + CPUMCPU.Host.fs]
    mov fs, eax
    mov eax, [edx + CPUMCPU.Host.gs]
    mov gs, eax
    ; restore stack
    lss esp, [edx + CPUMCPU.Host.esp]

    ; Control registers.
    mov ecx, [edx + CPUMCPU.Host.cr4]
    mov cr4, ecx
    mov ecx, [edx + CPUMCPU.Host.cr0]
    mov cr0, ecx
    ;mov ecx, [edx + CPUMCPU.Host.cr2]          ; assumed to be a waste of time.
    ;mov cr2, ecx

    ; restore general registers.
    mov edi, [edx + CPUMCPU.Host.edi]
    mov esi, [edx + CPUMCPU.Host.esi]
    mov ebx, [edx + CPUMCPU.Host.ebx]
    mov ebp, [edx + CPUMCPU.Host.ebp]

    ; store the return code in eax
    mov eax, [edx + CPUMCPU.u32RetCode]
    retf
ENDPROC vmmRCToHostAsm


GLOBALNAME End
;
; The description string (in the text section).
;
NAME(Description):
    db SWITCHER_DESCRIPTION
    db 0

extern NAME(Relocate)

;
; End the fixup records.
;
BEGINDATA
    db FIX_THE_END              ; final entry.
GLOBALNAME FixupsEnd

;;
; The switcher definition structure.
ALIGNDATA(16)
GLOBALNAME Def
    istruc VMMSWITCHERDEF
        at VMMSWITCHERDEF.pvCode,                   RTCCPTR_DEF NAME(Start)
        at VMMSWITCHERDEF.pvFixups,                 RTCCPTR_DEF NAME(Fixups)
        at VMMSWITCHERDEF.pszDesc,                  RTCCPTR_DEF NAME(Description)
        at VMMSWITCHERDEF.pfnRelocate,              RTCCPTR_DEF NAME(Relocate)
        at VMMSWITCHERDEF.enmType,                  dd SWITCHER_TYPE
        at VMMSWITCHERDEF.cbCode,                   dd NAME(End) - NAME(Start)
        at VMMSWITCHERDEF.offR0ToRawMode,           dd NAME(vmmR0ToRawMode) - NAME(Start)
        at VMMSWITCHERDEF.offRCToHost,              dd NAME(vmmRCToHost) - NAME(Start)
        at VMMSWITCHERDEF.offRCCallTrampoline,      dd NAME(vmmRCCallTrampoline) - NAME(Start)
        at VMMSWITCHERDEF.offRCToHostAsm,           dd NAME(vmmRCToHostAsm) - NAME(Start)
        at VMMSWITCHERDEF.offRCToHostAsmNoReturn,   dd NAME(vmmRCToHostAsmNoReturn) - NAME(Start)
        ; disasm help
        at VMMSWITCHERDEF.offHCCode0,               dd 0
        at VMMSWITCHERDEF.cbHCCode0,                dd NAME(IDEnterTarget) - NAME(Start)
        at VMMSWITCHERDEF.offHCCode1,               dd NAME(ICExitTarget) - NAME(Start)
        at VMMSWITCHERDEF.cbHCCode1,                dd NAME(End) - NAME(ICExitTarget)
        at VMMSWITCHERDEF.offIDCode0,               dd NAME(IDEnterTarget) - NAME(Start)
        at VMMSWITCHERDEF.cbIDCode0,                dd NAME(ICEnterTarget) - NAME(IDEnterTarget)
        at VMMSWITCHERDEF.offIDCode1,               dd NAME(IDExitTarget) - NAME(Start)
        at VMMSWITCHERDEF.cbIDCode1,                dd NAME(ICExitTarget) - NAME(Start)
        at VMMSWITCHERDEF.offGCCode,                dd 0
        at VMMSWITCHERDEF.cbGCCode,                 dd 0
    iend
