VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMSwitcher/AMD64andLegacy.mac@ 47689

最後變更 在這個檔案從47689是 47689,由 vboxsync 提交於 12 年 前

tstVMM: DRx fixes.

  • 屬性 svn:eol-style 設為 native
  • 屬性 svn:keywords 設為 Id
檔案大小: 33.4 KB
 
1; $Id: AMD64andLegacy.mac 47689 2013-08-13 12:53:07Z vboxsync $
2;; @file
3; VMM - World Switchers, template for AMD64 to PAE and 32-bit.
4;
5
6;
7; Copyright (C) 2006-2013 Oracle Corporation
8;
9; This file is part of VirtualBox Open Source Edition (OSE), as
10; available from http://www.virtualbox.org. This file is free software;
11; you can redistribute it and/or modify it under the terms of the GNU
12; General Public License (GPL) as published by the Free Software
13; Foundation, in version 2 as it comes in the "COPYING" file of the
14; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16;
17
18;%define DEBUG_STUFF 1
19;%define STRICT_IF 1
20
21;*******************************************************************************
22;* Header Files *
23;*******************************************************************************
24%include "VBox/asmdefs.mac"
25%include "VBox/apic.mac"
26%include "iprt/x86.mac"
27%include "VBox/vmm/cpum.mac"
28%include "VBox/vmm/stam.mac"
29%include "VBox/vmm/vm.mac"
30%include "VBox/err.mac"
31%include "CPUMInternal.mac"
32%include "VMMSwitcher.mac"
33
34
35;
36; Start the fixup records
37; We collect the fixups in the .data section as we go along
38; It is therefore VITAL that no-one is using the .data section
39; for anything else between 'Start' and 'End'.
40;
; Fixup records are collected in .data between NAME(Fixups) and NAME(FixupsEnd).
41BEGINDATA
42GLOBALNAME Fixups
43
44
45
; NAME(Start) marks the first byte of switcher code; every FIXUP offset below
; is expressed relative to this label.
46BEGINCODE
47GLOBALNAME Start
48
49%ifndef VBOX_WITH_HYBRID_32BIT_KERNEL
50BITS 64
51
52;;
53; The C interface.
54;
55; @param pVM GCC: rdi MSC:rcx The VM handle.
56;
; Ring-0 C entry point (64-bit host build): saves state and switches to the
; raw-mode guest context via the vmmR0ToRawModeAsm worker. Status is returned
; in eax when the switcher eventually comes back.
57BEGINPROC vmmR0ToRawMode
58%ifdef DEBUG_STUFF
59 COM64_S_NEWLINE
60 COM64_S_CHAR '^'
61%endif
62 ;
63 ; The ordinary version of the code.
64 ;
65
 ; Optional sanity check: refuse to switch (magic status 0c0ffee00h) if the
 ; caller left interrupts enabled.
66 %ifdef STRICT_IF
67 pushf
68 pop rax
69 test eax, X86_EFL_IF
70 jz .if_clear_in
71 mov eax, 0c0ffee00h
72 ret
73.if_clear_in:
74 %endif
75
76 ;
77 ; make r9 = pVM and rdx = pCpum.
78 ; rax, rcx and r8 are scratch here after.
 ; (first integer argument register differs: MSC ABI = rcx, SysV = rdi)
79 %ifdef RT_OS_WINDOWS
80 mov r9, rcx
81 %else
82 mov r9, rdi
83 %endif
84 lea rdx, [r9 + VM.cpum]
85
86 %ifdef VBOX_WITH_STATISTICS
87 ;
88 ; Switcher stats.
89 ;
90 lea r8, [r9 + VM.StatSwitcherToGC]
91 STAM64_PROFILE_ADV_START r8
92 %endif
93
94 ;
95 ; Call worker (far return).
 ; Push our CS so the worker can come back with a far 'retf' after the
 ; round trip through compatibility/raw mode.
96 ;
97 mov eax, cs
98 push rax
99 call NAME(vmmR0ToRawModeAsm)
100
101 %ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
102 ; Unblock Local APIC NMI vectors
103 ; Do this here to ensure the host CS is already restored
 ; fApicDisVectors bit layout (set by vmmR0ToRawModeAsm):
 ;   bit0=LINT0, bit1=LINT1, bit2=PC, bit3=THMR — consumed here via shr/jnc.
104 mov ecx, [rdx + CPUM.fApicDisVectors]
105 mov r8, [rdx + CPUM.pvApicBase]
106 shr ecx, 1
107 jnc gth64_nolint0
108 and dword [r8 + APIC_REG_LVT_LINT0], ~APIC_REG_LVT_MASKED
109gth64_nolint0:
110 shr ecx, 1
111 jnc gth64_nolint1
112 and dword [r8 + APIC_REG_LVT_LINT1], ~APIC_REG_LVT_MASKED
113gth64_nolint1:
114 shr ecx, 1
115 jnc gth64_nopc
116 and dword [r8 + APIC_REG_LVT_PC], ~APIC_REG_LVT_MASKED
117gth64_nopc:
118 shr ecx, 1
119 jnc gth64_notherm
120 and dword [r8 + APIC_REG_LVT_THMR], ~APIC_REG_LVT_MASKED
121gth64_notherm:
122 %endif
123
124 %ifdef VBOX_WITH_STATISTICS
125 ;
126 ; Switcher stats.
127 ;
128 lea r8, [r9 + VM.StatSwitcherToGC]
129 STAM64_PROFILE_ADV_STOP r8
130 %endif
131
132 ret
133ENDPROC vmmR0ToRawMode
134
135
136%else ; VBOX_WITH_HYBRID_32BIT_KERNEL
137
138
139BITS 32
140
141;;
142; The C interface.
143;
; Hybrid 32-bit kernel variant: the C caller runs in 32-bit mode, so this
; stub builds a far-return frame on the stack to thunk into the 64-bit
; vmmR0ToRawModeAsm worker and provides a 32-bit return target.
; Each FIXUP patches the following 0ffffffffh / 0ffffh placeholder at load time.
144BEGINPROC vmmR0ToRawMode
145 %ifdef DEBUG_STUFF
146 COM32_S_NEWLINE
147 COM32_S_CHAR '^'
148 %endif
149
150 %ifdef VBOX_WITH_STATISTICS
151 ;
152 ; Switcher stats.
153 ;
154 FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToGC
155 mov edx, 0ffffffffh
156 STAM_PROFILE_ADV_START edx
157 %endif
158
159 ; Thunk to/from 64 bit when invoking the worker routine.
160 ;
161 FIXUP FIX_HC_VM_OFF, 1, VM.cpum
162 mov edx, 0ffffffffh
163
 ; Far-return frame for coming back to 32-bit mode: 64-bit-sized cs:eip
 ; (zero-padded) pointing at .vmmR0ToRawModeReturn.
164 push 0
165 push cs
166 push 0
167 FIXUP FIX_HC_32BIT, 1, .vmmR0ToRawModeReturn - NAME(Start)
168 push 0ffffffffh
169
 ; Far-return frame used right now: 64-bit CS + worker address; 'retf' below
 ; transfers into 64-bit mode at vmmR0ToRawModeAsm.
170 FIXUP FIX_HC_64BIT_CS, 1
171 push 0ffffh
172 FIXUP FIX_HC_32BIT, 1, NAME(vmmR0ToRawModeAsm) - NAME(Start)
173 push 0ffffffffh
174 retf
175.vmmR0ToRawModeReturn:
176
177 ;
178 ; This selector reloading is probably not necessary, but we do it anyway to be quite sure
179 ; the CPU has the right idea about the selectors.
180 ;
181 mov edx, ds
182 mov ds, edx
183 mov ecx, es
184 mov es, ecx
185 mov edx, ss
186 mov ss, edx
187
 ; Deliberate build-breaker: this text does not assemble, so enabling
 ; VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI together with the hybrid kernel fails
 ; loudly instead of silently skipping the NMI unblocking.
188 %ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
189 Missing implementation!
190 %endif
191
192
193 %ifdef VBOX_WITH_STATISTICS
194 ;
195 ; Switcher stats.
196 ;
197 FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToHC
198 mov edx, 0ffffffffh
199 STAM_PROFILE_ADV_STOP edx
200 %endif
201
202 ret
203ENDPROC vmmR0ToRawMode
204
205BITS 64
206%endif ;!VBOX_WITH_HYBRID_32BIT_KERNEL
207
208
209
210; *****************************************************************************
211; vmmR0ToRawModeAsm
212;
213; Phase one of the switch from host to guest context (host MMU context)
214;
215; INPUT:
216; - edx virtual address of CPUM structure (valid in host context)
217;
218; USES/DESTROYS:
219; - eax, ecx, edx, r8
220;
221; ASSUMPTION:
222; - current CS and DS selectors are wide open
223;
224; *****************************************************************************
225ALIGNCODE(16)
; Worker: saves the host context into CPUMCPU.Host, then walks down from
; 64-bit host mode through compatibility mode (identity mapping) into the
; raw-mode guest context, finishing with an iret into hypervisor code.
; Far-called (host CS pushed by caller) so the exit path can 'retf' back.
226BEGINPROC vmmR0ToRawModeAsm
227 ;; Store the offset from CPUM to CPUMCPU in r8
228 mov r8d, [rdx + CPUM.offCPUMCPU0]
229
230 ;;
231 ;; Save CPU host context
232 ;; Skip eax, edx and ecx as these are not preserved over calls.
233 ;;
234 ; general registers.
235 ; mov [rdx + r8 + CPUMCPU.Host.rax], rax - scratch
236 mov [rdx + r8 + CPUMCPU.Host.rbx], rbx
237 ; mov [rdx + r8 + CPUMCPU.Host.rcx], rcx - scratch
238 ; mov [rdx + r8 + CPUMCPU.Host.rdx], rdx - scratch
239 mov [rdx + r8 + CPUMCPU.Host.rdi], rdi
240 mov [rdx + r8 + CPUMCPU.Host.rsi], rsi
241 mov [rdx + r8 + CPUMCPU.Host.rsp], rsp
242 mov [rdx + r8 + CPUMCPU.Host.rbp], rbp
243 ; mov [rdx + r8 + CPUMCPU.Host.r8 ], r8 - scratch
244 ; mov [rdx + r8 + CPUMCPU.Host.r9 ], r9 - scratch
245 mov [rdx + r8 + CPUMCPU.Host.r10], r10
246 mov [rdx + r8 + CPUMCPU.Host.r11], r11
247 mov [rdx + r8 + CPUMCPU.Host.r12], r12
248 mov [rdx + r8 + CPUMCPU.Host.r13], r13
249 mov [rdx + r8 + CPUMCPU.Host.r14], r14
250 mov [rdx + r8 + CPUMCPU.Host.r15], r15
251 ; selectors.
252 mov [rdx + r8 + CPUMCPU.Host.ds], ds
253 mov [rdx + r8 + CPUMCPU.Host.es], es
254 mov [rdx + r8 + CPUMCPU.Host.fs], fs
255 mov [rdx + r8 + CPUMCPU.Host.gs], gs
256 mov [rdx + r8 + CPUMCPU.Host.ss], ss
257 ; MSRs
 ; rdmsr needs ecx (index) and clobbers edx:eax, so park pCpum in rbx.
258 mov rbx, rdx
259 mov ecx, MSR_K8_FS_BASE
260 rdmsr
261 mov [rbx + r8 + CPUMCPU.Host.FSbase], eax
262 mov [rbx + r8 + CPUMCPU.Host.FSbase + 4], edx
263 mov ecx, MSR_K8_GS_BASE
264 rdmsr
265 mov [rbx + r8 + CPUMCPU.Host.GSbase], eax
266 mov [rbx + r8 + CPUMCPU.Host.GSbase + 4], edx
267 mov ecx, MSR_K6_EFER
268 rdmsr
269 mov [rbx + r8 + CPUMCPU.Host.efer], eax
270 mov [rbx + r8 + CPUMCPU.Host.efer + 4], edx
271 mov rdx, rbx
272 ; special registers.
273 sldt [rdx + r8 + CPUMCPU.Host.ldtr]
274 sidt [rdx + r8 + CPUMCPU.Host.idtr]
275 sgdt [rdx + r8 + CPUMCPU.Host.gdtr]
276 str [rdx + r8 + CPUMCPU.Host.tr] ; yasm BUG, generates sldt. YASMCHECK!
277 ; flags
278 pushf
279 pop qword [rdx + r8 + CPUMCPU.Host.rflags]
280
281%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
282 ; Block Local APIC NMI vectors
 ; For each LVT entry programmed for NMI delivery: mask it and record the
 ; fact in edi (bit0=LINT0, bit1=LINT1, bit2=PC, bit3=THMR); the result is
 ; stored in CPUM.fApicDisVectors for the unblocking code in vmmR0ToRawMode.
283 mov rbx, [rdx + CPUM.pvApicBase]
284 or rbx, rbx
285 jz htg_noapic
286 xor edi, edi
287 mov eax, [rbx + APIC_REG_LVT_LINT0]
288 mov ecx, eax
289 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
290 cmp ecx, APIC_REG_LVT_MODE_NMI
291 jne htg_nolint0
292 or edi, 0x01
293 or eax, APIC_REG_LVT_MASKED
294 mov [rbx + APIC_REG_LVT_LINT0], eax
295 mov eax, [rbx + APIC_REG_LVT_LINT0] ; write completion
296htg_nolint0:
297 mov eax, [rbx + APIC_REG_LVT_LINT1]
298 mov ecx, eax
299 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
300 cmp ecx, APIC_REG_LVT_MODE_NMI
301 jne htg_nolint1
302 or edi, 0x02
303 or eax, APIC_REG_LVT_MASKED
304 mov [rbx + APIC_REG_LVT_LINT1], eax
305 mov eax, [rbx + APIC_REG_LVT_LINT1] ; write completion
306htg_nolint1:
307 mov eax, [rbx + APIC_REG_LVT_PC]
308 mov ecx, eax
309 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
310 cmp ecx, APIC_REG_LVT_MODE_NMI
311 jne htg_nopc
312 or edi, 0x04
313 or eax, APIC_REG_LVT_MASKED
314 mov [rbx + APIC_REG_LVT_PC], eax
315 mov eax, [rbx + APIC_REG_LVT_PC] ; write completion
316htg_nopc:
 ; Thermal LVT only exists from APIC version 5 on (version is in bits 16+).
317 mov eax, [rbx + APIC_REG_VERSION]
318 shr eax, 16
319 cmp al, 5
320 jb htg_notherm
321 mov eax, [rbx + APIC_REG_LVT_THMR]
322 mov ecx, eax
323 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
324 cmp ecx, APIC_REG_LVT_MODE_NMI
325 jne htg_notherm
326 or edi, 0x08
327 or eax, APIC_REG_LVT_MASKED
328 mov [rbx + APIC_REG_LVT_THMR], eax
329 mov eax, [rbx + APIC_REG_LVT_THMR] ; write completion
330htg_notherm:
331 mov [rdx + CPUM.fApicDisVectors], edi
332htg_noapic:
333%endif ; VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
334
335 FIXUP FIX_NO_SYSENTER_JMP, 0, htg_no_sysenter - NAME(Start) ; this will insert a jmp htg_no_sysenter if host doesn't use sysenter.
336 ; save MSR_IA32_SYSENTER_CS register.
337 mov rbx, rdx ; save edx
338 mov ecx, MSR_IA32_SYSENTER_CS
339 rdmsr ; edx:eax <- MSR[ecx]
340 mov [rbx + r8 + CPUMCPU.Host.SysEnter.cs], eax
341 mov [rbx + r8 + CPUMCPU.Host.SysEnter.cs + 4], edx
342 xor eax, eax ; load 0:0 to cause #GP upon sysenter
343 xor edx, edx
344 wrmsr
345 mov rdx, rbx ; restore edx
346 jmp short htg_no_sysenter
347
348ALIGNCODE(16)
349htg_no_sysenter:
350
351 ;; handle use flags.
352 mov esi, [rdx + r8 + CPUMCPU.fUseFlags] ; esi == use flags.
353 and esi, ~CPUM_USED_FPU ; Clear CPUM_USED_* flags. ;;@todo FPU check can be optimized to use cr0 flags!
354 mov [rdx + r8 + CPUMCPU.fUseFlags], esi
355
356 ; debug registers.
357 test esi, CPUM_USE_DEBUG_REGS_HYPER | CPUM_USE_DEBUG_REGS_HOST
358 jnz htg_debug_regs_save
359htg_debug_regs_no:
360 DEBUG_CHAR('a') ; trashes esi
361
362 ; control registers.
363 mov rax, cr0
364 mov [rdx + r8 + CPUMCPU.Host.cr0], rax
365 ;mov rax, cr2 ; assume host os don't stuff things in cr2. (safe)
366 ;mov [rdx + r8 + CPUMCPU.Host.cr2], rax
367 mov rax, cr3
368 mov [rdx + r8 + CPUMCPU.Host.cr3], rax
369 mov rax, cr4
370 mov [rdx + r8 + CPUMCPU.Host.cr4], rax
371
372 ;;
373 ;; Start switching to VMM context.
374 ;;
375
376 ;
377 ; Change CR0 and CR4 so we can correctly emulate FPU/MMX/SSE[23] exceptions
378 ; Also disable WP. (eax==cr4 now)
379 ; Note! X86_CR4_PSE and X86_CR4_PAE are important if the host thinks so :-)
380 ;
381 and rax, X86_CR4_MCE | X86_CR4_PSE | X86_CR4_PAE
382 mov ecx, [rdx + r8 + CPUMCPU.Guest.cr4]
383 DEBUG_CHAR('b') ; trashes esi
384 ;; @todo Switcher cleanup: Determine base CR4 during CPUMR0Init / VMMR3SelectSwitcher putting it
385 ; in CPUMCPU.Hyper.cr4 (which isn't currently being used). That should
386 ; simplify this operation a bit (and improve locality of the data).
387
388 ;
389 ; CR4.AndMask and CR4.OrMask are set in CPUMR3Init based on the presence of
390 ; FXSAVE support on the host CPU
391 ;
392 and ecx, [rdx + CPUM.CR4.AndMask]
393 or eax, ecx
394 or eax, [rdx + CPUM.CR4.OrMask]
395 mov cr4, rax
396 DEBUG_CHAR('c') ; trashes esi
397
 ; CR0: keep only the guest's EM bit, force PE/PG/TS/ET/NE/MP.
398 mov eax, [rdx + r8 + CPUMCPU.Guest.cr0]
399 and eax, X86_CR0_EM
400 or eax, X86_CR0_PE | X86_CR0_PG | X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP
401 mov cr0, rax
402 DEBUG_CHAR('0') ; trashes esi
403
404
405 ; Load new gdt so we can do far jump to guest code after cr3 reload.
406 lgdt [rdx + r8 + CPUMCPU.Hyper.gdtr]
407 DEBUG_CHAR('1') ; trashes esi
408
409 ; Store the hypervisor cr3 for later loading
410 mov ebp, [rdx + r8 + CPUMCPU.Hyper.cr3]
411
412 ;;
413 ;; Load Intermediate memory context.
414 ;;
 ; FIXUP patches the 0ffffffffh placeholder with the real CR3 at load time.
415 FIXUP FIX_INTER_AMD64_CR3, 1
416 mov eax, 0ffffffffh
417 mov cr3, rax
418 DEBUG_CHAR('2') ; trashes esi
419
420 ;;
421 ;; 1. Switch to compatibility mode, placing ourselves in identity mapped code.
422 ;;
423 jmp far [NAME(fpIDEnterTarget) wrt rip]

424
425; 16:32 Pointer to IDEnterTarget.
426NAME(fpIDEnterTarget):
427 FIXUP FIX_ID_32BIT, 0, NAME(IDEnterTarget) - NAME(Start)
428dd 0
429 FIXUP FIX_HYPER_CS, 0
430dd 0
431
432
433;;
434; Detour for saving the host DR7 and DR6.
435; esi and rdx must be preserved.
436htg_debug_regs_save:
437DEBUG_S_CHAR('s');
438 mov rax, dr7 ; not sure, but if I read the docs right this will trap if GD is set. FIXME!!!
439 mov [rdx + r8 + CPUMCPU.Host.dr7], rax
440 mov ecx, X86_DR7_INIT_VAL
441 cmp eax, ecx
442 je .htg_debug_regs_dr7_disabled
443 mov dr7, rcx
444.htg_debug_regs_dr7_disabled:
445 mov rax, dr6 ; just in case we save the state register too.
446 mov [rdx + r8 + CPUMCPU.Host.dr6], rax
447 ; save host DR0-3?
448 test esi, CPUM_USE_DEBUG_REGS_HYPER
449 jz htg_debug_regs_no
450DEBUG_S_CHAR('S');
451 mov rax, dr0
452 mov [rdx + r8 + CPUMCPU.Host.dr0], rax
453 mov rbx, dr1
454 mov [rdx + r8 + CPUMCPU.Host.dr1], rbx
455 mov rcx, dr2
456 mov [rdx + r8 + CPUMCPU.Host.dr2], rcx
457 mov rax, dr3
458 mov [rdx + r8 + CPUMCPU.Host.dr3], rax
459 or dword [rdx + r8 + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HOST
460 jmp htg_debug_regs_no
461
462
463 ; We're now on identity mapped pages in 32-bit compatibility mode.
464BITS 32
465ALIGNCODE(16)
466GLOBALNAME IDEnterTarget
467 DEBUG_CHAR('3')
468
469 ; 2. Deactivate long mode by turning off paging.
470 mov ebx, cr0
471 and ebx, ~X86_CR0_PG
472 mov cr0, ebx
473 DEBUG_CHAR('4')
474
475 ; 3. Load intermediate page table.
476 FIXUP SWITCHER_FIX_INTER_CR3_GC, 1
477 mov edx, 0ffffffffh
478 mov cr3, edx
479
480 ; 4. Disable long mode.
481 ; We also use the chance to disable syscall/sysret and fast fxsave/fxrstor.
482 mov ecx, MSR_K6_EFER
483 rdmsr
484 DEBUG_CHAR('5')
485 and eax, ~(MSR_K6_EFER_LME | MSR_K6_EFER_SCE | MSR_K6_EFER_FFXSR)
486 wrmsr
487 DEBUG_CHAR('6')
488
489%ifndef SWITCHER_TO_PAE
490 ; 4b. Disable PAE.
491 mov eax, cr4
492 and eax, ~X86_CR4_PAE
493 mov cr4, eax
494%else
495%endif
496
497 ; 5. Enable paging.
498 or ebx, X86_CR0_PG
499 mov cr0, ebx
 ; Serialize the pipeline after the paging-mode change with a near jump.
500 jmp short just_a_jump
501just_a_jump:
502 DEBUG_CHAR('7')
503
504 ;;
505 ;; 6. Jump to guest code mapping of the code and load the Hypervisor CS.
506 ;;
507 FIXUP FIX_ID_2_GC_NEAR_REL, 1, NAME(JmpGCTarget) - NAME(Start)
508 jmp near NAME(JmpGCTarget)
509
510
511 ;;
512 ;; When we arrive at this label we're at the
513 ;; guest code mapping of the switching code.
514 ;;
515ALIGNCODE(16)
516GLOBALNAME JmpGCTarget
517 DEBUG_CHAR('-')
518 ; load final cr3 and do far jump to load cs.
519 mov cr3, ebp ; ebp set above
520 DEBUG_CHAR('0')
521
522 ;;
523 ;; We're in VMM MMU context and VMM CS is loaded.
524 ;; Setup the rest of the VMM state.
525 ;;
526 ; Load selectors
527 DEBUG_CHAR('1')
528 FIXUP FIX_HYPER_DS, 1
529 mov eax, 0ffffh
530 mov ds, eax
531 mov es, eax
532 xor eax, eax
533 mov gs, eax
534 mov fs, eax
535 ; Load pCpum into EDX
536 FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
537 mov edx, 0ffffffffh
538 ; Activate guest IDT
539 DEBUG_CHAR('2')
540 lidt [edx + CPUMCPU.Hyper.idtr]
541
542 ; Setup the stack.
543 DEBUG_CHAR('3')
544 mov ax, [edx + CPUMCPU.Hyper.ss.Sel]
545 mov ss, ax
546 mov esp, [edx + CPUMCPU.Hyper.esp]
547
548 ; Restore TSS selector; must mark it as not busy before using ltr (!)
549 DEBUG_S_CHAR('4')
550 FIXUP FIX_GC_TSS_GDTE_DW2, 2
551 and dword [0ffffffffh], ~0200h ; clear busy flag (2nd type2 bit)
552 DEBUG_S_CHAR('5')
553 ltr word [edx + CPUMCPU.Hyper.tr.Sel]
554 DEBUG_S_CHAR('6')
555
556 ; Activate the ldt (now we can safely crash).
557 lldt [edx + CPUMCPU.Hyper.ldtr.Sel]
558 DEBUG_S_CHAR('7')
559
560 ;; Use flags.
561 mov esi, [edx + CPUMCPU.fUseFlags]
562
563 ; debug registers
564 test esi, CPUM_USE_DEBUG_REGS_HYPER
565 jnz htg_debug_regs_guest
566htg_debug_regs_guest_done:
567 DEBUG_S_CHAR('9')
568
569 ; General registers (sans edx).
570 mov eax, [edx + CPUMCPU.Hyper.eax]
571 mov ebx, [edx + CPUMCPU.Hyper.ebx]
572 mov ecx, [edx + CPUMCPU.Hyper.ecx]
573 mov ebp, [edx + CPUMCPU.Hyper.ebp]
574 mov esi, [edx + CPUMCPU.Hyper.esi]
575 mov edi, [edx + CPUMCPU.Hyper.edi]
576 DEBUG_S_CHAR('!')
577
578 ;;
579 ;; Return to the VMM code which either called the switcher or
580 ;; the code set up to run by HC.
581 ;;
 ; Build an iret frame (eflags, cs, eip) from the Hyper state.
582 push dword [edx + CPUMCPU.Hyper.eflags]
583 push cs
584 push dword [edx + CPUMCPU.Hyper.eip]
585 mov edx, [edx + CPUMCPU.Hyper.edx] ; !! edx is no longer pointing to CPUMCPU here !!
586
587%ifdef DEBUG_STUFF
588 COM32_S_PRINT ';eip='
589 push eax
590 mov eax, [esp + 8]
591 COM32_S_DWORD_REG eax
592 pop eax
593 COM32_S_CHAR ';'
594%endif
595%ifdef VBOX_WITH_STATISTICS
596 push eax
597 FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToGC
598 mov eax, 0ffffffffh
599 STAM32_PROFILE_ADV_STOP eax
600 pop eax
601%endif
602
603 iret ; Use iret to make debugging and TF/RF work.
604
605;;
606; Detour for saving host DR0-3 and loading hypervisor debug registers.
607; esi and edx must be preserved.
608htg_debug_regs_guest:
609 DEBUG_S_CHAR('D')
610 DEBUG_S_CHAR('R')
611 DEBUG_S_CHAR('x')
612 ; load hyper DR0-7
613 mov ebx, [edx + CPUMCPU.Hyper.dr]
614 mov dr0, ebx
615 mov ecx, [edx + CPUMCPU.Hyper.dr + 8*1]
616 mov dr1, ecx
617 mov eax, [edx + CPUMCPU.Hyper.dr + 8*2]
618 mov dr2, eax
619 mov ebx, [edx + CPUMCPU.Hyper.dr + 8*3]
620 mov dr3, ebx
621 mov ecx, X86_DR6_INIT_VAL
622 mov dr6, ecx
623 mov eax, [edx + CPUMCPU.Hyper.dr + 8*7]
624 mov dr7, eax
625 or dword [edx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HYPER
626 jmp htg_debug_regs_guest_done
627
628ENDPROC vmmR0ToRawModeAsm
629
630
631;;
632; Trampoline for doing a call when starting the hyper visor execution.
633;
634; Push any arguments to the routine.
635; Push the argument frame size (cArg * 4).
636; Push the call target (_cdecl convention).
637; Push the address of this routine.
638;
639;
640ALIGNCODE(16)
; Stack on entry (prepared per the header above): [esp] = call target,
; [esp+4] = argument frame size (cArg * 4), then the arguments themselves.
641BEGINPROC vmmRCCallTrampoline
642%ifdef DEBUG_STUFF
643 COM32_S_CHAR 'c'
644 COM32_S_CHAR 't'
645 COM32_S_CHAR '!'
646%endif
647
648 ; call routine
649 pop eax ; call address
650 pop edi ; argument count.
651%ifdef DEBUG_STUFF
652 COM32_S_PRINT ';eax='
653 COM32_S_DWORD_REG eax
654 COM32_S_CHAR ';'
655%endif
656 call eax ; do call
657 add esp, edi ; cleanup stack
658
659 ; return to the host context (eax = C returncode).
660%ifdef DEBUG_STUFF
661 COM32_S_CHAR '`'
662%endif
 ; vmmRCToHostAsm must not return; if it somehow does, flag an internal
 ; processing error and try again rather than running off into the weeds.
663.to_host_again:
664 call NAME(vmmRCToHostAsm)
665 mov eax, VERR_VMM_SWITCHER_IPE_1
666 jmp .to_host_again
667ENDPROC vmmRCCallTrampoline
668
669
670
671;;
672; The C interface.
673;
674ALIGNCODE(16)
; cdecl wrapper: fetches the return code from the stack ([esp+4]) into eax
; and tail-jumps into the common save-and-exit path.
675BEGINPROC vmmRCToHost
676%ifdef DEBUG_STUFF
677 push esi
678 COM_NEWLINE
679 DEBUG_CHAR('b')
680 DEBUG_CHAR('a')
681 DEBUG_CHAR('c')
682 DEBUG_CHAR('k')
683 DEBUG_CHAR('!')
684 COM_NEWLINE
685 pop esi
686%endif
687 mov eax, [esp + 4]
688 jmp NAME(vmmRCToHostAsm)
689ENDPROC vmmRCToHost
690
691
692;;
693; vmmRCToHostAsmNoReturn
694;
695; This is an entry point used by TRPM when dealing with raw-mode traps,
696; i.e. traps in the hypervisor code. This will not return and saves no
697; state, because the caller has already saved the state.
698;
699; @param eax Return code.
700;
701ALIGNCODE(16)
; No-save variant: the caller (TRPM) already captured the hypervisor state,
; so this just handles statistics, loads the CPUMCPU pointer into edx and
; joins vmmRCToHostAsm past its register-saving prologue.
702BEGINPROC vmmRCToHostAsmNoReturn
703 DEBUG_S_CHAR('%')
704
705%ifdef VBOX_WITH_STATISTICS
706 FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
707 mov edx, 0ffffffffh
708 STAM32_PROFILE_ADV_STOP edx
709
710 FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
711 mov edx, 0ffffffffh
712 STAM32_PROFILE_ADV_START edx
713
714 FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
715 mov edx, 0ffffffffh
716 STAM32_PROFILE_ADV_START edx
717%endif
718
719 FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
720 mov edx, 0ffffffffh
721
722 jmp vmmRCToHostAsm_SaveNoGeneralRegs
723ENDPROC vmmRCToHostAsmNoReturn
724
725
726;;
727; vmmRCToHostAsm
728;
729; This is an entry point used by TRPM to return to host context when an
730; interrupt occured or an guest trap needs handling in host context. It
731; is also used by the C interface above.
732;
733; The hypervisor context is saved and it will return to the caller if
734; host context so desires.
735;
736; @param eax Return code.
737; @uses eax, edx, ecx (or it may use them in the future)
738;
739ALIGNCODE(16)
; Saves the hypervisor (raw-mode) context into CPUMCPU.Hyper, then climbs
; back from 32-bit raw mode through the identity-mapped intermediate context
; into 64-bit host mode and restores the full host state, finally performing
; a 64-bit far return to the caller of vmmR0ToRawModeAsm.
; Fix vs. original: the %else arm of the TSS-busy handling wrote 'ccx',
; which is not a register and would not assemble; corrected to 'ecx'.
740BEGINPROC vmmRCToHostAsm
741 DEBUG_S_CHAR('%')
742 push edx
743
744%ifdef VBOX_WITH_STATISTICS
745 FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
746 mov edx, 0ffffffffh
747 STAM32_PROFILE_ADV_STOP edx
748
749 FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
750 mov edx, 0ffffffffh
751 STAM32_PROFILE_ADV_START edx
752
753 FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
754 mov edx, 0ffffffffh
755 STAM32_PROFILE_ADV_START edx
756%endif
757
758 ;
759 ; Load the CPUM pointer.
760 ;
761 FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
762 mov edx, 0ffffffffh
763
764 ; Save register context.
765 pop dword [edx + CPUMCPU.Hyper.edx]
766 pop dword [edx + CPUMCPU.Hyper.eip] ; call return from stack
767 mov dword [edx + CPUMCPU.Hyper.esp], esp
768 mov dword [edx + CPUMCPU.Hyper.eax], eax
769 mov dword [edx + CPUMCPU.Hyper.ebx], ebx
770 mov dword [edx + CPUMCPU.Hyper.ecx], ecx
771 mov dword [edx + CPUMCPU.Hyper.esi], esi
772 mov dword [edx + CPUMCPU.Hyper.edi], edi
773 mov dword [edx + CPUMCPU.Hyper.ebp], ebp
774
775 ; special registers which may change.
 ; vmmRCToHostAsmNoReturn joins here (general registers already saved there).
776vmmRCToHostAsm_SaveNoGeneralRegs:
777%ifdef STRICT_IF
778 pushf
779 pop ecx
780 test ecx, X86_EFL_IF
781 jz .if_clear_out
782 mov eax, 0c0ffee01h
783 cli
784.if_clear_out:
785%endif
786 mov edi, eax ; save return code in EDI (careful with COM_DWORD_REG from here on!)
787
788 ; str [edx + CPUMCPU.Hyper.tr] - double fault only, and it won't be right then either.
789 sldt [edx + CPUMCPU.Hyper.ldtr.Sel]
790
791 ; No need to save CRx here. They are set dynamically according to Guest/Host requirements.
792 ; FPU context is saved before restore of host saving (another) branch.
793
794 ; Disable debug registers if active so they cannot trigger while switching.
795 test dword [edx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HYPER
796 jz .gth_disabled_dr7
797 mov eax, X86_DR7_INIT_VAL
798 mov dr7, eax
799.gth_disabled_dr7:
800
801 ;;
802 ;; Load Intermediate memory context.
803 ;;
804 FIXUP SWITCHER_FIX_INTER_CR3_GC, 1
805 mov eax, 0ffffffffh
806 mov cr3, eax
807 DEBUG_CHAR('?')
808
809 ;; We're now in intermediate memory context!
810
811 ;;
812 ;; 0. Jump to identity mapped location
813 ;;
814 FIXUP FIX_GC_2_ID_NEAR_REL, 1, NAME(IDExitTarget) - NAME(Start)
815 jmp near NAME(IDExitTarget)
816
817 ; We're now on identity mapped pages!
818ALIGNCODE(16)
819GLOBALNAME IDExitTarget
820 DEBUG_CHAR('1')
821
822 ; 1. Disable paging.
823 mov ebx, cr0
824 and ebx, ~X86_CR0_PG
825 mov cr0, ebx
826 DEBUG_CHAR('2')
827
828 ; 2. Enable PAE.
829%ifdef SWITCHER_TO_PAE
830 ; - already enabled
831%else
832 mov ecx, cr4
833 or ecx, X86_CR4_PAE
834 mov cr4, ecx
835%endif
836
837 ; 3. Load long mode intermediate CR3.
838 FIXUP FIX_INTER_AMD64_CR3, 1
839 mov ecx, 0ffffffffh
840 mov cr3, ecx
841 DEBUG_CHAR('3')
842
843 ; 4. Enable long mode.
 ; rdmsr/wrmsr clobber edx, so park the CPUMCPU pointer in ebp meanwhile.
844 mov ebp, edx
845 mov ecx, MSR_K6_EFER
846 rdmsr
847 or eax, MSR_K6_EFER_LME
848 wrmsr
849 mov edx, ebp
850 DEBUG_CHAR('4')
851
852 ; 5. Enable paging.
853 or ebx, X86_CR0_PG
854 mov cr0, ebx
855 DEBUG_CHAR('5')
856
857 ; Jump from compatibility mode to 64-bit mode.
858 FIXUP FIX_ID_FAR32_TO_64BIT_MODE, 1, NAME(IDExit64Mode) - NAME(Start)
859 jmp 0ffffh:0fffffffeh
860
861 ;
862 ; We're in 64-bit mode (ds, ss, es, fs, gs are all bogus).
863 ; Move on to the HC mapping.
864 ;
865BITS 64
866ALIGNCODE(16)
867NAME(IDExit64Mode):
868 DEBUG_CHAR('6')
869 jmp [NAME(pHCExitTarget) wrt rip]
870
871; 64-bit jump target
872NAME(pHCExitTarget):
873FIXUP FIX_HC_64BIT, 0, NAME(HCExitTarget) - NAME(Start)
874dq 0ffffffffffffffffh
875
876; 64-bit pCpum address.
877NAME(pCpumHC):
878FIXUP FIX_HC_64BIT_CPUM, 0
879dq 0ffffffffffffffffh
880
881 ;
882 ; When we arrive here we're at the host context
883 ; mapping of the switcher code.
884 ;
885ALIGNCODE(16)
886GLOBALNAME HCExitTarget
887 DEBUG_CHAR('9')
888
889 ; Clear high dword of the CPUMCPU pointer
890 and rdx, 0ffffffffh
891
892 ; load final cr3
893 mov rsi, [rdx + CPUMCPU.Host.cr3]
894 mov cr3, rsi
895 DEBUG_CHAR('@')
896
897 ;;
898 ;; Restore Host context.
899 ;;
900 ; Load CPUM pointer into edx
901 mov rdx, [NAME(pCpumHC) wrt rip]
902 ; Load the CPUMCPU offset.
903 mov r8d, [rdx + CPUM.offCPUMCPU0]
904
905 ; activate host gdt and idt
906 lgdt [rdx + r8 + CPUMCPU.Host.gdtr]
907 DEBUG_CHAR('0')
908 lidt [rdx + r8 + CPUMCPU.Host.idtr]
909 DEBUG_CHAR('1')
910 ; Restore TSS selector; must mark it as not busy before using ltr (!)
911%if 1 ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
912 movzx eax, word [rdx + r8 + CPUMCPU.Host.tr] ; eax <- TR
913 and al, 0F8h ; mask away TI and RPL bits, get descriptor offset.
914 add rax, [rdx + r8 + CPUMCPU.Host.gdtr + 2] ; eax <- GDTR.address + descriptor offset.
915 and dword [rax + 4], ~0200h ; clear busy flag (2nd type2 bit)
916 ltr word [rdx + r8 + CPUMCPU.Host.tr]
917%else
918 movzx eax, word [rdx + r8 + CPUMCPU.Host.tr] ; eax <- TR
919 and al, 0F8h ; mask away TI and RPL bits, get descriptor offset.
920 add rax, [rdx + r8 + CPUMCPU.Host.gdtr + 2] ; eax <- GDTR.address + descriptor offset.
921 mov ecx, [rax + 4] ; ecx <- 2nd descriptor dword
922 mov ebx, ecx ; save original value
923 and ecx, ~0200h ; clear busy flag (2nd type2 bit)
924 mov [rax + 4], ecx ; not using xchg here is paranoia.. (fixed: was 'ccx', which does not assemble)
925 ltr word [rdx + r8 + CPUMCPU.Host.tr]
926 xchg [rax + 4], ebx ; using xchg is paranoia too...
927%endif
928 ; activate ldt
929 DEBUG_CHAR('2')
930 lldt [rdx + r8 + CPUMCPU.Host.ldtr]
931 ; Restore segment registers
932 mov eax, [rdx + r8 + CPUMCPU.Host.ds]
933 mov ds, eax
934 mov eax, [rdx + r8 + CPUMCPU.Host.es]
935 mov es, eax
936 mov eax, [rdx + r8 + CPUMCPU.Host.fs]
937 mov fs, eax
938 mov eax, [rdx + r8 + CPUMCPU.Host.gs]
939 mov gs, eax
940 ; restore stack
941 mov eax, [rdx + r8 + CPUMCPU.Host.ss]
942 mov ss, eax
943 mov rsp, [rdx + r8 + CPUMCPU.Host.rsp]
944
945 FIXUP FIX_NO_SYSENTER_JMP, 0, gth_sysenter_no - NAME(Start) ; this will insert a jmp gth_sysenter_no if host doesn't use sysenter.
946 ; restore MSR_IA32_SYSENTER_CS register.
947 mov rbx, rdx ; save edx
948 mov ecx, MSR_IA32_SYSENTER_CS
949 mov eax, [rbx + r8 + CPUMCPU.Host.SysEnter.cs]
950 mov edx, [rbx + r8 + CPUMCPU.Host.SysEnter.cs + 4]
951 wrmsr ; MSR[ecx] <- edx:eax
952 mov rdx, rbx ; restore edx
953 jmp short gth_sysenter_no
954
955ALIGNCODE(16)
956gth_sysenter_no:
957
958 ;; @todo AMD syscall
959
960 ; Restore FPU if guest has used it.
961 ; Using fxrstor should ensure that we're not causing unwanted exception on the host.
962 mov esi, [rdx + r8 + CPUMCPU.fUseFlags] ; esi == use flags.
963 test esi, CPUM_USED_FPU
964 jz short gth_fpu_no
 ; Clear TS/EM first so fxsave/fxrstor don't fault with #NM.
965 mov rcx, cr0
966 and rcx, ~(X86_CR0_TS | X86_CR0_EM)
967 mov cr0, rcx
968
969 fxsave [rdx + r8 + CPUMCPU.Guest.fpu]
970 fxrstor [rdx + r8 + CPUMCPU.Host.fpu]
971 jmp short gth_fpu_no
972
973ALIGNCODE(16)
974gth_fpu_no:
975
976 ; Control registers.
977 ; Would've liked to have these higher up in case of crashes, but
978 ; the fpu stuff must be done before we restore cr0.
 ; If the host uses PCID, CR4.PCIDE can only be set while CR3's PCID field
 ; is zero; load a PCID-less CR3 first, set CR4, then the real CR3.
979 mov rcx, [rdx + r8 + CPUMCPU.Host.cr4]
980 test rcx, X86_CR4_PCIDE
981 jz gth_no_pcide
982 mov rax, [rdx + r8 + CPUMCPU.Host.cr3]
983 and rax, ~0xfff ; clear the PCID in cr3
984 mov cr3, rax
985 mov cr4, rcx
986 mov rax, [rdx + r8 + CPUMCPU.Host.cr3]
987 mov cr3, rax ; reload it with the right PCID.
988 jmp gth_restored_cr4
989gth_no_pcide:
990 mov cr4, rcx
991gth_restored_cr4:
992 mov rcx, [rdx + r8 + CPUMCPU.Host.cr0]
993 mov cr0, rcx
994 ;mov rcx, [rdx + r8 + CPUMCPU.Host.cr2] ; assumes this is waste of time.
995 ;mov cr2, rcx
996
997 ; Restore MSRs
998 mov rbx, rdx
999 mov ecx, MSR_K8_FS_BASE
1000 mov eax, [rbx + r8 + CPUMCPU.Host.FSbase]
1001 mov edx, [rbx + r8 + CPUMCPU.Host.FSbase + 4]
1002 wrmsr
1003 mov ecx, MSR_K8_GS_BASE
1004 mov eax, [rbx + r8 + CPUMCPU.Host.GSbase]
1005 mov edx, [rbx + r8 + CPUMCPU.Host.GSbase + 4]
1006 wrmsr
1007 mov ecx, MSR_K6_EFER
1008 mov eax, [rbx + r8 + CPUMCPU.Host.efer]
1009 mov edx, [rbx + r8 + CPUMCPU.Host.efer + 4]
1010 wrmsr
1011 mov rdx, rbx
1012
1013 ; Restore debug registers (if modified). (ESI must still be fUseFlags! Must be done late, at least after CR4!)
1014 test esi, CPUM_USE_DEBUG_REGS_HOST | CPUM_USED_DEBUG_REGS_HOST | CPUM_USE_DEBUG_REGS_HYPER
1015 jnz gth_debug_regs_restore
1016gth_debug_regs_done:
1017 and dword [rdx + r8 + CPUMCPU.fUseFlags], ~(CPUM_USED_DEBUG_REGS_HOST | CPUM_USED_DEBUG_REGS_HYPER)
1018
1019 ; Restore general registers.
1020 mov eax, edi ; restore return code. eax = return code !!
1021 ; mov rax, [rdx + r8 + CPUMCPU.Host.rax] - scratch + return code
1022 mov rbx, [rdx + r8 + CPUMCPU.Host.rbx]
1023 ; mov rcx, [rdx + r8 + CPUMCPU.Host.rcx] - scratch
1024 ; mov rdx, [rdx + r8 + CPUMCPU.Host.rdx] - scratch
1025 mov rdi, [rdx + r8 + CPUMCPU.Host.rdi]
1026 mov rsi, [rdx + r8 + CPUMCPU.Host.rsi]
1027 mov rsp, [rdx + r8 + CPUMCPU.Host.rsp]
1028 mov rbp, [rdx + r8 + CPUMCPU.Host.rbp]
1029 ; mov r8, [rdx + r8 + CPUMCPU.Host.r8 ] - scratch
1030 ; mov r9, [rdx + r8 + CPUMCPU.Host.r9 ] - scratch
1031 mov r10, [rdx + r8 + CPUMCPU.Host.r10]
1032 mov r11, [rdx + r8 + CPUMCPU.Host.r11]
1033 mov r12, [rdx + r8 + CPUMCPU.Host.r12]
1034 mov r13, [rdx + r8 + CPUMCPU.Host.r13]
1035 mov r14, [rdx + r8 + CPUMCPU.Host.r14]
1036 mov r15, [rdx + r8 + CPUMCPU.Host.r15]
1037
1038 ; finally restore flags. (probably not required)
1039 push qword [rdx + r8 + CPUMCPU.Host.rflags]
1040 popf
1041
1042
1043%ifdef DEBUG_STUFF
1044 COM64_S_CHAR '4'
1045%endif
 ; 048h is a REX.W prefix: makes the following retf pop a 64-bit rip:cs
 ; frame, matching the 'push cs-as-qword / call' done by vmmR0ToRawMode.
1046 db 048h
1047 retf
1048
1049;;
1050; Detour for restoring the host debug registers.
1051; edx and edi must be preserved.
1052gth_debug_regs_restore:
1053 DEBUG_S_CHAR('d')
1054 mov rax, dr7 ; Some DR7 paranoia first...
1055 mov ecx, X86_DR7_INIT_VAL
1056 cmp rax, rcx
1057 je .gth_debug_skip_dr7_disabling
1058 mov dr7, rcx
1059.gth_debug_skip_dr7_disabling:
1060 test esi, CPUM_USED_DEBUG_REGS_HOST
1061 jz .gth_debug_regs_dr7
1062
1063 DEBUG_S_CHAR('r')
1064 mov rax, [rdx + r8 + CPUMCPU.Host.dr0]
1065 mov dr0, rax
1066 mov rbx, [rdx + r8 + CPUMCPU.Host.dr1]
1067 mov dr1, rbx
1068 mov rcx, [rdx + r8 + CPUMCPU.Host.dr2]
1069 mov dr2, rcx
1070 mov rax, [rdx + r8 + CPUMCPU.Host.dr3]
1071 mov dr3, rax
1072.gth_debug_regs_dr7:
1073 mov rbx, [rdx + r8 + CPUMCPU.Host.dr6]
1074 mov dr6, rbx
1075 mov rcx, [rdx + r8 + CPUMCPU.Host.dr7]
1076 mov dr7, rcx
1077
1078 ; We clear the USED flags in the main code path.
1079 jmp gth_debug_regs_done
1080
1081ENDPROC vmmRCToHostAsm
1082
1083
; NAME(End) marks the last byte of switcher code; cbCode below is End - Start.
1084GLOBALNAME End
1085;
1086; The description string (in the text section).
1087;
1088NAME(Description):
1089 db SWITCHER_DESCRIPTION
1090 db 0
1091
1092extern NAME(Relocate)
1093
1094;
1095; End the fixup records.
1096;
1097BEGINDATA
1098 db FIX_THE_END ; final entry.
1099GLOBALNAME FixupsEnd
1100
1101;;
1102; The switcher definition structure.
; Exported descriptor of this switcher; all off*/cb* fields are byte offsets
; and sizes relative to NAME(Start) (HC = host-context code, ID = identity-
; mapped code, GC = guest-context code ranges, used for disassembly).
1103ALIGNDATA(16)
1104GLOBALNAME Def
1105 istruc VMMSWITCHERDEF
1106 at VMMSWITCHERDEF.pvCode, RTCCPTR_DEF NAME(Start)
1107 at VMMSWITCHERDEF.pvFixups, RTCCPTR_DEF NAME(Fixups)
1108 at VMMSWITCHERDEF.pszDesc, RTCCPTR_DEF NAME(Description)
1109 at VMMSWITCHERDEF.pfnRelocate, RTCCPTR_DEF NAME(Relocate)
1110 at VMMSWITCHERDEF.enmType, dd SWITCHER_TYPE
1111 at VMMSWITCHERDEF.cbCode, dd NAME(End) - NAME(Start)
1112 at VMMSWITCHERDEF.offR0ToRawMode, dd NAME(vmmR0ToRawMode) - NAME(Start)
1113 at VMMSWITCHERDEF.offRCToHost, dd NAME(vmmRCToHost) - NAME(Start)
1114 at VMMSWITCHERDEF.offRCCallTrampoline, dd NAME(vmmRCCallTrampoline) - NAME(Start)
1115 at VMMSWITCHERDEF.offRCToHostAsm, dd NAME(vmmRCToHostAsm) - NAME(Start)
1116 at VMMSWITCHERDEF.offRCToHostAsmNoReturn, dd NAME(vmmRCToHostAsmNoReturn) - NAME(Start)
1117 ; disasm help
1118 at VMMSWITCHERDEF.offHCCode0, dd 0
1119 at VMMSWITCHERDEF.cbHCCode0, dd NAME(IDEnterTarget) - NAME(Start)
1120 at VMMSWITCHERDEF.offHCCode1, dd NAME(HCExitTarget) - NAME(Start)
1121 at VMMSWITCHERDEF.cbHCCode1, dd NAME(End) - NAME(HCExitTarget)
1122 at VMMSWITCHERDEF.offIDCode0, dd NAME(IDEnterTarget) - NAME(Start)
1123 at VMMSWITCHERDEF.cbIDCode0, dd NAME(JmpGCTarget) - NAME(IDEnterTarget)
1124 at VMMSWITCHERDEF.offIDCode1, dd NAME(IDExitTarget) - NAME(Start)
1125 at VMMSWITCHERDEF.cbIDCode1, dd NAME(HCExitTarget) - NAME(IDExitTarget)
1126 at VMMSWITCHERDEF.offGCCode, dd NAME(JmpGCTarget) - NAME(Start)
1127 at VMMSWITCHERDEF.cbGCCode, dd NAME(IDExitTarget) - NAME(JmpGCTarget)
1128
1129 iend
1130
注意: 瀏覽 TracBrowser 來幫助您使用儲存庫瀏覽器

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette