VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0JmpA-amd64.asm@ 90177

Last change on this file since 90177 was 90177, checked in by vboxsync, 4 years ago

VMM/VMMR0JmpA-amd64.asm: Another stack fuzz adjustment for linux 5.13. bugref:10064 ticketref:20090 ticketref:20456

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 14.2 KB
 
; $Id: VMMR0JmpA-amd64.asm 90177 2021-07-14 09:45:39Z vboxsync $
;; @file
; VMM - R0 SetJmp / LongJmp routines for AMD64.
;

;
; Copyright (C) 2006-2020 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;

;*******************************************************************************
;* Header Files                                                                *
;*******************************************************************************
%define RT_ASM_WITH_SEH64_ALT
%include "VBox/asmdefs.mac"
%include "VMMInternal.mac"
%include "VBox/err.mac"
%include "VBox/param.mac"


;*******************************************************************************
;* Defined Constants And Macros                                                *
;*******************************************************************************
%define RESUME_MAGIC    07eadf00dh
%define STACK_PADDING   0eeeeeeeeeeeeeeeeh

;; Workaround for linux 4.6 fast/slow syscall stack depth difference.
;; Update: This got worse with linux 5.13 and CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT.
;; The x86 arch_exit_to_user_mode_prepare code limits the offset to 255,
;; while the generic limit is 1023. See bugref:10064 for details.
%ifdef VMM_R0_SWITCH_STACK
 %define STACK_FUZZ_SIZE  0
%else
 %ifdef RT_OS_LINUX
  %define STACK_FUZZ_SIZE  384
 %else
  %define STACK_FUZZ_SIZE  128
 %endif
%endif
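;; (Sizing note, presumably: 384 is large enough that the up-to-255-byte randomized
;; kernel stack offset on x86, on top of the 128 bytes used for other hosts, still
;; leaves the stack delta inside the plus/minus STACK_FUZZ_SIZE window accepted by
;; the .resume path below.)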


BEGINCODE


;;
; The setjmp variant used for calling Ring-3.
;
; This differs from the normal setjmp in that it will resume VMMRZCallRing3 if we're
; in the middle of a ring-3 call.  Another difference is the function pointer and
; argument.  This has to do with resuming code and the stack frame of the caller.
;
; @returns  VINF_SUCCESS on success or whatever is passed to vmmR0CallRing3LongJmp.
; @param    pJmpBuf   msc:rcx gcc:rdi x86:[esp+0x04]  Our jmp_buf.
; @param    pfn       msc:rdx gcc:rsi x86:[esp+0x08]  The function to be called when not resuming.
; @param    pvUser1   msc:r8  gcc:rdx x86:[esp+0x0c]  The argument of that function.
; @param    pvUser2   msc:r9  gcc:rcx x86:[esp+0x10]  The argument of that function.
;
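; Rough control flow (a summary of the code below):
;   - fInRing3Call clear:  save the non-volatile registers and rsp into pJmpBuf,
;     then call pfn(pvUser1, pvUser2) and return its status via .proper_return.
;   - vmmR0CallRing3LongJmp during pfn:  the saved context is restored and this
;     function returns again, now with the rc given to the long jump.
;   - fInRing3Call set:  take the .resume path, restore the stack image saved by
;     vmmR0CallRing3LongJmp and continue where the ring-3 call left off.
;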
GLOBALNAME vmmR0CallRing3SetJmp2
GLOBALNAME vmmR0CallRing3SetJmpEx
BEGINPROC vmmR0CallRing3SetJmp
        ;
        ; Save the registers.
        ;
        push    rbp
        SEH64_PUSH_xBP
        mov     rbp, rsp
        SEH64_SET_FRAME_xBP 0
 %ifdef ASM_CALL64_MSC
        sub     rsp, 30h + STACK_FUZZ_SIZE      ; (10h is used by resume (??), 20h for callee spill area)
        SEH64_ALLOCATE_STACK 30h + STACK_FUZZ_SIZE
SEH64_END_PROLOGUE
        mov     r11, rdx                        ; pfn
        mov     rdx, rcx                        ; pJmpBuf;
 %else
        sub     rsp, 10h + STACK_FUZZ_SIZE      ; (10h is used by resume (??))
        SEH64_ALLOCATE_STACK 10h + STACK_FUZZ_SIZE
SEH64_END_PROLOGUE
        mov     r8, rdx                         ; pvUser1 (save it like MSC)
        mov     r9, rcx                         ; pvUser2 (save it like MSC)
        mov     r11, rsi                        ; pfn
        mov     rdx, rdi                        ; pJmpBuf
 %endif
        mov     [xDX + VMMR0JMPBUF.rbx], rbx
 %ifdef ASM_CALL64_MSC
        mov     [xDX + VMMR0JMPBUF.rsi], rsi
        mov     [xDX + VMMR0JMPBUF.rdi], rdi
 %endif
        mov     [xDX + VMMR0JMPBUF.rbp], rbp
        mov     [xDX + VMMR0JMPBUF.r12], r12
        mov     [xDX + VMMR0JMPBUF.r13], r13
        mov     [xDX + VMMR0JMPBUF.r14], r14
        mov     [xDX + VMMR0JMPBUF.r15], r15
        mov     xAX, [rbp + 8]                  ; (not really necessary, except for validity check)
        mov     [xDX + VMMR0JMPBUF.rip], xAX
 %ifdef ASM_CALL64_MSC
        lea     r10, [rsp + 20h]                ; must save the spill area
 %else
        lea     r10, [rsp]
 %endif
        mov     [xDX + VMMR0JMPBUF.rsp], r10
 %ifdef RT_OS_WINDOWS
        movdqa  [xDX + VMMR0JMPBUF.xmm6], xmm6
        movdqa  [xDX + VMMR0JMPBUF.xmm7], xmm7
        movdqa  [xDX + VMMR0JMPBUF.xmm8], xmm8
        movdqa  [xDX + VMMR0JMPBUF.xmm9], xmm9
        movdqa  [xDX + VMMR0JMPBUF.xmm10], xmm10
        movdqa  [xDX + VMMR0JMPBUF.xmm11], xmm11
        movdqa  [xDX + VMMR0JMPBUF.xmm12], xmm12
        movdqa  [xDX + VMMR0JMPBUF.xmm13], xmm13
        movdqa  [xDX + VMMR0JMPBUF.xmm14], xmm14
        movdqa  [xDX + VMMR0JMPBUF.xmm15], xmm15
 %endif
        pushf
        pop     xAX
        mov     [xDX + VMMR0JMPBUF.rflags], xAX

        ;
        ; If we're not in a ring-3 call, call pfn and return.
        ;
        test    byte [xDX + VMMR0JMPBUF.fInRing3Call], 1
        jnz     .resume

 %ifdef VMM_R0_SWITCH_STACK
        mov     r15, [xDX + VMMR0JMPBUF.pvSavedStack]
        test    r15, r15
        jz      .entry_error
  %ifdef VBOX_STRICT
        cmp     dword [r15], 0h
        jne     .entry_error
        mov     rdi, r15
        mov     rcx, VMM_STACK_SIZE / 8
        mov     rax, qword 0eeeeeeeffeeeeeeeh
        repne stosq
        mov     [rdi - 10h], rbx
  %endif
        lea     r15, [r15 + VMM_STACK_SIZE - 40h]
        mov     rsp, r15                        ; Switch stack!
 %endif ; VMM_R0_SWITCH_STACK
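        ; (In strict builds the alternate stack is presumably filled with the
        ; 0eeeeeeeffeeeeeeeh pattern so actual usage can be told apart from
        ; untouched memory, while the first dword of the save area ([r15]) doubles
        ; as an "in use" marker: it must be zero on entry and is cleared again
        ; after pfn returns.)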

        mov     r12, rdx                        ; Save pJmpBuf.
 %ifdef ASM_CALL64_MSC
        mov     rcx, r8                         ; pvUser -> arg0
        mov     rdx, r9
 %else
        mov     rdi, r8                         ; pvUser -> arg0
        mov     rsi, r9
 %endif
        call    r11
        mov     rdx, r12                        ; Restore pJmpBuf

 %ifdef VMM_R0_SWITCH_STACK
  %ifdef VBOX_STRICT
        mov     r15, [xDX + VMMR0JMPBUF.pvSavedStack]
        mov     dword [r15], 0h                 ; Reset the marker
  %endif
 %endif
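        ; (At this point eax holds the status returned by pfn; .proper_return
        ; below does not touch eax, so that status is what the caller of
        ; vmmR0CallRing3SetJmp sees on the normal, non-longjmp path.)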

        ;
        ; Return like in the long jump but clear eip, no shortcuts here.
        ;
.proper_return:
%ifdef RT_OS_WINDOWS
        movdqa  xmm6,  [xDX + VMMR0JMPBUF.xmm6 ]
        movdqa  xmm7,  [xDX + VMMR0JMPBUF.xmm7 ]
        movdqa  xmm8,  [xDX + VMMR0JMPBUF.xmm8 ]
        movdqa  xmm9,  [xDX + VMMR0JMPBUF.xmm9 ]
        movdqa  xmm10, [xDX + VMMR0JMPBUF.xmm10]
        movdqa  xmm11, [xDX + VMMR0JMPBUF.xmm11]
        movdqa  xmm12, [xDX + VMMR0JMPBUF.xmm12]
        movdqa  xmm13, [xDX + VMMR0JMPBUF.xmm13]
        movdqa  xmm14, [xDX + VMMR0JMPBUF.xmm14]
        movdqa  xmm15, [xDX + VMMR0JMPBUF.xmm15]
%endif
        mov     rbx, [xDX + VMMR0JMPBUF.rbx]
%ifdef ASM_CALL64_MSC
        mov     rsi, [xDX + VMMR0JMPBUF.rsi]
        mov     rdi, [xDX + VMMR0JMPBUF.rdi]
%endif
        mov     r12, [xDX + VMMR0JMPBUF.r12]
        mov     r13, [xDX + VMMR0JMPBUF.r13]
        mov     r14, [xDX + VMMR0JMPBUF.r14]
        mov     r15, [xDX + VMMR0JMPBUF.r15]
        mov     rbp, [xDX + VMMR0JMPBUF.rbp]
        and     qword [xDX + VMMR0JMPBUF.rip], byte 0 ; used for valid check.
        mov     rsp, [xDX + VMMR0JMPBUF.rsp]
        push    qword [xDX + VMMR0JMPBUF.rflags]
        popf
        leave
        ret

.entry_error:
        mov     eax, VERR_VMM_SET_JMP_ERROR
        jmp     .proper_return

.stack_overflow:
        mov     eax, VERR_VMM_SET_JMP_STACK_OVERFLOW
        jmp     .proper_return

        ;
        ; Aborting resume.
        ; Note! No need to restore XMM registers here since we haven't touched them yet.
        ;
.bad:
        and     qword [xDX + VMMR0JMPBUF.rip], byte 0 ; used for valid check.
        mov     rbx, [xDX + VMMR0JMPBUF.rbx]
 %ifdef ASM_CALL64_MSC
        mov     rsi, [xDX + VMMR0JMPBUF.rsi]
        mov     rdi, [xDX + VMMR0JMPBUF.rdi]
 %endif
        mov     r12, [xDX + VMMR0JMPBUF.r12]
        mov     r13, [xDX + VMMR0JMPBUF.r13]
        mov     r14, [xDX + VMMR0JMPBUF.r14]
        mov     r15, [xDX + VMMR0JMPBUF.r15]
        mov     eax, VERR_VMM_SET_JMP_ABORTED_RESUME
        leave
        ret

        ;
        ; Resume the VMMRZCallRing3 call.
        ;
.resume:
 %ifndef VMM_R0_SWITCH_STACK
        ; Sanity check the incoming stack, applying fuzz if needed.
        sub     r10, [xDX + VMMR0JMPBUF.SpCheck]
        jz      .resume_stack_checked_out
        add     r10, STACK_FUZZ_SIZE            ; plus/minus STACK_FUZZ_SIZE is fine.
        cmp     r10, STACK_FUZZ_SIZE * 2
        ja      .bad
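        ; (The add/cmp pair above is an unsigned range check: after adding
        ; STACK_FUZZ_SIZE, any delta between -STACK_FUZZ_SIZE and +STACK_FUZZ_SIZE
        ; lands in [0, 2*STACK_FUZZ_SIZE], so 'ja' rejects everything outside that
        ; window.)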

        mov     r10, [xDX + VMMR0JMPBUF.SpCheck]
        mov     [xDX + VMMR0JMPBUF.rsp], r10    ; Must be updated in case of another long jump (used for save calc).

.resume_stack_checked_out:
        mov     ecx, [xDX + VMMR0JMPBUF.cbSavedStack]
        cmp     rcx, VMM_STACK_SIZE
        ja      .bad
        test    rcx, 7
        jnz     .bad
        mov     rdi, [xDX + VMMR0JMPBUF.SpCheck]
        sub     rdi, [xDX + VMMR0JMPBUF.SpResume]
        cmp     rcx, rdi
        jne     .bad
 %endif

%ifdef VMM_R0_SWITCH_STACK
        ; Switch stack.
        mov     rsp, [xDX + VMMR0JMPBUF.SpResume]
%else
        ; Restore the stack.
        mov     ecx, [xDX + VMMR0JMPBUF.cbSavedStack]
        shr     ecx, 3
        mov     rsi, [xDX + VMMR0JMPBUF.pvSavedStack]
        mov     rdi, [xDX + VMMR0JMPBUF.SpResume]
        mov     rsp, rdi
        rep movsq
%endif ; !VMM_R0_SWITCH_STACK
        mov     byte [xDX + VMMR0JMPBUF.fInRing3Call], 0

        ;
        ; Continue where we left off.
        ;
%ifdef VBOX_STRICT
        pop     rax                             ; magic
        cmp     rax, RESUME_MAGIC
        je      .magic_ok
        mov     ecx, 0123h
        mov     [ecx], edx
.magic_ok:
%endif
%ifdef RT_OS_WINDOWS
        movdqa  xmm6,  [rsp + 000h]
        movdqa  xmm7,  [rsp + 010h]
        movdqa  xmm8,  [rsp + 020h]
        movdqa  xmm9,  [rsp + 030h]
        movdqa  xmm10, [rsp + 040h]
        movdqa  xmm11, [rsp + 050h]
        movdqa  xmm12, [rsp + 060h]
        movdqa  xmm13, [rsp + 070h]
        movdqa  xmm14, [rsp + 080h]
        movdqa  xmm15, [rsp + 090h]
        add     rsp, 0a0h
%endif
        popf
        pop     rbx
%ifdef ASM_CALL64_MSC
        pop     rsi
        pop     rdi
%endif
        pop     r12
        pop     r13
        pop     r14
        pop     r15
        pop     rbp
        xor     eax, eax                        ; VINF_SUCCESS
        ret
ENDPROC vmmR0CallRing3SetJmp
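
; (Informal note: the two routines in this file behave much like setjmp/longjmp
; with a status code: vmmR0CallRing3SetJmp returns the status of pfn on a first
; call, VINF_SUCCESS when a suspended ring-3 call is resumed, or whatever rc was
; handed to vmmR0CallRing3LongJmp; the long jump below additionally snapshots the
; stack so the interrupted ring-0 call can be resumed later.)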


;;
; Worker for VMMRZCallRing3.
; This will save the stack and registers.
;
; @param    pJmpBuf   msc:rcx gcc:rdi x86:[ebp+8]   Pointer to the jump buffer.
; @param    rc        msc:rdx gcc:rsi x86:[ebp+c]   The return code.
;
BEGINPROC vmmR0CallRing3LongJmp
        ;
        ; Save the registers on the stack.
        ;
        push    rbp
        SEH64_PUSH_xBP
        mov     rbp, rsp
        SEH64_SET_FRAME_xBP 0
        push    r15
        SEH64_PUSH_GREG r15
        push    r14
        SEH64_PUSH_GREG r14
        push    r13
        SEH64_PUSH_GREG r13
        push    r12
        SEH64_PUSH_GREG r12
%ifdef ASM_CALL64_MSC
        push    rdi
        SEH64_PUSH_GREG rdi
        push    rsi
        SEH64_PUSH_GREG rsi
%endif
        push    rbx
        SEH64_PUSH_GREG rbx
        pushf
        SEH64_ALLOCATE_STACK 8
%ifdef RT_OS_WINDOWS
        sub     rsp, 0a0h
        SEH64_ALLOCATE_STACK 0a0h
        movdqa  [rsp + 000h], xmm6
        movdqa  [rsp + 010h], xmm7
        movdqa  [rsp + 020h], xmm8
        movdqa  [rsp + 030h], xmm9
        movdqa  [rsp + 040h], xmm10
        movdqa  [rsp + 050h], xmm11
        movdqa  [rsp + 060h], xmm12
        movdqa  [rsp + 070h], xmm13
        movdqa  [rsp + 080h], xmm14
        movdqa  [rsp + 090h], xmm15
%endif
%ifdef VBOX_STRICT
        push    RESUME_MAGIC
        SEH64_ALLOCATE_STACK 8
%endif
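        ; (RESUME_MAGIC is pushed last in strict builds so that, when the saved
        ; stack image is replayed by the .resume path of vmmR0CallRing3SetJmp, a
        ; mismatching value indicates a corrupted image; the mismatch handler
        ; deliberately writes to address 0123h to fault immediately.)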
SEH64_END_PROLOGUE

        ;
        ; Normalize the parameters.
        ;
%ifdef ASM_CALL64_MSC
        mov     eax, edx                        ; rc
        mov     rdx, rcx                        ; pJmpBuf
%else
        mov     rdx, rdi                        ; pJmpBuf
        mov     eax, esi                        ; rc
%endif

        ;
        ; Is the jump buffer armed?
        ;
        cmp     qword [xDX + VMMR0JMPBUF.rip], byte 0
        je      .nok

        ;
        ; Sanity checks.
        ;
        mov     rdi, [xDX + VMMR0JMPBUF.pvSavedStack]
        test    rdi, rdi                        ; darwin may set this to 0.
        jz      .nok
        mov     [xDX + VMMR0JMPBUF.SpResume], rsp
 %ifndef VMM_R0_SWITCH_STACK
        mov     rsi, rsp
        mov     rcx, [xDX + VMMR0JMPBUF.rsp]
        sub     rcx, rsi

        ; two sanity checks on the size.
        cmp     rcx, VMM_STACK_SIZE             ; check max size.
        jnbe    .nok

        ;
        ; Copy the stack
        ;
        test    ecx, 7                          ; check alignment
        jnz     .nok
        mov     [xDX + VMMR0JMPBUF.cbSavedStack], ecx
        shr     ecx, 3
        rep movsq

 %endif ; !VMM_R0_SWITCH_STACK
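        ; (The block above saves everything between the current rsp and the rsp
        ; recorded by vmmR0CallRing3SetJmp into pvSavedStack; rcx is the byte
        ; count, which must be 8-byte aligned and no larger than VMM_STACK_SIZE
        ; so the .resume path can copy it back verbatim.)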

        ; Save a PC and return PC here to assist unwinding.
.unwind_point:
        lea     rcx, [.unwind_point wrt RIP]
        mov     [xDX + VMMR0JMPBUF.SavedEipForUnwind], rcx
        mov     rcx, [xDX + VMMR0JMPBUF.rbp]
        lea     rcx, [rcx + 8]
        mov     [xDX + VMMR0JMPBUF.UnwindRetPcLocation], rcx
        mov     rcx, [rcx]
        mov     [xDX + VMMR0JMPBUF.UnwindRetPcValue], rcx

        ; Save RSP & RBP to enable stack dumps
        mov     rcx, rbp
        mov     [xDX + VMMR0JMPBUF.SavedEbp], rcx
        sub     rcx, 8
        mov     [xDX + VMMR0JMPBUF.SavedEsp], rcx

        ; store the last pieces of info.
        mov     rcx, [xDX + VMMR0JMPBUF.rsp]
        mov     [xDX + VMMR0JMPBUF.SpCheck], rcx
        mov     byte [xDX + VMMR0JMPBUF.fInRing3Call], 1

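        ; (Setting fInRing3Call to 1 is what makes the next vmmR0CallRing3SetJmp
        ; invocation take its .resume path instead of calling pfn again.)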
        ;
        ; Do the long jump.
        ;
%ifdef RT_OS_WINDOWS
        movdqa  xmm6,  [xDX + VMMR0JMPBUF.xmm6 ]
        movdqa  xmm7,  [xDX + VMMR0JMPBUF.xmm7 ]
        movdqa  xmm8,  [xDX + VMMR0JMPBUF.xmm8 ]
        movdqa  xmm9,  [xDX + VMMR0JMPBUF.xmm9 ]
        movdqa  xmm10, [xDX + VMMR0JMPBUF.xmm10]
        movdqa  xmm11, [xDX + VMMR0JMPBUF.xmm11]
        movdqa  xmm12, [xDX + VMMR0JMPBUF.xmm12]
        movdqa  xmm13, [xDX + VMMR0JMPBUF.xmm13]
        movdqa  xmm14, [xDX + VMMR0JMPBUF.xmm14]
        movdqa  xmm15, [xDX + VMMR0JMPBUF.xmm15]
%endif
        mov     rbx, [xDX + VMMR0JMPBUF.rbx]
%ifdef ASM_CALL64_MSC
        mov     rsi, [xDX + VMMR0JMPBUF.rsi]
        mov     rdi, [xDX + VMMR0JMPBUF.rdi]
%endif
        mov     r12, [xDX + VMMR0JMPBUF.r12]
        mov     r13, [xDX + VMMR0JMPBUF.r13]
        mov     r14, [xDX + VMMR0JMPBUF.r14]
        mov     r15, [xDX + VMMR0JMPBUF.r15]
        mov     rbp, [xDX + VMMR0JMPBUF.rbp]
        mov     rsp, [xDX + VMMR0JMPBUF.rsp]
        push    qword [xDX + VMMR0JMPBUF.rflags]
        popf
        leave
        ret

        ;
        ; Failure
        ;
.nok:
%ifdef VBOX_STRICT
        pop     rax                             ; magic
        cmp     rax, RESUME_MAGIC
        je      .magic_ok
        mov     ecx, 0123h
        mov     [rcx], edx
.magic_ok:
%endif
        mov     eax, VERR_VMM_LONG_JMP_ERROR
%ifdef RT_OS_WINDOWS
        add     rsp, 0a0h                       ; skip XMM registers since they are unmodified.
%endif
        popf
        pop     rbx
%ifdef ASM_CALL64_MSC
        pop     rsi
        pop     rdi
%endif
        pop     r12
        pop     r13
        pop     r14
        pop     r15
        leave
        ret
ENDPROC vmmR0CallRing3LongJmp


;;
; Internal R0 logger worker: Logger wrapper.
;
; @cproto VMMR0DECL(void) vmmR0LoggerWrapper(const char *pszFormat, ...)
;
BEGINPROC_EXPORTED vmmR0LoggerWrapper
SEH64_END_PROLOGUE
        int3
        int3
        int3
        ret
ENDPROC vmmR0LoggerWrapper
