VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0JmpA-amd64.asm@ 90173

Last change on this file since 90173 was 90173, checked in by vboxsync, 4 years ago

VMM/VMMR0JmpA-amd64.asm: Double the stack fuzz for linux 5.13. bugref:10064 ticketref:20090 ticketref:20456

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 13.9 KB
 
; $Id: VMMR0JmpA-amd64.asm 90173 2021-07-14 00:55:32Z vboxsync $
;; @file
; VMM - R0 SetJmp / LongJmp routines for AMD64.
;

;
; Copyright (C) 2006-2020 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;

;*******************************************************************************
;*  Header Files                                                               *
;*******************************************************************************
%define RT_ASM_WITH_SEH64_ALT
%include "VBox/asmdefs.mac"
%include "VMMInternal.mac"
%include "VBox/err.mac"
%include "VBox/param.mac"


;*******************************************************************************
;*  Defined Constants And Macros                                               *
;*******************************************************************************
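;; RESUME_MAGIC is pushed onto the saved stack by vmmR0CallRing3LongJmp in strict
;; builds and checked again when vmmR0CallRing3SetJmp resumes the ring-3 call, so
;; a mismatch indicates that the saved stack was corrupted.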
%define RESUME_MAGIC 07eadf00dh
%define STACK_PADDING 0eeeeeeeeeeeeeeeeh

;; Workaround for linux 4.6 fast/slow syscall stack depth difference.
;; Update: This got worse with linux 5.13. See bugref:10064.
%ifdef VMM_R0_SWITCH_STACK
 %define STACK_FUZZ_SIZE 0
%else
 %define STACK_FUZZ_SIZE 256
%endif


BEGINCODE


;;
; The setjmp variant used for calling Ring-3.
;
; This differs from the normal setjmp in that it will resume VMMRZCallRing3 if we're
; in the middle of a ring-3 call. Another difference is the function pointer and
; argument. This has to do with resuming code and the stack frame of the caller.
;
; @returns VINF_SUCCESS on success or whatever is passed to vmmR0CallRing3LongJmp.
; @param pJmpBuf msc:rcx gcc:rdi x86:[esp+0x04] Our jmp_buf.
; @param pfn msc:rdx gcc:rsi x86:[esp+0x08] The function to be called when not resuming.
; @param pvUser1 msc:r8 gcc:rdx x86:[esp+0x0c] The argument of that function.
; @param pvUser2 msc:r9 gcc:rcx x86:[esp+0x10] The argument of that function.
;
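; Illustrative C-level sketch of the contract (the parameter types and the exact
; VMMInternal.h declaration are assumptions, not copied from that header):
;
;       VMMR0JMPBUF JmpBuf;
;       int rc = vmmR0CallRing3SetJmp(&JmpBuf, pfnWorker, pvUser1, pvUser2);
;
; On the non-resuming path this amounts to rc = pfnWorker(pvUser1, pvUser2);
; when a ring-3 call is pending it instead resumes that call and returns whatever
; was handed to vmmR0CallRing3LongJmp.
;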
GLOBALNAME vmmR0CallRing3SetJmp2
GLOBALNAME vmmR0CallRing3SetJmpEx
BEGINPROC vmmR0CallRing3SetJmp
        ;
        ; Save the registers.
        ;
        push rbp
        SEH64_PUSH_xBP
        mov rbp, rsp
        SEH64_SET_FRAME_xBP 0
 %ifdef ASM_CALL64_MSC
        sub rsp, 30h + STACK_FUZZ_SIZE ; (10h is used by resume (??), 20h for callee spill area)
        SEH64_ALLOCATE_STACK 30h + STACK_FUZZ_SIZE
SEH64_END_PROLOGUE
        mov r11, rdx ; pfn
        mov rdx, rcx ; pJmpBuf;
 %else
        sub rsp, 10h + STACK_FUZZ_SIZE ; (10h is used by resume (??))
        SEH64_ALLOCATE_STACK 10h + STACK_FUZZ_SIZE
SEH64_END_PROLOGUE
        mov r8, rdx ; pvUser1 (save it like MSC)
        mov r9, rcx ; pvUser2 (save it like MSC)
        mov r11, rsi ; pfn
        mov rdx, rdi ; pJmpBuf
 %endif
        mov [xDX + VMMR0JMPBUF.rbx], rbx
 %ifdef ASM_CALL64_MSC
        mov [xDX + VMMR0JMPBUF.rsi], rsi
        mov [xDX + VMMR0JMPBUF.rdi], rdi
 %endif
        mov [xDX + VMMR0JMPBUF.rbp], rbp
        mov [xDX + VMMR0JMPBUF.r12], r12
        mov [xDX + VMMR0JMPBUF.r13], r13
        mov [xDX + VMMR0JMPBUF.r14], r14
        mov [xDX + VMMR0JMPBUF.r15], r15
        mov xAX, [rbp + 8] ; (not really necessary, except for validity check)
        mov [xDX + VMMR0JMPBUF.rip], xAX
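        ; Note: [rbp + 8] is the return address pushed by our caller; keeping it in
        ; the rip member is what arms the jump buffer (it is cleared again on the
        ; return paths below and is what vmmR0CallRing3LongJmp checks).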
 %ifdef ASM_CALL64_MSC
        lea r10, [rsp + 20h] ; must save the spill area
 %else
        lea r10, [rsp]
 %endif
        mov [xDX + VMMR0JMPBUF.rsp], r10
 %ifdef RT_OS_WINDOWS
        movdqa [xDX + VMMR0JMPBUF.xmm6], xmm6
        movdqa [xDX + VMMR0JMPBUF.xmm7], xmm7
        movdqa [xDX + VMMR0JMPBUF.xmm8], xmm8
        movdqa [xDX + VMMR0JMPBUF.xmm9], xmm9
        movdqa [xDX + VMMR0JMPBUF.xmm10], xmm10
        movdqa [xDX + VMMR0JMPBUF.xmm11], xmm11
        movdqa [xDX + VMMR0JMPBUF.xmm12], xmm12
        movdqa [xDX + VMMR0JMPBUF.xmm13], xmm13
        movdqa [xDX + VMMR0JMPBUF.xmm14], xmm14
        movdqa [xDX + VMMR0JMPBUF.xmm15], xmm15
 %endif
        pushf
        pop xAX
        mov [xDX + VMMR0JMPBUF.rflags], xAX

        ;
        ; If we're not in a ring-3 call, call pfn and return.
        ;
        test byte [xDX + VMMR0JMPBUF.fInRing3Call], 1
        jnz .resume

 %ifdef VMM_R0_SWITCH_STACK
        mov r15, [xDX + VMMR0JMPBUF.pvSavedStack]
        test r15, r15
        jz .entry_error
  %ifdef VBOX_STRICT
        cmp dword [r15], 0h
        jne .entry_error
        mov rdi, r15
        mov rcx, VMM_STACK_SIZE / 8
        mov rax, qword 0eeeeeeeffeeeeeeeh
        repne stosq
        mov [rdi - 10h], rbx
  %endif
        lea r15, [r15 + VMM_STACK_SIZE - 40h]
        mov rsp, r15 ; Switch stack!
 %endif ; VMM_R0_SWITCH_STACK

        mov r12, rdx ; Save pJmpBuf.
 %ifdef ASM_CALL64_MSC
        mov rcx, r8 ; pvUser -> arg0
        mov rdx, r9
 %else
        mov rdi, r8 ; pvUser -> arg0
        mov rsi, r9
 %endif
        call r11
        mov rdx, r12 ; Restore pJmpBuf

 %ifdef VMM_R0_SWITCH_STACK
  %ifdef VBOX_STRICT
        mov r15, [xDX + VMMR0JMPBUF.pvSavedStack]
        mov dword [r15], 0h ; Reset the marker
  %endif
 %endif

        ;
        ; Return like in the long jump but clear eip, no shortcuts here.
        ;
.proper_return:
%ifdef RT_OS_WINDOWS
        movdqa xmm6, [xDX + VMMR0JMPBUF.xmm6 ]
        movdqa xmm7, [xDX + VMMR0JMPBUF.xmm7 ]
        movdqa xmm8, [xDX + VMMR0JMPBUF.xmm8 ]
        movdqa xmm9, [xDX + VMMR0JMPBUF.xmm9 ]
        movdqa xmm10, [xDX + VMMR0JMPBUF.xmm10]
        movdqa xmm11, [xDX + VMMR0JMPBUF.xmm11]
        movdqa xmm12, [xDX + VMMR0JMPBUF.xmm12]
        movdqa xmm13, [xDX + VMMR0JMPBUF.xmm13]
        movdqa xmm14, [xDX + VMMR0JMPBUF.xmm14]
        movdqa xmm15, [xDX + VMMR0JMPBUF.xmm15]
%endif
        mov rbx, [xDX + VMMR0JMPBUF.rbx]
%ifdef ASM_CALL64_MSC
        mov rsi, [xDX + VMMR0JMPBUF.rsi]
        mov rdi, [xDX + VMMR0JMPBUF.rdi]
%endif
        mov r12, [xDX + VMMR0JMPBUF.r12]
        mov r13, [xDX + VMMR0JMPBUF.r13]
        mov r14, [xDX + VMMR0JMPBUF.r14]
        mov r15, [xDX + VMMR0JMPBUF.r15]
        mov rbp, [xDX + VMMR0JMPBUF.rbp]
        and qword [xDX + VMMR0JMPBUF.rip], byte 0 ; used for the validity check.
        mov rsp, [xDX + VMMR0JMPBUF.rsp]
        push qword [xDX + VMMR0JMPBUF.rflags]
        popf
        leave
        ret

.entry_error:
        mov eax, VERR_VMM_SET_JMP_ERROR
        jmp .proper_return

.stack_overflow:
        mov eax, VERR_VMM_SET_JMP_STACK_OVERFLOW
        jmp .proper_return

        ;
        ; Aborting resume.
        ; Note! No need to restore XMM registers here since we haven't touched them yet.
        ;
.bad:
        and qword [xDX + VMMR0JMPBUF.rip], byte 0 ; used for the validity check.
        mov rbx, [xDX + VMMR0JMPBUF.rbx]
 %ifdef ASM_CALL64_MSC
        mov rsi, [xDX + VMMR0JMPBUF.rsi]
        mov rdi, [xDX + VMMR0JMPBUF.rdi]
 %endif
        mov r12, [xDX + VMMR0JMPBUF.r12]
        mov r13, [xDX + VMMR0JMPBUF.r13]
        mov r14, [xDX + VMMR0JMPBUF.r14]
        mov r15, [xDX + VMMR0JMPBUF.r15]
        mov eax, VERR_VMM_SET_JMP_ABORTED_RESUME
        leave
        ret

        ;
        ; Resume the VMMRZCallRing3 call.
        ;
.resume:
 %ifndef VMM_R0_SWITCH_STACK
        ; Sanity-check the incoming stack, applying fuzz if needed.
        sub r10, [xDX + VMMR0JMPBUF.SpCheck]
        jz .resume_stack_checked_out
        add r10, STACK_FUZZ_SIZE ; plus/minus STACK_FUZZ_SIZE is fine.
        cmp r10, STACK_FUZZ_SIZE * 2
        ja .bad

        mov r10, [xDX + VMMR0JMPBUF.SpCheck]
        mov [xDX + VMMR0JMPBUF.rsp], r10 ; Must be updated in case of another long jump (used for the save calculation).

.resume_stack_checked_out:
        mov ecx, [xDX + VMMR0JMPBUF.cbSavedStack]
        cmp rcx, VMM_STACK_SIZE
        ja .bad
        test rcx, 7
        jnz .bad
        mov rdi, [xDX + VMMR0JMPBUF.SpCheck]
        sub rdi, [xDX + VMMR0JMPBUF.SpResume]
        cmp rcx, rdi
        jne .bad
 %endif

%ifdef VMM_R0_SWITCH_STACK
        ; Switch stack.
        mov rsp, [xDX + VMMR0JMPBUF.SpResume]
%else
        ; Restore the stack.
        mov ecx, [xDX + VMMR0JMPBUF.cbSavedStack]
        shr ecx, 3
        mov rsi, [xDX + VMMR0JMPBUF.pvSavedStack]
        mov rdi, [xDX + VMMR0JMPBUF.SpResume]
        mov rsp, rdi
        rep movsq
%endif ; !VMM_R0_SWITCH_STACK
        mov byte [xDX + VMMR0JMPBUF.fInRing3Call], 0

        ;
        ; Continue where we left off.
        ;
%ifdef VBOX_STRICT
        pop rax ; magic
        cmp rax, RESUME_MAGIC
        je .magic_ok
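        ; The magic is gone, so the restored stack is presumably corrupt; write
        ; through a bogus near-null pointer to fault on purpose rather than
        ; continue with a bad stack.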
        mov ecx, 0123h
        mov [ecx], edx
.magic_ok:
%endif
%ifdef RT_OS_WINDOWS
        movdqa xmm6, [rsp + 000h]
        movdqa xmm7, [rsp + 010h]
        movdqa xmm8, [rsp + 020h]
        movdqa xmm9, [rsp + 030h]
        movdqa xmm10, [rsp + 040h]
        movdqa xmm11, [rsp + 050h]
        movdqa xmm12, [rsp + 060h]
        movdqa xmm13, [rsp + 070h]
        movdqa xmm14, [rsp + 080h]
        movdqa xmm15, [rsp + 090h]
        add rsp, 0a0h
%endif
        popf
        pop rbx
%ifdef ASM_CALL64_MSC
        pop rsi
        pop rdi
%endif
        pop r12
        pop r13
        pop r14
        pop r15
        pop rbp
        xor eax, eax ; VINF_SUCCESS
        ret
ENDPROC vmmR0CallRing3SetJmp


;;
; Worker for VMMRZCallRing3.
; This will save the stack and registers.
;
; @param pJmpBuf msc:rcx gcc:rdi x86:[ebp+8] Pointer to the jump buffer.
; @param rc msc:rdx gcc:rsi x86:[ebp+c] The return code.
;
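; Illustratively (a sketch, not the exact declaration): vmmR0CallRing3LongJmp(&JmpBuf, rc)
; copies the current stack and registers into the jump buffer and unwinds back to
; the matching vmmR0CallRing3SetJmp, which then returns rc to its caller.  Control
; only comes back here (with VERR_VMM_LONG_JMP_ERROR) when the buffer is not armed
; or a sanity check fails.
;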
BEGINPROC vmmR0CallRing3LongJmp
        ;
        ; Save the registers on the stack.
        ;
        push rbp
        SEH64_PUSH_xBP
        mov rbp, rsp
        SEH64_SET_FRAME_xBP 0
        push r15
        SEH64_PUSH_GREG r15
        push r14
        SEH64_PUSH_GREG r14
        push r13
        SEH64_PUSH_GREG r13
        push r12
        SEH64_PUSH_GREG r12
%ifdef ASM_CALL64_MSC
        push rdi
        SEH64_PUSH_GREG rdi
        push rsi
        SEH64_PUSH_GREG rsi
%endif
        push rbx
        SEH64_PUSH_GREG rbx
        pushf
        SEH64_ALLOCATE_STACK 8
%ifdef RT_OS_WINDOWS
        sub rsp, 0a0h
        SEH64_ALLOCATE_STACK 0a0h
        movdqa [rsp + 000h], xmm6
        movdqa [rsp + 010h], xmm7
        movdqa [rsp + 020h], xmm8
        movdqa [rsp + 030h], xmm9
        movdqa [rsp + 040h], xmm10
        movdqa [rsp + 050h], xmm11
        movdqa [rsp + 060h], xmm12
        movdqa [rsp + 070h], xmm13
        movdqa [rsp + 080h], xmm14
        movdqa [rsp + 090h], xmm15
%endif
%ifdef VBOX_STRICT
        push RESUME_MAGIC
        SEH64_ALLOCATE_STACK 8
%endif
SEH64_END_PROLOGUE

        ;
        ; Normalize the parameters.
        ;
%ifdef ASM_CALL64_MSC
        mov eax, edx ; rc
        mov rdx, rcx ; pJmpBuf
%else
        mov rdx, rdi ; pJmpBuf
        mov eax, esi ; rc
%endif

        ;
        ; Is the jump buffer armed?
        ;
        cmp qword [xDX + VMMR0JMPBUF.rip], byte 0
        je .nok

        ;
        ; Sanity checks.
        ;
        mov rdi, [xDX + VMMR0JMPBUF.pvSavedStack]
        test rdi, rdi ; darwin may set this to 0.
        jz .nok
        mov [xDX + VMMR0JMPBUF.SpResume], rsp
 %ifndef VMM_R0_SWITCH_STACK
        mov rsi, rsp
        mov rcx, [xDX + VMMR0JMPBUF.rsp]
        sub rcx, rsi

        ; two sanity checks on the size.
        cmp rcx, VMM_STACK_SIZE ; check max size.
        jnbe .nok

        ;
        ; Copy the stack
        ;
        test ecx, 7 ; check alignment
        jnz .nok
        mov [xDX + VMMR0JMPBUF.cbSavedStack], ecx
        shr ecx, 3
        rep movsq

 %endif ; !VMM_R0_SWITCH_STACK

        ; Save a PC and return PC here to assist unwinding.
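        ; (The saved VMMR0JMPBUF.rbp is the frame pointer set up on entry to
        ; vmmR0CallRing3SetJmp, so [rbp + 8] there is the stack slot holding its
        ; caller's return address; both the slot and its value are recorded so an
        ; unwinder can walk across the jump.)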
.unwind_point:
        lea rcx, [.unwind_point wrt RIP]
        mov [xDX + VMMR0JMPBUF.SavedEipForUnwind], rcx
        mov rcx, [xDX + VMMR0JMPBUF.rbp]
        lea rcx, [rcx + 8]
        mov [xDX + VMMR0JMPBUF.UnwindRetPcLocation], rcx
        mov rcx, [rcx]
        mov [xDX + VMMR0JMPBUF.UnwindRetPcValue], rcx

        ; Save RSP & RBP to enable stack dumps
        mov rcx, rbp
        mov [xDX + VMMR0JMPBUF.SavedEbp], rcx
        sub rcx, 8
        mov [xDX + VMMR0JMPBUF.SavedEsp], rcx

        ; store the last pieces of info.
        mov rcx, [xDX + VMMR0JMPBUF.rsp]
        mov [xDX + VMMR0JMPBUF.SpCheck], rcx
        mov byte [xDX + VMMR0JMPBUF.fInRing3Call], 1

        ;
        ; Do the long jump.
        ;
%ifdef RT_OS_WINDOWS
        movdqa xmm6, [xDX + VMMR0JMPBUF.xmm6 ]
        movdqa xmm7, [xDX + VMMR0JMPBUF.xmm7 ]
        movdqa xmm8, [xDX + VMMR0JMPBUF.xmm8 ]
        movdqa xmm9, [xDX + VMMR0JMPBUF.xmm9 ]
        movdqa xmm10, [xDX + VMMR0JMPBUF.xmm10]
        movdqa xmm11, [xDX + VMMR0JMPBUF.xmm11]
        movdqa xmm12, [xDX + VMMR0JMPBUF.xmm12]
        movdqa xmm13, [xDX + VMMR0JMPBUF.xmm13]
        movdqa xmm14, [xDX + VMMR0JMPBUF.xmm14]
        movdqa xmm15, [xDX + VMMR0JMPBUF.xmm15]
%endif
        mov rbx, [xDX + VMMR0JMPBUF.rbx]
%ifdef ASM_CALL64_MSC
        mov rsi, [xDX + VMMR0JMPBUF.rsi]
        mov rdi, [xDX + VMMR0JMPBUF.rdi]
%endif
        mov r12, [xDX + VMMR0JMPBUF.r12]
        mov r13, [xDX + VMMR0JMPBUF.r13]
        mov r14, [xDX + VMMR0JMPBUF.r14]
        mov r15, [xDX + VMMR0JMPBUF.r15]
        mov rbp, [xDX + VMMR0JMPBUF.rbp]
        mov rsp, [xDX + VMMR0JMPBUF.rsp]
        push qword [xDX + VMMR0JMPBUF.rflags]
        popf
        leave
        ret

        ;
        ; Failure
        ;
.nok:
%ifdef VBOX_STRICT
        pop rax ; magic
        cmp rax, RESUME_MAGIC
        je .magic_ok
        mov ecx, 0123h
        mov [rcx], edx
.magic_ok:
%endif
        mov eax, VERR_VMM_LONG_JMP_ERROR
%ifdef RT_OS_WINDOWS
        add rsp, 0a0h ; skip XMM registers since they are unmodified.
%endif
        popf
        pop rbx
%ifdef ASM_CALL64_MSC
        pop rsi
        pop rdi
%endif
        pop r12
        pop r13
        pop r14
        pop r15
        leave
        ret
ENDPROC vmmR0CallRing3LongJmp


;;
; Internal R0 logger worker: Logger wrapper.
;
; @cproto VMMR0DECL(void) vmmR0LoggerWrapper(const char *pszFormat, ...)
;
BEGINPROC_EXPORTED vmmR0LoggerWrapper
SEH64_END_PROLOGUE
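        ; Stub: the int3 breakpoints below make any unexpected call trap right
        ; away; presumably the real logging work is done in C elsewhere.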
        int3
        int3
        int3
        ret
ENDPROC vmmR0LoggerWrapper
