VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/HMAll.cpp@73246

Last change on this file since 73246 was 73246, checked in by vboxsync, 7 years ago

PGM: Working on eliminating PGMMODEDATA and the corresponding PGMCPU section so we can do mode switching in ring-0. This second part deals with shadow paging pointers and expands PGM_TYPE_NESTED & PGMMODE_NESTED into 32BIT, PAE and AMD64 variants to better map to reality at the expense of a little bit of more code. bugref:9044

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 15.8 KB
 
/* $Id: HMAll.cpp 73246 2018-07-19 15:51:20Z vboxsync $ */
/** @file
 * HM - All contexts.
 */

/*
 * Copyright (C) 2006-2017 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_HM
#include <VBox/vmm/hm.h>
#include <VBox/vmm/pgm.h>
#include "HMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/hm_vmx.h>
#include <VBox/vmm/hm_svm.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <iprt/param.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/string.h>
#include <iprt/thread.h>
#include <iprt/x86.h>
#include <iprt/asm-amd64-x86.h>


/**
 * Checks whether HM (VT-x/AMD-V) is being used by this VM.
 *
 * @retval  true if used.
 * @retval  false if software virtualization (raw-mode) is used.
 * @param   pVM         The cross context VM structure.
 * @sa      HMIsEnabled, HMR3IsEnabled
 * @internal
 */
VMMDECL(bool) HMIsEnabledNotMacro(PVM pVM)
{
    Assert(pVM->bMainExecutionEngine != VM_EXEC_ENGINE_NOT_SET);
    return pVM->fHMEnabled;
}


/**
 * Queues a guest page for invalidation.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCVirt      Page to invalidate.
 */
static void hmQueueInvlPage(PVMCPU pVCpu, RTGCPTR GCVirt)
{
    /* Nothing to do if a TLB flush is already pending. */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH))
        return;
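    /* HM does not queue individual pages: the address is ignored (see NOREF below) and a full TLB flush is requested instead. */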
    VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    NOREF(GCVirt);
}


/**
 * Invalidates a guest page.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCVirt      Page to invalidate.
 */
VMM_INT_DECL(int) HMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt)
{
    STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushPageManual);
#ifdef IN_RING0
    return HMR0InvalidatePage(pVCpu, GCVirt);
#else
    hmQueueInvlPage(pVCpu, GCVirt);
    return VINF_SUCCESS;
#endif
}


#ifdef IN_RING0

/**
 * Dummy RTMpOnSpecific handler since RTMpPokeCpu couldn't be used.
 *
 */
static DECLCALLBACK(void) hmFlushHandler(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    NOREF(idCpu); NOREF(pvUser1); NOREF(pvUser2);
    return;
}


/**
 * Wrapper for RTMpPokeCpu to deal with VERR_NOT_SUPPORTED.
 */
static void hmR0PokeCpu(PVMCPU pVCpu, RTCPUID idHostCpu)
{
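    /* Snapshot the world-switch exit count so the spin loop below can detect when the poked VCPU has actually left guest context. */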
    uint32_t cWorldSwitchExits = ASMAtomicUoReadU32(&pVCpu->hm.s.cWorldSwitchExits);

    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatPoke, x);
    int rc = RTMpPokeCpu(idHostCpu);
    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatPoke, x);

    /* Not implemented on some platforms (Darwin, Linux kernel < 2.6.19); fall
       back to a less efficient implementation (broadcast). */
    if (rc == VERR_NOT_SUPPORTED)
    {
        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatSpinPoke, z);
        /* synchronous. */
        RTMpOnSpecific(idHostCpu, hmFlushHandler, 0, 0);
        STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatSpinPoke, z);
    }
    else
    {
        if (rc == VINF_SUCCESS)
            STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatSpinPoke, z);
        else
            STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatSpinPokeFailed, z);

/** @todo If more than one CPU is going to be poked, we could optimize this
 *        operation by poking them first and waiting afterwards.  Would require
 *        recording who to poke and their current cWorldSwitchExits values,
 *        that's something not suitable for stack...  So, pVCpu->hm.s.something
 *        then. */
        /* Spin until the VCPU has switched back (poking is async). */
        while (   ASMAtomicUoReadBool(&pVCpu->hm.s.fCheckedTLBFlush)
               && cWorldSwitchExits == ASMAtomicUoReadU32(&pVCpu->hm.s.cWorldSwitchExits))
            ASMNopPause();

        if (rc == VINF_SUCCESS)
            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatSpinPoke, z);
        else
            STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatSpinPokeFailed, z);
    }
}

#endif /* IN_RING0 */
#ifndef IN_RC

/**
 * Flushes the guest TLB.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 */
VMM_INT_DECL(int) HMFlushTLB(PVMCPU pVCpu)
{
    LogFlow(("HMFlushTLB\n"));

    VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbManual);
    return VINF_SUCCESS;
}

/**
 * Poke an EMT so it can perform the appropriate TLB shootdowns.
 *
 * @param   pVCpu               The cross context virtual CPU structure of the
 *                              EMT to poke.
 * @param   fAccountFlushStat   Whether to account the call to
 *                              StatTlbShootdownFlush or StatTlbShootdown.
 */
static void hmPokeCpuForTlbFlush(PVMCPU pVCpu, bool fAccountFlushStat)
{
    if (ASMAtomicUoReadBool(&pVCpu->hm.s.fCheckedTLBFlush))
    {
        if (fAccountFlushStat)
            STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdownFlush);
        else
            STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
#ifdef IN_RING0
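        /* Poke the host CPU this EMT last entered HM on, if any (assumption: NIL_RTCPUID means there is currently no host CPU to poke). */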
        RTCPUID idHostCpu = pVCpu->hm.s.idEnteredCpu;
        if (idHostCpu != NIL_RTCPUID)
            hmR0PokeCpu(pVCpu, idHostCpu);
#else
        VMR3NotifyCpuFFU(pVCpu->pUVCpu, VMNOTIFYFF_FLAGS_POKE);
#endif
    }
    else
        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushPageManual);
}


/**
 * Invalidates a guest page on all VCPUs.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCVirt      Page to invalidate.
 */
VMM_INT_DECL(int) HMInvalidatePageOnAllVCpus(PVM pVM, RTGCPTR GCVirt)
{
    /*
     * The VT-x/AMD-V code will be flushing the TLB each time a VCPU migrates to a different
     * host CPU, see hmR0VmxFlushTaggedTlbBoth() and hmR0SvmFlushTaggedTlb().
     *
     * This is the reason why we do not care about thread preemption here and just
     * execute HMInvalidatePage() assuming it might be the 'right' CPU.
     */
    VMCPUID idCurCpu = VMMGetCpuId(pVM);
    STAM_COUNTER_INC(&pVM->aCpus[idCurCpu].hm.s.StatFlushPage);

    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = &pVM->aCpus[idCpu];

        /* Nothing to do if a TLB flush is already pending; the VCPU should
           have already been poked if it were active. */
        if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH))
            continue;

        if (pVCpu->idCpu == idCurCpu)
            HMInvalidatePage(pVCpu, GCVirt);
        else
        {
            hmQueueInvlPage(pVCpu, GCVirt);
            hmPokeCpuForTlbFlush(pVCpu, false /* fAccountFlushStat */);
        }
    }

    return VINF_SUCCESS;
}


/**
 * Flush the TLBs of all VCPUs.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 */
VMM_INT_DECL(int) HMFlushTLBOnAllVCpus(PVM pVM)
{
    if (pVM->cCpus == 1)
        return HMFlushTLB(&pVM->aCpus[0]);

    VMCPUID idThisCpu = VMMGetCpuId(pVM);

    STAM_COUNTER_INC(&pVM->aCpus[idThisCpu].hm.s.StatFlushTlb);

    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = &pVM->aCpus[idCpu];

        /* Nothing to do if a TLB flush is already pending; the VCPU should
           have already been poked if it were active. */
        if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH))
        {
            VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
            if (idThisCpu != idCpu)
                hmPokeCpuForTlbFlush(pVCpu, true /* fAccountFlushStat */);
        }
    }

    return VINF_SUCCESS;
}


/**
 * Invalidates a guest page by physical address.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      Page to invalidate.
 *
 * @remarks Assumes the current instruction references this physical page
 *          through a virtual address!
 */
VMM_INT_DECL(int) HMInvalidatePhysPage(PVM pVM, RTGCPHYS GCPhys)
{
    if (!HMIsNestedPagingActive(pVM))
        return VINF_SUCCESS;

    /*
     * AMD-V: Doesn't support invalidation with guest physical addresses.
     *
     * VT-x: Doesn't support invalidation with guest physical addresses.
     * The INVVPID instruction takes only a linear address, while INVEPT only flushes
     * by EPT, not individual addresses.
     *
     * We update the force flag and flush before the next VM-entry, see @bugref{6568}.
     */
    RT_NOREF(GCPhys);
    /** @todo Remove or figure out a way to update the Phys STAT counter. */
    /* STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgPhys); */
    return HMFlushTLBOnAllVCpus(pVM);
}


/**
 * Checks if nested paging is enabled.
 *
 * @returns true if nested paging is active, false otherwise.
 * @param   pVM         The cross context VM structure.
 *
 * @remarks Works before hmR3InitFinalizeR0.
 */
VMM_INT_DECL(bool) HMIsNestedPagingActive(PVM pVM)
{
    return HMIsEnabled(pVM) && pVM->hm.s.fNestedPaging;
}


/**
 * Checks if both nested paging and unhampered guest execution are enabled.
 *
 * The almost complete guest execution in hardware is only applicable to VT-x.
 *
 * @returns true if we have both enabled, otherwise false.
 * @param   pVM         The cross context VM structure.
 *
 * @remarks Works before hmR3InitFinalizeR0.
 */
VMM_INT_DECL(bool) HMAreNestedPagingAndFullGuestExecEnabled(PVM pVM)
{
    return HMIsEnabled(pVM)
        && pVM->hm.s.fNestedPaging
        && (   pVM->hm.s.vmx.fUnrestrictedGuest
            || pVM->hm.s.svm.fSupported);
}


/**
 * Checks if this VM is using HM and is long-mode capable.
 *
 * Use VMR3IsLongModeAllowed() instead of this, when possible.
 *
 * @returns true if long mode is allowed, false otherwise.
 * @param   pVM         The cross context VM structure.
 * @sa      VMR3IsLongModeAllowed, NEMHCIsLongModeAllowed
 */
VMM_INT_DECL(bool) HMIsLongModeAllowed(PVM pVM)
{
    return HMIsEnabled(pVM) && pVM->hm.s.fAllow64BitGuests;
}


/**
 * Checks if MSR bitmaps are available. It is assumed that when they are
 * available they will be used as well.
 *
 * @returns true if MSR bitmaps are available, false otherwise.
 * @param   pVM         The cross context VM structure.
 */
VMM_INT_DECL(bool) HMAreMsrBitmapsAvailable(PVM pVM)
{
    if (HMIsEnabled(pVM))
    {
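        /* AMD-V always provides MSR permission bitmaps; for VT-x the 'use MSR bitmaps' processor-based execution control must be available. */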
        if (pVM->hm.s.svm.fSupported)
            return true;

        if (   pVM->hm.s.vmx.fSupported
            && (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
        {
            return true;
        }
    }
    return false;
}


/**
 * Checks if AMD-V is active.
 *
 * @returns true if AMD-V is active.
 * @param   pVM         The cross context VM structure.
 *
 * @remarks Works before hmR3InitFinalizeR0.
 */
VMM_INT_DECL(bool) HMIsSvmActive(PVM pVM)
{
    return pVM->hm.s.svm.fSupported && HMIsEnabled(pVM);
}


/**
 * Checks if VT-x is active.
 *
 * @returns true if VT-x is active.
 * @param   pVM         The cross context VM structure.
 *
 * @remarks Works before hmR3InitFinalizeR0.
 */
VMM_INT_DECL(bool) HMIsVmxActive(PVM pVM)
{
    return pVM->hm.s.vmx.fSupported && HMIsEnabled(pVM);
}

#endif /* !IN_RC */

/**
 * Checks if an interrupt event is currently pending.
 *
 * @returns Interrupt event pending state.
 * @param   pVM         The cross context VM structure.
 */
VMM_INT_DECL(bool) HMHasPendingIrq(PVM pVM)
{
    PVMCPU pVCpu = VMMGetCpu(pVM);
    return !!pVCpu->hm.s.Event.fPending;
}


/**
 * Return the PAE PDPE entries.
 *
 * @returns Pointer to the PAE PDPE array.
 * @param   pVCpu       The cross context virtual CPU structure.
 */
VMM_INT_DECL(PX86PDPE) HMGetPaePdpes(PVMCPU pVCpu)
{
    return &pVCpu->hm.s.aPdpes[0];
}


/**
 * Checks if the current AMD CPU is subject to erratum 170 "In SVM mode,
 * incorrect code bytes may be fetched after a world-switch".
 *
 * @param   pu32Family      Where to store the CPU family (can be NULL).
 * @param   pu32Model       Where to store the CPU model (can be NULL).
 * @param   pu32Stepping    Where to store the CPU stepping (can be NULL).
 * @returns true if the erratum applies, false otherwise.
 */
VMM_INT_DECL(int) HMAmdIsSubjectToErratum170(uint32_t *pu32Family, uint32_t *pu32Model, uint32_t *pu32Stepping)
{
    /*
     * Erratum 170 which requires a forced TLB flush for each world switch:
     * See AMD spec. "Revision Guide for AMD NPT Family 0Fh Processors".
     *
     * All BH-G1/2 and DH-G1/2 models include a fix:
     * Athlon X2:   0x6b 1/2
     *              0x68 1/2
     * Athlon 64:   0x7f 1
     *              0x6f 2
     * Sempron:     0x7f 1/2
     *              0x6f 2
     *              0x6c 2
     *              0x7c 2
     * Turion 64:   0x68 2
     */
    uint32_t u32Dummy;
    uint32_t u32Version, u32Family, u32Model, u32Stepping, u32BaseFamily;
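    /* CPUID leaf 1: EAX holds stepping [3:0], base model [7:4] and base family [11:8]; the extended model/family fields only apply when the base family is 0xF. */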
    ASMCpuId(1, &u32Version, &u32Dummy, &u32Dummy, &u32Dummy);
    u32BaseFamily = (u32Version >> 8) & 0xf;
    u32Family     = u32BaseFamily + (u32BaseFamily == 0xf ? ((u32Version >> 20) & 0x7f) : 0);
    u32Model      = ((u32Version >> 4) & 0xf);
    u32Model      = u32Model | ((u32BaseFamily == 0xf ? (u32Version >> 16) & 0x0f : 0) << 4);
    u32Stepping   = u32Version & 0xf;

    bool fErratumApplies = false;
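    /* Family 0Fh parts that are not among the fixed BH-G1/2 / DH-G1/2 models listed above are affected. */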
    if (   u32Family == 0xf
        && !((u32Model == 0x68 || u32Model == 0x6b || u32Model == 0x7f) && u32Stepping >= 1)
        && !((u32Model == 0x6f || u32Model == 0x6c || u32Model == 0x7c) && u32Stepping >= 2))
    {
        fErratumApplies = true;
    }

    if (pu32Family)
        *pu32Family   = u32Family;
    if (pu32Model)
        *pu32Model    = u32Model;
    if (pu32Stepping)
        *pu32Stepping = u32Stepping;

    return fErratumApplies;
}


/**
 * Sets or clears the single instruction flag.
 *
 * When set, HM will try its best to return to ring-3 after executing a single
 * instruction. This can be used for debugging. See also
 * EMR3HmSingleInstruction.
 *
 * @returns The old flag state.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   fEnable     The new flag state.
 */
VMM_INT_DECL(bool) HMSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
{
    VMCPU_ASSERT_EMT(pVCpu);
    bool fOld = pVCpu->hm.s.fSingleInstruction;
    pVCpu->hm.s.fSingleInstruction = fEnable;
    pVCpu->hm.s.fUseDebugLoop = fEnable || pVM->hm.s.fUseDebugLoop;
    return fOld;
}


/**
 * Notifies HM that GIM provider wants to trap \#UD.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 */
VMM_INT_DECL(void) HMTrapXcptUDForGIMEnable(PVMCPU pVCpu)
{
    pVCpu->hm.s.fGIMTrapXcptUD = true;
    if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fSupported)
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS);
    else
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_SVM_GUEST_XCPT_INTERCEPTS);
}


/**
 * Notifies HM that GIM provider no longer wants to trap \#UD.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 */
VMM_INT_DECL(void) HMTrapXcptUDForGIMDisable(PVMCPU pVCpu)
{
    pVCpu->hm.s.fGIMTrapXcptUD = false;
    if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fSupported)
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS);
    else
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_SVM_GUEST_XCPT_INTERCEPTS);
}


/**
 * VMX nested-guest VM-exit handler.
 *
 * @param   pVCpu               The cross context virtual CPU structure.
 * @param   uBasicExitReason    The basic exit reason.
 */
VMM_INT_DECL(void) HMNstGstVmxVmExit(PVMCPU pVCpu, uint16_t uBasicExitReason)
{
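    /* Currently a stub: the parameters are only referenced to avoid unused-parameter warnings. */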
    RT_NOREF2(pVCpu, uBasicExitReason);
}
