VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/IEMR3.cpp@ 105673

最後變更：此檔案的最新修訂為 105673，由 vboxsync 於 7 個月前提交

VMM/IEM,TM: Do full-TB looping. Redid timer polling in the recompiler. Rewrote the Blt_CheckIrq code, eliminating a conditional. Fixed some TLB related assertions. Moved some IEMCPU members around in hope of better cache-locality. bugref:10656

  • 屬性 svn:eol-style 設為 native
  • 屬性 svn:keywords 設為 Author Date Id Revision
檔案大小: 125.4 KB
 
1/* $Id: IEMR3.cpp 105673 2024-08-14 13:57:57Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_EM
33#define VMCPU_INCL_CPUM_GST_CTX
34#include <VBox/vmm/iem.h>
35#include <VBox/vmm/cpum.h>
36#include <VBox/vmm/dbgf.h>
37#include <VBox/vmm/mm.h>
38#include <VBox/vmm/ssm.h>
39#if defined(VBOX_VMM_TARGET_ARMV8)
40# include "IEMInternal-armv8.h"
41#else
42# include "IEMInternal.h"
43#endif
44#include <VBox/vmm/vm.h>
45#include <VBox/vmm/vmapi.h>
46#include <VBox/err.h>
47#ifdef VBOX_WITH_DEBUGGER
48# include <VBox/dbg.h>
49#endif
50
51#include <iprt/assert.h>
52#include <iprt/getopt.h>
53#ifdef IEM_WITH_TLB_TRACE
54# include <iprt/mem.h>
55#endif
56#include <iprt/string.h>
57
58#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
59# include "IEMN8veRecompiler.h"
60# include "IEMThreadedFunctions.h"
61# include "IEMInline.h"
62#endif
63
64
65/*********************************************************************************************************************************
66* Internal Functions *
67*********************************************************************************************************************************/
68static FNDBGFINFOARGVINT iemR3InfoITlb;
69static FNDBGFINFOARGVINT iemR3InfoDTlb;
70#ifdef IEM_WITH_TLB_TRACE
71static FNDBGFINFOARGVINT iemR3InfoTlbTrace;
72#endif
73#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
74static FNDBGFINFOARGVINT iemR3InfoTb;
75#endif
76#ifdef VBOX_WITH_DEBUGGER
77static void iemR3RegisterDebuggerCommands(void);
78#endif
79
80
81#if !defined(VBOX_VMM_TARGET_ARMV8)
82static const char *iemGetTargetCpuName(uint32_t enmTargetCpu)
83{
84 switch (enmTargetCpu)
85 {
86#define CASE_RET_STR(enmValue) case enmValue: return #enmValue + (sizeof("IEMTARGETCPU_") - 1)
87 CASE_RET_STR(IEMTARGETCPU_8086);
88 CASE_RET_STR(IEMTARGETCPU_V20);
89 CASE_RET_STR(IEMTARGETCPU_186);
90 CASE_RET_STR(IEMTARGETCPU_286);
91 CASE_RET_STR(IEMTARGETCPU_386);
92 CASE_RET_STR(IEMTARGETCPU_486);
93 CASE_RET_STR(IEMTARGETCPU_PENTIUM);
94 CASE_RET_STR(IEMTARGETCPU_PPRO);
95 CASE_RET_STR(IEMTARGETCPU_CURRENT);
96#undef CASE_RET_STR
97 default: return "Unknown";
98 }
99}
100#endif
101
102
103/**
104 * Initializes the interpreted execution manager.
105 *
106 * This must be called after CPUM as we're quering information from CPUM about
107 * the guest and host CPUs.
108 *
109 * @returns VBox status code.
110 * @param pVM The cross context VM structure.
111 */
112VMMR3DECL(int) IEMR3Init(PVM pVM)
113{
114 /*
115 * Read configuration.
116 */
117#if (!defined(VBOX_VMM_TARGET_ARMV8) && !defined(VBOX_WITHOUT_CPUID_HOST_CALL)) || defined(VBOX_WITH_IEM_RECOMPILER)
118 PCFGMNODE const pIem = CFGMR3GetChild(CFGMR3GetRoot(pVM), "IEM");
119 int rc;
120#endif
121
122#if !defined(VBOX_VMM_TARGET_ARMV8) && !defined(VBOX_WITHOUT_CPUID_HOST_CALL)
123 /** @cfgm{/IEM/CpuIdHostCall, boolean, false}
124 * Controls whether the custom VBox specific CPUID host call interface is
125 * enabled or not. */
126# ifdef DEBUG_bird
127 rc = CFGMR3QueryBoolDef(pIem, "CpuIdHostCall", &pVM->iem.s.fCpuIdHostCall, true);
128# else
129 rc = CFGMR3QueryBoolDef(pIem, "CpuIdHostCall", &pVM->iem.s.fCpuIdHostCall, false);
130# endif
131 AssertLogRelRCReturn(rc, rc);
132#endif
133
134#ifdef VBOX_WITH_IEM_RECOMPILER
135 /** @cfgm{/IEM/MaxTbCount, uint32_t, 524288}
136 * Max number of TBs per EMT. */
137 uint32_t cMaxTbs = 0;
138 rc = CFGMR3QueryU32Def(pIem, "MaxTbCount", &cMaxTbs, _512K);
139 AssertLogRelRCReturn(rc, rc);
140 if (cMaxTbs < _16K || cMaxTbs > _8M)
141 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
142 "MaxTbCount value %u (%#x) is out of range (min %u, max %u)", cMaxTbs, cMaxTbs, _16K, _8M);
143
144 /** @cfgm{/IEM/InitialTbCount, uint32_t, 32678}
145 * Initial (minimum) number of TBs per EMT in ring-3. */
146 uint32_t cInitialTbs = 0;
147 rc = CFGMR3QueryU32Def(pIem, "InitialTbCount", &cInitialTbs, RT_MIN(cMaxTbs, _32K));
148 AssertLogRelRCReturn(rc, rc);
149 if (cInitialTbs < _16K || cInitialTbs > _8M)
150 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
151 "InitialTbCount value %u (%#x) is out of range (min %u, max %u)", cInitialTbs, cInitialTbs, _16K, _8M);
152
153 /* Check that the two values makes sense together. Expect user/api to do
154 the right thing or get lost. */
155 if (cInitialTbs > cMaxTbs)
156 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
157 "InitialTbCount value %u (%#x) is higher than the MaxTbCount value %u (%#x)",
158 cInitialTbs, cInitialTbs, cMaxTbs, cMaxTbs);
159
160 /** @cfgm{/IEM/MaxExecMem, uint64_t, 512 MiB}
161 * Max executable memory for recompiled code per EMT. */
162 uint64_t cbMaxExec = 0;
163 rc = CFGMR3QueryU64Def(pIem, "MaxExecMem", &cbMaxExec, _512M);
164 AssertLogRelRCReturn(rc, rc);
165 if (cbMaxExec < _1M || cbMaxExec > 16*_1G64)
166 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
167 "MaxExecMem value %'RU64 (%#RX64) is out of range (min %'RU64, max %'RU64)",
168 cbMaxExec, cbMaxExec, (uint64_t)_1M, 16*_1G64);
169
170 /** @cfgm{/IEM/ExecChunkSize, uint32_t, 0 (auto)}
171 * The executable memory allocator chunk size. */
172 uint32_t cbChunkExec = 0;
173 rc = CFGMR3QueryU32Def(pIem, "ExecChunkSize", &cbChunkExec, 0);
174 AssertLogRelRCReturn(rc, rc);
175 if (cbChunkExec != 0 && cbChunkExec != UINT32_MAX && (cbChunkExec < _1M || cbChunkExec > _256M))
176 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
177 "ExecChunkSize value %'RU32 (%#RX32) is out of range (min %'RU32, max %'RU32)",
178 cbChunkExec, cbChunkExec, _1M, _256M);
179
180 /** @cfgm{/IEM/InitialExecMemSize, uint64_t, 1}
181 * The initial executable memory allocator size (per EMT). The value is
182 * rounded up to the nearest chunk size, so 1 byte means one chunk. */
183 uint64_t cbInitialExec = 0;
184 rc = CFGMR3QueryU64Def(pIem, "InitialExecMemSize", &cbInitialExec, 0);
185 AssertLogRelRCReturn(rc, rc);
186 if (cbInitialExec > cbMaxExec)
187 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
188 "InitialExecMemSize value %'RU64 (%#RX64) is out of range (max %'RU64)",
189 cbInitialExec, cbInitialExec, cbMaxExec);
190
191 /** @cfgm{/IEM/NativeRecompileAtUsedCount, uint32_t, 16}
192 * The translation block use count value to do native recompilation at.
193 * Set to zero to disable native recompilation. */
194 uint32_t uTbNativeRecompileAtUsedCount = 16;
195 rc = CFGMR3QueryU32Def(pIem, "NativeRecompileAtUsedCount", &uTbNativeRecompileAtUsedCount, 16);
196 AssertLogRelRCReturn(rc, rc);
197
198#endif /* VBOX_WITH_IEM_RECOMPILER*/
199
200 /*
201 * Initialize per-CPU data and register statistics.
202 */
203#if 1
204 uint64_t const uInitialTlbRevision = UINT64_C(0) - (IEMTLB_REVISION_INCR * 200U);
205 uint64_t const uInitialTlbPhysRev = UINT64_C(0) - (IEMTLB_PHYS_REV_INCR * 100U);
206#else
207 uint64_t const uInitialTlbRevision = UINT64_C(0) + (IEMTLB_REVISION_INCR * 4U);
208 uint64_t const uInitialTlbPhysRev = UINT64_C(0) + (IEMTLB_PHYS_REV_INCR * 4U);
209#endif
210
211 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
212 {
213 PVMCPU const pVCpu = pVM->apCpusR3[idCpu];
214 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do it's job w/o instruction stats) */
215
216 pVCpu->iem.s.CodeTlb.uTlbRevision = pVCpu->iem.s.DataTlb.uTlbRevision = uInitialTlbRevision;
217#ifndef VBOX_VMM_TARGET_ARMV8
218 pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal = pVCpu->iem.s.DataTlb.uTlbRevisionGlobal = uInitialTlbRevision;
219#endif
220 pVCpu->iem.s.CodeTlb.uTlbPhysRev = pVCpu->iem.s.DataTlb.uTlbPhysRev = uInitialTlbPhysRev;
221#ifndef VBOX_VMM_TARGET_ARMV8
222 pVCpu->iem.s.CodeTlb.NonGlobalLargePageRange.uFirstTag = UINT64_MAX;
223 pVCpu->iem.s.CodeTlb.GlobalLargePageRange.uFirstTag = UINT64_MAX;
224 pVCpu->iem.s.DataTlb.NonGlobalLargePageRange.uFirstTag = UINT64_MAX;
225 pVCpu->iem.s.DataTlb.GlobalLargePageRange.uFirstTag = UINT64_MAX;
226#endif
227
228#ifndef VBOX_VMM_TARGET_ARMV8
229 /* Poll timers every 400 us / 2500 Hz. (source: thin air) */
230 pVCpu->iem.s.cNsIdealPollInterval = 400U * RT_NS_1US;
231 pVCpu->iem.s.cIrqChecksTillNextPoll = 128;
232 pVCpu->iem.s.cIrqChecksTillNextPollPrev = 128;
233#endif
234
235 /*
236 * Host and guest CPU information.
237 */
238 if (idCpu == 0)
239 {
240 pVCpu->iem.s.enmCpuVendor = CPUMGetGuestCpuVendor(pVM);
241 pVCpu->iem.s.enmHostCpuVendor = CPUMGetHostCpuVendor(pVM);
242#if !defined(VBOX_VMM_TARGET_ARMV8)
243 pVCpu->iem.s.aidxTargetCpuEflFlavour[0] = pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL
244 || pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_VIA /*??*/
245 ? IEMTARGETCPU_EFL_BEHAVIOR_INTEL : IEMTARGETCPU_EFL_BEHAVIOR_AMD;
246# if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
247 if (pVCpu->iem.s.enmCpuVendor == pVCpu->iem.s.enmHostCpuVendor)
248 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = IEMTARGETCPU_EFL_BEHAVIOR_NATIVE;
249 else
250# endif
251 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = pVCpu->iem.s.aidxTargetCpuEflFlavour[0];
252#else
253 pVCpu->iem.s.aidxTargetCpuEflFlavour[0] = IEMTARGETCPU_EFL_BEHAVIOR_NATIVE;
254 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = pVCpu->iem.s.aidxTargetCpuEflFlavour[0];
255#endif
256
257#if !defined(VBOX_VMM_TARGET_ARMV8) && (IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC)
258 switch (pVM->cpum.ro.GuestFeatures.enmMicroarch)
259 {
260 case kCpumMicroarch_Intel_8086: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_8086; break;
261 case kCpumMicroarch_Intel_80186: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_186; break;
262 case kCpumMicroarch_Intel_80286: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_286; break;
263 case kCpumMicroarch_Intel_80386: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_386; break;
264 case kCpumMicroarch_Intel_80486: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_486; break;
265 case kCpumMicroarch_Intel_P5: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_PENTIUM; break;
266 case kCpumMicroarch_Intel_P6: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_PPRO; break;
267 case kCpumMicroarch_NEC_V20: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_V20; break;
268 case kCpumMicroarch_NEC_V30: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_V20; break;
269 default: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_CURRENT; break;
270 }
271 LogRel(("IEM: TargetCpu=%s, Microarch=%s aidxTargetCpuEflFlavour={%d,%d}\n",
272 iemGetTargetCpuName(pVCpu->iem.s.uTargetCpu), CPUMMicroarchName(pVM->cpum.ro.GuestFeatures.enmMicroarch),
273 pVCpu->iem.s.aidxTargetCpuEflFlavour[0], pVCpu->iem.s.aidxTargetCpuEflFlavour[1]));
274#else
275 LogRel(("IEM: Microarch=%s aidxTargetCpuEflFlavour={%d,%d}\n",
276 CPUMMicroarchName(pVM->cpum.ro.GuestFeatures.enmMicroarch),
277 pVCpu->iem.s.aidxTargetCpuEflFlavour[0], pVCpu->iem.s.aidxTargetCpuEflFlavour[1]));
278#endif
279 }
280 else
281 {
282 pVCpu->iem.s.enmCpuVendor = pVM->apCpusR3[0]->iem.s.enmCpuVendor;
283 pVCpu->iem.s.enmHostCpuVendor = pVM->apCpusR3[0]->iem.s.enmHostCpuVendor;
284 pVCpu->iem.s.aidxTargetCpuEflFlavour[0] = pVM->apCpusR3[0]->iem.s.aidxTargetCpuEflFlavour[0];
285 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = pVM->apCpusR3[0]->iem.s.aidxTargetCpuEflFlavour[1];
286#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
287 pVCpu->iem.s.uTargetCpu = pVM->apCpusR3[0]->iem.s.uTargetCpu;
288#endif
289 }
290
291 /*
292 * Mark all buffers free.
293 */
294 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
295 while (iMemMap-- > 0)
296 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
297
298#ifdef VBOX_WITH_IEM_RECOMPILER
299 /*
300 * Recompiler state and configuration distribution.
301 */
302 pVCpu->iem.s.uRegFpCtrl = IEMNATIVE_SIMD_FP_CTRL_REG_NOT_MODIFIED;
303 pVCpu->iem.s.uTbNativeRecompileAtUsedCount = uTbNativeRecompileAtUsedCount;
304#endif
305
306#ifdef IEM_WITH_TLB_TRACE
307 /*
308 * Allocate trace buffer.
309 */
310 pVCpu->iem.s.idxTlbTraceEntry = 0;
311 pVCpu->iem.s.cTlbTraceEntriesShift = 16;
312 pVCpu->iem.s.paTlbTraceEntries = (PIEMTLBTRACEENTRY)RTMemPageAlloc( RT_BIT_Z(pVCpu->iem.s.cTlbTraceEntriesShift)
313 * sizeof(*pVCpu->iem.s.paTlbTraceEntries));
314 AssertLogRelReturn(pVCpu->iem.s.paTlbTraceEntries, VERR_NO_PAGE_MEMORY);
315#endif
316 }
317
318
319#ifdef VBOX_WITH_IEM_RECOMPILER
320 /*
321 * Initialize the TB allocator and cache (/ hash table).
322 *
323 * This is done by each EMT to try get more optimal thread/numa locality of
324 * the allocations.
325 */
326 rc = VMR3ReqCallWait(pVM, VMCPUID_ALL, (PFNRT)iemTbInit, 6,
327 pVM, cInitialTbs, cMaxTbs, cbInitialExec, cbMaxExec, cbChunkExec);
328 AssertLogRelRCReturn(rc, rc);
329#endif
330
331 /*
332 * Register statistics.
333 */
334 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
335 {
336#if !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_NESTED_HWVIRT_VMX) /* quick fix for stupid structure duplication non-sense */
337 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
338 char szPat[128];
339 RT_NOREF_PV(szPat); /* lazy bird */
340 char szVal[128];
341 RT_NOREF_PV(szVal); /* lazy bird */
342
343 STAMR3RegisterF(pVM, &pVCpu->iem.s.cInstructions, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
344 "Instructions interpreted", "/IEM/CPU%u/cInstructions", idCpu);
345 STAMR3RegisterF(pVM, &pVCpu->iem.s.cLongJumps, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
346 "Number of longjmp calls", "/IEM/CPU%u/cLongJumps", idCpu);
347 STAMR3RegisterF(pVM, &pVCpu->iem.s.cPotentialExits, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
348 "Potential exits", "/IEM/CPU%u/cPotentialExits", idCpu);
349 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetAspectNotImplemented, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
350 "VERR_IEM_ASPECT_NOT_IMPLEMENTED", "/IEM/CPU%u/cRetAspectNotImplemented", idCpu);
351 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInstrNotImplemented, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
352 "VERR_IEM_INSTR_NOT_IMPLEMENTED", "/IEM/CPU%u/cRetInstrNotImplemented", idCpu);
353 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInfStatuses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
354 "Informational statuses returned", "/IEM/CPU%u/cRetInfStatuses", idCpu);
355 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetErrStatuses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
356 "Error statuses returned", "/IEM/CPU%u/cRetErrStatuses", idCpu);
357 STAMR3RegisterF(pVM, &pVCpu->iem.s.cbWritten, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
358 "Approx bytes written", "/IEM/CPU%u/cbWritten", idCpu);
359 STAMR3RegisterF(pVM, &pVCpu->iem.s.cPendingCommit, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
360 "Times RC/R0 had to postpone instruction committing to ring-3", "/IEM/CPU%u/cPendingCommit", idCpu);
361 STAMR3RegisterF(pVM, &pVCpu->iem.s.cMisalignedAtomics, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
362 "Number of misaligned (for the host) atomic instructions", "/IEM/CPU%u/cMisalignedAtomics", idCpu);
363
364 /* Code TLB: */
365 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.uTlbRevision, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
366 "Code TLB non-global revision", "/IEM/CPU%u/Tlb/Code/RevisionNonGlobal", idCpu);
367 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
368 "Code TLB global revision", "/IEM/CPU%u/Tlb/Code/RevisionGlobal", idCpu);
369 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlsFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
370 "Code TLB non-global flushes", "/IEM/CPU%u/Tlb/Code/RevisionNonGlobalFlushes", idCpu);
371 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlsGlobalFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
372 "Code TLB global flushes", "/IEM/CPU%u/Tlb/Code/RevisionGlobalFlushes", idCpu);
373 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbRevisionRollovers, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
374 "Code TLB revision rollovers", "/IEM/CPU%u/Tlb/Code/RevisionRollovers", idCpu);
375
376 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.CodeTlb.uTlbPhysRev, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
377 "Code TLB physical revision", "/IEM/CPU%u/Tlb/Code/PhysicalRevision", idCpu);
378 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
379 "Code TLB revision flushes", "/IEM/CPU%u/Tlb/Code/PhysicalRevisionFlushes", idCpu);
380 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbPhysRevRollovers, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
381 "Code TLB revision rollovers", "/IEM/CPU%u/Tlb/Code/PhysicalRevisionRollovers", idCpu);
382
383 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbGlobalLargePageCurLoads, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
384 "Code TLB global large page loads since flush", "/IEM/CPU%u/Tlb/Code/LargePageGlobalCurLoads", idCpu);
385 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.GlobalLargePageRange.uFirstTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
386 "Code TLB global large page range: lowest tag", "/IEM/CPU%u/Tlb/Code/LargePageGlobalFirstTag", idCpu);
387 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.GlobalLargePageRange.uLastTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
388 "Code TLB global large page range: last tag", "/IEM/CPU%u/Tlb/Code/LargePageGlobalLastTag", idCpu);
389
390 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNonGlobalLargePageCurLoads, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
391 "Code TLB non-global large page loads since flush", "/IEM/CPU%u/Tlb/Code/LargePageNonGlobalCurLoads", idCpu);
392 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.NonGlobalLargePageRange.uFirstTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
393 "Code TLB non-global large page range: lowest tag", "/IEM/CPU%u/Tlb/Code/LargePageNonGlobalFirstTag", idCpu);
394 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.NonGlobalLargePageRange.uLastTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
395 "Code TLB non-global large page range: last tag", "/IEM/CPU%u/Tlb/Code/LargePageNonGlobalLastTag", idCpu);
396
397 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbInvlPg, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
398 "Code TLB page invalidation requests", "/IEM/CPU%u/Tlb/Code/InvlPg", idCpu);
399 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbInvlPgLargeGlobal, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
400 "Code TLB page invlpg scanning for global large pages", "/IEM/CPU%u/Tlb/Code/InvlPg/LargeGlobal", idCpu);
401 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbInvlPgLargeNonGlobal, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
402 "Code TLB page invlpg scanning for non-global large pages", "/IEM/CPU%u/Tlb/Code/InvlPg/LargeNonGlobal", idCpu);
403
404 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbCoreMisses, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
405 "Code TLB misses", "/IEM/CPU%u/Tlb/Code/Misses", idCpu);
406 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbCoreGlobalLoads, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
407 "Code TLB global loads", "/IEM/CPU%u/Tlb/Code/Misses/GlobalLoads", idCpu);
408 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbSlowCodeReadPath, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
409 "Code TLB slow read path", "/IEM/CPU%u/Tlb/Code/SlowReads", idCpu);
410# ifdef IEM_WITH_TLB_STATISTICS
411 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbCoreHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
412 "Code TLB hits (non-native)", "/IEM/CPU%u/Tlb/Code/Hits/Other", idCpu);
413# if defined(VBOX_WITH_IEM_NATIVE_RECOMPILER)
414 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbHitsForNewPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
415 "Code TLB native hits on new page", "/IEM/CPU%u/Tlb/Code/Hits/New-Page", idCpu);
416 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbHitsForNewPageWithOffset, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
417 "Code TLB native hits on new page /w offset", "/IEM/CPU%u/Tlb/Code/Hits/New-Page-With-Offset", idCpu);
418# endif
419
420 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Code/Hits/*", idCpu);
421 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Code TLB hits",
422 "/IEM/CPU%u/Tlb/Code/Hits", idCpu);
423
424 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/Tlb/Code/Hits|/IEM/CPU%u/Tlb/Code/Misses", idCpu, idCpu);
425 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Code TLB lookups (sum of hits and misses)",
426 "/IEM/CPU%u/Tlb/Code/AllLookups", idCpu);
427
428 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/Tlb/Code/Misses", idCpu);
429 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Code/Hits", idCpu);
430 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PPM, szVal, true, szPat,
431 "Code TLB actual miss rate", "/IEM/CPU%u/Tlb/Code/RateMisses", idCpu);
432
433# if defined(VBOX_WITH_IEM_NATIVE_RECOMPILER)
434 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissTag, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
435 "Code TLB misses in native code: Tag mismatch [not directly included grand parent sum]",
436 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/Tag", idCpu);
437 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissFlagsAndPhysRev, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
438 "Code TLB misses in native code: Flags or physical revision mistmatch [not directly included grand parent sum]",
439 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/FlagsAndPhysRev", idCpu);
440 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissAlignment, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
441 "Code TLB misses in native code: Alignment [not directly included grand parent sum]",
442 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/Alignment", idCpu);
443 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissCrossPage, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
444 "Code TLB misses in native code: Cross page [not directly included grand parent sum]",
445 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/CrossPage", idCpu);
446 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissNonCanonical, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
447 "Code TLB misses in native code: Non-canonical [not directly included grand parent sum]",
448 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/NonCanonical", idCpu);
449
450 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbMissesNewPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
451 "Code TLB native misses on new page",
452 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown2/New-Page", idCpu);
453 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbMissesNewPageWithOffset, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
454 "Code TLB native misses on new page w/ offset",
455 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown2/New-Page-With-Offset", idCpu);
456# endif
457# endif /* IEM_WITH_TLB_STATISTICS */
458
459 /* Data TLB organized as best we can... */
460 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.uTlbRevision, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
461 "Data TLB non-global revision", "/IEM/CPU%u/Tlb/Data/RevisionNonGlobal", idCpu);
462 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.uTlbRevisionGlobal, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
463 "Data TLB global revision", "/IEM/CPU%u/Tlb/Data/RevisionGlobal", idCpu);
464 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlsFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
465 "Data TLB non-global flushes", "/IEM/CPU%u/Tlb/Data/RevisionNonGlobalFlushes", idCpu);
466 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlsGlobalFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
467 "Data TLB global flushes", "/IEM/CPU%u/Tlb/Data/RevisionGlobalFlushes", idCpu);
468 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbRevisionRollovers, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
469 "Data TLB revision rollovers", "/IEM/CPU%u/Tlb/Data/RevisionRollovers", idCpu);
470
471 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.DataTlb.uTlbPhysRev, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
472 "Data TLB physical revision", "/IEM/CPU%u/Tlb/Data/PhysicalRevision", idCpu);
473 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
474 "Data TLB revision flushes", "/IEM/CPU%u/Tlb/Data/PhysicalRevisionFlushes", idCpu);
475 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbPhysRevRollovers, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
476 "Data TLB revision rollovers", "/IEM/CPU%u/Tlb/Data/PhysicalRevisionRollovers", idCpu);
477
478 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbGlobalLargePageCurLoads, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
479 "Data TLB global large page loads since flush", "/IEM/CPU%u/Tlb/Data/LargePageGlobalCurLoads", idCpu);
480 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.GlobalLargePageRange.uFirstTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
481 "Data TLB global large page range: lowest tag", "/IEM/CPU%u/Tlb/Data/LargePageGlobalFirstTag", idCpu);
482 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.GlobalLargePageRange.uLastTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
483 "Data TLB global large page range: last tag", "/IEM/CPU%u/Tlb/Data/LargePageGlobalLastTag", idCpu);
484
485 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNonGlobalLargePageCurLoads, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
486 "Data TLB non-global large page loads since flush", "/IEM/CPU%u/Tlb/Data/LargePageNonGlobalCurLoads", idCpu);
487 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.NonGlobalLargePageRange.uFirstTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
488 "Data TLB non-global large page range: lowest tag", "/IEM/CPU%u/Tlb/Data/LargePageNonGlobalFirstTag", idCpu);
489 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.NonGlobalLargePageRange.uLastTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
490 "Data TLB non-global large page range: last tag", "/IEM/CPU%u/Tlb/Data/LargePageNonGlobalLastTag", idCpu);
491
492 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbInvlPg, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
493 "Data TLB page invalidation requests", "/IEM/CPU%u/Tlb/Data/InvlPg", idCpu);
494 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbInvlPgLargeGlobal, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
495 "Data TLB page invlpg scanning for global large pages", "/IEM/CPU%u/Tlb/Data/InvlPg/LargeGlobal", idCpu);
496 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbInvlPgLargeNonGlobal, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
497 "Data TLB page invlpg scanning for non-global large pages", "/IEM/CPU%u/Tlb/Data/InvlPg/LargeNonGlobal", idCpu);
498
499 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbCoreMisses, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
500 "Data TLB core misses (iemMemMap, direct iemMemMapJmp (not safe path))",
501 "/IEM/CPU%u/Tlb/Data/Misses/Core", idCpu);
502 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
503 "Data TLB global loads",
504 "/IEM/CPU%u/Tlb/Data/Misses/Core/GlobalLoads", idCpu);
505 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeReadPath, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
506 "Data TLB safe read path (inline/native misses going to iemMemMapJmp)",
507 "/IEM/CPU%u/Tlb/Data/Misses/Safe/Reads", idCpu);
508 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeWritePath, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
509 "Data TLB safe write path (inline/native misses going to iemMemMapJmp)",
510 "/IEM/CPU%u/Tlb/Data/Misses/Safe/Writes", idCpu);
511 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Misses/*", idCpu);
512 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB misses",
513 "/IEM/CPU%u/Tlb/Data/Misses", idCpu);
514
515 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Misses/Safe/*", idCpu);
516 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB actual safe path calls (read + write)",
517 "/IEM/CPU%u/Tlb/Data/Misses/Safe", idCpu);
518 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
519 "Data TLB hits in iemMemMapJmp - not part of safe-path total",
520 "/IEM/CPU%u/Tlb/Data/Misses/Safe/SubPartHits", idCpu);
521 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeMisses, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
522 "Data TLB misses in iemMemMapJmp - not part of safe-path total",
523 "/IEM/CPU%u/Tlb/Data/Misses/Safe/SubPartMisses", idCpu);
524 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeGlobalLoads, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
525 "Data TLB global loads",
526 "/IEM/CPU%u/Tlb/Data/Misses/Safe/SubPartMisses/GlobalLoads", idCpu);
527
528# ifdef IEM_WITH_TLB_STATISTICS
529# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
530 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissTag, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
531 "Data TLB misses in native code: Tag mismatch [not directly included grand parent sum]",
532 "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/Tag", idCpu);
533 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissFlagsAndPhysRev, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
534 "Data TLB misses in native code: Flags or physical revision mistmatch [not directly included grand parent sum]",
535 "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/FlagsAndPhysRev", idCpu);
536 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissAlignment, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
537 "Data TLB misses in native code: Alignment [not directly included grand parent sum]",
538 "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/Alignment", idCpu);
539 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissCrossPage, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
540 "Data TLB misses in native code: Cross page [not directly included grand parent sum]",
541 "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/CrossPage", idCpu);
542 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissNonCanonical, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
543 "Data TLB misses in native code: Non-canonical [not directly included grand parent sum]",
544 "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/NonCanonical", idCpu);
545# endif
546# endif
547
# ifdef IEM_WITH_TLB_STATISTICS
    /* Data TLB hit statistics (core, inline and native paths), the derived
       hit/lookup sums, and the actual miss rate. */
    STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbCoreHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Data TLB core hits (iemMemMap, direct iemMemMapJmp (not safe path))",
                    "/IEM/CPU%u/Tlb/Data/Hits/Core", idCpu);
    STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbInlineCodeHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Data TLB hits in IEMAllMemRWTmplInline.cpp.h",
                    "/IEM/CPU%u/Tlb/Data/Hits/Inline", idCpu);
# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
    STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForStack, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Data TLB native stack access hits",
                    "/IEM/CPU%u/Tlb/Data/Hits/Native/Stack", idCpu);
    STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForFetch, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Data TLB native data fetch hits",
                    "/IEM/CPU%u/Tlb/Data/Hits/Native/Fetch", idCpu);
    STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForStore, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Data TLB native data store hits",
                    "/IEM/CPU%u/Tlb/Data/Hits/Native/Store", idCpu);
    STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForMapped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Data TLB native mapped data hits",
                    "/IEM/CPU%u/Tlb/Data/Hits/Native/Mapped", idCpu);
# endif
    RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Hits/*", idCpu);
    STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB hits",
                      "/IEM/CPU%u/Tlb/Data/Hits", idCpu);

# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
    RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Hits/Native/*", idCpu);
    STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB hits from native code",
                      "/IEM/CPU%u/Tlb/Data/Hits/Native", idCpu);
# endif

    /* Bugfix: the hits|misses pattern was formatted into szVal while szPat
       (still holding the hits pattern from above) was passed to
       STAMR3RegisterSum, so "AllLookups" only summed hits.  Format the
       pattern into szPat, which is what the call consumes. */
    RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Hits|/IEM/CPU%u/Tlb/Data/Misses", idCpu, idCpu);
    STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB lookups (sum of hits and misses)",
                      "/IEM/CPU%u/Tlb/Data/AllLookups", idCpu);

    RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/Tlb/Data/Misses", idCpu);
    RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Hits", idCpu);
    STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PPM, szVal, true, szPat,
                           "Data TLB actual miss rate", "/IEM/CPU%u/Tlb/Data/RateMisses", idCpu);

# endif /* IEM_WITH_TLB_STATISTICS */
589
590
#ifdef VBOX_WITH_IEM_RECOMPILER
    /* Translation block (TB) execution counters. */
    STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.cTbExecNative, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Executed native translation block", "/IEM/CPU%u/re/cTbExecNative", idCpu);
    STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.cTbExecThreaded, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Executed threaded translation block", "/IEM/CPU%u/re/cTbExecThreaded", idCpu);
    STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedExecBreaks, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
                    "Times threaded TB execution was interrupted/broken off", "/IEM/CPU%u/re/cTbExecThreadedBreaks", idCpu);
# ifdef VBOX_WITH_STATISTICS
    STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedExecBreaksWithLookup, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
                    "Times threaded TB execution was interrupted/broken off on a call with lookup entries", "/IEM/CPU%u/re/cTbExecThreadedBreaksWithLookup", idCpu);
    STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedExecBreaksWithoutLookup, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
                    "Times threaded TB execution was interrupted/broken off on a call without lookup entries", "/IEM/CPU%u/re/cTbExecThreadedBreaksWithoutLookup", idCpu);
# endif

    /* Last-used timer polling interval (exposed read-only, hence no _RESET). */
    STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.cIrqChecksTillNextPollPrev, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Timer polling interval", "/IEM/CPU%u/re/cIrqChecksTillNextPollPrev", idCpu);

    /* TB allocator statistics (allocation/free calls, pruning, capacity). */
    PIEMTBALLOCATOR const pTbAllocator = pVCpu->iem.s.pTbAllocatorR3;
    STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatAllocs, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,
                    "Translation block allocations", "/IEM/CPU%u/re/cTbAllocCalls", idCpu);
    STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatFrees, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,
                    "Translation block frees", "/IEM/CPU%u/re/cTbFreeCalls", idCpu);
# ifdef VBOX_WITH_STATISTICS
    STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatPrune, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
                    "Time spent freeing up TBs when full at alloc", "/IEM/CPU%u/re/TbPruningAlloc", idCpu);
# endif
    STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatPruneNative, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
                    "Time spent freeing up native TBs when out of executable memory", "/IEM/CPU%u/re/ExecMem/TbPruningNative", idCpu);
    STAMR3RegisterF(pVM, (void *)&pTbAllocator->cAllocatedChunks, STAMTYPE_U16, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Populated TB chunks", "/IEM/CPU%u/re/cTbChunks", idCpu);
    STAMR3RegisterF(pVM, (void *)&pTbAllocator->cMaxChunks, STAMTYPE_U8, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Max number of TB chunks", "/IEM/CPU%u/re/cTbChunksMax", idCpu);
    STAMR3RegisterF(pVM, (void *)&pTbAllocator->cTotalTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Total number of TBs in the allocator", "/IEM/CPU%u/re/cTbTotal", idCpu);
    STAMR3RegisterF(pVM, (void *)&pTbAllocator->cMaxTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Max total number of TBs allowed", "/IEM/CPU%u/re/cTbTotalMax", idCpu);
    STAMR3RegisterF(pVM, (void *)&pTbAllocator->cInUseTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Number of currently allocated TBs", "/IEM/CPU%u/re/cTbAllocated", idCpu);
    STAMR3RegisterF(pVM, (void *)&pTbAllocator->cNativeTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Number of currently allocated native TBs", "/IEM/CPU%u/re/cTbAllocatedNative", idCpu);
    STAMR3RegisterF(pVM, (void *)&pTbAllocator->cThreadedTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Number of currently allocated threaded TBs", "/IEM/CPU%u/re/cTbAllocatedThreaded", idCpu);

    /* TB lookup cache (hash table) statistics. */
    PIEMTBCACHE const pTbCache = pVCpu->iem.s.pTbCacheR3;
    STAMR3RegisterF(pVM, (void *)&pTbCache->cHash, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Translation block lookup table size", "/IEM/CPU%u/re/cTbHashTab", idCpu);

    STAMR3RegisterF(pVM, (void *)&pTbCache->cLookupHits, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
                    "Translation block lookup hits", "/IEM/CPU%u/re/cTbLookupHits", idCpu);
    STAMR3RegisterF(pVM, (void *)&pTbCache->cLookupHitsViaTbLookupTable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
                    "Translation block lookup hits via TB lookup table associated with the previous TB", "/IEM/CPU%u/re/cTbLookupHitsViaTbLookupTable", idCpu);
    STAMR3RegisterF(pVM, (void *)&pTbCache->cLookupMisses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
                    "Translation block lookup misses", "/IEM/CPU%u/re/cTbLookupMisses", idCpu);
    STAMR3RegisterF(pVM, (void *)&pTbCache->cCollisions, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
                    "Translation block hash table collisions", "/IEM/CPU%u/re/cTbCollisions", idCpu);
# ifdef VBOX_WITH_STATISTICS
    STAMR3RegisterF(pVM, (void *)&pTbCache->StatPrune, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
                    "Time spent shortening collision lists", "/IEM/CPU%u/re/TbPruningCollisions", idCpu);
# endif
650
651 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedCalls, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS_PER_TB,
652 "Calls per threaded translation block", "/IEM/CPU%u/re/ThrdCallsPerTb", idCpu);
653 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbInstr, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_INSTR_PER_TB,
654 "Instruction per threaded translation block", "/IEM/CPU%u/re/ThrdInstrPerTb", idCpu);
655 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbLookupEntries, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_INSTR_PER_TB,
656 "TB lookup table entries per threaded translation block", "/IEM/CPU%u/re/ThrdLookupEntriesPerTb", idCpu);
657
658 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckIrqBreaks, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
659 "TB breaks by CheckIrq", "/IEM/CPU%u/re/CheckIrqBreaks", idCpu);
660 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckTimersBreaks, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
661 "TB breaks by CheckIrq", "/IEM/CPU%u/re/CheckTimersBreaks", idCpu);
662 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckModeBreaks, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
663 "TB breaks by CheckMode", "/IEM/CPU%u/re/CheckModeBreaks", idCpu);
664 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckBranchMisses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
665 "Branch target misses", "/IEM/CPU%u/re/CheckTbJmpMisses", idCpu);
666 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckNeedCsLimChecking, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
667 "Needing CS.LIM checking TB after branch or on page crossing", "/IEM/CPU%u/re/CheckTbNeedCsLimChecking", idCpu);
668
669 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbLoopFullTbDetected, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
670 "Detected loop full TB", "/IEM/CPU%u/re/LoopFullTbDetected", idCpu);
671 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbLoopInTbDetected, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
672 "Detected loop within TB", "/IEM/CPU%u/re/LoopInTbDetected", idCpu);
673
674 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeExecMemInstrBufAllocFailed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
675 "Number of times the exec memory allocator failed to allocate a large enough buffer",
676 "/IEM/CPU%u/re/NativeExecMemInstrBufAllocFailed", idCpu);
677
678 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCallsRecompiled, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS_PER_TB,
679 "Number of threaded calls per TB that have been properly recompiled to native code",
680 "/IEM/CPU%u/re/NativeCallsRecompiledPerTb", idCpu);
681 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCallsThreaded, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS_PER_TB,
682 "Number of threaded calls per TB that could not be recompiler to native code",
683 "/IEM/CPU%u/re/NativeCallsThreadedPerTb", idCpu);
684 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeFullyRecompiledTbs, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
685 "Number of threaded calls that could not be recompiler to native code",
686 "/IEM/CPU%u/re/NativeFullyRecompiledTbs", idCpu);
687
688 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbNativeCode, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES_PER_TB,
689 "Size of native code per TB", "/IEM/CPU%u/re/NativeCodeSizePerTb", idCpu);
690 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeRecompilation, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
691 "Profiling iemNativeRecompile()", "/IEM/CPU%u/re/NativeRecompilation", idCpu);
692
693# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
694# ifdef VBOX_WITH_STATISTICS
695 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFree, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
696 "Number of calls to iemNativeRegAllocFindFree.",
697 "/IEM/CPU%u/re/NativeRegFindFree", idCpu);
698# endif
699 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
700 "Number of times iemNativeRegAllocFindFree needed to free a variable.",
701 "/IEM/CPU%u/re/NativeRegFindFreeVar", idCpu);
702# ifdef VBOX_WITH_STATISTICS
703 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeNoVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
704 "Number of times iemNativeRegAllocFindFree did not needed to free any variables.",
705 "/IEM/CPU%u/re/NativeRegFindFreeNoVar", idCpu);
706 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeLivenessUnshadowed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
707 "Times liveness info freeed up shadowed guest registers in iemNativeRegAllocFindFree.",
708 "/IEM/CPU%u/re/NativeRegFindFreeLivenessUnshadowed", idCpu);
709 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeLivenessHelped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
710 "Times liveness info helped finding the return register in iemNativeRegAllocFindFree.",
711 "/IEM/CPU%u/re/NativeRegFindFreeLivenessHelped", idCpu);
712
    /* EFLAGS status-flag update elision counters: whole-instruction skips... */
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeEflSkippedArithmetic, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Skipped all status flag updating, arithmetic instructions",
                    "/IEM/CPU%u/re/NativeEFlagsSkippedArithmetic", idCpu);
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeEflSkippedLogical, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Skipped all status flag updating, logical instructions",
                    "/IEM/CPU%u/re/NativeEFlagsSkippedLogical", idCpu);

    /* ... and per-flag liveness classification (skippable / required / delayable). */
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflCfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.CF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsCfSkippable", idCpu);
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflPfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.PF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsPfSkippable", idCpu);
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflAfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.AF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsAfSkippable", idCpu);
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflZfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.ZF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsZfSkippable", idCpu);
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflSfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.SF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsSfSkippable", idCpu);
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflOfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.OF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsOfSkippable", idCpu);

    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflCfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.CF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsCfRequired", idCpu);
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflPfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.PF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsPfRequired", idCpu);
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflAfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.AF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsAfRequired", idCpu);
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflZfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.ZF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsZfRequired", idCpu);
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflSfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.SF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsSfRequired", idCpu);
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflOfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.OF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsOfRequired", idCpu);

# ifdef IEMLIVENESS_EXTENDED_LAYOUT
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflCfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.CF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsCfDelayable", idCpu);
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflPfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.PF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsPfDelayable", idCpu);
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflAfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.AF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsAfDelayable", idCpu);
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflZfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.ZF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsZfDelayable", idCpu);
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflSfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.SF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsSfDelayable", idCpu);
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflOfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.OF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsOfDelayable", idCpu);
# endif
742
743 /* Sum up all status bits ('_' is a sorting hack). */
744 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags?fSkippable*", idCpu);
745 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total skippable EFLAGS status bit updating",
746 "/IEM/CPU%u/re/NativeLivenessEFlags_StatusSkippable", idCpu);
747
748 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags?fRequired*", idCpu);
749 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total required STATUS status bit updating",
750 "/IEM/CPU%u/re/NativeLivenessEFlags_StatusRequired", idCpu);
751
752# ifdef IEMLIVENESS_EXTENDED_LAYOUT
753 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags?fDelayable*", idCpu);
754 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total potentially delayable STATUS status bit updating",
755 "/IEM/CPU%u/re/NativeLivenessEFlags_StatusDelayable", idCpu);
756# endif
757
758 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags?f*", idCpu);
759 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total STATUS status bit events of any kind",
760 "/IEM/CPU%u/re/NativeLivenessEFlags_StatusTotal", idCpu);
761
    /* Ratio of the status bit skippables. */
    RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags_StatusTotal", idCpu);
    RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeLivenessEFlags_StatusSkippable", idCpu);
    STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat,
                           "Total skippable EFLAGS status bit updating percentage",
                           "/IEM/CPU%u/re/NativeLivenessEFlags_StatusSkippablePct", idCpu);

# ifdef IEMLIVENESS_EXTENDED_LAYOUT
    /* Ratio of the status bit delayables (szPat still holds StatusTotal from above). */
    RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeLivenessEFlags_StatusDelayable", idCpu);
    STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat,
                           "Total potentially delayable EFLAGS status bit updating percentage",
                           "/IEM/CPU%u/re/NativeLivenessEFlags_StatusDelayablePct", idCpu);
# endif

    /* Ratios of individual bits.  offFlagChar indexes the flag letter in both
       strings; it is patched in place for each of the six flags below. */
    size_t const offFlagChar = RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlagsCf*", idCpu) - 3;
    Assert(szPat[offFlagChar] == 'C');
    RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeLivenessEFlagsCfSkippable", idCpu);
    Assert(szVal[offFlagChar] == 'C');
    szPat[offFlagChar] = szVal[offFlagChar] = 'C'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.CF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlagsCfSkippablePct", idCpu);
    szPat[offFlagChar] = szVal[offFlagChar] = 'P'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.PF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlagsPfSkippablePct", idCpu);
    szPat[offFlagChar] = szVal[offFlagChar] = 'A'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.AF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlagsAfSkippablePct", idCpu);
    szPat[offFlagChar] = szVal[offFlagChar] = 'Z'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.ZF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlagsZfSkippablePct", idCpu);
    szPat[offFlagChar] = szVal[offFlagChar] = 'S'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.SF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlagsSfSkippablePct", idCpu);
    szPat[offFlagChar] = szVal[offFlagChar] = 'O'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.OF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlagsOfSkippablePct", idCpu);

    /* Delayed PC (RIP) update counters. */
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativePcUpdateTotal, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Total RIP updates", "/IEM/CPU%u/re/NativePcUpdateTotal", idCpu);
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativePcUpdateDelayed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Delayed RIP updates", "/IEM/CPU%u/re/NativePcUpdateDelayed", idCpu);
791
792# ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
793 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFree, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
794 "Number of calls to iemNativeSimdRegAllocFindFree.",
795 "/IEM/CPU%u/re/NativeSimdRegFindFree", idCpu);
796 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
797 "Number of times iemNativeSimdRegAllocFindFree needed to free a variable.",
798 "/IEM/CPU%u/re/NativeSimdRegFindFreeVar", idCpu);
799 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeNoVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
800 "Number of times iemNativeSimdRegAllocFindFree did not needed to free any variables.",
801 "/IEM/CPU%u/re/NativeSimdRegFindFreeNoVar", idCpu);
802 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeLivenessUnshadowed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
803 "Times liveness info freeed up shadowed guest registers in iemNativeSimdRegAllocFindFree.",
804 "/IEM/CPU%u/re/NativeSimdRegFindFreeLivenessUnshadowed", idCpu);
805 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeLivenessHelped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
806 "Times liveness info helped finding the return register in iemNativeSimdRegAllocFindFree.",
807 "/IEM/CPU%u/re/NativeSimdRegFindFreeLivenessHelped", idCpu);
808
809 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeDeviceNotAvailXcptCheckPotential, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Potential IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() checks",
810 "/IEM/CPU%u/re/NativeMaybeDeviceNotAvailXcptCheckPotential", idCpu);
811 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeWaitDeviceNotAvailXcptCheckPotential, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Potential IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() checks",
812 "/IEM/CPU%u/re/NativeMaybeWaitDeviceNotAvailXcptCheckPotential", idCpu);
813 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeSseXcptCheckPotential, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Potential IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() checks",
814 "/IEM/CPU%u/re/NativeMaybeSseXcptCheckPotential", idCpu);
815 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeAvxXcptCheckPotential, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Potential IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() checks",
816 "/IEM/CPU%u/re/NativeMaybeAvxXcptCheckPotential", idCpu);
817
818 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeDeviceNotAvailXcptCheckOmitted, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() checks omitted",
819 "/IEM/CPU%u/re/NativeMaybeDeviceNotAvailXcptCheckOmitted", idCpu);
820 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeWaitDeviceNotAvailXcptCheckOmitted, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() checks omitted",
821 "/IEM/CPU%u/re/NativeMaybeWaitDeviceNotAvailXcptCheckOmitted", idCpu);
822 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeSseXcptCheckOmitted, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() checks omitted",
823 "/IEM/CPU%u/re/NativeMaybeSseXcptCheckOmitted", idCpu);
824 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeAvxXcptCheckOmitted, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() checks omitted",
825 "/IEM/CPU%u/re/NativeMaybeAvxXcptCheckOmitted", idCpu);
826# endif
827
828 /* Ratio of the status bit skippables. */
829 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativePcUpdateTotal", idCpu);
830 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativePcUpdateDelayed", idCpu);
831 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat,
832 "Delayed RIP updating percentage",
833 "/IEM/CPU%u/re/NativePcUpdateDelayed_StatusDelayedPct", idCpu);
834
835 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbFinished, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
836 "Number of times the TB finishes execution completely",
837 "/IEM/CPU%u/re/NativeTbFinished", idCpu);
838# endif /* VBOX_WITH_STATISTICS */
839 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitReturnBreak, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
840 "Number of times the TB finished through the ReturnBreak label",
841 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak", idCpu);
842 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitReturnBreakFF, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
843 "Number of times the TB finished through the ReturnBreak label",
844 "/IEM/CPU%u/re/NativeTbExit/ReturnBreakFF", idCpu);
845 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitReturnWithFlags, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
846 "Number of times the TB finished through the ReturnWithFlags label",
847 "/IEM/CPU%u/re/NativeTbExit/ReturnWithFlags", idCpu);
848 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitReturnOtherStatus, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
849 "Number of times the TB finished with some other status value",
850 "/IEM/CPU%u/re/NativeTbExit/ReturnOtherStatus", idCpu);
851 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitLongJump, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
852 "Number of times the TB finished via long jump / throw",
853 "/IEM/CPU%u/re/NativeTbExit/LongJumps", idCpu);
    /* These end up returning VINF_IEM_REEXEC_BREAK and are thus already counted under NativeTbExit/ReturnBreak: */
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitObsoleteTb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Number of times the TB finished through the ObsoleteTb label",
                    "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/ObsoleteTb", idCpu);
    /* NOTE(review): the next two reuse StatCheckNeedCsLimChecking and
       StatCheckBranchMisses, registering the same counters under a second
       name - presumably intentional so they also show up in this subtree. */
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatCheckNeedCsLimChecking, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Number of times the TB finished through the NeedCsLimChecking label",
                    "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/NeedCsLimChecking", idCpu);
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatCheckBranchMisses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Number of times the TB finished through the CheckBranchMiss label",
                    "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/CheckBranchMiss", idCpu);
    /* Raising stuff will either increment NativeTbExit/LongJumps or NativeTbExit/ReturnOtherStatus
       depending on whether VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP is defined: */
# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
#  define RAISE_PREFIX "/IEM/CPU%u/re/NativeTbExit/ReturnOtherStatus/"
# else
#  define RAISE_PREFIX "/IEM/CPU%u/re/NativeTbExit/LongJumps/"
# endif
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseDe, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Number of times the TB finished raising a #DE exception",
                    RAISE_PREFIX "RaiseDe", idCpu);
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseUd, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Number of times the TB finished raising a #UD exception",
                    RAISE_PREFIX "RaiseUd", idCpu);
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseSseRelated, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Number of times the TB finished raising a SSE related exception",
                    RAISE_PREFIX "RaiseSseRelated", idCpu);
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseAvxRelated, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Number of times the TB finished raising a AVX related exception",
                    RAISE_PREFIX "RaiseAvxRelated", idCpu);
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseSseAvxFpRelated, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Number of times the TB finished raising a SSE/AVX floating point related exception",
                    RAISE_PREFIX "RaiseSseAvxFpRelated", idCpu);
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseNm, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Number of times the TB finished raising a #NM exception",
                    RAISE_PREFIX "RaiseNm", idCpu);
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseGp0, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Number of times the TB finished raising a #GP(0) exception",
                    RAISE_PREFIX "RaiseGp0", idCpu);
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseMf, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Number of times the TB finished raising a #MF exception",
                    RAISE_PREFIX "RaiseMf", idCpu);
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseXf, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Number of times the TB finished raising a #XF exception",
                    RAISE_PREFIX "RaiseXf", idCpu);
898
# ifdef VBOX_WITH_STATISTICS
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitLoopFullTb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Number of full TB loops.",
                    "/IEM/CPU%u/re/NativeTbExit/LoopFullTb", idCpu);
# endif

    /* Direct TB-to-TB linking attempt #1: success counters... */
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1Irq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Direct linking #1 with IRQ check succeeded",
                    "/IEM/CPU%u/re/NativeTbExit/DirectLinking1Irq", idCpu);
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1NoIrq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Direct linking #1 w/o IRQ check succeeded",
                    "/IEM/CPU%u/re/NativeTbExit/DirectLinking1NoIrq", idCpu);
# ifdef VBOX_WITH_STATISTICS
    /* ... and failure reasons (these fall back to ReturnBreak). */
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1NoTb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Direct linking #1 failed: No TB in lookup table",
                    "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking1NoTb", idCpu);
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1MismatchGCPhysPc, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Direct linking #1 failed: GCPhysPc mismatch",
                    "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking1MismatchGCPhysPc", idCpu);
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1MismatchFlags, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Direct linking #1 failed: TB flags mismatch",
                    "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking1MismatchFlags", idCpu);
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1PendingIrq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Direct linking #1 failed: IRQ or FF pending",
                    "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking1PendingIrq", idCpu);
# endif

    /* Direct TB-to-TB linking attempt #2: success counters... */
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2Irq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Direct linking #2 with IRQ check succeeded",
                    "/IEM/CPU%u/re/NativeTbExit/DirectLinking2Irq", idCpu);
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2NoIrq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Direct linking #2 w/o IRQ check succeeded",
                    "/IEM/CPU%u/re/NativeTbExit/DirectLinking2NoIrq", idCpu);
# ifdef VBOX_WITH_STATISTICS
    /* ... and failure reasons (these fall back to ReturnBreak). */
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2NoTb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Direct linking #2 failed: No TB in lookup table",
                    "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking2NoTb", idCpu);
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2MismatchGCPhysPc, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Direct linking #2 failed: GCPhysPc mismatch",
                    "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking2MismatchGCPhysPc", idCpu);
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2MismatchFlags, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Direct linking #2 failed: TB flags mismatch",
                    "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking2MismatchFlags", idCpu);
    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2PendingIrq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Direct linking #2 failed: IRQ or FF pending",
                    "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking2PendingIrq", idCpu);
945# endif
946
947 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeTbExit/*", idCpu); /* only immediate children, no sub folders */
948 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat,
949 "Number of times native TB execution finished before the end (not counting thrown memory++ exceptions)",
950 "/IEM/CPU%u/re/NativeTbExit", idCpu);
951
952
953# endif /* VBOX_WITH_IEM_NATIVE_RECOMPILER */
954
955
956# ifdef VBOX_WITH_STATISTICS
957 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatMemMapJmp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
958 "iemMemMapJmp calls", "/IEM/CPU%u/iemMemMapJmp", idCpu);
959 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatMemMapNoJmp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
960 "iemMemMap calls", "/IEM/CPU%u/iemMemMapNoJmp", idCpu);
961 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatMemBounceBufferCrossPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
962 "iemMemBounceBufferMapCrossPage calls", "/IEM/CPU%u/iemMemMapBounceBufferCrossPage", idCpu);
963 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatMemBounceBufferMapPhys, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
964 "iemMemBounceBufferMapPhys calls", "/IEM/CPU%u/iemMemMapBounceBufferMapPhys", idCpu);
965# endif
966
967
968#endif /* VBOX_WITH_IEM_RECOMPILER */
969
970 for (uint32_t i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aStatXcpts); i++)
971 STAMR3RegisterF(pVM, &pVCpu->iem.s.aStatXcpts[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
972 "", "/IEM/CPU%u/Exceptions/%02x", idCpu, i);
973 for (uint32_t i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aStatInts); i++)
974 STAMR3RegisterF(pVM, &pVCpu->iem.s.aStatInts[i], STAMTYPE_U32_RESET, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
975 "", "/IEM/CPU%u/Interrupts/%02x", idCpu, i);
976
977# if !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_STATISTICS) && !defined(DOXYGEN_RUNNING)
978 /* Instruction statistics: */
979# define IEM_DO_INSTR_STAT(a_Name, a_szDesc) \
980 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatsRZ.a_Name, STAMTYPE_U32_RESET, STAMVISIBILITY_USED, \
981 STAMUNIT_COUNT, a_szDesc, "/IEM/CPU%u/instr-RZ/" #a_Name, idCpu); \
982 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatsR3.a_Name, STAMTYPE_U32_RESET, STAMVISIBILITY_USED, \
983 STAMUNIT_COUNT, a_szDesc, "/IEM/CPU%u/instr-R3/" #a_Name, idCpu);
984# include "IEMInstructionStatisticsTmpl.h"
985# undef IEM_DO_INSTR_STAT
986# endif
987
988# if defined(VBOX_WITH_STATISTICS) && defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
989 /* Threaded function statistics: */
990 for (unsigned i = 1; i < (unsigned)kIemThreadedFunc_End; i++)
991 STAMR3RegisterF(pVM, &pVCpu->iem.s.acThreadedFuncStats[i], STAMTYPE_U32_RESET, STAMVISIBILITY_USED,
992 STAMUNIT_COUNT, NULL, "/IEM/CPU%u/ThrdFuncs/%s", idCpu, g_apszIemThreadedFunctionStats[i]);
993# endif
994
995#endif /* !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_NESTED_HWVIRT_VMX) - quick fix for stupid structure duplication non-sense */
996 }
997
998#if !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_NESTED_HWVIRT_VMX)
999 /*
1000 * Register the per-VM VMX APIC-access page handler type.
1001 */
1002 if (pVM->cpum.ro.GuestFeatures.fVmx)
1003 {
1004 rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_ALL, PGMPHYSHANDLER_F_NOT_IN_HM,
1005 iemVmxApicAccessPageHandler,
1006 "VMX APIC-access page", &pVM->iem.s.hVmxApicAccessPage);
1007 AssertLogRelRCReturn(rc, rc);
1008 }
1009#endif
1010
1011 DBGFR3InfoRegisterInternalArgv(pVM, "itlb", "IEM instruction TLB", iemR3InfoITlb, DBGFINFO_FLAGS_RUN_ON_EMT);
1012 DBGFR3InfoRegisterInternalArgv(pVM, "dtlb", "IEM instruction TLB", iemR3InfoDTlb, DBGFINFO_FLAGS_RUN_ON_EMT);
1013#ifdef IEM_WITH_TLB_TRACE
1014 DBGFR3InfoRegisterInternalArgv(pVM, "tlbtrace", "IEM TLB trace log", iemR3InfoTlbTrace, DBGFINFO_FLAGS_RUN_ON_EMT);
1015#endif
1016#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
1017 DBGFR3InfoRegisterInternalArgv(pVM, "tb", "IEM translation block", iemR3InfoTb, DBGFINFO_FLAGS_RUN_ON_EMT);
1018#endif
1019#ifdef VBOX_WITH_DEBUGGER
1020 iemR3RegisterDebuggerCommands();
1021#endif
1022
1023 return VINF_SUCCESS;
1024}
1025
1026
1027VMMR3DECL(int) IEMR3Term(PVM pVM)
1028{
1029 NOREF(pVM);
1030#ifdef IEM_WITH_TLB_TRACE
1031 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1032 {
1033 PVMCPU const pVCpu = pVM->apCpusR3[idCpu];
1034 RTMemPageFree(pVCpu->iem.s.paTlbTraceEntries,
1035 RT_BIT_Z(pVCpu->iem.s.cTlbTraceEntriesShift) * sizeof(*pVCpu->iem.s.paTlbTraceEntries));
1036 }
1037#endif
1038 return VINF_SUCCESS;
1039}
1040
1041
1042VMMR3DECL(void) IEMR3Relocate(PVM pVM)
1043{
1044 RT_NOREF(pVM);
1045}
1046
1047
1048/**
1049 * Gets the name of a generic IEM exit code.
1050 *
1051 * @returns Pointer to read only string if @a uExit is known, otherwise NULL.
1052 * @param uExit The IEM exit to name.
1053 */
1054VMMR3DECL(const char *) IEMR3GetExitName(uint32_t uExit)
1055{
1056 static const char * const s_apszNames[] =
1057 {
1058 /* external interrupts */
1059 "ExtInt 00h", "ExtInt 01h", "ExtInt 02h", "ExtInt 03h", "ExtInt 04h", "ExtInt 05h", "ExtInt 06h", "ExtInt 07h",
1060 "ExtInt 08h", "ExtInt 09h", "ExtInt 0ah", "ExtInt 0bh", "ExtInt 0ch", "ExtInt 0dh", "ExtInt 0eh", "ExtInt 0fh",
1061 "ExtInt 10h", "ExtInt 11h", "ExtInt 12h", "ExtInt 13h", "ExtInt 14h", "ExtInt 15h", "ExtInt 16h", "ExtInt 17h",
1062 "ExtInt 18h", "ExtInt 19h", "ExtInt 1ah", "ExtInt 1bh", "ExtInt 1ch", "ExtInt 1dh", "ExtInt 1eh", "ExtInt 1fh",
1063 "ExtInt 20h", "ExtInt 21h", "ExtInt 22h", "ExtInt 23h", "ExtInt 24h", "ExtInt 25h", "ExtInt 26h", "ExtInt 27h",
1064 "ExtInt 28h", "ExtInt 29h", "ExtInt 2ah", "ExtInt 2bh", "ExtInt 2ch", "ExtInt 2dh", "ExtInt 2eh", "ExtInt 2fh",
1065 "ExtInt 30h", "ExtInt 31h", "ExtInt 32h", "ExtInt 33h", "ExtInt 34h", "ExtInt 35h", "ExtInt 36h", "ExtInt 37h",
1066 "ExtInt 38h", "ExtInt 39h", "ExtInt 3ah", "ExtInt 3bh", "ExtInt 3ch", "ExtInt 3dh", "ExtInt 3eh", "ExtInt 3fh",
1067 "ExtInt 40h", "ExtInt 41h", "ExtInt 42h", "ExtInt 43h", "ExtInt 44h", "ExtInt 45h", "ExtInt 46h", "ExtInt 47h",
1068 "ExtInt 48h", "ExtInt 49h", "ExtInt 4ah", "ExtInt 4bh", "ExtInt 4ch", "ExtInt 4dh", "ExtInt 4eh", "ExtInt 4fh",
1069 "ExtInt 50h", "ExtInt 51h", "ExtInt 52h", "ExtInt 53h", "ExtInt 54h", "ExtInt 55h", "ExtInt 56h", "ExtInt 57h",
1070 "ExtInt 58h", "ExtInt 59h", "ExtInt 5ah", "ExtInt 5bh", "ExtInt 5ch", "ExtInt 5dh", "ExtInt 5eh", "ExtInt 5fh",
1071 "ExtInt 60h", "ExtInt 61h", "ExtInt 62h", "ExtInt 63h", "ExtInt 64h", "ExtInt 65h", "ExtInt 66h", "ExtInt 67h",
1072 "ExtInt 68h", "ExtInt 69h", "ExtInt 6ah", "ExtInt 6bh", "ExtInt 6ch", "ExtInt 6dh", "ExtInt 6eh", "ExtInt 6fh",
1073 "ExtInt 70h", "ExtInt 71h", "ExtInt 72h", "ExtInt 73h", "ExtInt 74h", "ExtInt 75h", "ExtInt 76h", "ExtInt 77h",
1074 "ExtInt 78h", "ExtInt 79h", "ExtInt 7ah", "ExtInt 7bh", "ExtInt 7ch", "ExtInt 7dh", "ExtInt 7eh", "ExtInt 7fh",
1075 "ExtInt 80h", "ExtInt 81h", "ExtInt 82h", "ExtInt 83h", "ExtInt 84h", "ExtInt 85h", "ExtInt 86h", "ExtInt 87h",
1076 "ExtInt 88h", "ExtInt 89h", "ExtInt 8ah", "ExtInt 8bh", "ExtInt 8ch", "ExtInt 8dh", "ExtInt 8eh", "ExtInt 8fh",
1077 "ExtInt 90h", "ExtInt 91h", "ExtInt 92h", "ExtInt 93h", "ExtInt 94h", "ExtInt 95h", "ExtInt 96h", "ExtInt 97h",
1078 "ExtInt 98h", "ExtInt 99h", "ExtInt 9ah", "ExtInt 9bh", "ExtInt 9ch", "ExtInt 9dh", "ExtInt 9eh", "ExtInt 9fh",
1079 "ExtInt a0h", "ExtInt a1h", "ExtInt a2h", "ExtInt a3h", "ExtInt a4h", "ExtInt a5h", "ExtInt a6h", "ExtInt a7h",
1080 "ExtInt a8h", "ExtInt a9h", "ExtInt aah", "ExtInt abh", "ExtInt ach", "ExtInt adh", "ExtInt aeh", "ExtInt afh",
1081 "ExtInt b0h", "ExtInt b1h", "ExtInt b2h", "ExtInt b3h", "ExtInt b4h", "ExtInt b5h", "ExtInt b6h", "ExtInt b7h",
1082 "ExtInt b8h", "ExtInt b9h", "ExtInt bah", "ExtInt bbh", "ExtInt bch", "ExtInt bdh", "ExtInt beh", "ExtInt bfh",
1083 "ExtInt c0h", "ExtInt c1h", "ExtInt c2h", "ExtInt c3h", "ExtInt c4h", "ExtInt c5h", "ExtInt c6h", "ExtInt c7h",
1084 "ExtInt c8h", "ExtInt c9h", "ExtInt cah", "ExtInt cbh", "ExtInt cch", "ExtInt cdh", "ExtInt ceh", "ExtInt cfh",
1085 "ExtInt d0h", "ExtInt d1h", "ExtInt d2h", "ExtInt d3h", "ExtInt d4h", "ExtInt d5h", "ExtInt d6h", "ExtInt d7h",
1086 "ExtInt d8h", "ExtInt d9h", "ExtInt dah", "ExtInt dbh", "ExtInt dch", "ExtInt ddh", "ExtInt deh", "ExtInt dfh",
1087 "ExtInt e0h", "ExtInt e1h", "ExtInt e2h", "ExtInt e3h", "ExtInt e4h", "ExtInt e5h", "ExtInt e6h", "ExtInt e7h",
1088 "ExtInt e8h", "ExtInt e9h", "ExtInt eah", "ExtInt ebh", "ExtInt ech", "ExtInt edh", "ExtInt eeh", "ExtInt efh",
1089 "ExtInt f0h", "ExtInt f1h", "ExtInt f2h", "ExtInt f3h", "ExtInt f4h", "ExtInt f5h", "ExtInt f6h", "ExtInt f7h",
1090 "ExtInt f8h", "ExtInt f9h", "ExtInt fah", "ExtInt fbh", "ExtInt fch", "ExtInt fdh", "ExtInt feh", "ExtInt ffh",
1091 /* software interrups */
1092 "SoftInt 00h", "SoftInt 01h", "SoftInt 02h", "SoftInt 03h", "SoftInt 04h", "SoftInt 05h", "SoftInt 06h", "SoftInt 07h",
1093 "SoftInt 08h", "SoftInt 09h", "SoftInt 0ah", "SoftInt 0bh", "SoftInt 0ch", "SoftInt 0dh", "SoftInt 0eh", "SoftInt 0fh",
1094 "SoftInt 10h", "SoftInt 11h", "SoftInt 12h", "SoftInt 13h", "SoftInt 14h", "SoftInt 15h", "SoftInt 16h", "SoftInt 17h",
1095 "SoftInt 18h", "SoftInt 19h", "SoftInt 1ah", "SoftInt 1bh", "SoftInt 1ch", "SoftInt 1dh", "SoftInt 1eh", "SoftInt 1fh",
1096 "SoftInt 20h", "SoftInt 21h", "SoftInt 22h", "SoftInt 23h", "SoftInt 24h", "SoftInt 25h", "SoftInt 26h", "SoftInt 27h",
1097 "SoftInt 28h", "SoftInt 29h", "SoftInt 2ah", "SoftInt 2bh", "SoftInt 2ch", "SoftInt 2dh", "SoftInt 2eh", "SoftInt 2fh",
1098 "SoftInt 30h", "SoftInt 31h", "SoftInt 32h", "SoftInt 33h", "SoftInt 34h", "SoftInt 35h", "SoftInt 36h", "SoftInt 37h",
1099 "SoftInt 38h", "SoftInt 39h", "SoftInt 3ah", "SoftInt 3bh", "SoftInt 3ch", "SoftInt 3dh", "SoftInt 3eh", "SoftInt 3fh",
1100 "SoftInt 40h", "SoftInt 41h", "SoftInt 42h", "SoftInt 43h", "SoftInt 44h", "SoftInt 45h", "SoftInt 46h", "SoftInt 47h",
1101 "SoftInt 48h", "SoftInt 49h", "SoftInt 4ah", "SoftInt 4bh", "SoftInt 4ch", "SoftInt 4dh", "SoftInt 4eh", "SoftInt 4fh",
1102 "SoftInt 50h", "SoftInt 51h", "SoftInt 52h", "SoftInt 53h", "SoftInt 54h", "SoftInt 55h", "SoftInt 56h", "SoftInt 57h",
1103 "SoftInt 58h", "SoftInt 59h", "SoftInt 5ah", "SoftInt 5bh", "SoftInt 5ch", "SoftInt 5dh", "SoftInt 5eh", "SoftInt 5fh",
1104 "SoftInt 60h", "SoftInt 61h", "SoftInt 62h", "SoftInt 63h", "SoftInt 64h", "SoftInt 65h", "SoftInt 66h", "SoftInt 67h",
1105 "SoftInt 68h", "SoftInt 69h", "SoftInt 6ah", "SoftInt 6bh", "SoftInt 6ch", "SoftInt 6dh", "SoftInt 6eh", "SoftInt 6fh",
1106 "SoftInt 70h", "SoftInt 71h", "SoftInt 72h", "SoftInt 73h", "SoftInt 74h", "SoftInt 75h", "SoftInt 76h", "SoftInt 77h",
1107 "SoftInt 78h", "SoftInt 79h", "SoftInt 7ah", "SoftInt 7bh", "SoftInt 7ch", "SoftInt 7dh", "SoftInt 7eh", "SoftInt 7fh",
1108 "SoftInt 80h", "SoftInt 81h", "SoftInt 82h", "SoftInt 83h", "SoftInt 84h", "SoftInt 85h", "SoftInt 86h", "SoftInt 87h",
1109 "SoftInt 88h", "SoftInt 89h", "SoftInt 8ah", "SoftInt 8bh", "SoftInt 8ch", "SoftInt 8dh", "SoftInt 8eh", "SoftInt 8fh",
1110 "SoftInt 90h", "SoftInt 91h", "SoftInt 92h", "SoftInt 93h", "SoftInt 94h", "SoftInt 95h", "SoftInt 96h", "SoftInt 97h",
1111 "SoftInt 98h", "SoftInt 99h", "SoftInt 9ah", "SoftInt 9bh", "SoftInt 9ch", "SoftInt 9dh", "SoftInt 9eh", "SoftInt 9fh",
1112 "SoftInt a0h", "SoftInt a1h", "SoftInt a2h", "SoftInt a3h", "SoftInt a4h", "SoftInt a5h", "SoftInt a6h", "SoftInt a7h",
1113 "SoftInt a8h", "SoftInt a9h", "SoftInt aah", "SoftInt abh", "SoftInt ach", "SoftInt adh", "SoftInt aeh", "SoftInt afh",
1114 "SoftInt b0h", "SoftInt b1h", "SoftInt b2h", "SoftInt b3h", "SoftInt b4h", "SoftInt b5h", "SoftInt b6h", "SoftInt b7h",
1115 "SoftInt b8h", "SoftInt b9h", "SoftInt bah", "SoftInt bbh", "SoftInt bch", "SoftInt bdh", "SoftInt beh", "SoftInt bfh",
1116 "SoftInt c0h", "SoftInt c1h", "SoftInt c2h", "SoftInt c3h", "SoftInt c4h", "SoftInt c5h", "SoftInt c6h", "SoftInt c7h",
1117 "SoftInt c8h", "SoftInt c9h", "SoftInt cah", "SoftInt cbh", "SoftInt cch", "SoftInt cdh", "SoftInt ceh", "SoftInt cfh",
1118 "SoftInt d0h", "SoftInt d1h", "SoftInt d2h", "SoftInt d3h", "SoftInt d4h", "SoftInt d5h", "SoftInt d6h", "SoftInt d7h",
1119 "SoftInt d8h", "SoftInt d9h", "SoftInt dah", "SoftInt dbh", "SoftInt dch", "SoftInt ddh", "SoftInt deh", "SoftInt dfh",
1120 "SoftInt e0h", "SoftInt e1h", "SoftInt e2h", "SoftInt e3h", "SoftInt e4h", "SoftInt e5h", "SoftInt e6h", "SoftInt e7h",
1121 "SoftInt e8h", "SoftInt e9h", "SoftInt eah", "SoftInt ebh", "SoftInt ech", "SoftInt edh", "SoftInt eeh", "SoftInt efh",
1122 "SoftInt f0h", "SoftInt f1h", "SoftInt f2h", "SoftInt f3h", "SoftInt f4h", "SoftInt f5h", "SoftInt f6h", "SoftInt f7h",
1123 "SoftInt f8h", "SoftInt f9h", "SoftInt fah", "SoftInt fbh", "SoftInt fch", "SoftInt fdh", "SoftInt feh", "SoftInt ffh",
1124 };
1125 if (uExit < RT_ELEMENTS(s_apszNames))
1126 return s_apszNames[uExit];
1127 return NULL;
1128}
1129
1130
1131/** Worker for iemR3InfoTlbPrintSlots and iemR3InfoTlbPrintAddress. */
1132static void iemR3InfoTlbPrintHeader(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb, bool *pfHeader)
1133{
1134 if (*pfHeader)
1135 return;
1136 pHlp->pfnPrintf(pHlp, "%cTLB for CPU %u:\n", &pVCpu->iem.s.CodeTlb == pTlb ? 'I' : 'D', pVCpu->idCpu);
1137 *pfHeader = true;
1138}
1139
1140
1141#define IEMR3INFOTLB_F_ONLY_VALID RT_BIT_32(0)
1142#define IEMR3INFOTLB_F_CHECK RT_BIT_32(1)
1143
1144/** Worker for iemR3InfoTlbPrintSlots and iemR3InfoTlbPrintAddress. */
1145static void iemR3InfoTlbPrintSlot(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb, IEMTLBENTRY const *pTlbe,
1146 uint32_t uSlot, uint32_t fFlags)
1147{
1148#ifndef VBOX_VMM_TARGET_ARMV8
1149 uint64_t const uTlbRevision = !(uSlot & 1) ? pTlb->uTlbRevision : pTlb->uTlbRevisionGlobal;
1150#else
1151 uint64_t const uTlbRevision = pTlb->uTlbRevision;
1152#endif
1153 if ((fFlags & IEMR3INFOTLB_F_ONLY_VALID) && (pTlbe->uTag & IEMTLB_REVISION_MASK) != uTlbRevision)
1154 return;
1155
1156 /* The address needs to be sign extended, thus the shifting fun here.*/
1157 RTGCPTR const GCPtr = (RTGCINTPTR)((pTlbe->uTag & ~IEMTLB_REVISION_MASK) << (64 - IEMTLB_TAG_ADDR_WIDTH))
1158 >> (64 - IEMTLB_TAG_ADDR_WIDTH - GUEST_PAGE_SHIFT);
1159 const char *pszValid = "";
1160#ifndef VBOX_VMM_TARGET_ARMV8
1161 char szTmp[128];
1162 if (fFlags & IEMR3INFOTLB_F_CHECK)
1163 {
1164 uint32_t const fInvSlotG = (uint32_t)!(uSlot & 1) << X86_PTE_BIT_G;
1165 PGMPTWALKFAST WalkFast;
1166 int rc = PGMGstQueryPageFast(pVCpu, GCPtr, 0 /*fFlags - don't check or modify anything */, &WalkFast);
1167 pszValid = szTmp;
1168 if (RT_FAILURE(rc))
1169 switch (rc)
1170 {
1171 case VERR_PAGE_TABLE_NOT_PRESENT:
1172 switch ((WalkFast.fFailed & PGM_WALKFAIL_LEVEL_MASK) >> PGM_WALKFAIL_LEVEL_SHIFT)
1173 {
1174 case 1: pszValid = " stale(page-not-present)"; break;
1175 case 2: pszValid = " stale(pd-entry-not-present)"; break;
1176 case 3: pszValid = " stale(pdptr-entry-not-present)"; break;
1177 case 4: pszValid = " stale(pml4-entry-not-present)"; break;
1178 case 5: pszValid = " stale(pml5-entry-not-present)"; break;
1179 default: pszValid = " stale(VERR_PAGE_TABLE_NOT_PRESENT)"; break;
1180 }
1181 break;
1182 default: RTStrPrintf(szTmp, sizeof(szTmp), " stale(rc=%d)", rc); break;
1183 }
1184 else if (WalkFast.GCPhys != pTlbe->GCPhys)
1185 RTStrPrintf(szTmp, sizeof(szTmp), " stale(GCPhys=%RGp)", WalkFast.GCPhys);
1186 else if ( (~WalkFast.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_G | X86_PTE_A | X86_PTE_D))
1187 == ( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1188 | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_ACCESSED))
1189 | fInvSlotG ) )
1190 pszValid = " still-valid";
1191 else if ( (~WalkFast.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_G))
1192 == ((pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER)) | fInvSlotG) )
1193 switch ( (~WalkFast.fEffective & (X86_PTE_A | X86_PTE_D))
1194 ^ (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_ACCESSED)) )
1195 {
1196 case X86_PTE_A:
1197 pszValid = WalkFast.fEffective & X86_PTE_A ? " still-valid(accessed-now)" : " still-valid(accessed-no-more)";
1198 break;
1199 case X86_PTE_D:
1200 pszValid = WalkFast.fEffective & X86_PTE_D ? " still-valid(dirty-now)" : " still-valid(dirty-no-more)";
1201 break;
1202 case X86_PTE_D | X86_PTE_A:
1203 RTStrPrintf(szTmp, sizeof(szTmp), " still-valid(%s%s)",
1204 (~WalkFast.fEffective & X86_PTE_D) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY) ? ""
1205 : WalkFast.fEffective & X86_PTE_D ? "dirty-now" : "dirty-no-more",
1206 (~WalkFast.fEffective & X86_PTE_A) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED) ? ""
1207 : WalkFast.fEffective & X86_PTE_A ? " accessed-now" : " accessed-no-more");
1208 break;
1209 default: AssertFailed(); break;
1210 }
1211 else
1212 RTStrPrintf(szTmp, sizeof(szTmp), " stale(%s%s%s%s%s)",
1213 (~WalkFast.fEffective & X86_PTE_RW) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE) ? ""
1214 : WalkFast.fEffective & X86_PTE_RW ? "writeable-now" : "writable-no-more",
1215 (~WalkFast.fEffective & X86_PTE_US) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) ? ""
1216 : WalkFast.fEffective & X86_PTE_US ? " user-now" : " user-no-more",
1217 (~WalkFast.fEffective & X86_PTE_G) == fInvSlotG ? ""
1218 : WalkFast.fEffective & X86_PTE_G ? " global-now" : " global-no-more",
1219 (~WalkFast.fEffective & X86_PTE_D) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY) ? ""
1220 : WalkFast.fEffective & X86_PTE_D ? " dirty-now" : " dirty-no-more",
1221 (~WalkFast.fEffective & X86_PTE_A) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED) ? ""
1222 : WalkFast.fEffective & X86_PTE_A ? " accessed-now" : " accessed-no-more");
1223 }
1224#else
1225 RT_NOREF(pVCpu);
1226#endif
1227
1228 pHlp->pfnPrintf(pHlp, IEMTLB_SLOT_FMT ": %s %#018RX64 -> %RGp / %p / %#05x %s%s%s%s%s%s%s/%s%s%s%s/%s %s%s\n",
1229 uSlot,
1230 (pTlbe->uTag & IEMTLB_REVISION_MASK) == uTlbRevision ? "valid "
1231 : (pTlbe->uTag & IEMTLB_REVISION_MASK) == 0 ? "empty "
1232 : "expired",
1233 GCPtr, /* -> */
1234 pTlbe->GCPhys, /* / */ pTlbe->pbMappingR3,
1235 /* / */
1236 (uint32_t)(pTlbe->fFlagsAndPhysRev & ~IEMTLBE_F_PHYS_REV),
1237 /* */
1238 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE ? "R-" : "RW",
1239 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC ? "-" : "X",
1240 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED ? "-" : "A",
1241 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY ? "-" : "D",
1242 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER ? "U" : "S",
1243 !(uSlot & 1) ? "-" : "G",
1244 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE ? "4K" : "2M",
1245 /* / */
1246 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_WRITE ? "-" : "w",
1247 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? "-" : "r",
1248 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? "u" : "-",
1249 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_CODE_PAGE ? "c" : "-",
1250 /* / */
1251 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3 ? "N" : "M",
1252 (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pTlb->uTlbPhysRev ? "phys-valid"
1253 : (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == 0 ? "phys-empty" : "phys-expired",
1254 pszValid);
1255}
1256
1257
1258/** Displays one or more TLB slots. */
1259static void iemR3InfoTlbPrintSlots(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb,
1260 uint32_t uSlot, uint32_t cSlots, uint32_t fFlags, bool *pfHeader)
1261{
1262 if (uSlot < RT_ELEMENTS(pTlb->aEntries))
1263 {
1264 if (cSlots > RT_ELEMENTS(pTlb->aEntries))
1265 {
1266 pHlp->pfnPrintf(pHlp, "error: Too many slots given: %u, adjusting it down to the max (%u)\n",
1267 cSlots, RT_ELEMENTS(pTlb->aEntries));
1268 cSlots = RT_ELEMENTS(pTlb->aEntries);
1269 }
1270
1271 iemR3InfoTlbPrintHeader(pVCpu, pHlp, pTlb, pfHeader);
1272 while (cSlots-- > 0)
1273 {
1274 IEMTLBENTRY const Tlbe = pTlb->aEntries[uSlot];
1275 iemR3InfoTlbPrintSlot(pVCpu, pHlp, pTlb, &Tlbe, uSlot, fFlags);
1276 uSlot = (uSlot + 1) % RT_ELEMENTS(pTlb->aEntries);
1277 }
1278 }
1279 else
1280 pHlp->pfnPrintf(pHlp, "error: TLB slot is out of range: %u (%#x), max %u (%#x)\n",
1281 uSlot, uSlot, RT_ELEMENTS(pTlb->aEntries) - 1, RT_ELEMENTS(pTlb->aEntries) - 1);
1282}
1283
1284
/** Displays the TLB slot for the given address.
 *
 * Computes the tag and slot for @a uAddress and prints the matching entry.
 * On x86 the non-global entry sits in the even slot and the global entry in
 * the following odd slot, so both are shown there.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pHlp        The info helper callbacks.
 * @param   pTlb        The TLB to look the address up in.
 * @param   uAddress    The guest virtual address to look up.
 * @param   fFlags      IEMR3INFOTLB_F_XXX, passed on to the slot printer.
 * @param   pfHeader    Header-printed indicator for iemR3InfoTlbPrintHeader.
 */
static void iemR3InfoTlbPrintAddress(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb,
                                     uint64_t uAddress, uint32_t fFlags, bool *pfHeader)
{
    iemR3InfoTlbPrintHeader(pVCpu, pHlp, pTlb, pfHeader);

    uint64_t const uTag  = IEMTLB_CALC_TAG_NO_REV(uAddress);
#ifdef IEMTLB_TAG_TO_EVEN_INDEX
    /* Even/odd slot pairing: start at the even (non-global) slot. */
    uint32_t const uSlot = IEMTLB_TAG_TO_EVEN_INDEX(uTag);
#else
    uint32_t const uSlot = IEMTLB_TAG_TO_INDEX(uTag);
#endif
    IEMTLBENTRY const TlbeL = pTlb->aEntries[uSlot];        /* non-global (local) entry */
#ifndef VBOX_VMM_TARGET_ARMV8
    IEMTLBENTRY const TlbeG = pTlb->aEntries[uSlot + 1];    /* global entry */
#endif
    pHlp->pfnPrintf(pHlp, "Address %#RX64 -> slot %#x - %s\n", uAddress, uSlot,
                    TlbeL.uTag == (uTag | pTlb->uTlbRevision) ? "match"
                    : (TlbeL.uTag & ~IEMTLB_REVISION_MASK) == uTag ? "expired" : "mismatch");
    iemR3InfoTlbPrintSlot(pVCpu, pHlp, pTlb, &TlbeL, uSlot, fFlags);

#ifndef VBOX_VMM_TARGET_ARMV8
    pHlp->pfnPrintf(pHlp, "Address %#RX64 -> slot %#x - %s\n", uAddress, uSlot + 1,
                    TlbeG.uTag == (uTag | pTlb->uTlbRevisionGlobal) ? "match"
                    : (TlbeG.uTag & ~IEMTLB_REVISION_MASK) == uTag ? "expired" : "mismatch");
    iemR3InfoTlbPrintSlot(pVCpu, pHlp, pTlb, &TlbeG, uSlot + 1, fFlags);
#endif
}
1313
1314
1315/** Common worker for iemR3InfoDTlb and iemR3InfoITlb. */
1316static void iemR3InfoTlbCommon(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs, bool fITlb)
1317{
1318 /*
1319 * This is entirely argument driven.
1320 */
1321 static RTGETOPTDEF const s_aOptions[] =
1322 {
1323 { "--cpu", 'c', RTGETOPT_REQ_UINT32 },
1324 { "--vcpu", 'c', RTGETOPT_REQ_UINT32 },
1325 { "--check", 'C', RTGETOPT_REQ_NOTHING },
1326 { "all", 'A', RTGETOPT_REQ_NOTHING },
1327 { "--all", 'A', RTGETOPT_REQ_NOTHING },
1328 { "--address", 'a', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1329 { "--range", 'r', RTGETOPT_REQ_UINT32_PAIR | RTGETOPT_FLAG_HEX },
1330 { "--slot", 's', RTGETOPT_REQ_UINT32 | RTGETOPT_FLAG_HEX },
1331 { "--only-valid", 'v', RTGETOPT_REQ_NOTHING },
1332 };
1333
1334 RTGETOPTSTATE State;
1335 int rc = RTGetOptInit(&State, cArgs, papszArgs, s_aOptions, RT_ELEMENTS(s_aOptions), 0 /*iFirst*/, 0 /*fFlags*/);
1336 AssertRCReturnVoid(rc);
1337
1338 uint32_t cActionArgs = 0;
1339 bool fNeedHeader = true;
1340 bool fAddressMode = true;
1341 uint32_t fFlags = 0;
1342 PVMCPU const pVCpuCall = VMMGetCpu(pVM);
1343 PVMCPU pVCpu = pVCpuCall;
1344 if (!pVCpu)
1345 pVCpu = VMMGetCpuById(pVM, 0);
1346
1347 RTGETOPTUNION ValueUnion;
1348 while ((rc = RTGetOpt(&State, &ValueUnion)) != 0)
1349 {
1350 switch (rc)
1351 {
1352 case 'c':
1353 if (ValueUnion.u32 >= pVM->cCpus)
1354 pHlp->pfnPrintf(pHlp, "error: Invalid CPU ID: %u\n", ValueUnion.u32);
1355 else if (!pVCpu || pVCpu->idCpu != ValueUnion.u32)
1356 {
1357 pVCpu = VMMGetCpuById(pVM, ValueUnion.u32);
1358 fNeedHeader = true;
1359 if (!pVCpuCall || pVCpuCall->idCpu != ValueUnion.u32)
1360 {
1361 pHlp->pfnPrintf(pHlp, "info: Can't check guest PTs when switching to a different VCpu! Targetting %u, on %u.\n",
1362 ValueUnion.u32, pVCpuCall->idCpu);
1363 fFlags &= ~IEMR3INFOTLB_F_CHECK;
1364 }
1365 }
1366 break;
1367
1368 case 'C':
1369 if (!pVCpuCall)
1370 pHlp->pfnPrintf(pHlp, "error: Can't check guest PT when not running on an EMT!\n");
1371 else if (pVCpu != pVCpuCall)
1372 pHlp->pfnPrintf(pHlp, "error: Can't check guest PTs when on a different EMT! Targetting %u, on %u.\n",
1373 pVCpu->idCpu, pVCpuCall->idCpu);
1374 else
1375 fFlags |= IEMR3INFOTLB_F_CHECK;
1376 break;
1377
1378 case 'a':
1379 iemR3InfoTlbPrintAddress(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1380 ValueUnion.u64, fFlags, &fNeedHeader);
1381 fAddressMode = true;
1382 cActionArgs++;
1383 break;
1384
1385 case 'A':
1386 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1387 0, RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries), fFlags, &fNeedHeader);
1388 cActionArgs++;
1389 break;
1390
1391 case 'r':
1392 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1393 ValueUnion.PairU32.uFirst, ValueUnion.PairU32.uSecond, fFlags, &fNeedHeader);
1394 fAddressMode = false;
1395 cActionArgs++;
1396 break;
1397
1398 case 's':
1399 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1400 ValueUnion.u32, 1, fFlags, &fNeedHeader);
1401 fAddressMode = false;
1402 cActionArgs++;
1403 break;
1404
1405 case 'v':
1406 fFlags |= IEMR3INFOTLB_F_ONLY_VALID;
1407 break;
1408
1409 case VINF_GETOPT_NOT_OPTION:
1410 if (fAddressMode)
1411 {
1412 uint64_t uAddr;
1413 rc = RTStrToUInt64Full(ValueUnion.psz, 16, &uAddr);
1414 if (RT_SUCCESS(rc) && rc != VWRN_NUMBER_TOO_BIG)
1415 iemR3InfoTlbPrintAddress(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1416 uAddr, fFlags, &fNeedHeader);
1417 else
1418 pHlp->pfnPrintf(pHlp, "error: Invalid or malformed guest address '%s': %Rrc\n", ValueUnion.psz, rc);
1419 }
1420 else
1421 {
1422 uint32_t uSlot;
1423 rc = RTStrToUInt32Full(ValueUnion.psz, 16, &uSlot);
1424 if (RT_SUCCESS(rc) && rc != VWRN_NUMBER_TOO_BIG)
1425 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1426 uSlot, 1, fFlags, &fNeedHeader);
1427 else
1428 pHlp->pfnPrintf(pHlp, "error: Invalid or malformed TLB slot number '%s': %Rrc\n", ValueUnion.psz, rc);
1429 }
1430 cActionArgs++;
1431 break;
1432
1433 case 'h':
1434 pHlp->pfnPrintf(pHlp,
1435 "Usage: info %ctlb [options]\n"
1436 "\n"
1437 "Options:\n"
1438 " -c<n>, --cpu=<n>, --vcpu=<n>\n"
1439 " Selects the CPU which TLBs we're looking at. Default: Caller / 0\n"
1440 " -C,--check\n"
1441 " Check valid entries against guest PTs.\n"
1442 " -A, --all, all\n"
1443 " Display all the TLB entries (default if no other args).\n"
1444 " -a<virt>, --address=<virt>\n"
1445 " Shows the TLB entry for the specified guest virtual address.\n"
1446 " -r<slot:count>, --range=<slot:count>\n"
1447 " Shows the TLB entries for the specified slot range.\n"
1448 " -s<slot>,--slot=<slot>\n"
1449 " Shows the given TLB slot.\n"
1450 " -v,--only-valid\n"
1451 " Only show valid TLB entries (TAG, not phys)\n"
1452 "\n"
1453 "Non-options are interpreted according to the last -a, -r or -s option,\n"
1454 "defaulting to addresses if not preceeded by any of those options.\n"
1455 , fITlb ? 'i' : 'd');
1456 return;
1457
1458 default:
1459 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
1460 return;
1461 }
1462 }
1463
1464 /*
1465 * If no action taken, we display all (-A) by default.
1466 */
1467 if (!cActionArgs)
1468 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1469 0, RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries), fFlags, &fNeedHeader);
1470}
1471
1472
1473/**
1474 * @callback_method_impl{FNDBGFINFOARGVINT, itlb}
1475 */
1476static DECLCALLBACK(void) iemR3InfoITlb(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
1477{
1478 return iemR3InfoTlbCommon(pVM, pHlp, cArgs, papszArgs, true /*fITlb*/);
1479}
1480
1481
1482/**
1483 * @callback_method_impl{FNDBGFINFOARGVINT, dtlb}
1484 */
1485static DECLCALLBACK(void) iemR3InfoDTlb(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
1486{
1487 return iemR3InfoTlbCommon(pVM, pHlp, cArgs, papszArgs, false /*fITlb*/);
1488}
1489
1490
1491#ifdef IEM_WITH_TLB_TRACE
1492/**
1493 * @callback_method_impl{FNDBGFINFOARGVINT, tlbtrace}
1494 */
1495static DECLCALLBACK(void) iemR3InfoTlbTrace(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
1496{
1497 /*
1498 * Parse arguments.
1499 */
1500 static RTGETOPTDEF const s_aOptions[] =
1501 {
1502 { "--cpu", 'c', RTGETOPT_REQ_UINT32 },
1503 { "--vcpu", 'c', RTGETOPT_REQ_UINT32 },
1504 { "--last", 'l', RTGETOPT_REQ_UINT32 },
1505 { "--limit", 'l', RTGETOPT_REQ_UINT32 },
1506 { "--stop-at-global-flush", 'g', RTGETOPT_REQ_NOTHING },
1507 { "--resolve-rip", 'r', RTGETOPT_REQ_NOTHING },
1508 };
1509
1510 RTGETOPTSTATE State;
1511 int rc = RTGetOptInit(&State, cArgs, papszArgs, s_aOptions, RT_ELEMENTS(s_aOptions), 0 /*iFirst*/, 0 /*fFlags*/);
1512 AssertRCReturnVoid(rc);
1513
1514 uint32_t cLimit = UINT32_MAX;
1515 bool fStopAtGlobalFlush = false;
1516 bool fResolveRip = false;
1517 PVMCPU const pVCpuCall = VMMGetCpu(pVM);
1518 PVMCPU pVCpu = pVCpuCall;
1519 if (!pVCpu)
1520 pVCpu = VMMGetCpuById(pVM, 0);
1521
1522 RTGETOPTUNION ValueUnion;
1523 while ((rc = RTGetOpt(&State, &ValueUnion)) != 0)
1524 {
1525 switch (rc)
1526 {
1527 case 'c':
1528 if (ValueUnion.u32 >= pVM->cCpus)
1529 pHlp->pfnPrintf(pHlp, "error: Invalid CPU ID: %u\n", ValueUnion.u32);
1530 else if (!pVCpu || pVCpu->idCpu != ValueUnion.u32)
1531 pVCpu = VMMGetCpuById(pVM, ValueUnion.u32);
1532 break;
1533
1534 case 'l':
1535 cLimit = ValueUnion.u32;
1536 break;
1537
1538 case 'g':
1539 fStopAtGlobalFlush = true;
1540 break;
1541
1542 case 'r':
1543 fResolveRip = true;
1544 break;
1545
1546 case 'h':
1547 pHlp->pfnPrintf(pHlp,
1548 "Usage: info tlbtrace [options] [n]\n"
1549 "\n"
1550 "Options:\n"
1551 " -c<n>, --cpu=<n>, --vcpu=<n>\n"
1552 " Selects the CPU which TLB trace we're looking at. Default: Caller / 0\n"
1553 " [n], -l<n>, --last=<n>\n"
1554 " Limit display to the last N entries. Default: all\n"
1555 " -g, --stop-at-global-flush\n"
1556 " Stop after the first global flush entry.\n"
1557 " -r, --resolve-rip\n"
1558 " Resolve symbols for the flattened RIP addresses.\n"
1559 );
1560 return;
1561
1562 case VINF_GETOPT_NOT_OPTION:
1563 rc = RTStrToUInt32Full(ValueUnion.psz, 0, &cLimit);
1564 if (RT_SUCCESS(rc))
1565 break;
1566 pHlp->pfnPrintf(pHlp, "error: failed to convert '%s' to a number: %Rrc\n", ValueUnion.psz, rc);
1567 return;
1568
1569 default:
1570 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
1571 return;
1572 }
1573 }
1574
1575 /*
1576 * Get the details.
1577 */
1578 AssertReturnVoid(pVCpu);
1579 Assert(pVCpu->iem.s.cTlbTraceEntriesShift <= 28);
1580 uint32_t idx = pVCpu->iem.s.idxTlbTraceEntry;
1581 uint32_t const cShift = RT_MIN(pVCpu->iem.s.cTlbTraceEntriesShift, 28);
1582 uint32_t const fMask = RT_BIT_32(cShift) - 1;
1583 uint32_t cLeft = RT_MIN(RT_MIN(idx, RT_BIT_32(cShift)), cLimit);
1584 PCIEMTLBTRACEENTRY paEntries = pVCpu->iem.s.paTlbTraceEntries;
1585 if (cLeft && paEntries)
1586 {
1587 /*
1588 * Display the entries.
1589 */
1590 pHlp->pfnPrintf(pHlp, "TLB Trace for CPU %u:\n", pVCpu->idCpu);
1591 while (cLeft-- > 0)
1592 {
1593 PCIEMTLBTRACEENTRY const pCur = &paEntries[--idx & fMask];
1594 const char *pszSymbol = "";
1595 union
1596 {
1597 RTDBGSYMBOL Symbol;
1598 char ach[sizeof(RTDBGSYMBOL) + 32];
1599 } uBuf;
1600 if (fResolveRip)
1601 {
1602 RTGCINTPTR offDisp = 0;
1603 DBGFADDRESS Addr;
1604 rc = DBGFR3AsSymbolByAddr(pVM->pUVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pVM->pUVM, &Addr, pCur->rip),
1605 RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL
1606 | RTDBGSYMADDR_FLAGS_SKIP_ABS
1607 | RTDBGSYMADDR_FLAGS_SKIP_ABS_IN_DEFERRED,
1608 &offDisp, &uBuf.Symbol, NULL);
1609 if (RT_SUCCESS(rc))
1610 {
1611 /* Add displacement. */
1612 if (offDisp)
1613 {
1614 size_t const cchName = strlen(uBuf.Symbol.szName);
1615 char * const pszEndName = &uBuf.Symbol.szName[cchName];
1616 size_t const cbLeft = sizeof(uBuf) - sizeof(uBuf.Symbol) + sizeof(uBuf.Symbol.szName) - cchName;
1617 if (offDisp > 0)
1618 RTStrPrintf(pszEndName, cbLeft, "+%#1RGv", offDisp);
1619 else
1620 RTStrPrintf(pszEndName, cbLeft, "-%#1RGv", -offDisp);
1621 }
1622
1623 /* Put a space before it. */
1624 AssertCompile(RTASSERT_OFFSET_OF(RTDBGSYMBOL, szName) > 0);
1625 char *pszName = uBuf.Symbol.szName;
1626 *--pszName = ' ';
1627 pszSymbol = pszName;
1628 }
1629 }
1630 static const char *s_apszTlbType[2] = { "code", "data" };
1631 static const char *s_apszScanType[4] = { "skipped", "global", "non-global", "both" };
1632 switch (pCur->enmType)
1633 {
1634 case kIemTlbTraceType_InvlPg:
1635 pHlp->pfnPrintf(pHlp, "%u: %016RX64 invlpg %RGv slot=" IEMTLB_SLOT_FMT "%s\n", idx, pCur->rip,
1636 pCur->u64Param, (uint32_t)IEMTLB_ADDR_TO_EVEN_INDEX(pCur->u64Param), pszSymbol);
1637 break;
1638 case kIemTlbTraceType_EvictSlot:
1639 pHlp->pfnPrintf(pHlp, "%u: %016RX64 evict %s slot=" IEMTLB_SLOT_FMT " %RGv (%#RX64) gcphys=%RGp%s\n",
1640 idx, pCur->rip, s_apszTlbType[pCur->bParam & 1], pCur->u32Param,
1641 (RTGCINTPTR)((pCur->u64Param & ~IEMTLB_REVISION_MASK) << (64 - IEMTLB_TAG_ADDR_WIDTH))
1642 >> (64 - IEMTLB_TAG_ADDR_WIDTH - GUEST_PAGE_SHIFT), pCur->u64Param,
1643 pCur->u64Param2, pszSymbol);
1644 break;
1645 case kIemTlbTraceType_LargeEvictSlot:
1646 pHlp->pfnPrintf(pHlp, "%u: %016RX64 large evict %s slot=" IEMTLB_SLOT_FMT " %RGv (%#RX64) gcphys=%RGp%s\n",
1647 idx, pCur->rip, s_apszTlbType[pCur->bParam & 1], pCur->u32Param,
1648 (RTGCINTPTR)((pCur->u64Param & ~IEMTLB_REVISION_MASK) << (64 - IEMTLB_TAG_ADDR_WIDTH))
1649 >> (64 - IEMTLB_TAG_ADDR_WIDTH - GUEST_PAGE_SHIFT), pCur->u64Param,
1650 pCur->u64Param2, pszSymbol);
1651 break;
1652 case kIemTlbTraceType_LargeScan:
1653 pHlp->pfnPrintf(pHlp, "%u: %016RX64 large scan %s %s%s\n", idx, pCur->rip, s_apszTlbType[pCur->bParam & 1],
1654 s_apszScanType[pCur->u32Param & 3], pszSymbol);
1655 break;
1656
1657 case kIemTlbTraceType_Flush:
1658 pHlp->pfnPrintf(pHlp, "%u: %016RX64 flush %s rev=%#RX64%s\n", idx, pCur->rip,
1659 s_apszTlbType[pCur->bParam & 1], pCur->u64Param, pszSymbol);
1660 break;
1661 case kIemTlbTraceType_FlushGlobal:
1662 pHlp->pfnPrintf(pHlp, "%u: %016RX64 flush %s rev=%#RX64 grev=%#RX64%s\n", idx, pCur->rip,
1663 s_apszTlbType[pCur->bParam & 1], pCur->u64Param, pCur->u64Param2, pszSymbol);
1664 if (fStopAtGlobalFlush)
1665 return;
1666 break;
1667 case kIemTlbTraceType_Load:
1668 case kIemTlbTraceType_LoadGlobal:
1669 pHlp->pfnPrintf(pHlp, "%u: %016RX64 %cload %s %RGv slot=" IEMTLB_SLOT_FMT " gcphys=%RGp fTlb=%#RX32%s\n",
1670 idx, pCur->rip,
1671 pCur->enmType == kIemTlbTraceType_LoadGlobal ? 'g' : 'l', s_apszTlbType[pCur->bParam & 1],
1672 pCur->u64Param,
1673 (uint32_t)IEMTLB_ADDR_TO_EVEN_INDEX(pCur->u64Param)
1674 | (pCur->enmType == kIemTlbTraceType_LoadGlobal),
1675 (RTGCPTR)pCur->u64Param2, pCur->u32Param, pszSymbol);
1676 break;
1677
1678 case kIemTlbTraceType_Load_Cr0:
1679 pHlp->pfnPrintf(pHlp, "%u: %016RX64 load cr0 %08RX64 (was %08RX64)%s\n",
1680 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pszSymbol);
1681 break;
1682 case kIemTlbTraceType_Load_Cr3:
1683 pHlp->pfnPrintf(pHlp, "%u: %016RX64 load cr3 %016RX64 (was %016RX64)%s\n",
1684 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pszSymbol);
1685 break;
1686 case kIemTlbTraceType_Load_Cr4:
1687 pHlp->pfnPrintf(pHlp, "%u: %016RX64 load cr4 %08RX64 (was %08RX64)%s\n",
1688 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pszSymbol);
1689 break;
1690 case kIemTlbTraceType_Load_Efer:
1691 pHlp->pfnPrintf(pHlp, "%u: %016RX64 load efer %016RX64 (was %016RX64)%s\n",
1692 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pszSymbol);
1693 break;
1694
1695 case kIemTlbTraceType_Irq:
1696 pHlp->pfnPrintf(pHlp, "%u: %016RX64 irq %#04x flags=%#x eflboth=%#RX64%s\n",
1697 idx, pCur->rip, pCur->bParam, pCur->u32Param,
1698 pCur->u64Param & ((RT_BIT_64(CPUMX86EFLAGS_HW_BITS) - 1) | CPUMX86EFLAGS_INT_MASK_64),
1699 pszSymbol);
1700 break;
1701 case kIemTlbTraceType_Xcpt:
1702 if (pCur->u32Param & IEM_XCPT_FLAGS_CR2)
1703 pHlp->pfnPrintf(pHlp, "%u: %016RX64 xcpt %#04x flags=%#x errcd=%#x cr2=%RX64%s\n",
1704 idx, pCur->rip, pCur->bParam, pCur->u32Param, pCur->u64Param, pCur->u64Param2, pszSymbol);
1705 else if (pCur->u32Param & IEM_XCPT_FLAGS_ERR)
1706 pHlp->pfnPrintf(pHlp, "%u: %016RX64 xcpt %#04x flags=%#x errcd=%#x%s\n",
1707 idx, pCur->rip, pCur->bParam, pCur->u32Param, pCur->u64Param, pszSymbol);
1708 else
1709 pHlp->pfnPrintf(pHlp, "%u: %016RX64 xcpt %#04x flags=%#x%s\n",
1710 idx, pCur->rip, pCur->bParam, pCur->u32Param, pszSymbol);
1711 break;
1712 case kIemTlbTraceType_IRet:
1713 pHlp->pfnPrintf(pHlp, "%u: %016RX64 iret cs:rip=%04x:%016RX64 efl=%08RX32%s\n",
1714 idx, pCur->rip, pCur->u32Param, pCur->u64Param, (uint32_t)pCur->u64Param2, pszSymbol);
1715 break;
1716
1717 case kIemTlbTraceType_Tb_Compile:
1718 pHlp->pfnPrintf(pHlp, "%u: %016RX64 tb comp GCPhysPc=%012RX64%s\n",
1719 idx, pCur->rip, pCur->u64Param, pszSymbol);
1720 break;
1721 case kIemTlbTraceType_Tb_Exec_Threaded:
1722 pHlp->pfnPrintf(pHlp, "%u: %016RX64 tb thrd GCPhysPc=%012RX64 tb=%p used=%u%s\n",
1723 idx, pCur->rip, pCur->u64Param, (uintptr_t)pCur->u64Param2, pCur->u32Param, pszSymbol);
1724 break;
1725 case kIemTlbTraceType_Tb_Exec_Native:
1726 pHlp->pfnPrintf(pHlp, "%u: %016RX64 tb n8ve GCPhysPc=%012RX64 tb=%p used=%u%s\n",
1727 idx, pCur->rip, pCur->u64Param, (uintptr_t)pCur->u64Param2, pCur->u32Param, pszSymbol);
1728 break;
1729
1730 case kIemTlbTraceType_User0:
1731 pHlp->pfnPrintf(pHlp, "%u: %016RX64 user0 %016RX64 %016RX64 %08RX32 %02RX8%s\n",
1732 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pCur->u32Param, pCur->bParam, pszSymbol);
1733 break;
1734 case kIemTlbTraceType_User1:
1735 pHlp->pfnPrintf(pHlp, "%u: %016RX64 user1 %016RX64 %016RX64 %08RX32 %02RX8%s\n",
1736 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pCur->u32Param, pCur->bParam, pszSymbol);
1737 break;
1738 case kIemTlbTraceType_User2:
1739 pHlp->pfnPrintf(pHlp, "%u: %016RX64 user2 %016RX64 %016RX64 %08RX32 %02RX8%s\n",
1740 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pCur->u32Param, pCur->bParam, pszSymbol);
1741 break;
1742 case kIemTlbTraceType_User3:
1743 pHlp->pfnPrintf(pHlp, "%u: %016RX64 user3 %016RX64 %016RX64 %08RX32 %02RX8%s\n",
1744 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pCur->u32Param, pCur->bParam, pszSymbol);
1745 break;
1746
1747 case kIemTlbTraceType_Invalid:
1748 pHlp->pfnPrintf(pHlp, "%u: Invalid!\n");
1749 break;
1750 }
1751 }
1752 }
1753 else
1754 pHlp->pfnPrintf(pHlp, "No trace entries to display\n");
1755}
1756#endif /* IEM_WITH_TLB_TRACE */
1757
1758#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
1759/**
1760 * @callback_method_impl{FNDBGFINFOARGVINT, tb}
1761 */
1762static DECLCALLBACK(void) iemR3InfoTb(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
1763{
1764 /*
1765 * Parse arguments.
1766 */
1767 static RTGETOPTDEF const s_aOptions[] =
1768 {
1769 { "--cpu", 'c', RTGETOPT_REQ_UINT32 },
1770 { "--vcpu", 'c', RTGETOPT_REQ_UINT32 },
1771 { "--addr", 'a', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1772 { "--address", 'a', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1773 { "--phys", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1774 { "--physical", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1775 { "--phys-addr", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1776 { "--phys-address", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1777 { "--physical-address", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1778 { "--flags", 'f', RTGETOPT_REQ_UINT32 | RTGETOPT_FLAG_HEX },
1779 };
1780
1781 RTGETOPTSTATE State;
1782 int rc = RTGetOptInit(&State, cArgs, papszArgs, s_aOptions, RT_ELEMENTS(s_aOptions), 0 /*iFirst*/, 0 /*fFlags*/);
1783 AssertRCReturnVoid(rc);
1784
1785 PVMCPU const pVCpuThis = VMMGetCpu(pVM);
1786 PVMCPU pVCpu = pVCpuThis ? pVCpuThis : VMMGetCpuById(pVM, 0);
1787 RTGCPHYS GCPhysPc = NIL_RTGCPHYS;
1788 RTGCPHYS GCVirt = NIL_RTGCPTR;
1789 uint32_t fFlags = UINT32_MAX;
1790
1791 RTGETOPTUNION ValueUnion;
1792 while ((rc = RTGetOpt(&State, &ValueUnion)) != 0)
1793 {
1794 switch (rc)
1795 {
1796 case 'c':
1797 if (ValueUnion.u32 >= pVM->cCpus)
1798 pHlp->pfnPrintf(pHlp, "error: Invalid CPU ID: %u\n", ValueUnion.u32);
1799 else if (!pVCpu || pVCpu->idCpu != ValueUnion.u32)
1800 pVCpu = VMMGetCpuById(pVM, ValueUnion.u32);
1801 break;
1802
1803 case 'a':
1804 GCVirt = ValueUnion.u64;
1805 GCPhysPc = NIL_RTGCPHYS;
1806 break;
1807
1808 case 'p':
1809 GCVirt = NIL_RTGCPHYS;
1810 GCPhysPc = ValueUnion.u64;
1811 break;
1812
1813 case 'f':
1814 fFlags = ValueUnion.u32;
1815 break;
1816
1817 case 'h':
1818 pHlp->pfnPrintf(pHlp,
1819 "Usage: info tb [options]\n"
1820 "\n"
1821 "Options:\n"
1822 " -c<n>, --cpu=<n>, --vcpu=<n>\n"
1823 " Selects the CPU which TBs we're looking at. Default: Caller / 0\n"
1824 " -a<virt>, --address=<virt>\n"
1825 " Shows the TB for the specified guest virtual address.\n"
1826 " -p<phys>, --phys=<phys>, --phys-addr=<phys>\n"
1827 " Shows the TB for the specified guest physical address.\n"
1828 " -f<flags>,--flags=<flags>\n"
1829 " The TB flags value (hex) to use when looking up the TB.\n"
1830 "\n"
1831 "The default is to use CS:RIP and derive flags from the CPU mode.\n");
1832 return;
1833
1834 default:
1835 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
1836 return;
1837 }
1838 }
1839
1840 /* Currently, only do work on the same EMT. */
1841 if (pVCpu != pVCpuThis)
1842 {
1843 pHlp->pfnPrintf(pHlp, "TODO: Cross EMT calling not supported yet: targeting %u, caller on %d\n",
1844 pVCpu->idCpu, pVCpuThis ? (int)pVCpuThis->idCpu : -1);
1845 return;
1846 }
1847
1848 /*
1849 * Defaults.
1850 */
1851 if (GCPhysPc == NIL_RTGCPHYS)
1852 {
1853 if (GCVirt == NIL_RTGCPTR)
1854 GCVirt = CPUMGetGuestFlatPC(pVCpu);
1855 rc = PGMPhysGCPtr2GCPhys(pVCpu, GCVirt, &GCPhysPc);
1856 if (RT_FAILURE(rc))
1857 {
1858 pHlp->pfnPrintf(pHlp, "Failed to convert %%%RGv to an guest physical address: %Rrc\n", GCVirt, rc);
1859 return;
1860 }
1861 }
1862 if (fFlags == UINT32_MAX)
1863 {
1864 /* Note! This is duplicating code in IEMAllThrdRecompiler. */
1865 fFlags = iemCalcExecFlags(pVCpu);
1866 if (pVM->cCpus == 1)
1867 fFlags |= IEM_F_X86_DISREGARD_LOCK;
1868 if (CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
1869 fFlags |= IEMTB_F_INHIBIT_SHADOW;
1870 if (CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
1871 fFlags |= IEMTB_F_INHIBIT_NMI;
1872 if ((IEM_F_MODE_CPUMODE_MASK & fFlags) != IEMMODE_64BIT)
1873 {
1874 int64_t const offFromLim = (int64_t)pVCpu->cpum.GstCtx.cs.u32Limit - (int64_t)pVCpu->cpum.GstCtx.eip;
1875 if (offFromLim < X86_PAGE_SIZE + 16 - (int32_t)(pVCpu->cpum.GstCtx.cs.u64Base & GUEST_PAGE_OFFSET_MASK))
1876 fFlags |= IEMTB_F_CS_LIM_CHECKS;
1877 }
1878 }
1879
1880 /*
1881 * Do the lookup...
1882 *
1883 * Note! This is also duplicating code in IEMAllThrdRecompiler. We don't
1884 * have much choice since we don't want to increase use counters and
1885 * trigger native recompilation.
1886 */
1887 fFlags &= IEMTB_F_KEY_MASK;
1888 IEMTBCACHE const * const pTbCache = pVCpu->iem.s.pTbCacheR3;
1889 uint32_t const idxHash = IEMTBCACHE_HASH(pTbCache, fFlags, GCPhysPc);
1890 PCIEMTB pTb = IEMTBCACHE_PTR_GET_TB(pTbCache->apHash[idxHash]);
1891 while (pTb)
1892 {
1893 if (pTb->GCPhysPc == GCPhysPc)
1894 {
1895 if ((pTb->fFlags & IEMTB_F_KEY_MASK) == fFlags)
1896 {
1897 /// @todo if (pTb->x86.fAttr == (uint16_t)pVCpu->cpum.GstCtx.cs.Attr.u)
1898 break;
1899 }
1900 }
1901 pTb = pTb->pNext;
1902 }
1903 if (!pTb)
1904 pHlp->pfnPrintf(pHlp, "PC=%RGp fFlags=%#x - no TB found on #%u\n", GCPhysPc, fFlags, pVCpu->idCpu);
1905 else
1906 {
1907 /*
1908 * Disassemble according to type.
1909 */
1910 switch (pTb->fFlags & IEMTB_F_TYPE_MASK)
1911 {
1912# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
1913 case IEMTB_F_TYPE_NATIVE:
1914 pHlp->pfnPrintf(pHlp, "PC=%RGp fFlags=%#x on #%u: %p - native\n", GCPhysPc, fFlags, pVCpu->idCpu, pTb);
1915 iemNativeDisassembleTb(pVCpu, pTb, pHlp);
1916 break;
1917# endif
1918
1919 case IEMTB_F_TYPE_THREADED:
1920 pHlp->pfnPrintf(pHlp, "PC=%RGp fFlags=%#x on #%u: %p - threaded\n", GCPhysPc, fFlags, pVCpu->idCpu, pTb);
1921 iemThreadedDisassembleTb(pTb, pHlp);
1922 break;
1923
1924 default:
1925 pHlp->pfnPrintf(pHlp, "PC=%RGp fFlags=%#x on #%u: %p - ??? %#x\n",
1926 GCPhysPc, fFlags, pVCpu->idCpu, pTb, pTb->fFlags);
1927 break;
1928 }
1929 }
1930}
1931#endif /* VBOX_WITH_IEM_RECOMPILER && !VBOX_VMM_TARGET_ARMV8 */
1932
1933
1934#ifdef VBOX_WITH_DEBUGGER
1935
1936/** @callback_method_impl{FNDBGCCMD,
1937 * Implements the '.alliem' command. }
1938 */
1939static DECLCALLBACK(int) iemR3DbgFlushTlbs(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
1940{
1941 VMCPUID idCpu = DBGCCmdHlpGetCurrentCpu(pCmdHlp);
1942 PVMCPU pVCpu = VMMR3GetCpuByIdU(pUVM, idCpu);
1943 if (pVCpu)
1944 {
1945 VMR3ReqPriorityCallVoidWaitU(pUVM, idCpu, (PFNRT)IEMTlbInvalidateAllGlobal, 1, pVCpu);
1946 return VINF_SUCCESS;
1947 }
1948 RT_NOREF(paArgs, cArgs);
1949 return DBGCCmdHlpFail(pCmdHlp, pCmd, "failed to get the PVMCPU for the current CPU");
1950}
1951
1952
1953/**
1954 * Called by IEMR3Init to register debugger commands.
1955 */
1956static void iemR3RegisterDebuggerCommands(void)
1957{
1958 /*
1959 * Register debugger commands.
1960 */
1961 static DBGCCMD const s_aCmds[] =
1962 {
1963 {
1964 /* .pszCmd = */ "iemflushtlb",
1965 /* .cArgsMin = */ 0,
1966 /* .cArgsMax = */ 0,
1967 /* .paArgDescs = */ NULL,
1968 /* .cArgDescs = */ 0,
1969 /* .fFlags = */ 0,
1970 /* .pfnHandler = */ iemR3DbgFlushTlbs,
1971 /* .pszSyntax = */ "",
1972 /* .pszDescription = */ "Flushed the code and data TLBs"
1973 },
1974 };
1975
1976 int rc = DBGCRegisterCommands(&s_aCmds[0], RT_ELEMENTS(s_aCmds));
1977 AssertLogRelRC(rc);
1978}
1979
1980#endif /* VBOX_WITH_DEBUGGER */
1981
注意: 瀏覽 TracBrowser 來幫助您使用儲存庫瀏覽器

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette