VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/IEMR3.cpp@105630

Last change on this file since 105630 was 105616, checked in by vboxsync, 7 months ago

VMM/IEM: Another iemTlbInvalidateLargePageWorkerInner optimization attempt and some stats. bugref:10727

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 124.0 KB
 
1/* $Id: IEMR3.cpp 105616 2024-08-07 20:22:21Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_EM
33#define VMCPU_INCL_CPUM_GST_CTX
34#include <VBox/vmm/iem.h>
35#include <VBox/vmm/cpum.h>
36#include <VBox/vmm/dbgf.h>
37#include <VBox/vmm/mm.h>
38#include <VBox/vmm/ssm.h>
39#if defined(VBOX_VMM_TARGET_ARMV8)
40# include "IEMInternal-armv8.h"
41#else
42# include "IEMInternal.h"
43#endif
44#include <VBox/vmm/vm.h>
45#include <VBox/vmm/vmapi.h>
46#include <VBox/err.h>
47#ifdef VBOX_WITH_DEBUGGER
48# include <VBox/dbg.h>
49#endif
50
51#include <iprt/assert.h>
52#include <iprt/getopt.h>
53#ifdef IEM_WITH_TLB_TRACE
54# include <iprt/mem.h>
55#endif
56#include <iprt/string.h>
57
58#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
59# include "IEMN8veRecompiler.h"
60# include "IEMThreadedFunctions.h"
61# include "IEMInline.h"
62#endif
63
64
65/*********************************************************************************************************************************
66* Internal Functions *
67*********************************************************************************************************************************/
68static FNDBGFINFOARGVINT iemR3InfoITlb;
69static FNDBGFINFOARGVINT iemR3InfoDTlb;
70#ifdef IEM_WITH_TLB_TRACE
71static FNDBGFINFOARGVINT iemR3InfoTlbTrace;
72#endif
73#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
74static FNDBGFINFOARGVINT iemR3InfoTb;
75#endif
76#ifdef VBOX_WITH_DEBUGGER
77static void iemR3RegisterDebuggerCommands(void);
78#endif
79
80
81#if !defined(VBOX_VMM_TARGET_ARMV8)
82static const char *iemGetTargetCpuName(uint32_t enmTargetCpu)
83{
84 switch (enmTargetCpu)
85 {
86#define CASE_RET_STR(enmValue) case enmValue: return #enmValue + (sizeof("IEMTARGETCPU_") - 1)
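 /* Note: '#enmValue' stringifies the enum name, and adding (sizeof("IEMTARGETCPU_") - 1)
    to the resulting string literal skips past the common prefix, so e.g.
    IEMTARGETCPU_486 is returned as "486". */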
87 CASE_RET_STR(IEMTARGETCPU_8086);
88 CASE_RET_STR(IEMTARGETCPU_V20);
89 CASE_RET_STR(IEMTARGETCPU_186);
90 CASE_RET_STR(IEMTARGETCPU_286);
91 CASE_RET_STR(IEMTARGETCPU_386);
92 CASE_RET_STR(IEMTARGETCPU_486);
93 CASE_RET_STR(IEMTARGETCPU_PENTIUM);
94 CASE_RET_STR(IEMTARGETCPU_PPRO);
95 CASE_RET_STR(IEMTARGETCPU_CURRENT);
96#undef CASE_RET_STR
97 default: return "Unknown";
98 }
99}
100#endif
101
102
103/**
104 * Initializes the interpreted execution manager.
105 *
106 * This must be called after CPUM as we're querying information from CPUM about
107 * the guest and host CPUs.
108 *
109 * @returns VBox status code.
110 * @param pVM The cross context VM structure.
111 */
112VMMR3DECL(int) IEMR3Init(PVM pVM)
113{
114 /*
115 * Read configuration.
116 */
117#if (!defined(VBOX_VMM_TARGET_ARMV8) && !defined(VBOX_WITHOUT_CPUID_HOST_CALL)) || defined(VBOX_WITH_IEM_RECOMPILER)
118 PCFGMNODE const pIem = CFGMR3GetChild(CFGMR3GetRoot(pVM), "IEM");
119 int rc;
120#endif
121
122#if !defined(VBOX_VMM_TARGET_ARMV8) && !defined(VBOX_WITHOUT_CPUID_HOST_CALL)
123 /** @cfgm{/IEM/CpuIdHostCall, boolean, false}
124 * Controls whether the custom VBox specific CPUID host call interface is
125 * enabled or not. */
126# ifdef DEBUG_bird
127 rc = CFGMR3QueryBoolDef(pIem, "CpuIdHostCall", &pVM->iem.s.fCpuIdHostCall, true);
128# else
129 rc = CFGMR3QueryBoolDef(pIem, "CpuIdHostCall", &pVM->iem.s.fCpuIdHostCall, false);
130# endif
131 AssertLogRelRCReturn(rc, rc);
132#endif
133
134#ifdef VBOX_WITH_IEM_RECOMPILER
135 /** @cfgm{/IEM/MaxTbCount, uint32_t, 524288}
136 * Max number of TBs per EMT. */
137 uint32_t cMaxTbs = 0;
138 rc = CFGMR3QueryU32Def(pIem, "MaxTbCount", &cMaxTbs, _512K);
139 AssertLogRelRCReturn(rc, rc);
140 if (cMaxTbs < _16K || cMaxTbs > _8M)
141 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
142 "MaxTbCount value %u (%#x) is out of range (min %u, max %u)", cMaxTbs, cMaxTbs, _16K, _8M);
143
144 /** @cfgm{/IEM/InitialTbCount, uint32_t, 32768}
145 * Initial (minimum) number of TBs per EMT in ring-3. */
146 uint32_t cInitialTbs = 0;
147 rc = CFGMR3QueryU32Def(pIem, "InitialTbCount", &cInitialTbs, RT_MIN(cMaxTbs, _32K));
148 AssertLogRelRCReturn(rc, rc);
149 if (cInitialTbs < _16K || cInitialTbs > _8M)
150 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
151 "InitialTbCount value %u (%#x) is out of range (min %u, max %u)", cInitialTbs, cInitialTbs, _16K, _8M);
152
153 /* Check that the two values make sense together. Expect user/api to do
154 the right thing or get lost. */
155 if (cInitialTbs > cMaxTbs)
156 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
157 "InitialTbCount value %u (%#x) is higher than the MaxTbCount value %u (%#x)",
158 cInitialTbs, cInitialTbs, cMaxTbs, cMaxTbs);
159
160 /** @cfgm{/IEM/MaxExecMem, uint64_t, 512 MiB}
161 * Max executable memory for recompiled code per EMT. */
162 uint64_t cbMaxExec = 0;
163 rc = CFGMR3QueryU64Def(pIem, "MaxExecMem", &cbMaxExec, _512M);
164 AssertLogRelRCReturn(rc, rc);
165 if (cbMaxExec < _1M || cbMaxExec > 16*_1G64)
166 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
167 "MaxExecMem value %'RU64 (%#RX64) is out of range (min %'RU64, max %'RU64)",
168 cbMaxExec, cbMaxExec, (uint64_t)_1M, 16*_1G64);
169
170 /** @cfgm{/IEM/ExecChunkSize, uint32_t, 0 (auto)}
171 * The executable memory allocator chunk size. */
172 uint32_t cbChunkExec = 0;
173 rc = CFGMR3QueryU32Def(pIem, "ExecChunkSize", &cbChunkExec, 0);
174 AssertLogRelRCReturn(rc, rc);
175 if (cbChunkExec != 0 && cbChunkExec != UINT32_MAX && (cbChunkExec < _1M || cbChunkExec > _256M))
176 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
177 "ExecChunkSize value %'RU32 (%#RX32) is out of range (min %'RU32, max %'RU32)",
178 cbChunkExec, cbChunkExec, _1M, _256M);
179
180 /** @cfgm{/IEM/InitialExecMemSize, uint64_t, 1}
181 * The initial executable memory allocator size (per EMT). The value is
182 * rounded up to the nearest chunk size, so 1 byte means one chunk. */
183 uint64_t cbInitialExec = 0;
184 rc = CFGMR3QueryU64Def(pIem, "InitialExecMemSize", &cbInitialExec, 0);
185 AssertLogRelRCReturn(rc, rc);
186 if (cbInitialExec > cbMaxExec)
187 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
188 "InitialExecMemSize value %'RU64 (%#RX64) is out of range (max %'RU64)",
189 cbInitialExec, cbInitialExec, cbMaxExec);
190
191 /** @cfgm{/IEM/NativeRecompileAtUsedCount, uint32_t, 16}
192 * The translation block use count value to do native recompilation at.
193 * Set to zero to disable native recompilation. */
194 uint32_t uTbNativeRecompileAtUsedCount = 16;
195 rc = CFGMR3QueryU32Def(pIem, "NativeRecompileAtUsedCount", &uTbNativeRecompileAtUsedCount, 16);
196 AssertLogRelRCReturn(rc, rc);
197
198#endif /* VBOX_WITH_IEM_RECOMPILER */
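/* For illustration only: these /IEM/* CFGM keys are normally left at their
   defaults, but they can be overridden from the host side via extra data,
   which the Main API maps into the CFGM tree, along the lines of:
       VBoxManage setextradata "MyVM" "VBoxInternal/IEM/MaxTbCount"      262144
       VBoxManage setextradata "MyVM" "VBoxInternal/IEM/InitialTbCount"   32768
   The VM name and values here are made-up examples; out-of-range values are
   rejected by the checks above. */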
199
200 /*
201 * Initialize per-CPU data and register statistics.
202 */
203#if 1
204 uint64_t const uInitialTlbRevision = UINT64_C(0) - (IEMTLB_REVISION_INCR * 200U);
205 uint64_t const uInitialTlbPhysRev = UINT64_C(0) - (IEMTLB_PHYS_REV_INCR * 100U);
206#else
207 uint64_t const uInitialTlbRevision = UINT64_C(0) + (IEMTLB_REVISION_INCR * 4U);
208 uint64_t const uInitialTlbPhysRev = UINT64_C(0) + (IEMTLB_PHYS_REV_INCR * 4U);
209#endif
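 /* Note: the '#if 1' branch starts both revisions only a modest number of
    increments below the wrap-around point, which presumably gets the
    rollover handling exercised early in a VM's life rather than only after
    very long uptimes; the '#else' branch instead starts just above zero. */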
210
211 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
212 {
213 PVMCPU const pVCpu = pVM->apCpusR3[idCpu];
214 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
215
216 pVCpu->iem.s.CodeTlb.uTlbRevision = pVCpu->iem.s.DataTlb.uTlbRevision = uInitialTlbRevision;
217#ifndef VBOX_VMM_TARGET_ARMV8
218 pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal = pVCpu->iem.s.DataTlb.uTlbRevisionGlobal = uInitialTlbRevision;
219#endif
220 pVCpu->iem.s.CodeTlb.uTlbPhysRev = pVCpu->iem.s.DataTlb.uTlbPhysRev = uInitialTlbPhysRev;
221#ifndef VBOX_VMM_TARGET_ARMV8
222 pVCpu->iem.s.CodeTlb.NonGlobalLargePageRange.uFirstTag = UINT64_MAX;
223 pVCpu->iem.s.CodeTlb.GlobalLargePageRange.uFirstTag = UINT64_MAX;
224 pVCpu->iem.s.DataTlb.NonGlobalLargePageRange.uFirstTag = UINT64_MAX;
225 pVCpu->iem.s.DataTlb.GlobalLargePageRange.uFirstTag = UINT64_MAX;
226#endif
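 /* Note: UINT64_MAX in uFirstTag presumably acts as an 'empty range' marker
    here, i.e. no large pages have been loaded yet, so range checks against
    it cannot match until a real large-page entry updates the tags. */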
227
228 /*
229 * Host and guest CPU information.
230 */
231 if (idCpu == 0)
232 {
233 pVCpu->iem.s.enmCpuVendor = CPUMGetGuestCpuVendor(pVM);
234 pVCpu->iem.s.enmHostCpuVendor = CPUMGetHostCpuVendor(pVM);
235#if !defined(VBOX_VMM_TARGET_ARMV8)
236 pVCpu->iem.s.aidxTargetCpuEflFlavour[0] = pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL
237 || pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_VIA /*??*/
238 ? IEMTARGETCPU_EFL_BEHAVIOR_INTEL : IEMTARGETCPU_EFL_BEHAVIOR_AMD;
239# if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
240 if (pVCpu->iem.s.enmCpuVendor == pVCpu->iem.s.enmHostCpuVendor)
241 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = IEMTARGETCPU_EFL_BEHAVIOR_NATIVE;
242 else
243# endif
244 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = pVCpu->iem.s.aidxTargetCpuEflFlavour[0];
245#else
246 pVCpu->iem.s.aidxTargetCpuEflFlavour[0] = IEMTARGETCPU_EFL_BEHAVIOR_NATIVE;
247 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = pVCpu->iem.s.aidxTargetCpuEflFlavour[0];
248#endif
249
250#if !defined(VBOX_VMM_TARGET_ARMV8) && (IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC)
251 switch (pVM->cpum.ro.GuestFeatures.enmMicroarch)
252 {
253 case kCpumMicroarch_Intel_8086: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_8086; break;
254 case kCpumMicroarch_Intel_80186: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_186; break;
255 case kCpumMicroarch_Intel_80286: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_286; break;
256 case kCpumMicroarch_Intel_80386: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_386; break;
257 case kCpumMicroarch_Intel_80486: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_486; break;
258 case kCpumMicroarch_Intel_P5: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_PENTIUM; break;
259 case kCpumMicroarch_Intel_P6: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_PPRO; break;
260 case kCpumMicroarch_NEC_V20: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_V20; break;
261 case kCpumMicroarch_NEC_V30: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_V20; break;
262 default: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_CURRENT; break;
263 }
264 LogRel(("IEM: TargetCpu=%s, Microarch=%s aidxTargetCpuEflFlavour={%d,%d}\n",
265 iemGetTargetCpuName(pVCpu->iem.s.uTargetCpu), CPUMMicroarchName(pVM->cpum.ro.GuestFeatures.enmMicroarch),
266 pVCpu->iem.s.aidxTargetCpuEflFlavour[0], pVCpu->iem.s.aidxTargetCpuEflFlavour[1]));
267#else
268 LogRel(("IEM: Microarch=%s aidxTargetCpuEflFlavour={%d,%d}\n",
269 CPUMMicroarchName(pVM->cpum.ro.GuestFeatures.enmMicroarch),
270 pVCpu->iem.s.aidxTargetCpuEflFlavour[0], pVCpu->iem.s.aidxTargetCpuEflFlavour[1]));
271#endif
272 }
273 else
274 {
275 pVCpu->iem.s.enmCpuVendor = pVM->apCpusR3[0]->iem.s.enmCpuVendor;
276 pVCpu->iem.s.enmHostCpuVendor = pVM->apCpusR3[0]->iem.s.enmHostCpuVendor;
277 pVCpu->iem.s.aidxTargetCpuEflFlavour[0] = pVM->apCpusR3[0]->iem.s.aidxTargetCpuEflFlavour[0];
278 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = pVM->apCpusR3[0]->iem.s.aidxTargetCpuEflFlavour[1];
279#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
280 pVCpu->iem.s.uTargetCpu = pVM->apCpusR3[0]->iem.s.uTargetCpu;
281#endif
282 }
283
284 /*
285 * Mark all buffers free.
286 */
287 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
288 while (iMemMap-- > 0)
289 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
290
291#ifdef VBOX_WITH_IEM_RECOMPILER
292 /*
293 * Distribute recompiler configuration.
294 */
295 pVCpu->iem.s.uTbNativeRecompileAtUsedCount = uTbNativeRecompileAtUsedCount;
296#endif
297
298#ifdef IEM_WITH_TLB_TRACE
299 /*
300 * Allocate trace buffer.
301 */
302 pVCpu->iem.s.idxTlbTraceEntry = 0;
303 pVCpu->iem.s.cTlbTraceEntriesShift = 16;
304 pVCpu->iem.s.paTlbTraceEntries = (PIEMTLBTRACEENTRY)RTMemPageAlloc( RT_BIT_Z(pVCpu->iem.s.cTlbTraceEntriesShift)
305 * sizeof(*pVCpu->iem.s.paTlbTraceEntries));
306 AssertLogRelReturn(pVCpu->iem.s.paTlbTraceEntries, VERR_NO_PAGE_MEMORY);
307#endif
308 }
309
310
311#ifdef VBOX_WITH_IEM_RECOMPILER
312 /*
313 * Initialize the TB allocator and cache (/ hash table).
314 *
315 * This is done by each EMT to try get more optimal thread/numa locality of
316 * the allocations.
317 */
318 rc = VMR3ReqCallWait(pVM, VMCPUID_ALL, (PFNRT)iemTbInit, 6,
319 pVM, cInitialTbs, cMaxTbs, cbInitialExec, cbMaxExec, cbChunkExec);
320 AssertLogRelRCReturn(rc, rc);
321#endif
322
323 /*
324 * Register statistics.
325 */
326 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
327 {
328#if !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_NESTED_HWVIRT_VMX) /* quick fix for stupid structure duplication non-sense */
329 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
330 char szPat[128];
331 RT_NOREF_PV(szPat); /* lazy bird */
332 char szVal[128];
333 RT_NOREF_PV(szVal); /* lazy bird */
334
335 STAMR3RegisterF(pVM, &pVCpu->iem.s.cInstructions, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
336 "Instructions interpreted", "/IEM/CPU%u/cInstructions", idCpu);
337 STAMR3RegisterF(pVM, &pVCpu->iem.s.cLongJumps, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
338 "Number of longjmp calls", "/IEM/CPU%u/cLongJumps", idCpu);
339 STAMR3RegisterF(pVM, &pVCpu->iem.s.cPotentialExits, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
340 "Potential exits", "/IEM/CPU%u/cPotentialExits", idCpu);
341 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetAspectNotImplemented, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
342 "VERR_IEM_ASPECT_NOT_IMPLEMENTED", "/IEM/CPU%u/cRetAspectNotImplemented", idCpu);
343 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInstrNotImplemented, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
344 "VERR_IEM_INSTR_NOT_IMPLEMENTED", "/IEM/CPU%u/cRetInstrNotImplemented", idCpu);
345 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInfStatuses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
346 "Informational statuses returned", "/IEM/CPU%u/cRetInfStatuses", idCpu);
347 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetErrStatuses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
348 "Error statuses returned", "/IEM/CPU%u/cRetErrStatuses", idCpu);
349 STAMR3RegisterF(pVM, &pVCpu->iem.s.cbWritten, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
350 "Approx bytes written", "/IEM/CPU%u/cbWritten", idCpu);
351 STAMR3RegisterF(pVM, &pVCpu->iem.s.cPendingCommit, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
352 "Times RC/R0 had to postpone instruction committing to ring-3", "/IEM/CPU%u/cPendingCommit", idCpu);
353 STAMR3RegisterF(pVM, &pVCpu->iem.s.cMisalignedAtomics, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
354 "Number of misaligned (for the host) atomic instructions", "/IEM/CPU%u/cMisalignedAtomics", idCpu);
355
356 /* Code TLB: */
357 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.uTlbRevision, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
358 "Code TLB non-global revision", "/IEM/CPU%u/Tlb/Code/RevisionNonGlobal", idCpu);
359 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
360 "Code TLB global revision", "/IEM/CPU%u/Tlb/Code/RevisionGlobal", idCpu);
361 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlsFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
362 "Code TLB non-global flushes", "/IEM/CPU%u/Tlb/Code/RevisionNonGlobalFlushes", idCpu);
363 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlsGlobalFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
364 "Code TLB global flushes", "/IEM/CPU%u/Tlb/Code/RevisionGlobalFlushes", idCpu);
365 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbRevisionRollovers, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
366 "Code TLB revision rollovers", "/IEM/CPU%u/Tlb/Code/RevisionRollovers", idCpu);
367
368 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.CodeTlb.uTlbPhysRev, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
369 "Code TLB physical revision", "/IEM/CPU%u/Tlb/Code/PhysicalRevision", idCpu);
370 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
371 "Code TLB revision flushes", "/IEM/CPU%u/Tlb/Code/PhysicalRevisionFlushes", idCpu);
372 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbPhysRevRollovers, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
373 "Code TLB revision rollovers", "/IEM/CPU%u/Tlb/Code/PhysicalRevisionRollovers", idCpu);
374
375 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbGlobalLargePageCurLoads, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
376 "Code TLB global large page loads since flush", "/IEM/CPU%u/Tlb/Code/LargePageGlobalCurLoads", idCpu);
377 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.GlobalLargePageRange.uFirstTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
378 "Code TLB global large page range: lowest tag", "/IEM/CPU%u/Tlb/Code/LargePageGlobalFirstTag", idCpu);
379 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.GlobalLargePageRange.uLastTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
380 "Code TLB global large page range: last tag", "/IEM/CPU%u/Tlb/Code/LargePageGlobalLastTag", idCpu);
381
382 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNonGlobalLargePageCurLoads, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
383 "Code TLB non-global large page loads since flush", "/IEM/CPU%u/Tlb/Code/LargePageNonGlobalCurLoads", idCpu);
384 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.NonGlobalLargePageRange.uFirstTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
385 "Code TLB non-global large page range: lowest tag", "/IEM/CPU%u/Tlb/Code/LargePageNonGlobalFirstTag", idCpu);
386 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.NonGlobalLargePageRange.uLastTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
387 "Code TLB non-global large page range: last tag", "/IEM/CPU%u/Tlb/Code/LargePageNonGlobalLastTag", idCpu);
388
389 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbInvlPg, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
390 "Code TLB page invalidation requests", "/IEM/CPU%u/Tlb/Code/InvlPg", idCpu);
391 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbInvlPgLargeGlobal, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
392 "Code TLB page invlpg scanning for global large pages", "/IEM/CPU%u/Tlb/Code/InvlPg/LargeGlobal", idCpu);
393 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbInvlPgLargeNonGlobal, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
394 "Code TLB page invlpg scanning for non-global large pages", "/IEM/CPU%u/Tlb/Code/InvlPg/LargeNonGlobal", idCpu);
395
396 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbCoreMisses, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
397 "Code TLB misses", "/IEM/CPU%u/Tlb/Code/Misses", idCpu);
398 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbCoreGlobalLoads, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
399 "Code TLB global loads", "/IEM/CPU%u/Tlb/Code/Misses/GlobalLoads", idCpu);
400 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbSlowCodeReadPath, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
401 "Code TLB slow read path", "/IEM/CPU%u/Tlb/Code/SlowReads", idCpu);
402# ifdef IEM_WITH_TLB_STATISTICS
403 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbCoreHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
404 "Code TLB hits (non-native)", "/IEM/CPU%u/Tlb/Code/Hits/Other", idCpu);
405# if defined(VBOX_WITH_IEM_NATIVE_RECOMPILER)
406 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbHitsForNewPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
407 "Code TLB native hits on new page", "/IEM/CPU%u/Tlb/Code/Hits/New-Page", idCpu);
408 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbHitsForNewPageWithOffset, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
409 "Code TLB native hits on new page /w offset", "/IEM/CPU%u/Tlb/Code/Hits/New-Page-With-Offset", idCpu);
410# endif
411
412 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Code/Hits/*", idCpu);
413 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Code TLB hits",
414 "/IEM/CPU%u/Tlb/Code/Hits", idCpu);
415
416 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/Tlb/Code/Hits|/IEM/CPU%u/Tlb/Code/Misses", idCpu, idCpu);
417 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szVal, "Code TLB lookups (sum of hits and misses)",
418 "/IEM/CPU%u/Tlb/Code/AllLookups", idCpu);
419
420 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/Tlb/Code/Misses", idCpu);
421 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Code/Hits", idCpu);
422 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PPM, szVal, true, szPat,
423 "Code TLB actual miss rate", "/IEM/CPU%u/Tlb/Code/RateMisses", idCpu);
424
425# if defined(VBOX_WITH_IEM_NATIVE_RECOMPILER)
426 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissTag, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
427 "Code TLB misses in native code: Tag mismatch [not directly included grand parent sum]",
428 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/Tag", idCpu);
429 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissFlagsAndPhysRev, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
430 "Code TLB misses in native code: Flags or physical revision mistmatch [not directly included grand parent sum]",
431 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/FlagsAndPhysRev", idCpu);
432 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissAlignment, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
433 "Code TLB misses in native code: Alignment [not directly included grand parent sum]",
434 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/Alignment", idCpu);
435 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissCrossPage, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
436 "Code TLB misses in native code: Cross page [not directly included grand parent sum]",
437 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/CrossPage", idCpu);
438 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissNonCanonical, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
439 "Code TLB misses in native code: Non-canonical [not directly included grand parent sum]",
440 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/NonCanonical", idCpu);
441
442 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbMissesNewPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
443 "Code TLB native misses on new page",
444 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown2/New-Page", idCpu);
445 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbMissesNewPageWithOffset, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
446 "Code TLB native misses on new page w/ offset",
447 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown2/New-Page-With-Offset", idCpu);
448# endif
449# endif /* IEM_WITH_TLB_STATISTICS */
450
451 /* Data TLB organized as best we can... */
452 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.uTlbRevision, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
453 "Data TLB non-global revision", "/IEM/CPU%u/Tlb/Data/RevisionNonGlobal", idCpu);
454 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.uTlbRevisionGlobal, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
455 "Data TLB global revision", "/IEM/CPU%u/Tlb/Data/RevisionGlobal", idCpu);
456 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlsFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
457 "Data TLB non-global flushes", "/IEM/CPU%u/Tlb/Data/RevisionNonGlobalFlushes", idCpu);
458 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlsGlobalFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
459 "Data TLB global flushes", "/IEM/CPU%u/Tlb/Data/RevisionGlobalFlushes", idCpu);
460 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbRevisionRollovers, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
461 "Data TLB revision rollovers", "/IEM/CPU%u/Tlb/Data/RevisionRollovers", idCpu);
462
463 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.DataTlb.uTlbPhysRev, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
464 "Data TLB physical revision", "/IEM/CPU%u/Tlb/Data/PhysicalRevision", idCpu);
465 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
466 "Data TLB revision flushes", "/IEM/CPU%u/Tlb/Data/PhysicalRevisionFlushes", idCpu);
467 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbPhysRevRollovers, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
468 "Data TLB revision rollovers", "/IEM/CPU%u/Tlb/Data/PhysicalRevisionRollovers", idCpu);
469
470 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbGlobalLargePageCurLoads, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
471 "Data TLB global large page loads since flush", "/IEM/CPU%u/Tlb/Data/LargePageGlobalCurLoads", idCpu);
472 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.GlobalLargePageRange.uFirstTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
473 "Data TLB global large page range: lowest tag", "/IEM/CPU%u/Tlb/Data/LargePageGlobalFirstTag", idCpu);
474 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.GlobalLargePageRange.uLastTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
475 "Data TLB global large page range: last tag", "/IEM/CPU%u/Tlb/Data/LargePageGlobalLastTag", idCpu);
476
477 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNonGlobalLargePageCurLoads, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
478 "Data TLB non-global large page loads since flush", "/IEM/CPU%u/Tlb/Data/LargePageNonGlobalCurLoads", idCpu);
479 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.NonGlobalLargePageRange.uFirstTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
480 "Data TLB non-global large page range: lowest tag", "/IEM/CPU%u/Tlb/Data/LargePageNonGlobalFirstTag", idCpu);
481 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.NonGlobalLargePageRange.uLastTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
482 "Data TLB non-global large page range: last tag", "/IEM/CPU%u/Tlb/Data/LargePageNonGlobalLastTag", idCpu);
483
484 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbInvlPg, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
485 "Data TLB page invalidation requests", "/IEM/CPU%u/Tlb/Data/InvlPg", idCpu);
486 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbInvlPgLargeGlobal, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
487 "Data TLB page invlpg scanning for global large pages", "/IEM/CPU%u/Tlb/Data/InvlPg/LargeGlobal", idCpu);
488 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbInvlPgLargeNonGlobal, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
489 "Data TLB page invlpg scanning for non-global large pages", "/IEM/CPU%u/Tlb/Data/InvlPg/LargeNonGlobal", idCpu);
490
491 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbCoreMisses, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
492 "Data TLB core misses (iemMemMap, direct iemMemMapJmp (not safe path))",
493 "/IEM/CPU%u/Tlb/Data/Misses/Core", idCpu);
494 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
495 "Data TLB global loads",
496 "/IEM/CPU%u/Tlb/Data/Misses/Core/GlobalLoads", idCpu);
497 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeReadPath, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
498 "Data TLB safe read path (inline/native misses going to iemMemMapJmp)",
499 "/IEM/CPU%u/Tlb/Data/Misses/Safe/Reads", idCpu);
500 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeWritePath, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
501 "Data TLB safe write path (inline/native misses going to iemMemMapJmp)",
502 "/IEM/CPU%u/Tlb/Data/Misses/Safe/Writes", idCpu);
503 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Misses/*", idCpu);
504 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB misses",
505 "/IEM/CPU%u/Tlb/Data/Misses", idCpu);
506
507 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Misses/Safe/*", idCpu);
508 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB actual safe path calls (read + write)",
509 "/IEM/CPU%u/Tlb/Data/Misses/Safe", idCpu);
510 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
511 "Data TLB hits in iemMemMapJmp - not part of safe-path total",
512 "/IEM/CPU%u/Tlb/Data/Misses/Safe/SubPartHits", idCpu);
513 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeMisses, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
514 "Data TLB misses in iemMemMapJmp - not part of safe-path total",
515 "/IEM/CPU%u/Tlb/Data/Misses/Safe/SubPartMisses", idCpu);
516 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeGlobalLoads, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
517 "Data TLB global loads",
518 "/IEM/CPU%u/Tlb/Data/Misses/Safe/SubPartMisses/GlobalLoads", idCpu);
519
520# ifdef IEM_WITH_TLB_STATISTICS
521# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
522 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissTag, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
523 "Data TLB misses in native code: Tag mismatch [not directly included grand parent sum]",
524 "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/Tag", idCpu);
525 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissFlagsAndPhysRev, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
526 "Data TLB misses in native code: Flags or physical revision mistmatch [not directly included grand parent sum]",
527 "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/FlagsAndPhysRev", idCpu);
528 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissAlignment, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
529 "Data TLB misses in native code: Alignment [not directly included grand parent sum]",
530 "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/Alignment", idCpu);
531 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissCrossPage, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
532 "Data TLB misses in native code: Cross page [not directly included grand parent sum]",
533 "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/CrossPage", idCpu);
534 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissNonCanonical, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
535 "Data TLB misses in native code: Non-canonical [not directly included grand parent sum]",
536 "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/NonCanonical", idCpu);
537# endif
538# endif
539
540# ifdef IEM_WITH_TLB_STATISTICS
541 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbCoreHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
542 "Data TLB core hits (iemMemMap, direct iemMemMapJmp (not safe path))",
543 "/IEM/CPU%u/Tlb/Data/Hits/Core", idCpu);
544 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbInlineCodeHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
545 "Data TLB hits in IEMAllMemRWTmplInline.cpp.h",
546 "/IEM/CPU%u/Tlb/Data/Hits/Inline", idCpu);
547# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
548 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForStack, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
549 "Data TLB native stack access hits",
550 "/IEM/CPU%u/Tlb/Data/Hits/Native/Stack", idCpu);
551 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForFetch, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
552 "Data TLB native data fetch hits",
553 "/IEM/CPU%u/Tlb/Data/Hits/Native/Fetch", idCpu);
554 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForStore, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
555 "Data TLB native data store hits",
556 "/IEM/CPU%u/Tlb/Data/Hits/Native/Store", idCpu);
557 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForMapped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
558 "Data TLB native mapped data hits",
559 "/IEM/CPU%u/Tlb/Data/Hits/Native/Mapped", idCpu);
560# endif
561 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Hits/*", idCpu);
562 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB hits",
563 "/IEM/CPU%u/Tlb/Data/Hits", idCpu);
564
565# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
566 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Hits/Native/*", idCpu);
567 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB hits from native code",
568 "/IEM/CPU%u/Tlb/Data/Hits/Native", idCpu);
569# endif
570
571 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/Tlb/Data/Hits|/IEM/CPU%u/Tlb/Data/Misses", idCpu, idCpu);
572 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szVal, "Data TLB lookups (sum of hits and misses)",
573 "/IEM/CPU%u/Tlb/Data/AllLookups", idCpu);
574
575 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/Tlb/Data/Misses", idCpu);
576 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Hits", idCpu);
577 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PPM, szVal, true, szPat,
578 "Data TLB actual miss rate", "/IEM/CPU%u/Tlb/Data/RateMisses", idCpu);
579
580# endif /* IEM_WITH_TLB_STATISTICS */
581
582
583#ifdef VBOX_WITH_IEM_RECOMPILER
584 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.cTbExecNative, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
585 "Executed native translation block", "/IEM/CPU%u/re/cTbExecNative", idCpu);
586 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.cTbExecThreaded, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
587 "Executed threaded translation block", "/IEM/CPU%u/re/cTbExecThreaded", idCpu);
588 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedExecBreaks, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
589 "Times threaded TB execution was interrupted/broken off", "/IEM/CPU%u/re/cTbExecThreadedBreaks", idCpu);
590# ifdef VBOX_WITH_STATISTICS
591 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedExecBreaksWithLookup, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
592 "Times threaded TB execution was interrupted/broken off on a call with lookup entries", "/IEM/CPU%u/re/cTbExecThreadedBreaksWithLookup", idCpu);
593 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedExecBreaksWithoutLookup, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
594 "Times threaded TB execution was interrupted/broken off on a call without lookup entries", "/IEM/CPU%u/re/cTbExecThreadedBreaksWithoutLookup", idCpu);
595# endif
596
597 PIEMTBALLOCATOR const pTbAllocator = pVCpu->iem.s.pTbAllocatorR3;
598 STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatAllocs, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,
599 "Translation block allocations", "/IEM/CPU%u/re/cTbAllocCalls", idCpu);
600 STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatFrees, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,
601 "Translation block frees", "/IEM/CPU%u/re/cTbFreeCalls", idCpu);
602# ifdef VBOX_WITH_STATISTICS
603 STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatPrune, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
604 "Time spent freeing up TBs when full at alloc", "/IEM/CPU%u/re/TbPruningAlloc", idCpu);
605# endif
606 STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatPruneNative, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
607 "Time spent freeing up native TBs when out of executable memory", "/IEM/CPU%u/re/ExecMem/TbPruningNative", idCpu);
608 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cAllocatedChunks, STAMTYPE_U16, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
609 "Populated TB chunks", "/IEM/CPU%u/re/cTbChunks", idCpu);
610 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cMaxChunks, STAMTYPE_U8, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
611 "Max number of TB chunks", "/IEM/CPU%u/re/cTbChunksMax", idCpu);
612 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cTotalTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
613 "Total number of TBs in the allocator", "/IEM/CPU%u/re/cTbTotal", idCpu);
614 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cMaxTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
615 "Max total number of TBs allowed", "/IEM/CPU%u/re/cTbTotalMax", idCpu);
616 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cInUseTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
617 "Number of currently allocated TBs", "/IEM/CPU%u/re/cTbAllocated", idCpu);
618 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cNativeTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
619 "Number of currently allocated native TBs", "/IEM/CPU%u/re/cTbAllocatedNative", idCpu);
620 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cThreadedTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
621 "Number of currently allocated threaded TBs", "/IEM/CPU%u/re/cTbAllocatedThreaded", idCpu);
622
623 PIEMTBCACHE const pTbCache = pVCpu->iem.s.pTbCacheR3;
624 STAMR3RegisterF(pVM, (void *)&pTbCache->cHash, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
625 "Translation block lookup table size", "/IEM/CPU%u/re/cTbHashTab", idCpu);
626
627 STAMR3RegisterF(pVM, (void *)&pTbCache->cLookupHits, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
628 "Translation block lookup hits", "/IEM/CPU%u/re/cTbLookupHits", idCpu);
629 STAMR3RegisterF(pVM, (void *)&pTbCache->cLookupHitsViaTbLookupTable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
630 "Translation block lookup hits via TB lookup table associated with the previous TB", "/IEM/CPU%u/re/cTbLookupHitsViaTbLookupTable", idCpu);
631 STAMR3RegisterF(pVM, (void *)&pTbCache->cLookupMisses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
632 "Translation block lookup misses", "/IEM/CPU%u/re/cTbLookupMisses", idCpu);
633 STAMR3RegisterF(pVM, (void *)&pTbCache->cCollisions, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
634 "Translation block hash table collisions", "/IEM/CPU%u/re/cTbCollisions", idCpu);
635# ifdef VBOX_WITH_STATISTICS
636 STAMR3RegisterF(pVM, (void *)&pTbCache->StatPrune, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
637 "Time spent shortening collision lists", "/IEM/CPU%u/re/TbPruningCollisions", idCpu);
638# endif
639
640 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedCalls, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS_PER_TB,
641 "Calls per threaded translation block", "/IEM/CPU%u/re/ThrdCallsPerTb", idCpu);
642 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbInstr, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_INSTR_PER_TB,
643 "Instruction per threaded translation block", "/IEM/CPU%u/re/ThrdInstrPerTb", idCpu);
644 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbLookupEntries, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_INSTR_PER_TB,
645 "TB lookup table entries per threaded translation block", "/IEM/CPU%u/re/ThrdLookupEntriesPerTb", idCpu);
646
647 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckIrqBreaks, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
648 "TB breaks by CheckIrq", "/IEM/CPU%u/re/CheckIrqBreaks", idCpu);
649 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckModeBreaks, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
650 "TB breaks by CheckMode", "/IEM/CPU%u/re/CheckModeBreaks", idCpu);
651 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckBranchMisses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
652 "Branch target misses", "/IEM/CPU%u/re/CheckTbJmpMisses", idCpu);
653 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckNeedCsLimChecking, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
654 "Needing CS.LIM checking TB after branch or on page crossing", "/IEM/CPU%u/re/CheckTbNeedCsLimChecking", idCpu);
655# ifdef VBOX_WITH_STATISTICS
656 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbLoopInTbDetected, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
657 "Detected loop within TB", "/IEM/CPU%u/re/LoopInTbDetected", idCpu);
658#endif
659
660 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeExecMemInstrBufAllocFailed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
661 "Number of times the exec memory allocator failed to allocate a large enough buffer",
662 "/IEM/CPU%u/re/NativeExecMemInstrBufAllocFailed", idCpu);
663
664 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCallsRecompiled, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS_PER_TB,
665 "Number of threaded calls per TB that have been properly recompiled to native code",
666 "/IEM/CPU%u/re/NativeCallsRecompiledPerTb", idCpu);
667 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCallsThreaded, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS_PER_TB,
668 "Number of threaded calls per TB that could not be recompiler to native code",
669 "/IEM/CPU%u/re/NativeCallsThreadedPerTb", idCpu);
670 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeFullyRecompiledTbs, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
671 "Number of threaded calls that could not be recompiler to native code",
672 "/IEM/CPU%u/re/NativeFullyRecompiledTbs", idCpu);
673
674 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbNativeCode, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES_PER_TB,
675 "Size of native code per TB", "/IEM/CPU%u/re/NativeCodeSizePerTb", idCpu);
676 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeRecompilation, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
677 "Profiling iemNativeRecompile()", "/IEM/CPU%u/re/NativeRecompilation", idCpu);
678
679# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
680# ifdef VBOX_WITH_STATISTICS
681 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFree, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
682 "Number of calls to iemNativeRegAllocFindFree.",
683 "/IEM/CPU%u/re/NativeRegFindFree", idCpu);
684# endif
685 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
686 "Number of times iemNativeRegAllocFindFree needed to free a variable.",
687 "/IEM/CPU%u/re/NativeRegFindFreeVar", idCpu);
688# ifdef VBOX_WITH_STATISTICS
689 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeNoVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
690 "Number of times iemNativeRegAllocFindFree did not needed to free any variables.",
691 "/IEM/CPU%u/re/NativeRegFindFreeNoVar", idCpu);
692 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeLivenessUnshadowed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
693 "Times liveness info freeed up shadowed guest registers in iemNativeRegAllocFindFree.",
694 "/IEM/CPU%u/re/NativeRegFindFreeLivenessUnshadowed", idCpu);
695 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeLivenessHelped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
696 "Times liveness info helped finding the return register in iemNativeRegAllocFindFree.",
697 "/IEM/CPU%u/re/NativeRegFindFreeLivenessHelped", idCpu);
698
699 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeEflSkippedArithmetic, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
700 "Skipped all status flag updating, arithmetic instructions",
701 "/IEM/CPU%u/re/NativeEFlagsSkippedArithmetic", idCpu);
702 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeEflSkippedLogical, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
703 "Skipped all status flag updating, logical instructions",
704 "/IEM/CPU%u/re/NativeEFlagsSkippedLogical", idCpu);
705
706 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflCfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.CF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsCfSkippable", idCpu);
707 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflPfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.PF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsPfSkippable", idCpu);
708 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflAfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.AF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsAfSkippable", idCpu);
709 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflZfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.ZF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsZfSkippable", idCpu);
710 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflSfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.SF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsSfSkippable", idCpu);
711 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflOfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.OF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsOfSkippable", idCpu);
712
713 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflCfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.CF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsCfRequired", idCpu);
714 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflPfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.PF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsPfRequired", idCpu);
715 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflAfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.AF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsAfRequired", idCpu);
716 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflZfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.ZF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsZfRequired", idCpu);
717 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflSfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.SF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsSfRequired", idCpu);
718 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflOfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.OF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsOfRequired", idCpu);
719
720# ifdef IEMLIVENESS_EXTENDED_LAYOUT
721 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflCfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.CF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsCfDelayable", idCpu);
722 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflPfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.PF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsPfDelayable", idCpu);
723 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflAfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.AF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsAfDelayable", idCpu);
724 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflZfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.ZF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsZfDelayable", idCpu);
725 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflSfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.SF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsSfDelayable", idCpu);
726 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflOfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.OF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsOfDelayable", idCpu);
727# endif
728
729 /* Sum up all status bits ('_' is a sorting hack). */
730 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags?fSkippable*", idCpu);
731 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total skippable EFLAGS status bit updating",
732 "/IEM/CPU%u/re/NativeLivenessEFlags_StatusSkippable", idCpu);
733
734 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags?fRequired*", idCpu);
735 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total required EFLAGS status bit updating",
736 "/IEM/CPU%u/re/NativeLivenessEFlags_StatusRequired", idCpu);
737
738# ifdef IEMLIVENESS_EXTENDED_LAYOUT
739 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags?fDelayable*", idCpu);
740 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total potentially delayable EFLAGS status bit updating",
741 "/IEM/CPU%u/re/NativeLivenessEFlags_StatusDelayable", idCpu);
742# endif
743
744 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags?f*", idCpu);
745 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total EFLAGS status bit events of any kind",
746 "/IEM/CPU%u/re/NativeLivenessEFlags_StatusTotal", idCpu);
747
748 /* Ratio of the status bit skippables. */
749 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags_StatusTotal", idCpu);
750 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeLivenessEFlags_StatusSkippable", idCpu);
751 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat,
752 "Total skippable EFLAGS status bit updating percentage",
753 "/IEM/CPU%u/re/NativeLivenessEFlags_StatusSkippablePct", idCpu);
754
755# ifdef IEMLIVENESS_EXTENDED_LAYOUT
756 /* Ratio of the status bit delayables. */
757 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeLivenessEFlags_StatusDelayable", idCpu);
758 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat,
759 "Total potentially delayable EFLAGS status bit updating percentage",
760 "/IEM/CPU%u/re/NativeLivenessEFlags_StatusDelayablePct", idCpu);
761# endif
762
763 /* Ratios of individual bits. */
764 size_t const offFlagChar = RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlagsCf*", idCpu) - 3;
765 Assert(szPat[offFlagChar] == 'C');
766 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeLivenessEFlagsCfSkippable", idCpu);
767 Assert(szVal[offFlagChar] == 'C');
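 /* Note: offFlagChar is the index of the flag letter ('C' of "Cf"), which is
    the same in both buffers since they share the prefix; the lines below
    simply patch that one character to P/A/Z/S/O so the already formatted
    pattern and value strings can be reused for the other status flags. */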
768 szPat[offFlagChar] = szVal[offFlagChar] = 'C'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.CF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlagsCfSkippablePct", idCpu);
769 szPat[offFlagChar] = szVal[offFlagChar] = 'P'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.PF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlagsPfSkippablePct", idCpu);
770 szPat[offFlagChar] = szVal[offFlagChar] = 'A'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.AF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlagsAfSkippablePct", idCpu);
771 szPat[offFlagChar] = szVal[offFlagChar] = 'Z'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.ZF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlagsZfSkippablePct", idCpu);
772 szPat[offFlagChar] = szVal[offFlagChar] = 'S'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.SF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlagsSfSkippablePct", idCpu);
773 szPat[offFlagChar] = szVal[offFlagChar] = 'O'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.OF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlagsOfSkippablePct", idCpu);
774
775 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativePcUpdateTotal, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Total RIP updates", "/IEM/CPU%u/re/NativePcUpdateTotal", idCpu);
776 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativePcUpdateDelayed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Delayed RIP updates", "/IEM/CPU%u/re/NativePcUpdateDelayed", idCpu);
777
778# ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
779 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFree, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
780 "Number of calls to iemNativeSimdRegAllocFindFree.",
781 "/IEM/CPU%u/re/NativeSimdRegFindFree", idCpu);
782 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
783 "Number of times iemNativeSimdRegAllocFindFree needed to free a variable.",
784 "/IEM/CPU%u/re/NativeSimdRegFindFreeVar", idCpu);
785 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeNoVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
786 "Number of times iemNativeSimdRegAllocFindFree did not needed to free any variables.",
787 "/IEM/CPU%u/re/NativeSimdRegFindFreeNoVar", idCpu);
788 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeLivenessUnshadowed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
789 "Times liveness info freeed up shadowed guest registers in iemNativeSimdRegAllocFindFree.",
790 "/IEM/CPU%u/re/NativeSimdRegFindFreeLivenessUnshadowed", idCpu);
791 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeLivenessHelped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
792 "Times liveness info helped finding the return register in iemNativeSimdRegAllocFindFree.",
793 "/IEM/CPU%u/re/NativeSimdRegFindFreeLivenessHelped", idCpu);
794
795 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeDeviceNotAvailXcptCheckPotential, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Potential IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() checks",
796 "/IEM/CPU%u/re/NativeMaybeDeviceNotAvailXcptCheckPotential", idCpu);
797 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeWaitDeviceNotAvailXcptCheckPotential, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Potential IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() checks",
798 "/IEM/CPU%u/re/NativeMaybeWaitDeviceNotAvailXcptCheckPotential", idCpu);
799 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeSseXcptCheckPotential, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Potential IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() checks",
800 "/IEM/CPU%u/re/NativeMaybeSseXcptCheckPotential", idCpu);
801 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeAvxXcptCheckPotential, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Potential IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() checks",
802 "/IEM/CPU%u/re/NativeMaybeAvxXcptCheckPotential", idCpu);
803
804 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeDeviceNotAvailXcptCheckOmitted, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() checks omitted",
805 "/IEM/CPU%u/re/NativeMaybeDeviceNotAvailXcptCheckOmitted", idCpu);
806 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeWaitDeviceNotAvailXcptCheckOmitted, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() checks omitted",
807 "/IEM/CPU%u/re/NativeMaybeWaitDeviceNotAvailXcptCheckOmitted", idCpu);
808 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeSseXcptCheckOmitted, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() checks omitted",
809 "/IEM/CPU%u/re/NativeMaybeSseXcptCheckOmitted", idCpu);
810 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeAvxXcptCheckOmitted, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() checks omitted",
811 "/IEM/CPU%u/re/NativeMaybeAvxXcptCheckOmitted", idCpu);
812# endif
813
814 /* Ratio of delayed RIP updates. */
815 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativePcUpdateTotal", idCpu);
816 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativePcUpdateDelayed", idCpu);
817 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat,
818 "Delayed RIP updating percentage",
819 "/IEM/CPU%u/re/NativePcUpdateDelayed_StatusDelayedPct", idCpu);
820
821 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbFinished, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
822 "Number of times the TB finishes execution completely",
823 "/IEM/CPU%u/re/NativeTbFinished", idCpu);
824# endif /* VBOX_WITH_STATISTICS */
825 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitReturnBreak, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
826 "Number of times the TB finished through the ReturnBreak label",
827 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak", idCpu);
828 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitReturnBreakFF, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
829 "Number of times the TB finished through the ReturnBreak label",
830 "/IEM/CPU%u/re/NativeTbExit/ReturnBreakFF", idCpu);
831 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitReturnWithFlags, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
832 "Number of times the TB finished through the ReturnWithFlags label",
833 "/IEM/CPU%u/re/NativeTbExit/ReturnWithFlags", idCpu);
834 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitReturnOtherStatus, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
835 "Number of times the TB finished with some other status value",
836 "/IEM/CPU%u/re/NativeTbExit/ReturnOtherStatus", idCpu);
837 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitLongJump, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
838 "Number of times the TB finished via long jump / throw",
839 "/IEM/CPU%u/re/NativeTbExit/LongJumps", idCpu);
840 /* These end up returning VINF_IEM_REEXEC_BREAK and are thus already counted under NativeTbExit/ReturnBreak: */
841 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitObsoleteTb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
842 "Number of times the TB finished through the ObsoleteTb label",
843 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/ObsoleteTb", idCpu);
844 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatCheckNeedCsLimChecking, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
845 "Number of times the TB finished through the NeedCsLimChecking label",
846 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/NeedCsLimChecking", idCpu);
847 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatCheckBranchMisses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
848 "Number of times the TB finished through the CheckBranchMiss label",
849 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/CheckBranchMiss", idCpu);
850 /* Raising stuff will either increment NativeTbExit/LongJumps or NativeTbExit/ReturnOtherStatus
851 depending on whether VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP is defined: */
852# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
853# define RAISE_PREFIX "/IEM/CPU%u/re/NativeTbExit/ReturnOtherStatus/"
854# else
855# define RAISE_PREFIX "/IEM/CPU%u/re/NativeTbExit/LongJumps/"
856# endif
857 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseDe, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
858 "Number of times the TB finished raising a #DE exception",
859 RAISE_PREFIX "RaiseDe", idCpu);
860 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseUd, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
861 "Number of times the TB finished raising a #UD exception",
862 RAISE_PREFIX "RaiseUd", idCpu);
863 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseSseRelated, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
864 "Number of times the TB finished raising a SSE related exception",
865 RAISE_PREFIX "RaiseSseRelated", idCpu);
866 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseAvxRelated, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
867 "Number of times the TB finished raising a AVX related exception",
868 RAISE_PREFIX "RaiseAvxRelated", idCpu);
869 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseSseAvxFpRelated, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
870 "Number of times the TB finished raising a SSE/AVX floating point related exception",
871 RAISE_PREFIX "RaiseSseAvxFpRelated", idCpu);
872 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseNm, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
873 "Number of times the TB finished raising a #NM exception",
874 RAISE_PREFIX "RaiseNm", idCpu);
875 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseGp0, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
876 "Number of times the TB finished raising a #GP(0) exception",
877 RAISE_PREFIX "RaiseGp0", idCpu);
878 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseMf, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
879 "Number of times the TB finished raising a #MF exception",
880 RAISE_PREFIX "RaiseMf", idCpu);
881 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseXf, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
882 "Number of times the TB finished raising a #XF exception",
883 RAISE_PREFIX "RaiseXf", idCpu);
884
885 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1Irq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
886 "Direct linking #1 with IRQ check succeeded",
887 "/IEM/CPU%u/re/NativeTbExit/DirectLinking1Irq", idCpu);
888 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1NoIrq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
889 "Direct linking #1 w/o IRQ check succeeded",
890 "/IEM/CPU%u/re/NativeTbExit/DirectLinking1NoIrq", idCpu);
891# ifdef VBOX_WITH_STATISTICS
892 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1NoTb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
893 "Direct linking #1 failed: No TB in lookup table",
894 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking1NoTb", idCpu);
895 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1MismatchGCPhysPc, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
896 "Direct linking #1 failed: GCPhysPc mismatch",
897 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking1MismatchGCPhysPc", idCpu);
898 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1MismatchFlags, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
899 "Direct linking #1 failed: TB flags mismatch",
900 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking1MismatchFlags", idCpu);
901 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1PendingIrq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
902 "Direct linking #1 failed: IRQ or FF pending",
903 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking1PendingIrq", idCpu);
904# endif
905
906 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2Irq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
907 "Direct linking #2 with IRQ check succeeded",
908 "/IEM/CPU%u/re/NativeTbExit/DirectLinking2Irq", idCpu);
909 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2NoIrq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
910 "Direct linking #2 w/o IRQ check succeeded",
911 "/IEM/CPU%u/re/NativeTbExit/DirectLinking2NoIrq", idCpu);
912# ifdef VBOX_WITH_STATISTICS
913 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2NoTb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
914 "Direct linking #2 failed: No TB in lookup table",
915 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking2NoTb", idCpu);
916 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2MismatchGCPhysPc, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
917 "Direct linking #2 failed: GCPhysPc mismatch",
918 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking2MismatchGCPhysPc", idCpu);
919 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2MismatchFlags, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
920 "Direct linking #2 failed: TB flags mismatch",
921 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking2MismatchFlags", idCpu);
922 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2PendingIrq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
923 "Direct linking #2 failed: IRQ or FF pending",
924 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking2PendingIrq", idCpu);
925# endif
926
927 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeTbExit/*", idCpu); /* only immediate children, no sub folders */
928 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat,
929 "Number of times native TB execution finished before the end (not counting thrown memory++ exceptions)",
930 "/IEM/CPU%u/re/NativeTbExit", idCpu);
931
932
933# endif /* VBOX_WITH_IEM_NATIVE_RECOMPILER */
934
935
936# ifdef VBOX_WITH_STATISTICS
937 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatMemMapJmp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
938 "iemMemMapJmp calls", "/IEM/CPU%u/iemMemMapJmp", idCpu);
939 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatMemMapNoJmp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
940 "iemMemMap calls", "/IEM/CPU%u/iemMemMapNoJmp", idCpu);
941 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatMemBounceBufferCrossPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
942 "iemMemBounceBufferMapCrossPage calls", "/IEM/CPU%u/iemMemMapBounceBufferCrossPage", idCpu);
943 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatMemBounceBufferMapPhys, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
944 "iemMemBounceBufferMapPhys calls", "/IEM/CPU%u/iemMemMapBounceBufferMapPhys", idCpu);
945# endif
946
947
948#endif /* VBOX_WITH_IEM_RECOMPILER */
949
950 for (uint32_t i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aStatXcpts); i++)
951 STAMR3RegisterF(pVM, &pVCpu->iem.s.aStatXcpts[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
952 "", "/IEM/CPU%u/Exceptions/%02x", idCpu, i);
953 for (uint32_t i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aStatInts); i++)
954 STAMR3RegisterF(pVM, &pVCpu->iem.s.aStatInts[i], STAMTYPE_U32_RESET, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
955 "", "/IEM/CPU%u/Interrupts/%02x", idCpu, i);
956
957# if !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_STATISTICS) && !defined(DOXYGEN_RUNNING)
958 /* Instruction statistics: */
959# define IEM_DO_INSTR_STAT(a_Name, a_szDesc) \
960 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatsRZ.a_Name, STAMTYPE_U32_RESET, STAMVISIBILITY_USED, \
961 STAMUNIT_COUNT, a_szDesc, "/IEM/CPU%u/instr-RZ/" #a_Name, idCpu); \
962 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatsR3.a_Name, STAMTYPE_U32_RESET, STAMVISIBILITY_USED, \
963 STAMUNIT_COUNT, a_szDesc, "/IEM/CPU%u/instr-R3/" #a_Name, idCpu);
964# include "IEMInstructionStatisticsTmpl.h"
965# undef IEM_DO_INSTR_STAT
966# endif
967
968# if defined(VBOX_WITH_STATISTICS) && defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
969 /* Threaded function statistics: */
970 for (unsigned i = 1; i < (unsigned)kIemThreadedFunc_End; i++)
971 STAMR3RegisterF(pVM, &pVCpu->iem.s.acThreadedFuncStats[i], STAMTYPE_U32_RESET, STAMVISIBILITY_USED,
972 STAMUNIT_COUNT, NULL, "/IEM/CPU%u/ThrdFuncs/%s", idCpu, g_apszIemThreadedFunctionStats[i]);
973# endif
974
975#endif /* !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_NESTED_HWVIRT_VMX) - quick fix for stupid structure duplication non-sense */
976 }
977
978#if !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_NESTED_HWVIRT_VMX)
979 /*
980 * Register the per-VM VMX APIC-access page handler type.
981 */
982 if (pVM->cpum.ro.GuestFeatures.fVmx)
983 {
984 rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_ALL, PGMPHYSHANDLER_F_NOT_IN_HM,
985 iemVmxApicAccessPageHandler,
986 "VMX APIC-access page", &pVM->iem.s.hVmxApicAccessPage);
987 AssertLogRelRCReturn(rc, rc);
988 }
989#endif
990
991 DBGFR3InfoRegisterInternalArgv(pVM, "itlb", "IEM instruction TLB", iemR3InfoITlb, DBGFINFO_FLAGS_RUN_ON_EMT);
992 DBGFR3InfoRegisterInternalArgv(pVM, "dtlb", "IEM instruction TLB", iemR3InfoDTlb, DBGFINFO_FLAGS_RUN_ON_EMT);
993#ifdef IEM_WITH_TLB_TRACE
994 DBGFR3InfoRegisterInternalArgv(pVM, "tlbtrace", "IEM TLB trace log", iemR3InfoTlbTrace, DBGFINFO_FLAGS_RUN_ON_EMT);
995#endif
996#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
997 DBGFR3InfoRegisterInternalArgv(pVM, "tb", "IEM translation block", iemR3InfoTb, DBGFINFO_FLAGS_RUN_ON_EMT);
998#endif
999#ifdef VBOX_WITH_DEBUGGER
1000 iemR3RegisterDebuggerCommands();
1001#endif
1002
1003 return VINF_SUCCESS;
1004}
1005
1006
1007VMMR3DECL(int) IEMR3Term(PVM pVM)
1008{
1009 NOREF(pVM);
1010#ifdef IEM_WITH_TLB_TRACE
1011 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
1012 {
1013 PVMCPU const pVCpu = pVM->apCpusR3[idCpu];
1014 RTMemPageFree(pVCpu->iem.s.paTlbTraceEntries,
1015 RT_BIT_Z(pVCpu->iem.s.cTlbTraceEntriesShift) * sizeof(*pVCpu->iem.s.paTlbTraceEntries));
1016 }
1017#endif
1018 return VINF_SUCCESS;
1019}
1020
1021
1022VMMR3DECL(void) IEMR3Relocate(PVM pVM)
1023{
1024 RT_NOREF(pVM);
1025}
1026
1027
1028/**
1029 * Gets the name of a generic IEM exit code.
1030 *
1031 * @returns Pointer to read only string if @a uExit is known, otherwise NULL.
1032 * @param uExit The IEM exit to name.
1033 */
1034VMMR3DECL(const char *) IEMR3GetExitName(uint32_t uExit)
1035{
1036 static const char * const s_apszNames[] =
1037 {
1038 /* external interrupts */
1039 "ExtInt 00h", "ExtInt 01h", "ExtInt 02h", "ExtInt 03h", "ExtInt 04h", "ExtInt 05h", "ExtInt 06h", "ExtInt 07h",
1040 "ExtInt 08h", "ExtInt 09h", "ExtInt 0ah", "ExtInt 0bh", "ExtInt 0ch", "ExtInt 0dh", "ExtInt 0eh", "ExtInt 0fh",
1041 "ExtInt 10h", "ExtInt 11h", "ExtInt 12h", "ExtInt 13h", "ExtInt 14h", "ExtInt 15h", "ExtInt 16h", "ExtInt 17h",
1042 "ExtInt 18h", "ExtInt 19h", "ExtInt 1ah", "ExtInt 1bh", "ExtInt 1ch", "ExtInt 1dh", "ExtInt 1eh", "ExtInt 1fh",
1043 "ExtInt 20h", "ExtInt 21h", "ExtInt 22h", "ExtInt 23h", "ExtInt 24h", "ExtInt 25h", "ExtInt 26h", "ExtInt 27h",
1044 "ExtInt 28h", "ExtInt 29h", "ExtInt 2ah", "ExtInt 2bh", "ExtInt 2ch", "ExtInt 2dh", "ExtInt 2eh", "ExtInt 2fh",
1045 "ExtInt 30h", "ExtInt 31h", "ExtInt 32h", "ExtInt 33h", "ExtInt 34h", "ExtInt 35h", "ExtInt 36h", "ExtInt 37h",
1046 "ExtInt 38h", "ExtInt 39h", "ExtInt 3ah", "ExtInt 3bh", "ExtInt 3ch", "ExtInt 3dh", "ExtInt 3eh", "ExtInt 3fh",
1047 "ExtInt 40h", "ExtInt 41h", "ExtInt 42h", "ExtInt 43h", "ExtInt 44h", "ExtInt 45h", "ExtInt 46h", "ExtInt 47h",
1048 "ExtInt 48h", "ExtInt 49h", "ExtInt 4ah", "ExtInt 4bh", "ExtInt 4ch", "ExtInt 4dh", "ExtInt 4eh", "ExtInt 4fh",
1049 "ExtInt 50h", "ExtInt 51h", "ExtInt 52h", "ExtInt 53h", "ExtInt 54h", "ExtInt 55h", "ExtInt 56h", "ExtInt 57h",
1050 "ExtInt 58h", "ExtInt 59h", "ExtInt 5ah", "ExtInt 5bh", "ExtInt 5ch", "ExtInt 5dh", "ExtInt 5eh", "ExtInt 5fh",
1051 "ExtInt 60h", "ExtInt 61h", "ExtInt 62h", "ExtInt 63h", "ExtInt 64h", "ExtInt 65h", "ExtInt 66h", "ExtInt 67h",
1052 "ExtInt 68h", "ExtInt 69h", "ExtInt 6ah", "ExtInt 6bh", "ExtInt 6ch", "ExtInt 6dh", "ExtInt 6eh", "ExtInt 6fh",
1053 "ExtInt 70h", "ExtInt 71h", "ExtInt 72h", "ExtInt 73h", "ExtInt 74h", "ExtInt 75h", "ExtInt 76h", "ExtInt 77h",
1054 "ExtInt 78h", "ExtInt 79h", "ExtInt 7ah", "ExtInt 7bh", "ExtInt 7ch", "ExtInt 7dh", "ExtInt 7eh", "ExtInt 7fh",
1055 "ExtInt 80h", "ExtInt 81h", "ExtInt 82h", "ExtInt 83h", "ExtInt 84h", "ExtInt 85h", "ExtInt 86h", "ExtInt 87h",
1056 "ExtInt 88h", "ExtInt 89h", "ExtInt 8ah", "ExtInt 8bh", "ExtInt 8ch", "ExtInt 8dh", "ExtInt 8eh", "ExtInt 8fh",
1057 "ExtInt 90h", "ExtInt 91h", "ExtInt 92h", "ExtInt 93h", "ExtInt 94h", "ExtInt 95h", "ExtInt 96h", "ExtInt 97h",
1058 "ExtInt 98h", "ExtInt 99h", "ExtInt 9ah", "ExtInt 9bh", "ExtInt 9ch", "ExtInt 9dh", "ExtInt 9eh", "ExtInt 9fh",
1059 "ExtInt a0h", "ExtInt a1h", "ExtInt a2h", "ExtInt a3h", "ExtInt a4h", "ExtInt a5h", "ExtInt a6h", "ExtInt a7h",
1060 "ExtInt a8h", "ExtInt a9h", "ExtInt aah", "ExtInt abh", "ExtInt ach", "ExtInt adh", "ExtInt aeh", "ExtInt afh",
1061 "ExtInt b0h", "ExtInt b1h", "ExtInt b2h", "ExtInt b3h", "ExtInt b4h", "ExtInt b5h", "ExtInt b6h", "ExtInt b7h",
1062 "ExtInt b8h", "ExtInt b9h", "ExtInt bah", "ExtInt bbh", "ExtInt bch", "ExtInt bdh", "ExtInt beh", "ExtInt bfh",
1063 "ExtInt c0h", "ExtInt c1h", "ExtInt c2h", "ExtInt c3h", "ExtInt c4h", "ExtInt c5h", "ExtInt c6h", "ExtInt c7h",
1064 "ExtInt c8h", "ExtInt c9h", "ExtInt cah", "ExtInt cbh", "ExtInt cch", "ExtInt cdh", "ExtInt ceh", "ExtInt cfh",
1065 "ExtInt d0h", "ExtInt d1h", "ExtInt d2h", "ExtInt d3h", "ExtInt d4h", "ExtInt d5h", "ExtInt d6h", "ExtInt d7h",
1066 "ExtInt d8h", "ExtInt d9h", "ExtInt dah", "ExtInt dbh", "ExtInt dch", "ExtInt ddh", "ExtInt deh", "ExtInt dfh",
1067 "ExtInt e0h", "ExtInt e1h", "ExtInt e2h", "ExtInt e3h", "ExtInt e4h", "ExtInt e5h", "ExtInt e6h", "ExtInt e7h",
1068 "ExtInt e8h", "ExtInt e9h", "ExtInt eah", "ExtInt ebh", "ExtInt ech", "ExtInt edh", "ExtInt eeh", "ExtInt efh",
1069 "ExtInt f0h", "ExtInt f1h", "ExtInt f2h", "ExtInt f3h", "ExtInt f4h", "ExtInt f5h", "ExtInt f6h", "ExtInt f7h",
1070 "ExtInt f8h", "ExtInt f9h", "ExtInt fah", "ExtInt fbh", "ExtInt fch", "ExtInt fdh", "ExtInt feh", "ExtInt ffh",
1071 /* software interrupts */
1072 "SoftInt 00h", "SoftInt 01h", "SoftInt 02h", "SoftInt 03h", "SoftInt 04h", "SoftInt 05h", "SoftInt 06h", "SoftInt 07h",
1073 "SoftInt 08h", "SoftInt 09h", "SoftInt 0ah", "SoftInt 0bh", "SoftInt 0ch", "SoftInt 0dh", "SoftInt 0eh", "SoftInt 0fh",
1074 "SoftInt 10h", "SoftInt 11h", "SoftInt 12h", "SoftInt 13h", "SoftInt 14h", "SoftInt 15h", "SoftInt 16h", "SoftInt 17h",
1075 "SoftInt 18h", "SoftInt 19h", "SoftInt 1ah", "SoftInt 1bh", "SoftInt 1ch", "SoftInt 1dh", "SoftInt 1eh", "SoftInt 1fh",
1076 "SoftInt 20h", "SoftInt 21h", "SoftInt 22h", "SoftInt 23h", "SoftInt 24h", "SoftInt 25h", "SoftInt 26h", "SoftInt 27h",
1077 "SoftInt 28h", "SoftInt 29h", "SoftInt 2ah", "SoftInt 2bh", "SoftInt 2ch", "SoftInt 2dh", "SoftInt 2eh", "SoftInt 2fh",
1078 "SoftInt 30h", "SoftInt 31h", "SoftInt 32h", "SoftInt 33h", "SoftInt 34h", "SoftInt 35h", "SoftInt 36h", "SoftInt 37h",
1079 "SoftInt 38h", "SoftInt 39h", "SoftInt 3ah", "SoftInt 3bh", "SoftInt 3ch", "SoftInt 3dh", "SoftInt 3eh", "SoftInt 3fh",
1080 "SoftInt 40h", "SoftInt 41h", "SoftInt 42h", "SoftInt 43h", "SoftInt 44h", "SoftInt 45h", "SoftInt 46h", "SoftInt 47h",
1081 "SoftInt 48h", "SoftInt 49h", "SoftInt 4ah", "SoftInt 4bh", "SoftInt 4ch", "SoftInt 4dh", "SoftInt 4eh", "SoftInt 4fh",
1082 "SoftInt 50h", "SoftInt 51h", "SoftInt 52h", "SoftInt 53h", "SoftInt 54h", "SoftInt 55h", "SoftInt 56h", "SoftInt 57h",
1083 "SoftInt 58h", "SoftInt 59h", "SoftInt 5ah", "SoftInt 5bh", "SoftInt 5ch", "SoftInt 5dh", "SoftInt 5eh", "SoftInt 5fh",
1084 "SoftInt 60h", "SoftInt 61h", "SoftInt 62h", "SoftInt 63h", "SoftInt 64h", "SoftInt 65h", "SoftInt 66h", "SoftInt 67h",
1085 "SoftInt 68h", "SoftInt 69h", "SoftInt 6ah", "SoftInt 6bh", "SoftInt 6ch", "SoftInt 6dh", "SoftInt 6eh", "SoftInt 6fh",
1086 "SoftInt 70h", "SoftInt 71h", "SoftInt 72h", "SoftInt 73h", "SoftInt 74h", "SoftInt 75h", "SoftInt 76h", "SoftInt 77h",
1087 "SoftInt 78h", "SoftInt 79h", "SoftInt 7ah", "SoftInt 7bh", "SoftInt 7ch", "SoftInt 7dh", "SoftInt 7eh", "SoftInt 7fh",
1088 "SoftInt 80h", "SoftInt 81h", "SoftInt 82h", "SoftInt 83h", "SoftInt 84h", "SoftInt 85h", "SoftInt 86h", "SoftInt 87h",
1089 "SoftInt 88h", "SoftInt 89h", "SoftInt 8ah", "SoftInt 8bh", "SoftInt 8ch", "SoftInt 8dh", "SoftInt 8eh", "SoftInt 8fh",
1090 "SoftInt 90h", "SoftInt 91h", "SoftInt 92h", "SoftInt 93h", "SoftInt 94h", "SoftInt 95h", "SoftInt 96h", "SoftInt 97h",
1091 "SoftInt 98h", "SoftInt 99h", "SoftInt 9ah", "SoftInt 9bh", "SoftInt 9ch", "SoftInt 9dh", "SoftInt 9eh", "SoftInt 9fh",
1092 "SoftInt a0h", "SoftInt a1h", "SoftInt a2h", "SoftInt a3h", "SoftInt a4h", "SoftInt a5h", "SoftInt a6h", "SoftInt a7h",
1093 "SoftInt a8h", "SoftInt a9h", "SoftInt aah", "SoftInt abh", "SoftInt ach", "SoftInt adh", "SoftInt aeh", "SoftInt afh",
1094 "SoftInt b0h", "SoftInt b1h", "SoftInt b2h", "SoftInt b3h", "SoftInt b4h", "SoftInt b5h", "SoftInt b6h", "SoftInt b7h",
1095 "SoftInt b8h", "SoftInt b9h", "SoftInt bah", "SoftInt bbh", "SoftInt bch", "SoftInt bdh", "SoftInt beh", "SoftInt bfh",
1096 "SoftInt c0h", "SoftInt c1h", "SoftInt c2h", "SoftInt c3h", "SoftInt c4h", "SoftInt c5h", "SoftInt c6h", "SoftInt c7h",
1097 "SoftInt c8h", "SoftInt c9h", "SoftInt cah", "SoftInt cbh", "SoftInt cch", "SoftInt cdh", "SoftInt ceh", "SoftInt cfh",
1098 "SoftInt d0h", "SoftInt d1h", "SoftInt d2h", "SoftInt d3h", "SoftInt d4h", "SoftInt d5h", "SoftInt d6h", "SoftInt d7h",
1099 "SoftInt d8h", "SoftInt d9h", "SoftInt dah", "SoftInt dbh", "SoftInt dch", "SoftInt ddh", "SoftInt deh", "SoftInt dfh",
1100 "SoftInt e0h", "SoftInt e1h", "SoftInt e2h", "SoftInt e3h", "SoftInt e4h", "SoftInt e5h", "SoftInt e6h", "SoftInt e7h",
1101 "SoftInt e8h", "SoftInt e9h", "SoftInt eah", "SoftInt ebh", "SoftInt ech", "SoftInt edh", "SoftInt eeh", "SoftInt efh",
1102 "SoftInt f0h", "SoftInt f1h", "SoftInt f2h", "SoftInt f3h", "SoftInt f4h", "SoftInt f5h", "SoftInt f6h", "SoftInt f7h",
1103 "SoftInt f8h", "SoftInt f9h", "SoftInt fah", "SoftInt fbh", "SoftInt fch", "SoftInt fdh", "SoftInt feh", "SoftInt ffh",
1104 };
1105 if (uExit < RT_ELEMENTS(s_apszNames))
1106 return s_apszNames[uExit];
1107 return NULL;
1108}
1109
1110
1111/** Worker for iemR3InfoTlbPrintSlots and iemR3InfoTlbPrintAddress. */
1112static void iemR3InfoTlbPrintHeader(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb, bool *pfHeader)
1113{
1114 if (*pfHeader)
1115 return;
1116 pHlp->pfnPrintf(pHlp, "%cTLB for CPU %u:\n", &pVCpu->iem.s.CodeTlb == pTlb ? 'I' : 'D', pVCpu->idCpu);
1117 *pfHeader = true;
1118}
1119
1120
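/* Flags for iemR3InfoTlbPrintSlot and friends: list only entries whose tag matches
   the current TLB revision, resp. cross-check the entries against the guest page tables. */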
1121#define IEMR3INFOTLB_F_ONLY_VALID RT_BIT_32(0)
1122#define IEMR3INFOTLB_F_CHECK RT_BIT_32(1)
1123
1124/** Worker for iemR3InfoTlbPrintSlots and iemR3InfoTlbPrintAddress. */
1125static void iemR3InfoTlbPrintSlot(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb, IEMTLBENTRY const *pTlbe,
1126 uint32_t uSlot, uint32_t fFlags)
1127{
1128#ifndef VBOX_VMM_TARGET_ARMV8
1129 uint64_t const uTlbRevision = !(uSlot & 1) ? pTlb->uTlbRevision : pTlb->uTlbRevisionGlobal;
1130#else
1131 uint64_t const uTlbRevision = pTlb->uTlbRevision;
1132#endif
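    /* Note: on x86 targets the TLB slots come in even/odd pairs - even slots hold
       non-global entries (checked against uTlbRevision), odd slots hold global
       entries (checked against uTlbRevisionGlobal). */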
1133 if ((fFlags & IEMR3INFOTLB_F_ONLY_VALID) && (pTlbe->uTag & IEMTLB_REVISION_MASK) != uTlbRevision)
1134 return;
1135
1136 /* The address needs to be sign extended, thus the shifting fun here.*/
1137 RTGCPTR const GCPtr = (RTGCINTPTR)((pTlbe->uTag & ~IEMTLB_REVISION_MASK) << (64 - IEMTLB_TAG_ADDR_WIDTH))
1138 >> (64 - IEMTLB_TAG_ADDR_WIDTH - GUEST_PAGE_SHIFT);
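    /* I.e. shift the tag's topmost address bit up to bit 63, then arithmetic-shift back
       down so the result is a sign-extended, page-aligned guest virtual address. */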
1139 const char *pszValid = "";
1140#ifndef VBOX_VMM_TARGET_ARMV8
1141 char szTmp[128];
1142 if (fFlags & IEMR3INFOTLB_F_CHECK)
1143 {
1144 uint32_t const fInvSlotG = (uint32_t)!(uSlot & 1) << X86_PTE_BIT_G;
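        /* Even (non-global) slots expect the guest PTE's G bit to be clear, so fold the
           inverted G bit into the expected-flags mask used in the comparisons below. */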
1145 PGMPTWALKFAST WalkFast;
1146 int rc = PGMGstQueryPageFast(pVCpu, GCPtr, 0 /*fFlags - don't check or modify anything */, &WalkFast);
1147 pszValid = szTmp;
1148 if (RT_FAILURE(rc))
1149 switch (rc)
1150 {
1151 case VERR_PAGE_TABLE_NOT_PRESENT:
1152 switch ((WalkFast.fFailed & PGM_WALKFAIL_LEVEL_MASK) >> PGM_WALKFAIL_LEVEL_SHIFT)
1153 {
1154 case 1: pszValid = " stale(page-not-present)"; break;
1155 case 2: pszValid = " stale(pd-entry-not-present)"; break;
1156 case 3: pszValid = " stale(pdptr-entry-not-present)"; break;
1157 case 4: pszValid = " stale(pml4-entry-not-present)"; break;
1158 case 5: pszValid = " stale(pml5-entry-not-present)"; break;
1159 default: pszValid = " stale(VERR_PAGE_TABLE_NOT_PRESENT)"; break;
1160 }
1161 break;
1162 default: RTStrPrintf(szTmp, sizeof(szTmp), " stale(rc=%d)", rc); break;
1163 }
1164 else if (WalkFast.GCPhys != pTlbe->GCPhys)
1165 RTStrPrintf(szTmp, sizeof(szTmp), " stale(GCPhys=%RGp)", WalkFast.GCPhys);
1166 else if ( (~WalkFast.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_G | X86_PTE_A | X86_PTE_D))
1167 == ( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1168 | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_ACCESSED))
1169 | fInvSlotG ) )
1170 pszValid = " still-valid";
1171 else if ( (~WalkFast.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_G))
1172 == ((pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER)) | fInvSlotG) )
1173 switch ( (~WalkFast.fEffective & (X86_PTE_A | X86_PTE_D))
1174 ^ (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_ACCESSED)) )
1175 {
1176 case X86_PTE_A:
1177 pszValid = WalkFast.fEffective & X86_PTE_A ? " still-valid(accessed-now)" : " still-valid(accessed-no-more)";
1178 break;
1179 case X86_PTE_D:
1180 pszValid = WalkFast.fEffective & X86_PTE_D ? " still-valid(dirty-now)" : " still-valid(dirty-no-more)";
1181 break;
1182 case X86_PTE_D | X86_PTE_A:
1183 RTStrPrintf(szTmp, sizeof(szTmp), " still-valid(%s%s)",
1184 (~WalkFast.fEffective & X86_PTE_D) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY) ? ""
1185 : WalkFast.fEffective & X86_PTE_D ? "dirty-now" : "dirty-no-more",
1186 (~WalkFast.fEffective & X86_PTE_A) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED) ? ""
1187 : WalkFast.fEffective & X86_PTE_A ? " accessed-now" : " accessed-no-more");
1188 break;
1189 default: AssertFailed(); break;
1190 }
1191 else
1192 RTStrPrintf(szTmp, sizeof(szTmp), " stale(%s%s%s%s%s)",
1193 (~WalkFast.fEffective & X86_PTE_RW) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE) ? ""
1194 : WalkFast.fEffective & X86_PTE_RW ? "writeable-now" : "writable-no-more",
1195 (~WalkFast.fEffective & X86_PTE_US) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) ? ""
1196 : WalkFast.fEffective & X86_PTE_US ? " user-now" : " user-no-more",
1197 (~WalkFast.fEffective & X86_PTE_G) == fInvSlotG ? ""
1198 : WalkFast.fEffective & X86_PTE_G ? " global-now" : " global-no-more",
1199 (~WalkFast.fEffective & X86_PTE_D) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY) ? ""
1200 : WalkFast.fEffective & X86_PTE_D ? " dirty-now" : " dirty-no-more",
1201 (~WalkFast.fEffective & X86_PTE_A) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED) ? ""
1202 : WalkFast.fEffective & X86_PTE_A ? " accessed-now" : " accessed-no-more");
1203 }
1204#else
1205 RT_NOREF(pVCpu);
1206#endif
1207
1208 pHlp->pfnPrintf(pHlp, IEMTLB_SLOT_FMT ": %s %#018RX64 -> %RGp / %p / %#05x %s%s%s%s%s%s%s/%s%s%s%s/%s %s%s\n",
1209 uSlot,
1210 (pTlbe->uTag & IEMTLB_REVISION_MASK) == uTlbRevision ? "valid "
1211 : (pTlbe->uTag & IEMTLB_REVISION_MASK) == 0 ? "empty "
1212 : "expired",
1213 GCPtr, /* -> */
1214 pTlbe->GCPhys, /* / */ pTlbe->pbMappingR3,
1215 /* / */
1216 (uint32_t)(pTlbe->fFlagsAndPhysRev & ~IEMTLBE_F_PHYS_REV),
1217 /* */
1218 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE ? "R-" : "RW",
1219 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC ? "-" : "X",
1220 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED ? "-" : "A",
1221 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY ? "-" : "D",
1222 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER ? "U" : "S",
1223 !(uSlot & 1) ? "-" : "G",
1224 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE ? "4K" : "2M",
1225 /* / */
1226 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_WRITE ? "-" : "w",
1227 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? "-" : "r",
1228 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? "u" : "-",
1229 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_CODE_PAGE ? "c" : "-",
1230 /* / */
1231 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3 ? "N" : "M",
1232 (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pTlb->uTlbPhysRev ? "phys-valid"
1233 : (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == 0 ? "phys-empty" : "phys-expired",
1234 pszValid);
1235}
1236
1237
1238/** Displays one or more TLB slots. */
1239static void iemR3InfoTlbPrintSlots(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb,
1240 uint32_t uSlot, uint32_t cSlots, uint32_t fFlags, bool *pfHeader)
1241{
1242 if (uSlot < RT_ELEMENTS(pTlb->aEntries))
1243 {
1244 if (cSlots > RT_ELEMENTS(pTlb->aEntries))
1245 {
1246 pHlp->pfnPrintf(pHlp, "error: Too many slots given: %u, adjusting it down to the max (%u)\n",
1247 cSlots, RT_ELEMENTS(pTlb->aEntries));
1248 cSlots = RT_ELEMENTS(pTlb->aEntries);
1249 }
1250
1251 iemR3InfoTlbPrintHeader(pVCpu, pHlp, pTlb, pfHeader);
1252 while (cSlots-- > 0)
1253 {
1254 IEMTLBENTRY const Tlbe = pTlb->aEntries[uSlot];
1255 iemR3InfoTlbPrintSlot(pVCpu, pHlp, pTlb, &Tlbe, uSlot, fFlags);
1256 uSlot = (uSlot + 1) % RT_ELEMENTS(pTlb->aEntries);
1257 }
1258 }
1259 else
1260 pHlp->pfnPrintf(pHlp, "error: TLB slot is out of range: %u (%#x), max %u (%#x)\n",
1261 uSlot, uSlot, RT_ELEMENTS(pTlb->aEntries) - 1, RT_ELEMENTS(pTlb->aEntries) - 1);
1262}
1263
1264
1265/** Displays the TLB slot for the given address. */
1266static void iemR3InfoTlbPrintAddress(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb,
1267 uint64_t uAddress, uint32_t fFlags, bool *pfHeader)
1268{
1269 iemR3InfoTlbPrintHeader(pVCpu, pHlp, pTlb, pfHeader);
1270
1271 uint64_t const uTag = IEMTLB_CALC_TAG_NO_REV(uAddress);
1272#ifdef IEMTLB_TAG_TO_EVEN_INDEX
1273 uint32_t const uSlot = IEMTLB_TAG_TO_EVEN_INDEX(uTag);
1274#else
1275 uint32_t const uSlot = IEMTLB_TAG_TO_INDEX(uTag);
1276#endif
1277 IEMTLBENTRY const TlbeL = pTlb->aEntries[uSlot];
1278#ifndef VBOX_VMM_TARGET_ARMV8
1279 IEMTLBENTRY const TlbeG = pTlb->aEntries[uSlot + 1];
1280#endif
1281 pHlp->pfnPrintf(pHlp, "Address %#RX64 -> slot %#x - %s\n", uAddress, uSlot,
1282 TlbeL.uTag == (uTag | pTlb->uTlbRevision) ? "match"
1283 : (TlbeL.uTag & ~IEMTLB_REVISION_MASK) == uTag ? "expired" : "mismatch");
1284 iemR3InfoTlbPrintSlot(pVCpu, pHlp, pTlb, &TlbeL, uSlot, fFlags);
1285
1286#ifndef VBOX_VMM_TARGET_ARMV8
1287 pHlp->pfnPrintf(pHlp, "Address %#RX64 -> slot %#x - %s\n", uAddress, uSlot + 1,
1288 TlbeG.uTag == (uTag | pTlb->uTlbRevisionGlobal) ? "match"
1289 : (TlbeG.uTag & ~IEMTLB_REVISION_MASK) == uTag ? "expired" : "mismatch");
1290 iemR3InfoTlbPrintSlot(pVCpu, pHlp, pTlb, &TlbeG, uSlot + 1, fFlags);
1291#endif
1292}
1293
1294
1295/** Common worker for iemR3InfoDTlb and iemR3InfoITlb. */
1296static void iemR3InfoTlbCommon(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs, bool fITlb)
1297{
1298 /*
1299 * This is entirely argument driven.
1300 */
1301 static RTGETOPTDEF const s_aOptions[] =
1302 {
1303 { "--cpu", 'c', RTGETOPT_REQ_UINT32 },
1304 { "--vcpu", 'c', RTGETOPT_REQ_UINT32 },
1305 { "--check", 'C', RTGETOPT_REQ_NOTHING },
1306 { "all", 'A', RTGETOPT_REQ_NOTHING },
1307 { "--all", 'A', RTGETOPT_REQ_NOTHING },
1308 { "--address", 'a', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1309 { "--range", 'r', RTGETOPT_REQ_UINT32_PAIR | RTGETOPT_FLAG_HEX },
1310 { "--slot", 's', RTGETOPT_REQ_UINT32 | RTGETOPT_FLAG_HEX },
1311 { "--only-valid", 'v', RTGETOPT_REQ_NOTHING },
1312 };
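    /* Example debugger console usage: "info dtlb" dumps all data TLB slots, "info itlb -s 1f"
       shows code TLB slot 0x1f, and "info dtlb -C -v" lists only the valid data TLB entries
       and cross-checks them against the guest page tables. */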
1313
1314 RTGETOPTSTATE State;
1315 int rc = RTGetOptInit(&State, cArgs, papszArgs, s_aOptions, RT_ELEMENTS(s_aOptions), 0 /*iFirst*/, 0 /*fFlags*/);
1316 AssertRCReturnVoid(rc);
1317
1318 uint32_t cActionArgs = 0;
1319 bool fNeedHeader = true;
1320 bool fAddressMode = true;
1321 uint32_t fFlags = 0;
1322 PVMCPU const pVCpuCall = VMMGetCpu(pVM);
1323 PVMCPU pVCpu = pVCpuCall;
1324 if (!pVCpu)
1325 pVCpu = VMMGetCpuById(pVM, 0);
1326
1327 RTGETOPTUNION ValueUnion;
1328 while ((rc = RTGetOpt(&State, &ValueUnion)) != 0)
1329 {
1330 switch (rc)
1331 {
1332 case 'c':
1333 if (ValueUnion.u32 >= pVM->cCpus)
1334 pHlp->pfnPrintf(pHlp, "error: Invalid CPU ID: %u\n", ValueUnion.u32);
1335 else if (!pVCpu || pVCpu->idCpu != ValueUnion.u32)
1336 {
1337 pVCpu = VMMGetCpuById(pVM, ValueUnion.u32);
1338 fNeedHeader = true;
1339 if (!pVCpuCall || pVCpuCall->idCpu != ValueUnion.u32)
1340 {
1341 pHlp->pfnPrintf(pHlp, "info: Can't check guest PTs when switching to a different VCpu! Targetting %u, on %u.\n",
1342 ValueUnion.u32, pVCpuCall->idCpu);
1343 fFlags &= ~IEMR3INFOTLB_F_CHECK;
1344 }
1345 }
1346 break;
1347
1348 case 'C':
1349 if (!pVCpuCall)
1350 pHlp->pfnPrintf(pHlp, "error: Can't check guest PT when not running on an EMT!\n");
1351 else if (pVCpu != pVCpuCall)
1352 pHlp->pfnPrintf(pHlp, "error: Can't check guest PTs when on a different EMT! Targetting %u, on %u.\n",
1353 pVCpu->idCpu, pVCpuCall->idCpu);
1354 else
1355 fFlags |= IEMR3INFOTLB_F_CHECK;
1356 break;
1357
1358 case 'a':
1359 iemR3InfoTlbPrintAddress(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1360 ValueUnion.u64, fFlags, &fNeedHeader);
1361 fAddressMode = true;
1362 cActionArgs++;
1363 break;
1364
1365 case 'A':
1366 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1367 0, RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries), fFlags, &fNeedHeader);
1368 cActionArgs++;
1369 break;
1370
1371 case 'r':
1372 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1373 ValueUnion.PairU32.uFirst, ValueUnion.PairU32.uSecond, fFlags, &fNeedHeader);
1374 fAddressMode = false;
1375 cActionArgs++;
1376 break;
1377
1378 case 's':
1379 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1380 ValueUnion.u32, 1, fFlags, &fNeedHeader);
1381 fAddressMode = false;
1382 cActionArgs++;
1383 break;
1384
1385 case 'v':
1386 fFlags |= IEMR3INFOTLB_F_ONLY_VALID;
1387 break;
1388
1389 case VINF_GETOPT_NOT_OPTION:
1390 if (fAddressMode)
1391 {
1392 uint64_t uAddr;
1393 rc = RTStrToUInt64Full(ValueUnion.psz, 16, &uAddr);
1394 if (RT_SUCCESS(rc) && rc != VWRN_NUMBER_TOO_BIG)
1395 iemR3InfoTlbPrintAddress(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1396 uAddr, fFlags, &fNeedHeader);
1397 else
1398 pHlp->pfnPrintf(pHlp, "error: Invalid or malformed guest address '%s': %Rrc\n", ValueUnion.psz, rc);
1399 }
1400 else
1401 {
1402 uint32_t uSlot;
1403 rc = RTStrToUInt32Full(ValueUnion.psz, 16, &uSlot);
1404 if (RT_SUCCESS(rc) && rc != VWRN_NUMBER_TOO_BIG)
1405 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1406 uSlot, 1, fFlags, &fNeedHeader);
1407 else
1408 pHlp->pfnPrintf(pHlp, "error: Invalid or malformed TLB slot number '%s': %Rrc\n", ValueUnion.psz, rc);
1409 }
1410 cActionArgs++;
1411 break;
1412
1413 case 'h':
1414 pHlp->pfnPrintf(pHlp,
1415 "Usage: info %ctlb [options]\n"
1416 "\n"
1417 "Options:\n"
1418 " -c<n>, --cpu=<n>, --vcpu=<n>\n"
1419 " Selects the CPU which TLBs we're looking at. Default: Caller / 0\n"
1420 " -C,--check\n"
1421 " Check valid entries against guest PTs.\n"
1422 " -A, --all, all\n"
1423 " Display all the TLB entries (default if no other args).\n"
1424 " -a<virt>, --address=<virt>\n"
1425 " Shows the TLB entry for the specified guest virtual address.\n"
1426 " -r<slot:count>, --range=<slot:count>\n"
1427 " Shows the TLB entries for the specified slot range.\n"
1428 " -s<slot>,--slot=<slot>\n"
1429 " Shows the given TLB slot.\n"
1430 " -v,--only-valid\n"
1431 " Only show valid TLB entries (TAG, not phys)\n"
1432 "\n"
1433 "Non-options are interpreted according to the last -a, -r or -s option,\n"
1434 "defaulting to addresses if not preceeded by any of those options.\n"
1435 , fITlb ? 'i' : 'd');
1436 return;
1437
1438 default:
1439 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
1440 return;
1441 }
1442 }
1443
1444 /*
1445 * If no action taken, we display all (-A) by default.
1446 */
1447 if (!cActionArgs)
1448 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1449 0, RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries), fFlags, &fNeedHeader);
1450}
1451
1452
1453/**
1454 * @callback_method_impl{FNDBGFINFOARGVINT, itlb}
1455 */
1456static DECLCALLBACK(void) iemR3InfoITlb(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
1457{
1458 return iemR3InfoTlbCommon(pVM, pHlp, cArgs, papszArgs, true /*fITlb*/);
1459}
1460
1461
1462/**
1463 * @callback_method_impl{FNDBGFINFOARGVINT, dtlb}
1464 */
1465static DECLCALLBACK(void) iemR3InfoDTlb(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
1466{
1467 return iemR3InfoTlbCommon(pVM, pHlp, cArgs, papszArgs, false /*fITlb*/);
1468}
1469
1470
1471#ifdef IEM_WITH_TLB_TRACE
1472/**
1473 * @callback_method_impl{FNDBGFINFOARGVINT, tlbtrace}
1474 */
1475static DECLCALLBACK(void) iemR3InfoTlbTrace(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
1476{
1477 /*
1478 * Parse arguments.
1479 */
1480 static RTGETOPTDEF const s_aOptions[] =
1481 {
1482 { "--cpu", 'c', RTGETOPT_REQ_UINT32 },
1483 { "--vcpu", 'c', RTGETOPT_REQ_UINT32 },
1484 { "--last", 'l', RTGETOPT_REQ_UINT32 },
1485 { "--limit", 'l', RTGETOPT_REQ_UINT32 },
1486 { "--stop-at-global-flush", 'g', RTGETOPT_REQ_NOTHING },
1487 { "--resolve-rip", 'r', RTGETOPT_REQ_NOTHING },
1488 };
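    /* Example usage: "info tlbtrace 500 -r" shows the last 500 trace entries with the RIPs
       resolved to symbols; add "-g" to stop the listing at the first global flush. */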
1489
1490 RTGETOPTSTATE State;
1491 int rc = RTGetOptInit(&State, cArgs, papszArgs, s_aOptions, RT_ELEMENTS(s_aOptions), 0 /*iFirst*/, 0 /*fFlags*/);
1492 AssertRCReturnVoid(rc);
1493
1494 uint32_t cLimit = UINT32_MAX;
1495 bool fStopAtGlobalFlush = false;
1496 bool fResolveRip = false;
1497 PVMCPU const pVCpuCall = VMMGetCpu(pVM);
1498 PVMCPU pVCpu = pVCpuCall;
1499 if (!pVCpu)
1500 pVCpu = VMMGetCpuById(pVM, 0);
1501
1502 RTGETOPTUNION ValueUnion;
1503 while ((rc = RTGetOpt(&State, &ValueUnion)) != 0)
1504 {
1505 switch (rc)
1506 {
1507 case 'c':
1508 if (ValueUnion.u32 >= pVM->cCpus)
1509 pHlp->pfnPrintf(pHlp, "error: Invalid CPU ID: %u\n", ValueUnion.u32);
1510 else if (!pVCpu || pVCpu->idCpu != ValueUnion.u32)
1511 pVCpu = VMMGetCpuById(pVM, ValueUnion.u32);
1512 break;
1513
1514 case 'l':
1515 cLimit = ValueUnion.u32;
1516 break;
1517
1518 case 'g':
1519 fStopAtGlobalFlush = true;
1520 break;
1521
1522 case 'r':
1523 fResolveRip = true;
1524 break;
1525
1526 case 'h':
1527 pHlp->pfnPrintf(pHlp,
1528 "Usage: info tlbtrace [options] [n]\n"
1529 "\n"
1530 "Options:\n"
1531 " -c<n>, --cpu=<n>, --vcpu=<n>\n"
1532 " Selects the CPU which TLB trace we're looking at. Default: Caller / 0\n"
1533 " [n], -l<n>, --last=<n>\n"
1534 " Limit display to the last N entries. Default: all\n"
1535 " -g, --stop-at-global-flush\n"
1536 " Stop after the first global flush entry.\n"
1537 " -r, --resolve-rip\n"
1538 " Resolve symbols for the flattened RIP addresses.\n"
1539 );
1540 return;
1541
1542 case VINF_GETOPT_NOT_OPTION:
1543 rc = RTStrToUInt32Full(ValueUnion.psz, 0, &cLimit);
1544 if (RT_SUCCESS(rc))
1545 break;
1546 pHlp->pfnPrintf(pHlp, "error: failed to convert '%s' to a number: %Rrc\n", ValueUnion.psz, rc);
1547 return;
1548
1549 default:
1550 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
1551 return;
1552 }
1553 }
1554
1555 /*
1556 * Get the details.
1557 */
1558 AssertReturnVoid(pVCpu);
1559 Assert(pVCpu->iem.s.cTlbTraceEntriesShift <= 28);
1560 uint32_t idx = pVCpu->iem.s.idxTlbTraceEntry;
1561 uint32_t const cShift = RT_MIN(pVCpu->iem.s.cTlbTraceEntriesShift, 28);
1562 uint32_t const fMask = RT_BIT_32(cShift) - 1;
1563 uint32_t cLeft = RT_MIN(RT_MIN(idx, RT_BIT_32(cShift)), cLimit);
1564 PCIEMTLBTRACEENTRY paEntries = pVCpu->iem.s.paTlbTraceEntries;
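    /* The trace log is a power-of-two ring buffer: idxTlbTraceEntry counts all entries ever
       written, so it is masked when indexing and the number of entries to display is capped
       at the buffer size (and the user supplied limit). */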
1565 if (cLeft && paEntries)
1566 {
1567 /*
1568 * Display the entries.
1569 */
1570 pHlp->pfnPrintf(pHlp, "TLB Trace for CPU %u:\n", pVCpu->idCpu);
1571 while (cLeft-- > 0)
1572 {
1573 PCIEMTLBTRACEENTRY const pCur = &paEntries[--idx & fMask];
1574 const char *pszSymbol = "";
1575 union
1576 {
1577 RTDBGSYMBOL Symbol;
1578 char ach[sizeof(RTDBGSYMBOL) + 32];
1579 } uBuf;
1580 if (fResolveRip)
1581 {
1582 RTGCINTPTR offDisp = 0;
1583 DBGFADDRESS Addr;
1584 rc = DBGFR3AsSymbolByAddr(pVM->pUVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pVM->pUVM, &Addr, pCur->rip),
1585 RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL
1586 | RTDBGSYMADDR_FLAGS_SKIP_ABS
1587 | RTDBGSYMADDR_FLAGS_SKIP_ABS_IN_DEFERRED,
1588 &offDisp, &uBuf.Symbol, NULL);
1589 if (RT_SUCCESS(rc))
1590 {
1591 /* Add displacement. */
1592 if (offDisp)
1593 {
1594 size_t const cchName = strlen(uBuf.Symbol.szName);
1595 char * const pszEndName = &uBuf.Symbol.szName[cchName];
1596 size_t const cbLeft = sizeof(uBuf) - sizeof(uBuf.Symbol) + sizeof(uBuf.Symbol.szName) - cchName;
1597 if (offDisp > 0)
1598 RTStrPrintf(pszEndName, cbLeft, "+%#1RGv", offDisp);
1599 else
1600 RTStrPrintf(pszEndName, cbLeft, "-%#1RGv", -offDisp);
1601 }
1602
1603 /* Put a space before it. */
1604 AssertCompile(RTASSERT_OFFSET_OF(RTDBGSYMBOL, szName) > 0);
1605 char *pszName = uBuf.Symbol.szName;
1606 *--pszName = ' ';
1607 pszSymbol = pszName;
1608 }
1609 }
1610 static const char *s_apszTlbType[2] = { "code", "data" };
1611 static const char *s_apszScanType[4] = { "skipped", "global", "non-global", "both" };
1612 switch (pCur->enmType)
1613 {
1614 case kIemTlbTraceType_InvlPg:
1615 pHlp->pfnPrintf(pHlp, "%u: %016RX64 invlpg %RGv slot=" IEMTLB_SLOT_FMT "%s\n", idx, pCur->rip,
1616 pCur->u64Param, (uint32_t)IEMTLB_ADDR_TO_EVEN_INDEX(pCur->u64Param), pszSymbol);
1617 break;
1618 case kIemTlbTraceType_EvictSlot:
1619 pHlp->pfnPrintf(pHlp, "%u: %016RX64 evict %s slot=" IEMTLB_SLOT_FMT " %RGv (%#RX64) gcphys=%RGp%s\n",
1620 idx, pCur->rip, s_apszTlbType[pCur->bParam & 1], pCur->u32Param,
1621 (RTGCINTPTR)((pCur->u64Param & ~IEMTLB_REVISION_MASK) << (64 - IEMTLB_TAG_ADDR_WIDTH))
1622 >> (64 - IEMTLB_TAG_ADDR_WIDTH - GUEST_PAGE_SHIFT), pCur->u64Param,
1623 pCur->u64Param2, pszSymbol);
1624 break;
1625 case kIemTlbTraceType_LargeEvictSlot:
1626 pHlp->pfnPrintf(pHlp, "%u: %016RX64 large evict %s slot=" IEMTLB_SLOT_FMT " %RGv (%#RX64) gcphys=%RGp%s\n",
1627 idx, pCur->rip, s_apszTlbType[pCur->bParam & 1], pCur->u32Param,
1628 (RTGCINTPTR)((pCur->u64Param & ~IEMTLB_REVISION_MASK) << (64 - IEMTLB_TAG_ADDR_WIDTH))
1629 >> (64 - IEMTLB_TAG_ADDR_WIDTH - GUEST_PAGE_SHIFT), pCur->u64Param,
1630 pCur->u64Param2, pszSymbol);
1631 break;
1632 case kIemTlbTraceType_LargeScan:
1633 pHlp->pfnPrintf(pHlp, "%u: %016RX64 large scan %s %s%s\n", idx, pCur->rip, s_apszTlbType[pCur->bParam & 1],
1634 s_apszScanType[pCur->u32Param & 3], pszSymbol);
1635 break;
1636
1637 case kIemTlbTraceType_Flush:
1638 pHlp->pfnPrintf(pHlp, "%u: %016RX64 flush %s rev=%#RX64%s\n", idx, pCur->rip,
1639 s_apszTlbType[pCur->bParam & 1], pCur->u64Param, pszSymbol);
1640 break;
1641 case kIemTlbTraceType_FlushGlobal:
1642 pHlp->pfnPrintf(pHlp, "%u: %016RX64 flush %s rev=%#RX64 grev=%#RX64%s\n", idx, pCur->rip,
1643 s_apszTlbType[pCur->bParam & 1], pCur->u64Param, pCur->u64Param2, pszSymbol);
1644 if (fStopAtGlobalFlush)
1645 return;
1646 break;
1647 case kIemTlbTraceType_Load:
1648 case kIemTlbTraceType_LoadGlobal:
1649 pHlp->pfnPrintf(pHlp, "%u: %016RX64 %cload %s %RGv slot=" IEMTLB_SLOT_FMT " gcphys=%RGp fTlb=%#RX32%s\n",
1650 idx, pCur->rip,
1651 pCur->enmType == kIemTlbTraceType_LoadGlobal ? 'g' : 'l', s_apszTlbType[pCur->bParam & 1],
1652 pCur->u64Param,
1653 (uint32_t)IEMTLB_ADDR_TO_EVEN_INDEX(pCur->u64Param)
1654 | (pCur->enmType == kIemTlbTraceType_LoadGlobal),
1655 (RTGCPTR)pCur->u64Param2, pCur->u32Param, pszSymbol);
1656 break;
1657
1658 case kIemTlbTraceType_Load_Cr0:
1659 pHlp->pfnPrintf(pHlp, "%u: %016RX64 load cr0 %08RX64 (was %08RX64)%s\n",
1660 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pszSymbol);
1661 break;
1662 case kIemTlbTraceType_Load_Cr3:
1663 pHlp->pfnPrintf(pHlp, "%u: %016RX64 load cr3 %016RX64 (was %016RX64)%s\n",
1664 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pszSymbol);
1665 break;
1666 case kIemTlbTraceType_Load_Cr4:
1667 pHlp->pfnPrintf(pHlp, "%u: %016RX64 load cr4 %08RX64 (was %08RX64)%s\n",
1668 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pszSymbol);
1669 break;
1670 case kIemTlbTraceType_Load_Efer:
1671 pHlp->pfnPrintf(pHlp, "%u: %016RX64 load efer %016RX64 (was %016RX64)%s\n",
1672 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pszSymbol);
1673 break;
1674
1675 case kIemTlbTraceType_Irq:
1676 pHlp->pfnPrintf(pHlp, "%u: %016RX64 irq %#04x flags=%#x eflboth=%#RX64%s\n",
1677 idx, pCur->rip, pCur->bParam, pCur->u32Param,
1678 pCur->u64Param & ((RT_BIT_64(CPUMX86EFLAGS_HW_BITS) - 1) | CPUMX86EFLAGS_INT_MASK_64),
1679 pszSymbol);
1680 break;
1681 case kIemTlbTraceType_Xcpt:
1682 if (pCur->u32Param & IEM_XCPT_FLAGS_CR2)
1683 pHlp->pfnPrintf(pHlp, "%u: %016RX64 xcpt %#04x flags=%#x errcd=%#x cr2=%RX64%s\n",
1684 idx, pCur->rip, pCur->bParam, pCur->u32Param, pCur->u64Param, pCur->u64Param2, pszSymbol);
1685 else if (pCur->u32Param & IEM_XCPT_FLAGS_ERR)
1686 pHlp->pfnPrintf(pHlp, "%u: %016RX64 xcpt %#04x flags=%#x errcd=%#x%s\n",
1687 idx, pCur->rip, pCur->bParam, pCur->u32Param, pCur->u64Param, pszSymbol);
1688 else
1689 pHlp->pfnPrintf(pHlp, "%u: %016RX64 xcpt %#04x flags=%#x%s\n",
1690 idx, pCur->rip, pCur->bParam, pCur->u32Param, pszSymbol);
1691 break;
1692 case kIemTlbTraceType_IRet:
1693 pHlp->pfnPrintf(pHlp, "%u: %016RX64 iret cs:rip=%04x:%016RX64 efl=%08RX32%s\n",
1694 idx, pCur->rip, pCur->u32Param, pCur->u64Param, (uint32_t)pCur->u64Param2, pszSymbol);
1695 break;
1696
1697 case kIemTlbTraceType_Tb_Compile:
1698 pHlp->pfnPrintf(pHlp, "%u: %016RX64 tb comp GCPhysPc=%012RX64%s\n",
1699 idx, pCur->rip, pCur->u64Param, pszSymbol);
1700 break;
1701 case kIemTlbTraceType_Tb_Exec_Threaded:
1702 pHlp->pfnPrintf(pHlp, "%u: %016RX64 tb thrd GCPhysPc=%012RX64 tb=%p used=%u%s\n",
1703 idx, pCur->rip, pCur->u64Param, (uintptr_t)pCur->u64Param2, pCur->u32Param, pszSymbol);
1704 break;
1705 case kIemTlbTraceType_Tb_Exec_Native:
1706 pHlp->pfnPrintf(pHlp, "%u: %016RX64 tb n8ve GCPhysPc=%012RX64 tb=%p used=%u%s\n",
1707 idx, pCur->rip, pCur->u64Param, (uintptr_t)pCur->u64Param2, pCur->u32Param, pszSymbol);
1708 break;
1709
1710 case kIemTlbTraceType_User0:
1711 pHlp->pfnPrintf(pHlp, "%u: %016RX64 user0 %016RX64 %016RX64 %08RX32 %02RX8%s\n",
1712 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pCur->u32Param, pCur->bParam, pszSymbol);
1713 break;
1714 case kIemTlbTraceType_User1:
1715 pHlp->pfnPrintf(pHlp, "%u: %016RX64 user1 %016RX64 %016RX64 %08RX32 %02RX8%s\n",
1716 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pCur->u32Param, pCur->bParam, pszSymbol);
1717 break;
1718 case kIemTlbTraceType_User2:
1719 pHlp->pfnPrintf(pHlp, "%u: %016RX64 user2 %016RX64 %016RX64 %08RX32 %02RX8%s\n",
1720 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pCur->u32Param, pCur->bParam, pszSymbol);
1721 break;
1722 case kIemTlbTraceType_User3:
1723 pHlp->pfnPrintf(pHlp, "%u: %016RX64 user3 %016RX64 %016RX64 %08RX32 %02RX8%s\n",
1724 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pCur->u32Param, pCur->bParam, pszSymbol);
1725 break;
1726
1727 case kIemTlbTraceType_Invalid:
1728 pHlp->pfnPrintf(pHlp, "%u: Invalid!\n");
1729 break;
1730 }
1731 }
1732 }
1733 else
1734 pHlp->pfnPrintf(pHlp, "No trace entries to display\n");
1735}
1736#endif /* IEM_WITH_TLB_TRACE */
1737
1738#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
1739/**
1740 * @callback_method_impl{FNDBGFINFOARGVINT, tb}
1741 */
1742static DECLCALLBACK(void) iemR3InfoTb(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
1743{
1744 /*
1745 * Parse arguments.
1746 */
1747 static RTGETOPTDEF const s_aOptions[] =
1748 {
1749 { "--cpu", 'c', RTGETOPT_REQ_UINT32 },
1750 { "--vcpu", 'c', RTGETOPT_REQ_UINT32 },
1751 { "--addr", 'a', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1752 { "--address", 'a', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1753 { "--phys", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1754 { "--physical", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1755 { "--phys-addr", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1756 { "--phys-address", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1757 { "--physical-address", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1758 { "--flags", 'f', RTGETOPT_REQ_UINT32 | RTGETOPT_FLAG_HEX },
1759 };
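    /* Example usage: "info tb" disassembles the TB for the current CS:RIP, while "info tb -a <virt>"
       or "info tb -p <phys>" looks it up by guest virtual resp. physical PC address (the flags
       default to the current CPU mode unless -f is given). */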
1760
1761 RTGETOPTSTATE State;
1762 int rc = RTGetOptInit(&State, cArgs, papszArgs, s_aOptions, RT_ELEMENTS(s_aOptions), 0 /*iFirst*/, 0 /*fFlags*/);
1763 AssertRCReturnVoid(rc);
1764
1765 PVMCPU const pVCpuThis = VMMGetCpu(pVM);
1766 PVMCPU pVCpu = pVCpuThis ? pVCpuThis : VMMGetCpuById(pVM, 0);
1767 RTGCPHYS GCPhysPc = NIL_RTGCPHYS;
1768 RTGCPTR GCVirt = NIL_RTGCPTR;
1769 uint32_t fFlags = UINT32_MAX;
1770
1771 RTGETOPTUNION ValueUnion;
1772 while ((rc = RTGetOpt(&State, &ValueUnion)) != 0)
1773 {
1774 switch (rc)
1775 {
1776 case 'c':
1777 if (ValueUnion.u32 >= pVM->cCpus)
1778 pHlp->pfnPrintf(pHlp, "error: Invalid CPU ID: %u\n", ValueUnion.u32);
1779 else if (!pVCpu || pVCpu->idCpu != ValueUnion.u32)
1780 pVCpu = VMMGetCpuById(pVM, ValueUnion.u32);
1781 break;
1782
1783 case 'a':
1784 GCVirt = ValueUnion.u64;
1785 GCPhysPc = NIL_RTGCPHYS;
1786 break;
1787
1788 case 'p':
1789 GCVirt = NIL_RTGCPTR;
1790 GCPhysPc = ValueUnion.u64;
1791 break;
1792
1793 case 'f':
1794 fFlags = ValueUnion.u32;
1795 break;
1796
1797 case 'h':
1798 pHlp->pfnPrintf(pHlp,
1799 "Usage: info tb [options]\n"
1800 "\n"
1801 "Options:\n"
1802 " -c<n>, --cpu=<n>, --vcpu=<n>\n"
1803 " Selects the CPU which TBs we're looking at. Default: Caller / 0\n"
1804 " -a<virt>, --address=<virt>\n"
1805 " Shows the TB for the specified guest virtual address.\n"
1806 " -p<phys>, --phys=<phys>, --phys-addr=<phys>\n"
1807 " Shows the TB for the specified guest physical address.\n"
1808 " -f<flags>,--flags=<flags>\n"
1809 " The TB flags value (hex) to use when looking up the TB.\n"
1810 "\n"
1811 "The default is to use CS:RIP and derive flags from the CPU mode.\n");
1812 return;
1813
1814 default:
1815 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
1816 return;
1817 }
1818 }
1819
1820 /* Currently, only do work on the same EMT. */
1821 if (pVCpu != pVCpuThis)
1822 {
1823 pHlp->pfnPrintf(pHlp, "TODO: Cross EMT calling not supported yet: targeting %u, caller on %d\n",
1824 pVCpu->idCpu, pVCpuThis ? (int)pVCpuThis->idCpu : -1);
1825 return;
1826 }
1827
1828 /*
1829 * Defaults.
1830 */
1831 if (GCPhysPc == NIL_RTGCPHYS)
1832 {
1833 if (GCVirt == NIL_RTGCPTR)
1834 GCVirt = CPUMGetGuestFlatPC(pVCpu);
1835 rc = PGMPhysGCPtr2GCPhys(pVCpu, GCVirt, &GCPhysPc);
1836 if (RT_FAILURE(rc))
1837 {
1838 pHlp->pfnPrintf(pHlp, "Failed to convert %%%RGv to an guest physical address: %Rrc\n", GCVirt, rc);
1839 return;
1840 }
1841 }
1842 if (fFlags == UINT32_MAX)
1843 {
1844 /* Note! This is duplicating code in IEMAllThrdRecompiler. */
1845 fFlags = iemCalcExecFlags(pVCpu);
1846 if (pVM->cCpus == 1)
1847 fFlags |= IEM_F_X86_DISREGARD_LOCK;
1848 if (CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
1849 fFlags |= IEMTB_F_INHIBIT_SHADOW;
1850 if (CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
1851 fFlags |= IEMTB_F_INHIBIT_NMI;
1852 if ((IEM_F_MODE_CPUMODE_MASK & fFlags) != IEMMODE_64BIT)
1853 {
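            /* Outside 64-bit mode, request CS.LIM checks when the limit is less than roughly
               a page plus the maximum instruction length away from the current EIP (adjusted
               for the segment base's page offset). */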
1854 int64_t const offFromLim = (int64_t)pVCpu->cpum.GstCtx.cs.u32Limit - (int64_t)pVCpu->cpum.GstCtx.eip;
1855 if (offFromLim < X86_PAGE_SIZE + 16 - (int32_t)(pVCpu->cpum.GstCtx.cs.u64Base & GUEST_PAGE_OFFSET_MASK))
1856 fFlags |= IEMTB_F_CS_LIM_CHECKS;
1857 }
1858 }
1859
1860 /*
1861 * Do the lookup...
1862 *
1863 * Note! This is also duplicating code in IEMAllThrdRecompiler. We don't
1864 * have much choice since we don't want to increase use counters and
1865 * trigger native recompilation.
1866 */
1867 fFlags &= IEMTB_F_KEY_MASK;
1868 IEMTBCACHE const * const pTbCache = pVCpu->iem.s.pTbCacheR3;
1869 uint32_t const idxHash = IEMTBCACHE_HASH(pTbCache, fFlags, GCPhysPc);
1870 PCIEMTB pTb = IEMTBCACHE_PTR_GET_TB(pTbCache->apHash[idxHash]);
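    /* Walk the hash bucket's collision chain looking for a TB with matching guest physical PC
       and key flags. */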
1871 while (pTb)
1872 {
1873 if (pTb->GCPhysPc == GCPhysPc)
1874 {
1875 if ((pTb->fFlags & IEMTB_F_KEY_MASK) == fFlags)
1876 {
1877 /// @todo if (pTb->x86.fAttr == (uint16_t)pVCpu->cpum.GstCtx.cs.Attr.u)
1878 break;
1879 }
1880 }
1881 pTb = pTb->pNext;
1882 }
1883 if (!pTb)
1884 pHlp->pfnPrintf(pHlp, "PC=%RGp fFlags=%#x - no TB found on #%u\n", GCPhysPc, fFlags, pVCpu->idCpu);
1885 else
1886 {
1887 /*
1888 * Disassemble according to type.
1889 */
1890 switch (pTb->fFlags & IEMTB_F_TYPE_MASK)
1891 {
1892# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
1893 case IEMTB_F_TYPE_NATIVE:
1894 pHlp->pfnPrintf(pHlp, "PC=%RGp fFlags=%#x on #%u: %p - native\n", GCPhysPc, fFlags, pVCpu->idCpu, pTb);
1895 iemNativeDisassembleTb(pVCpu, pTb, pHlp);
1896 break;
1897# endif
1898
1899 case IEMTB_F_TYPE_THREADED:
1900 pHlp->pfnPrintf(pHlp, "PC=%RGp fFlags=%#x on #%u: %p - threaded\n", GCPhysPc, fFlags, pVCpu->idCpu, pTb);
1901 iemThreadedDisassembleTb(pTb, pHlp);
1902 break;
1903
1904 default:
1905 pHlp->pfnPrintf(pHlp, "PC=%RGp fFlags=%#x on #%u: %p - ??? %#x\n",
1906 GCPhysPc, fFlags, pVCpu->idCpu, pTb, pTb->fFlags);
1907 break;
1908 }
1909 }
1910}
1911#endif /* VBOX_WITH_IEM_RECOMPILER && !VBOX_VMM_TARGET_ARMV8 */
1912
1913
1914#ifdef VBOX_WITH_DEBUGGER
1915
1916/** @callback_method_impl{FNDBGCCMD,
1917 * Implements the 'iemflushtlb' command. }
1918 */
1919static DECLCALLBACK(int) iemR3DbgFlushTlbs(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
1920{
1921 VMCPUID idCpu = DBGCCmdHlpGetCurrentCpu(pCmdHlp);
1922 PVMCPU pVCpu = VMMR3GetCpuByIdU(pUVM, idCpu);
1923 if (pVCpu)
1924 {
1925 VMR3ReqPriorityCallVoidWaitU(pUVM, idCpu, (PFNRT)IEMTlbInvalidateAllGlobal, 1, pVCpu);
1926 return VINF_SUCCESS;
1927 }
1928 RT_NOREF(paArgs, cArgs);
1929 return DBGCCmdHlpFail(pCmdHlp, pCmd, "failed to get the PVMCPU for the current CPU");
1930}
1931
1932
1933/**
1934 * Called by IEMR3Init to register debugger commands.
1935 */
1936static void iemR3RegisterDebuggerCommands(void)
1937{
1938 /*
1939 * Register debugger commands.
1940 */
1941 static DBGCCMD const s_aCmds[] =
1942 {
1943 {
1944 /* .pszCmd = */ "iemflushtlb",
1945 /* .cArgsMin = */ 0,
1946 /* .cArgsMax = */ 0,
1947 /* .paArgDescs = */ NULL,
1948 /* .cArgDescs = */ 0,
1949 /* .fFlags = */ 0,
1950 /* .pfnHandler = */ iemR3DbgFlushTlbs,
1951 /* .pszSyntax = */ "",
1952 /* .pszDescription = */ "Flushed the code and data TLBs"
1953 },
1954 };
1955
1956 int rc = DBGCRegisterCommands(&s_aCmds[0], RT_ELEMENTS(s_aCmds));
1957 AssertLogRelRC(rc);
1958}
1959
1960#endif /* VBOX_WITH_DEBUGGER */
1961