VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/IEMR3.cpp@ 105591

Last change on this file since 105591 was 105591, checked in by vboxsync, 8 months ago

VMM/IEM: Fixed a bug in iemTlbInvalidateLargePageWorkerInner for TLB sizes less than 512 for 2MB pages, and 1024 for 4MB pages. bugref:10727
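The fix concerns how many 4 KiB TLB slots a large-page invalidation walk may touch: a 2MB page covers 512 slots and a 4MB page covers 1024, so a TLB configured with fewer entries than that cannot blindly walk 512 or 1024 slots. As a hedged illustration only (the names below are made up for this note and do not appear in the file), one plausible shape of such a fix is a clamp on the walk length:

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative sketch, not VirtualBox code: invalidate the 4 KiB slots
       covered by one large page in a small direct-mapped TLB. */
    #define SKETCH_TLB_ENTRIES 256                      /* hypothetical TLB size smaller than 512 */

    typedef struct SKETCHTLBENTRY { uint64_t uTag; } SKETCHTLBENTRY;

    static void sketchTlbInvalidateLargePage(SKETCHTLBENTRY *paEntries, uint64_t uTagFirst, bool f2MBPage)
    {
        uint32_t cSlots = f2MBPage ? 512 : 1024;        /* 4 KiB pages per large page */
        if (cSlots > SKETCH_TLB_ENTRIES)                /* plausible clamp: keep the walk length */
            cSlots = SKETCH_TLB_ENTRIES;                /* bounded by the TLB's own entry count  */
        uint32_t const idxFirst = (uint32_t)uTagFirst & (SKETCH_TLB_ENTRIES - 1);
        for (uint32_t i = 0; i < cSlots; i++)
            paEntries[(idxFirst + i) & (SKETCH_TLB_ENTRIES - 1)].uTag = 0; /* mark slot invalid */
    }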

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 122.5 KB
 
1/* $Id: IEMR3.cpp 105591 2024-08-05 23:05:33Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.alldomusa.eu.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_EM
33#define VMCPU_INCL_CPUM_GST_CTX
34#include <VBox/vmm/iem.h>
35#include <VBox/vmm/cpum.h>
36#include <VBox/vmm/dbgf.h>
37#include <VBox/vmm/mm.h>
38#include <VBox/vmm/ssm.h>
39#if defined(VBOX_VMM_TARGET_ARMV8)
40# include "IEMInternal-armv8.h"
41#else
42# include "IEMInternal.h"
43#endif
44#include <VBox/vmm/vm.h>
45#include <VBox/vmm/vmapi.h>
46#include <VBox/err.h>
47#ifdef VBOX_WITH_DEBUGGER
48# include <VBox/dbg.h>
49#endif
50
51#include <iprt/assert.h>
52#include <iprt/getopt.h>
53#ifdef IEM_WITH_TLB_TRACE
54# include <iprt/mem.h>
55#endif
56#include <iprt/string.h>
57
58#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
59# include "IEMN8veRecompiler.h"
60# include "IEMThreadedFunctions.h"
61# include "IEMInline.h"
62#endif
63
64
65/*********************************************************************************************************************************
66* Internal Functions *
67*********************************************************************************************************************************/
68static FNDBGFINFOARGVINT iemR3InfoITlb;
69static FNDBGFINFOARGVINT iemR3InfoDTlb;
70#ifdef IEM_WITH_TLB_TRACE
71static FNDBGFINFOARGVINT iemR3InfoTlbTrace;
72#endif
73#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
74static FNDBGFINFOARGVINT iemR3InfoTb;
75#endif
76#ifdef VBOX_WITH_DEBUGGER
77static void iemR3RegisterDebuggerCommands(void);
78#endif
79
80
81#if !defined(VBOX_VMM_TARGET_ARMV8)
82static const char *iemGetTargetCpuName(uint32_t enmTargetCpu)
83{
84 switch (enmTargetCpu)
85 {
86#define CASE_RET_STR(enmValue) case enmValue: return #enmValue + (sizeof("IEMTARGETCPU_") - 1)
87 CASE_RET_STR(IEMTARGETCPU_8086);
88 CASE_RET_STR(IEMTARGETCPU_V20);
89 CASE_RET_STR(IEMTARGETCPU_186);
90 CASE_RET_STR(IEMTARGETCPU_286);
91 CASE_RET_STR(IEMTARGETCPU_386);
92 CASE_RET_STR(IEMTARGETCPU_486);
93 CASE_RET_STR(IEMTARGETCPU_PENTIUM);
94 CASE_RET_STR(IEMTARGETCPU_PPRO);
95 CASE_RET_STR(IEMTARGETCPU_CURRENT);
96#undef CASE_RET_STR
97 default: return "Unknown";
98 }
99}
100#endif
101
102
103/**
104 * Initializes the interpreted execution manager.
105 *
106 * This must be called after CPUM as we're querying information from CPUM about
107 * the guest and host CPUs.
108 *
109 * @returns VBox status code.
110 * @param pVM The cross context VM structure.
111 */
112VMMR3DECL(int) IEMR3Init(PVM pVM)
113{
114 /*
115 * Read configuration.
116 */
117#if (!defined(VBOX_VMM_TARGET_ARMV8) && !defined(VBOX_WITHOUT_CPUID_HOST_CALL)) || defined(VBOX_WITH_IEM_RECOMPILER)
118 PCFGMNODE const pIem = CFGMR3GetChild(CFGMR3GetRoot(pVM), "IEM");
119 int rc;
120#endif
121
122#if !defined(VBOX_VMM_TARGET_ARMV8) && !defined(VBOX_WITHOUT_CPUID_HOST_CALL)
123 /** @cfgm{/IEM/CpuIdHostCall, boolean, false}
124 * Controls whether the custom VBox specific CPUID host call interface is
125 * enabled or not. */
126# ifdef DEBUG_bird
127 rc = CFGMR3QueryBoolDef(pIem, "CpuIdHostCall", &pVM->iem.s.fCpuIdHostCall, true);
128# else
129 rc = CFGMR3QueryBoolDef(pIem, "CpuIdHostCall", &pVM->iem.s.fCpuIdHostCall, false);
130# endif
131 AssertLogRelRCReturn(rc, rc);
132#endif
133
134#ifdef VBOX_WITH_IEM_RECOMPILER
135 /** @cfgm{/IEM/MaxTbCount, uint32_t, 524288}
136 * Max number of TBs per EMT. */
137 uint32_t cMaxTbs = 0;
138 rc = CFGMR3QueryU32Def(pIem, "MaxTbCount", &cMaxTbs, _512K);
139 AssertLogRelRCReturn(rc, rc);
140 if (cMaxTbs < _16K || cMaxTbs > _8M)
141 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
142 "MaxTbCount value %u (%#x) is out of range (min %u, max %u)", cMaxTbs, cMaxTbs, _16K, _8M);
143
144 /** @cfgm{/IEM/InitialTbCount, uint32_t, 32768}
145 * Initial (minimum) number of TBs per EMT in ring-3. */
146 uint32_t cInitialTbs = 0;
147 rc = CFGMR3QueryU32Def(pIem, "InitialTbCount", &cInitialTbs, RT_MIN(cMaxTbs, _32K));
148 AssertLogRelRCReturn(rc, rc);
149 if (cInitialTbs < _16K || cInitialTbs > _8M)
150 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
151 "InitialTbCount value %u (%#x) is out of range (min %u, max %u)", cInitialTbs, cInitialTbs, _16K, _8M);
152
153 /* Check that the two values make sense together. Expect user/api to do
154 the right thing or get lost. */
155 if (cInitialTbs > cMaxTbs)
156 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
157 "InitialTbCount value %u (%#x) is higher than the MaxTbCount value %u (%#x)",
158 cInitialTbs, cInitialTbs, cMaxTbs, cMaxTbs);
159
160 /** @cfgm{/IEM/MaxExecMem, uint64_t, 512 MiB}
161 * Max executable memory for recompiled code per EMT. */
162 uint64_t cbMaxExec = 0;
163 rc = CFGMR3QueryU64Def(pIem, "MaxExecMem", &cbMaxExec, _512M);
164 AssertLogRelRCReturn(rc, rc);
165 if (cbMaxExec < _1M || cbMaxExec > 16*_1G64)
166 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
167 "MaxExecMem value %'RU64 (%#RX64) is out of range (min %'RU64, max %'RU64)",
168 cbMaxExec, cbMaxExec, (uint64_t)_1M, 16*_1G64);
169
170 /** @cfgm{/IEM/ExecChunkSize, uint32_t, 0 (auto)}
171 * The executable memory allocator chunk size. */
172 uint32_t cbChunkExec = 0;
173 rc = CFGMR3QueryU32Def(pIem, "ExecChunkSize", &cbChunkExec, 0);
174 AssertLogRelRCReturn(rc, rc);
175 if (cbChunkExec != 0 && cbChunkExec != UINT32_MAX && (cbChunkExec < _1M || cbChunkExec > _256M))
176 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
177 "ExecChunkSize value %'RU32 (%#RX32) is out of range (min %'RU32, max %'RU32)",
178 cbChunkExec, cbChunkExec, _1M, _256M);
179
180 /** @cfgm{/IEM/InitialExecMemSize, uint64_t, 1}
181 * The initial executable memory allocator size (per EMT). The value is
182 * rounded up to the nearest chunk size, so 1 byte means one chunk. */
183 uint64_t cbInitialExec = 0;
184 rc = CFGMR3QueryU64Def(pIem, "InitialExecMemSize", &cbInitialExec, 0);
185 AssertLogRelRCReturn(rc, rc);
186 if (cbInitialExec > cbMaxExec)
187 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
188 "InitialExecMemSize value %'RU64 (%#RX64) is out of range (max %'RU64)",
189 cbInitialExec, cbInitialExec, cbMaxExec);
190
191 /** @cfgm{/IEM/NativeRecompileAtUsedCount, uint32_t, 16}
192 * The translation block use count value to do native recompilation at.
193 * Set to zero to disable native recompilation. */
194 uint32_t uTbNativeRecompileAtUsedCount = 16;
195 rc = CFGMR3QueryU32Def(pIem, "NativeRecompileAtUsedCount", &uTbNativeRecompileAtUsedCount, 16);
196 AssertLogRelRCReturn(rc, rc);
197
198#endif /* VBOX_WITH_IEM_RECOMPILER */
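All of the /IEM/* keys queried above live under the VM's CFGM tree. As a hedged sketch only (the helper name is hypothetical; CFGMR3InsertNode and CFGMR3InsertInteger are the standard CFGM insertion calls, and the key name comes from the @cfgm doc above), this is how a caller could seed one of the keys before IEMR3Init() runs so the CFGMR3QueryU32Def() call picks it up instead of its default:

    /* Hypothetical helper, not part of IEMR3.cpp. */
    static int sketchSeedIemMaxTbCount(PVM pVM, uint32_t cMaxTbs)
    {
        PCFGMNODE pIem = CFGMR3GetChild(CFGMR3GetRoot(pVM), "IEM");
        int rc = VINF_SUCCESS;
        if (!pIem)
            rc = CFGMR3InsertNode(CFGMR3GetRoot(pVM), "IEM", &pIem);  /* create /IEM if absent */
        if (RT_SUCCESS(rc))
            rc = CFGMR3InsertInteger(pIem, "MaxTbCount", cMaxTbs);    /* e.g. _1M instead of the _512K default */
        return rc;
    }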
199
200 /*
201 * Initialize per-CPU data and register statistics.
202 */
203#if 1
204 uint64_t const uInitialTlbRevision = UINT64_C(0) - (IEMTLB_REVISION_INCR * 200U);
205 uint64_t const uInitialTlbPhysRev = UINT64_C(0) - (IEMTLB_PHYS_REV_INCR * 100U);
206#else
207 uint64_t const uInitialTlbRevision = UINT64_C(0) + (IEMTLB_REVISION_INCR * 4U);
208 uint64_t const uInitialTlbPhysRev = UINT64_C(0) + (IEMTLB_PHYS_REV_INCR * 4U);
209#endif
210
211 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
212 {
213 PVMCPU const pVCpu = pVM->apCpusR3[idCpu];
214 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
215
216 pVCpu->iem.s.CodeTlb.uTlbRevision = pVCpu->iem.s.DataTlb.uTlbRevision = uInitialTlbRevision;
217#ifndef VBOX_VMM_TARGET_ARMV8
218 pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal = pVCpu->iem.s.DataTlb.uTlbRevisionGlobal = uInitialTlbRevision;
219#endif
220 pVCpu->iem.s.CodeTlb.uTlbPhysRev = pVCpu->iem.s.DataTlb.uTlbPhysRev = uInitialTlbPhysRev;
221#ifndef VBOX_VMM_TARGET_ARMV8
222 pVCpu->iem.s.CodeTlb.NonGlobalLargePageRange.uFirstTag = UINT64_MAX;
223 pVCpu->iem.s.CodeTlb.GlobalLargePageRange.uFirstTag = UINT64_MAX;
224 pVCpu->iem.s.DataTlb.NonGlobalLargePageRange.uFirstTag = UINT64_MAX;
225 pVCpu->iem.s.DataTlb.GlobalLargePageRange.uFirstTag = UINT64_MAX;
226#endif
227
228 /*
229 * Host and guest CPU information.
230 */
231 if (idCpu == 0)
232 {
233 pVCpu->iem.s.enmCpuVendor = CPUMGetGuestCpuVendor(pVM);
234 pVCpu->iem.s.enmHostCpuVendor = CPUMGetHostCpuVendor(pVM);
235#if !defined(VBOX_VMM_TARGET_ARMV8)
236 pVCpu->iem.s.aidxTargetCpuEflFlavour[0] = pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL
237 || pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_VIA /*??*/
238 ? IEMTARGETCPU_EFL_BEHAVIOR_INTEL : IEMTARGETCPU_EFL_BEHAVIOR_AMD;
239# if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
240 if (pVCpu->iem.s.enmCpuVendor == pVCpu->iem.s.enmHostCpuVendor)
241 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = IEMTARGETCPU_EFL_BEHAVIOR_NATIVE;
242 else
243# endif
244 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = pVCpu->iem.s.aidxTargetCpuEflFlavour[0];
245#else
246 pVCpu->iem.s.aidxTargetCpuEflFlavour[0] = IEMTARGETCPU_EFL_BEHAVIOR_NATIVE;
247 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = pVCpu->iem.s.aidxTargetCpuEflFlavour[0];
248#endif
249
250#if !defined(VBOX_VMM_TARGET_ARMV8) && (IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC)
251 switch (pVM->cpum.ro.GuestFeatures.enmMicroarch)
252 {
253 case kCpumMicroarch_Intel_8086: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_8086; break;
254 case kCpumMicroarch_Intel_80186: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_186; break;
255 case kCpumMicroarch_Intel_80286: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_286; break;
256 case kCpumMicroarch_Intel_80386: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_386; break;
257 case kCpumMicroarch_Intel_80486: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_486; break;
258 case kCpumMicroarch_Intel_P5: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_PENTIUM; break;
259 case kCpumMicroarch_Intel_P6: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_PPRO; break;
260 case kCpumMicroarch_NEC_V20: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_V20; break;
261 case kCpumMicroarch_NEC_V30: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_V20; break;
262 default: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_CURRENT; break;
263 }
264 LogRel(("IEM: TargetCpu=%s, Microarch=%s aidxTargetCpuEflFlavour={%d,%d}\n",
265 iemGetTargetCpuName(pVCpu->iem.s.uTargetCpu), CPUMMicroarchName(pVM->cpum.ro.GuestFeatures.enmMicroarch),
266 pVCpu->iem.s.aidxTargetCpuEflFlavour[0], pVCpu->iem.s.aidxTargetCpuEflFlavour[1]));
267#else
268 LogRel(("IEM: Microarch=%s aidxTargetCpuEflFlavour={%d,%d}\n",
269 CPUMMicroarchName(pVM->cpum.ro.GuestFeatures.enmMicroarch),
270 pVCpu->iem.s.aidxTargetCpuEflFlavour[0], pVCpu->iem.s.aidxTargetCpuEflFlavour[1]));
271#endif
272 }
273 else
274 {
275 pVCpu->iem.s.enmCpuVendor = pVM->apCpusR3[0]->iem.s.enmCpuVendor;
276 pVCpu->iem.s.enmHostCpuVendor = pVM->apCpusR3[0]->iem.s.enmHostCpuVendor;
277 pVCpu->iem.s.aidxTargetCpuEflFlavour[0] = pVM->apCpusR3[0]->iem.s.aidxTargetCpuEflFlavour[0];
278 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = pVM->apCpusR3[0]->iem.s.aidxTargetCpuEflFlavour[1];
279#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
280 pVCpu->iem.s.uTargetCpu = pVM->apCpusR3[0]->iem.s.uTargetCpu;
281#endif
282 }
283
284 /*
285 * Mark all buffers free.
286 */
287 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
288 while (iMemMap-- > 0)
289 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
290
291#ifdef VBOX_WITH_IEM_RECOMPILER
292 /*
293 * Distribute recompiler configuration.
294 */
295 pVCpu->iem.s.uTbNativeRecompileAtUsedCount = uTbNativeRecompileAtUsedCount;
296#endif
297
298#ifdef IEM_WITH_TLB_TRACE
299 /*
300 * Allocate trace buffer.
301 */
302 pVCpu->iem.s.idxTlbTraceEntry = 0;
303 pVCpu->iem.s.cTlbTraceEntriesShift = 16;
304 pVCpu->iem.s.paTlbTraceEntries = (PIEMTLBTRACEENTRY)RTMemPageAlloc( RT_BIT_Z(pVCpu->iem.s.cTlbTraceEntriesShift)
305 * sizeof(*pVCpu->iem.s.paTlbTraceEntries));
306 AssertLogRelReturn(pVCpu->iem.s.paTlbTraceEntries, VERR_NO_PAGE_MEMORY);
307#endif
308 }
309
310
311#ifdef VBOX_WITH_IEM_RECOMPILER
312 /*
313 * Initialize the TB allocator and cache (/ hash table).
314 *
315 * This is done by each EMT to try to get more optimal thread/NUMA locality of
316 * the allocations.
317 */
318 rc = VMR3ReqCallWait(pVM, VMCPUID_ALL, (PFNRT)iemTbInit, 6,
319 pVM, cInitialTbs, cMaxTbs, cbInitialExec, cbMaxExec, cbChunkExec);
320 AssertLogRelRCReturn(rc, rc);
321#endif
322
323 /*
324 * Register statistics.
325 */
326 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
327 {
328#if !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_NESTED_HWVIRT_VMX) /* quick fix for stupid structure duplication non-sense */
329 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
330 char szPat[128];
331 RT_NOREF_PV(szPat); /* lazy bird */
332 char szVal[128];
333 RT_NOREF_PV(szVal); /* lazy bird */
334
335 STAMR3RegisterF(pVM, &pVCpu->iem.s.cInstructions, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
336 "Instructions interpreted", "/IEM/CPU%u/cInstructions", idCpu);
337 STAMR3RegisterF(pVM, &pVCpu->iem.s.cLongJumps, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
338 "Number of longjmp calls", "/IEM/CPU%u/cLongJumps", idCpu);
339 STAMR3RegisterF(pVM, &pVCpu->iem.s.cPotentialExits, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
340 "Potential exits", "/IEM/CPU%u/cPotentialExits", idCpu);
341 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetAspectNotImplemented, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
342 "VERR_IEM_ASPECT_NOT_IMPLEMENTED", "/IEM/CPU%u/cRetAspectNotImplemented", idCpu);
343 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInstrNotImplemented, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
344 "VERR_IEM_INSTR_NOT_IMPLEMENTED", "/IEM/CPU%u/cRetInstrNotImplemented", idCpu);
345 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInfStatuses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
346 "Informational statuses returned", "/IEM/CPU%u/cRetInfStatuses", idCpu);
347 STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetErrStatuses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
348 "Error statuses returned", "/IEM/CPU%u/cRetErrStatuses", idCpu);
349 STAMR3RegisterF(pVM, &pVCpu->iem.s.cbWritten, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
350 "Approx bytes written", "/IEM/CPU%u/cbWritten", idCpu);
351 STAMR3RegisterF(pVM, &pVCpu->iem.s.cPendingCommit, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
352 "Times RC/R0 had to postpone instruction committing to ring-3", "/IEM/CPU%u/cPendingCommit", idCpu);
353 STAMR3RegisterF(pVM, &pVCpu->iem.s.cMisalignedAtomics, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
354 "Number of misaligned (for the host) atomic instructions", "/IEM/CPU%u/cMisalignedAtomics", idCpu);
355
356 /* Code TLB: */
357 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.uTlbRevision, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
358 "Code TLB non-global revision", "/IEM/CPU%u/Tlb/Code/RevisionNonGlobal", idCpu);
359 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.uTlbRevisionGlobal, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
360 "Code TLB global revision", "/IEM/CPU%u/Tlb/Code/RevisionGlobal", idCpu);
361 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlsFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
362 "Code TLB non-global flushes", "/IEM/CPU%u/Tlb/Code/RevisionNonGlobalFlushes", idCpu);
363 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlsGlobalFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
364 "Code TLB global flushes", "/IEM/CPU%u/Tlb/Code/RevisionGlobalFlushes", idCpu);
365 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbRevisionRollovers, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
366 "Code TLB revision rollovers", "/IEM/CPU%u/Tlb/Code/RevisionRollovers", idCpu);
367
368 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.CodeTlb.uTlbPhysRev, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
369 "Code TLB physical revision", "/IEM/CPU%u/Tlb/Code/PhysicalRevision", idCpu);
370 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbPhysRevFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
371 "Code TLB revision flushes", "/IEM/CPU%u/Tlb/Code/PhysicalRevisionFlushes", idCpu);
372 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbPhysRevRollovers, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
373 "Code TLB revision rollovers", "/IEM/CPU%u/Tlb/Code/PhysicalRevisionRollovers", idCpu);
374
375 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbGlobalLargePageCurLoads, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
376 "Code TLB global large page loads since flush", "/IEM/CPU%u/Tlb/Code/LargePageGlobalCurLoads", idCpu);
377 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.GlobalLargePageRange.uFirstTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
378 "Code TLB global large page range: lowest tag", "/IEM/CPU%u/Tlb/Code/LargePageGlobalFirstTag", idCpu);
379 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.GlobalLargePageRange.uLastTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
380 "Code TLB global large page range: last tag", "/IEM/CPU%u/Tlb/Code/LargePageGlobalLastTag", idCpu);
381
382 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNonGlobalLargePageCurLoads, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
383 "Code TLB non-global large page loads since flush", "/IEM/CPU%u/Tlb/Code/LargePageNonGlobalCurLoads", idCpu);
384 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.NonGlobalLargePageRange.uFirstTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
385 "Code TLB non-global large page range: lowest tag", "/IEM/CPU%u/Tlb/Code/LargePageNonGlobalFirstTag", idCpu);
386 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.NonGlobalLargePageRange.uLastTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
387 "Code TLB non-global large page range: last tag", "/IEM/CPU%u/Tlb/Code/LargePageNonGlobalLastTag", idCpu);
388
389 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbCoreMisses, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
390 "Code TLB misses", "/IEM/CPU%u/Tlb/Code/Misses", idCpu);
391 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbCoreGlobalLoads, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
392 "Code TLB global loads", "/IEM/CPU%u/Tlb/Code/Misses/GlobalLoads", idCpu);
393 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbSlowCodeReadPath, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
394 "Code TLB slow read path", "/IEM/CPU%u/Tlb/Code/SlowReads", idCpu);
395# ifdef IEM_WITH_TLB_STATISTICS
396 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbCoreHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
397 "Code TLB hits (non-native)", "/IEM/CPU%u/Tlb/Code/Hits/Other", idCpu);
398# if defined(VBOX_WITH_IEM_NATIVE_RECOMPILER)
399 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbHitsForNewPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
400 "Code TLB native hits on new page", "/IEM/CPU%u/Tlb/Code/Hits/New-Page", idCpu);
401 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbHitsForNewPageWithOffset, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
402 "Code TLB native hits on new page /w offset", "/IEM/CPU%u/Tlb/Code/Hits/New-Page-With-Offset", idCpu);
403# endif
404
405 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Code/Hits/*", idCpu);
406 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Code TLB hits",
407 "/IEM/CPU%u/Tlb/Code/Hits", idCpu);
408
409 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/Tlb/Code/Hits|/IEM/CPU%u/Tlb/Code/Misses", idCpu, idCpu);
410 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szVal, "Code TLB lookups (sum of hits and misses)",
411 "/IEM/CPU%u/Tlb/Code/AllLookups", idCpu);
412
413 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/Tlb/Code/Misses", idCpu);
414 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Code/Hits", idCpu);
415 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PPM, szVal, true, szPat,
416 "Code TLB actual miss rate", "/IEM/CPU%u/Tlb/Code/RateMisses", idCpu);
417
418# if defined(VBOX_WITH_IEM_NATIVE_RECOMPILER)
419 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissTag, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
420 "Code TLB misses in native code: Tag mismatch [not directly included grand parent sum]",
421 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/Tag", idCpu);
422 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissFlagsAndPhysRev, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
423 "Code TLB misses in native code: Flags or physical revision mistmatch [not directly included grand parent sum]",
424 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/FlagsAndPhysRev", idCpu);
425 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissAlignment, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
426 "Code TLB misses in native code: Alignment [not directly included grand parent sum]",
427 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/Alignment", idCpu);
428 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissCrossPage, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
429 "Code TLB misses in native code: Cross page [not directly included grand parent sum]",
430 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/CrossPage", idCpu);
431 STAMR3RegisterF(pVM, &pVCpu->iem.s.CodeTlb.cTlbNativeMissNonCanonical, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
432 "Code TLB misses in native code: Non-canonical [not directly included grand parent sum]",
433 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown/NonCanonical", idCpu);
434
435 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbMissesNewPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
436 "Code TLB native misses on new page",
437 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown2/New-Page", idCpu);
438 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCodeTlbMissesNewPageWithOffset, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
439 "Code TLB native misses on new page w/ offset",
440 "/IEM/CPU%u/Tlb/Code/Misses/NativeBreakdown2/New-Page-With-Offset", idCpu);
441# endif
442# endif /* IEM_WITH_TLB_STATISTICS */
443
444 /* Data TLB organized as best we can... */
445 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.uTlbRevision, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
446 "Data TLB non-global revision", "/IEM/CPU%u/Tlb/Data/RevisionNonGlobal", idCpu);
447 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.uTlbRevisionGlobal, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
448 "Data TLB global revision", "/IEM/CPU%u/Tlb/Data/RevisionGlobal", idCpu);
449 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlsFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
450 "Data TLB non-global flushes", "/IEM/CPU%u/Tlb/Data/RevisionNonGlobalFlushes", idCpu);
451 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlsGlobalFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
452 "Data TLB global flushes", "/IEM/CPU%u/Tlb/Data/RevisionGlobalFlushes", idCpu);
453 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbRevisionRollovers, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
454 "Data TLB revision rollovers", "/IEM/CPU%u/Tlb/Data/RevisionRollovers", idCpu);
455
456 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.DataTlb.uTlbPhysRev, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
457 "Data TLB physical revision", "/IEM/CPU%u/Tlb/Data/PhysicalRevision", idCpu);
458 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbPhysRevFlushes, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
459 "Data TLB revision flushes", "/IEM/CPU%u/Tlb/Data/PhysicalRevisionFlushes", idCpu);
460 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbPhysRevRollovers, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE,
461 "Data TLB revision rollovers", "/IEM/CPU%u/Tlb/Data/PhysicalRevisionRollovers", idCpu);
462
463 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbGlobalLargePageCurLoads, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
464 "Data TLB global large page loads since flush", "/IEM/CPU%u/Tlb/Data/LargePageGlobalCurLoads", idCpu);
465 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.GlobalLargePageRange.uFirstTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
466 "Data TLB global large page range: lowest tag", "/IEM/CPU%u/Tlb/Data/LargePageGlobalFirstTag", idCpu);
467 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.GlobalLargePageRange.uLastTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
468 "Data TLB global large page range: last tag", "/IEM/CPU%u/Tlb/Data/LargePageGlobalLastTag", idCpu);
469
470 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNonGlobalLargePageCurLoads, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
471 "Data TLB non-global large page loads since flush", "/IEM/CPU%u/Tlb/Data/LargePageNonGlobalCurLoads", idCpu);
472 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.NonGlobalLargePageRange.uFirstTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
473 "Data TLB non-global large page range: lowest tag", "/IEM/CPU%u/Tlb/Data/LargePageNonGlobalFirstTag", idCpu);
474 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.NonGlobalLargePageRange.uLastTag, STAMTYPE_X64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
475 "Data TLB non-global large page range: last tag", "/IEM/CPU%u/Tlb/Data/LargePageNonGlobalLastTag", idCpu);
476
477 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbCoreMisses, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
478 "Data TLB core misses (iemMemMap, direct iemMemMapJmp (not safe path))",
479 "/IEM/CPU%u/Tlb/Data/Misses/Core", idCpu);
480 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbCoreGlobalLoads, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
481 "Data TLB global loads",
482 "/IEM/CPU%u/Tlb/Data/Misses/Core/GlobalLoads", idCpu);
483 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeReadPath, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
484 "Data TLB safe read path (inline/native misses going to iemMemMapJmp)",
485 "/IEM/CPU%u/Tlb/Data/Misses/Safe/Reads", idCpu);
486 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeWritePath, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
487 "Data TLB safe write path (inline/native misses going to iemMemMapJmp)",
488 "/IEM/CPU%u/Tlb/Data/Misses/Safe/Writes", idCpu);
489 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Misses/*", idCpu);
490 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB misses",
491 "/IEM/CPU%u/Tlb/Data/Misses", idCpu);
492
493 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Misses/Safe/*", idCpu);
494 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB actual safe path calls (read + write)",
495 "/IEM/CPU%u/Tlb/Data/Misses/Safe", idCpu);
496 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
497 "Data TLB hits in iemMemMapJmp - not part of safe-path total",
498 "/IEM/CPU%u/Tlb/Data/Misses/Safe/SubPartHits", idCpu);
499 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeMisses, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
500 "Data TLB misses in iemMemMapJmp - not part of safe-path total",
501 "/IEM/CPU%u/Tlb/Data/Misses/Safe/SubPartMisses", idCpu);
502 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbSafeGlobalLoads, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
503 "Data TLB global loads",
504 "/IEM/CPU%u/Tlb/Data/Misses/Safe/SubPartMisses/GlobalLoads", idCpu);
505
506# ifdef IEM_WITH_TLB_STATISTICS
507# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
508 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissTag, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
509 "Data TLB misses in native code: Tag mismatch [not directly included grand parent sum]",
510 "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/Tag", idCpu);
511 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissFlagsAndPhysRev, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
512 "Data TLB misses in native code: Flags or physical revision mistmatch [not directly included grand parent sum]",
513 "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/FlagsAndPhysRev", idCpu);
514 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissAlignment, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
515 "Data TLB misses in native code: Alignment [not directly included grand parent sum]",
516 "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/Alignment", idCpu);
517 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissCrossPage, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
518 "Data TLB misses in native code: Cross page [not directly included grand parent sum]",
519 "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/CrossPage", idCpu);
520 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbNativeMissNonCanonical, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
521 "Data TLB misses in native code: Non-canonical [not directly included grand parent sum]",
522 "/IEM/CPU%u/Tlb/Data/Misses/NativeBreakdown/NonCanonical", idCpu);
523# endif
524# endif
525
526# ifdef IEM_WITH_TLB_STATISTICS
527 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbCoreHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
528 "Data TLB core hits (iemMemMap, direct iemMemMapJmp (not safe path))",
529 "/IEM/CPU%u/Tlb/Data/Hits/Core", idCpu);
530 STAMR3RegisterF(pVM, &pVCpu->iem.s.DataTlb.cTlbInlineCodeHits, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
531 "Data TLB hits in IEMAllMemRWTmplInline.cpp.h",
532 "/IEM/CPU%u/Tlb/Data/Hits/Inline", idCpu);
533# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
534 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForStack, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
535 "Data TLB native stack access hits",
536 "/IEM/CPU%u/Tlb/Data/Hits/Native/Stack", idCpu);
537 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForFetch, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
538 "Data TLB native data fetch hits",
539 "/IEM/CPU%u/Tlb/Data/Hits/Native/Fetch", idCpu);
540 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForStore, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
541 "Data TLB native data store hits",
542 "/IEM/CPU%u/Tlb/Data/Hits/Native/Store", idCpu);
543 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeTlbHitsForMapped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
544 "Data TLB native mapped data hits",
545 "/IEM/CPU%u/Tlb/Data/Hits/Native/Mapped", idCpu);
546# endif
547 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Hits/*", idCpu);
548 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB hits",
549 "/IEM/CPU%u/Tlb/Data/Hits", idCpu);
550
551# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
552 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Hits/Native/*", idCpu);
553 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Data TLB hits from native code",
554 "/IEM/CPU%u/Tlb/Data/Hits/Native", idCpu);
555# endif
556
557 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/Tlb/Data/Hits|/IEM/CPU%u/Tlb/Data/Misses", idCpu, idCpu);
558 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szVal, "Data TLB lookups (sum of hits and misses)",
559 "/IEM/CPU%u/Tlb/Data/AllLookups", idCpu);
560
561 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/Tlb/Data/Misses", idCpu);
562 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/Tlb/Data/Hits", idCpu);
563 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PPM, szVal, true, szPat,
564 "Data TLB actual miss rate", "/IEM/CPU%u/Tlb/Data/RateMisses", idCpu);
565
566# endif /* IEM_WITH_TLB_STATISTICS */
567
568
569#ifdef VBOX_WITH_IEM_RECOMPILER
570 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.cTbExecNative, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
571 "Executed native translation block", "/IEM/CPU%u/re/cTbExecNative", idCpu);
572 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.cTbExecThreaded, STAMTYPE_U64_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
573 "Executed threaded translation block", "/IEM/CPU%u/re/cTbExecThreaded", idCpu);
574 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedExecBreaks, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
575 "Times threaded TB execution was interrupted/broken off", "/IEM/CPU%u/re/cTbExecThreadedBreaks", idCpu);
576# ifdef VBOX_WITH_STATISTICS
577 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedExecBreaksWithLookup, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
578 "Times threaded TB execution was interrupted/broken off on a call with lookup entries", "/IEM/CPU%u/re/cTbExecThreadedBreaksWithLookup", idCpu);
579 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedExecBreaksWithoutLookup, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
580 "Times threaded TB execution was interrupted/broken off on a call without lookup entries", "/IEM/CPU%u/re/cTbExecThreadedBreaksWithoutLookup", idCpu);
581# endif
582
583 PIEMTBALLOCATOR const pTbAllocator = pVCpu->iem.s.pTbAllocatorR3;
584 STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatAllocs, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,
585 "Translation block allocations", "/IEM/CPU%u/re/cTbAllocCalls", idCpu);
586 STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatFrees, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,
587 "Translation block frees", "/IEM/CPU%u/re/cTbFreeCalls", idCpu);
588# ifdef VBOX_WITH_STATISTICS
589 STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatPrune, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
590 "Time spent freeing up TBs when full at alloc", "/IEM/CPU%u/re/TbPruningAlloc", idCpu);
591# endif
592 STAMR3RegisterF(pVM, (void *)&pTbAllocator->StatPruneNative, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
593 "Time spent freeing up native TBs when out of executable memory", "/IEM/CPU%u/re/ExecMem/TbPruningNative", idCpu);
594 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cAllocatedChunks, STAMTYPE_U16, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
595 "Populated TB chunks", "/IEM/CPU%u/re/cTbChunks", idCpu);
596 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cMaxChunks, STAMTYPE_U8, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
597 "Max number of TB chunks", "/IEM/CPU%u/re/cTbChunksMax", idCpu);
598 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cTotalTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
599 "Total number of TBs in the allocator", "/IEM/CPU%u/re/cTbTotal", idCpu);
600 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cMaxTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
601 "Max total number of TBs allowed", "/IEM/CPU%u/re/cTbTotalMax", idCpu);
602 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cInUseTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
603 "Number of currently allocated TBs", "/IEM/CPU%u/re/cTbAllocated", idCpu);
604 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cNativeTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
605 "Number of currently allocated native TBs", "/IEM/CPU%u/re/cTbAllocatedNative", idCpu);
606 STAMR3RegisterF(pVM, (void *)&pTbAllocator->cThreadedTbs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
607 "Number of currently allocated threaded TBs", "/IEM/CPU%u/re/cTbAllocatedThreaded", idCpu);
608
609 PIEMTBCACHE const pTbCache = pVCpu->iem.s.pTbCacheR3;
610 STAMR3RegisterF(pVM, (void *)&pTbCache->cHash, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
611 "Translation block lookup table size", "/IEM/CPU%u/re/cTbHashTab", idCpu);
612
613 STAMR3RegisterF(pVM, (void *)&pTbCache->cLookupHits, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
614 "Translation block lookup hits", "/IEM/CPU%u/re/cTbLookupHits", idCpu);
615 STAMR3RegisterF(pVM, (void *)&pTbCache->cLookupHitsViaTbLookupTable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
616 "Translation block lookup hits via TB lookup table associated with the previous TB", "/IEM/CPU%u/re/cTbLookupHitsViaTbLookupTable", idCpu);
617 STAMR3RegisterF(pVM, (void *)&pTbCache->cLookupMisses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
618 "Translation block lookup misses", "/IEM/CPU%u/re/cTbLookupMisses", idCpu);
619 STAMR3RegisterF(pVM, (void *)&pTbCache->cCollisions, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
620 "Translation block hash table collisions", "/IEM/CPU%u/re/cTbCollisions", idCpu);
621# ifdef VBOX_WITH_STATISTICS
622 STAMR3RegisterF(pVM, (void *)&pTbCache->StatPrune, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
623 "Time spent shortening collision lists", "/IEM/CPU%u/re/TbPruningCollisions", idCpu);
624# endif
625
626 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbThreadedCalls, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS_PER_TB,
627 "Calls per threaded translation block", "/IEM/CPU%u/re/ThrdCallsPerTb", idCpu);
628 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbInstr, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_INSTR_PER_TB,
629 "Instruction per threaded translation block", "/IEM/CPU%u/re/ThrdInstrPerTb", idCpu);
630 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbLookupEntries, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_INSTR_PER_TB,
631 "TB lookup table entries per threaded translation block", "/IEM/CPU%u/re/ThrdLookupEntriesPerTb", idCpu);
632
633 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckIrqBreaks, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
634 "TB breaks by CheckIrq", "/IEM/CPU%u/re/CheckIrqBreaks", idCpu);
635 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckModeBreaks, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
636 "TB breaks by CheckMode", "/IEM/CPU%u/re/CheckModeBreaks", idCpu);
637 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckBranchMisses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
638 "Branch target misses", "/IEM/CPU%u/re/CheckTbJmpMisses", idCpu);
639 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatCheckNeedCsLimChecking, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
640 "Needing CS.LIM checking TB after branch or on page crossing", "/IEM/CPU%u/re/CheckTbNeedCsLimChecking", idCpu);
641# ifdef VBOX_WITH_STATISTICS
642 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbLoopInTbDetected, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
643 "Detected loop within TB", "/IEM/CPU%u/re/LoopInTbDetected", idCpu);
644# endif
645
646 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeExecMemInstrBufAllocFailed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
647 "Number of times the exec memory allocator failed to allocate a large enough buffer",
648 "/IEM/CPU%u/re/NativeExecMemInstrBufAllocFailed", idCpu);
649
650 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCallsRecompiled, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS_PER_TB,
651 "Number of threaded calls per TB that have been properly recompiled to native code",
652 "/IEM/CPU%u/re/NativeCallsRecompiledPerTb", idCpu);
653 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeCallsThreaded, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS_PER_TB,
654 "Number of threaded calls per TB that could not be recompiler to native code",
655 "/IEM/CPU%u/re/NativeCallsThreadedPerTb", idCpu);
656 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeFullyRecompiledTbs, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
657 "Number of threaded calls that could not be recompiler to native code",
658 "/IEM/CPU%u/re/NativeFullyRecompiledTbs", idCpu);
659
660 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatTbNativeCode, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES_PER_TB,
661 "Size of native code per TB", "/IEM/CPU%u/re/NativeCodeSizePerTb", idCpu);
662 STAMR3RegisterF(pVM, (void *)&pVCpu->iem.s.StatNativeRecompilation, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
663 "Profiling iemNativeRecompile()", "/IEM/CPU%u/re/NativeRecompilation", idCpu);
664
665# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
666# ifdef VBOX_WITH_STATISTICS
667 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFree, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
668 "Number of calls to iemNativeRegAllocFindFree.",
669 "/IEM/CPU%u/re/NativeRegFindFree", idCpu);
670# endif
671 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
672 "Number of times iemNativeRegAllocFindFree needed to free a variable.",
673 "/IEM/CPU%u/re/NativeRegFindFreeVar", idCpu);
674# ifdef VBOX_WITH_STATISTICS
675 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeNoVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
676 "Number of times iemNativeRegAllocFindFree did not needed to free any variables.",
677 "/IEM/CPU%u/re/NativeRegFindFreeNoVar", idCpu);
678 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeLivenessUnshadowed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
679 "Times liveness info freeed up shadowed guest registers in iemNativeRegAllocFindFree.",
680 "/IEM/CPU%u/re/NativeRegFindFreeLivenessUnshadowed", idCpu);
681 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeRegFindFreeLivenessHelped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
682 "Times liveness info helped finding the return register in iemNativeRegAllocFindFree.",
683 "/IEM/CPU%u/re/NativeRegFindFreeLivenessHelped", idCpu);
684
685 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeEflSkippedArithmetic, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
686 "Skipped all status flag updating, arithmetic instructions",
687 "/IEM/CPU%u/re/NativeEFlagsSkippedArithmetic", idCpu);
688 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeEflSkippedLogical, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
689 "Skipped all status flag updating, logical instructions",
690 "/IEM/CPU%u/re/NativeEFlagsSkippedLogical", idCpu);
691
692 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflCfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.CF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsCfSkippable", idCpu);
693 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflPfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.PF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsPfSkippable", idCpu);
694 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflAfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.AF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsAfSkippable", idCpu);
695 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflZfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.ZF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsZfSkippable", idCpu);
696 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflSfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.SF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsSfSkippable", idCpu);
697 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflOfSkippable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Skippable EFLAGS.OF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsOfSkippable", idCpu);
698
699 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflCfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.CF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsCfRequired", idCpu);
700 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflPfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.PF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsPfRequired", idCpu);
701 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflAfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.AF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsAfRequired", idCpu);
702 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflZfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.ZF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsZfRequired", idCpu);
703 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflSfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.SF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsSfRequired", idCpu);
704 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflOfRequired, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Required EFLAGS.OF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsOfRequired", idCpu);
705
706# ifdef IEMLIVENESS_EXTENDED_LAYOUT
707 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflCfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.CF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsCfDelayable", idCpu);
708 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflPfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.PF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsPfDelayable", idCpu);
709 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflAfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.AF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsAfDelayable", idCpu);
710 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflZfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.ZF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsZfDelayable", idCpu);
711 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflSfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.SF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsSfDelayable", idCpu);
712 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeLivenessEflOfDelayable, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Maybe delayable EFLAGS.OF updating", "/IEM/CPU%u/re/NativeLivenessEFlagsOfDelayable", idCpu);
713# endif
714
715 /* Sum up all status bits ('_' is a sorting hack). */
716 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags?fSkippable*", idCpu);
717 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total skippable EFLAGS status bit updating",
718 "/IEM/CPU%u/re/NativeLivenessEFlags_StatusSkippable", idCpu);
719
720 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags?fRequired*", idCpu);
721 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total required EFLAGS status bit updating",
722 "/IEM/CPU%u/re/NativeLivenessEFlags_StatusRequired", idCpu);
723
724# ifdef IEMLIVENESS_EXTENDED_LAYOUT
725 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags?fDelayable*", idCpu);
726 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total potentially delayable EFLAGS status bit updating",
727 "/IEM/CPU%u/re/NativeLivenessEFlags_StatusDelayable", idCpu);
728# endif
729
730 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags?f*", idCpu);
731 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat, "Total EFLAGS status bit events of any kind",
732 "/IEM/CPU%u/re/NativeLivenessEFlags_StatusTotal", idCpu);
733
734 /* Ratio of the status bit skippables. */
735 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlags_StatusTotal", idCpu);
736 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeLivenessEFlags_StatusSkippable", idCpu);
737 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat,
738 "Total skippable EFLAGS status bit updating percentage",
739 "/IEM/CPU%u/re/NativeLivenessEFlags_StatusSkippablePct", idCpu);
740
741# ifdef IEMLIVENESS_EXTENDED_LAYOUT
742 /* Ratio of the status bit delayables. */
743 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeLivenessEFlags_StatusDelayable", idCpu);
744 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat,
745 "Total potentially delayable EFLAGS status bit updating percentage",
746 "/IEM/CPU%u/re/NativeLivenessEFlags_StatusDelayablePct", idCpu);
747# endif
748
749 /* Ratios of individual bits. */
750 size_t const offFlagChar = RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeLivenessEFlagsCf*", idCpu) - 3;
751 Assert(szPat[offFlagChar] == 'C');
752 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativeLivenessEFlagsCfSkippable", idCpu);
753 Assert(szVal[offFlagChar] == 'C');
754 szPat[offFlagChar] = szVal[offFlagChar] = 'C'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.CF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlagsCfSkippablePct", idCpu);
755 szPat[offFlagChar] = szVal[offFlagChar] = 'P'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.PF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlagsPfSkippablePct", idCpu);
756 szPat[offFlagChar] = szVal[offFlagChar] = 'A'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.AF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlagsAfSkippablePct", idCpu);
757 szPat[offFlagChar] = szVal[offFlagChar] = 'Z'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.ZF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlagsZfSkippablePct", idCpu);
758 szPat[offFlagChar] = szVal[offFlagChar] = 'S'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.SF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlagsSfSkippablePct", idCpu);
759 szPat[offFlagChar] = szVal[offFlagChar] = 'O'; STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, true, szPat, "Skippable EFLAGS.OF updating percentage", "/IEM/CPU%u/re/NativeLivenessEFlagsOfSkippablePct", idCpu);
760
761 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativePcUpdateTotal, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Total RIP updates", "/IEM/CPU%u/re/NativePcUpdateTotal", idCpu);
762 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativePcUpdateDelayed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Delayed RIP updates", "/IEM/CPU%u/re/NativePcUpdateDelayed", idCpu);
763
764# ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
765 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFree, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
766 "Number of calls to iemNativeSimdRegAllocFindFree.",
767 "/IEM/CPU%u/re/NativeSimdRegFindFree", idCpu);
768 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
769 "Number of times iemNativeSimdRegAllocFindFree needed to free a variable.",
770 "/IEM/CPU%u/re/NativeSimdRegFindFreeVar", idCpu);
771 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeNoVar, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
772 "Number of times iemNativeSimdRegAllocFindFree did not needed to free any variables.",
773 "/IEM/CPU%u/re/NativeSimdRegFindFreeNoVar", idCpu);
774 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeLivenessUnshadowed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
775 "Times liveness info freeed up shadowed guest registers in iemNativeSimdRegAllocFindFree.",
776 "/IEM/CPU%u/re/NativeSimdRegFindFreeLivenessUnshadowed", idCpu);
777 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeSimdRegFindFreeLivenessHelped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
778 "Times liveness info helped finding the return register in iemNativeSimdRegAllocFindFree.",
779 "/IEM/CPU%u/re/NativeSimdRegFindFreeLivenessHelped", idCpu);
780
781 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeDeviceNotAvailXcptCheckPotential, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Potential IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() checks",
782 "/IEM/CPU%u/re/NativeMaybeDeviceNotAvailXcptCheckPotential", idCpu);
783 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeWaitDeviceNotAvailXcptCheckPotential, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Potential IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() checks",
784 "/IEM/CPU%u/re/NativeMaybeWaitDeviceNotAvailXcptCheckPotential", idCpu);
785 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeSseXcptCheckPotential, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Potential IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() checks",
786 "/IEM/CPU%u/re/NativeMaybeSseXcptCheckPotential", idCpu);
787 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeAvxXcptCheckPotential, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Potential IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() checks",
788 "/IEM/CPU%u/re/NativeMaybeAvxXcptCheckPotential", idCpu);
789
790 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeDeviceNotAvailXcptCheckOmitted, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() checks omitted",
791 "/IEM/CPU%u/re/NativeMaybeDeviceNotAvailXcptCheckOmitted", idCpu);
792 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeWaitDeviceNotAvailXcptCheckOmitted, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() checks omitted",
793 "/IEM/CPU%u/re/NativeMaybeWaitDeviceNotAvailXcptCheckOmitted", idCpu);
794 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeSseXcptCheckOmitted, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() checks omitted",
795 "/IEM/CPU%u/re/NativeMaybeSseXcptCheckOmitted", idCpu);
796 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeMaybeAvxXcptCheckOmitted, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() checks omitted",
797 "/IEM/CPU%u/re/NativeMaybeAvxXcptCheckOmitted", idCpu);
798# endif
799
800 /* Ratio of delayed PC updates. */
801 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativePcUpdateTotal", idCpu);
802 RTStrPrintf(szVal, sizeof(szVal), "/IEM/CPU%u/re/NativePcUpdateDelayed", idCpu);
803 STAMR3RegisterPctOfSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, szVal, false, szPat,
804 "Delayed RIP updating percentage",
805 "/IEM/CPU%u/re/NativePcUpdateDelayed_StatusDelayedPct", idCpu);
806
807 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbFinished, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
808 "Number of times the TB finishes execution completely",
809 "/IEM/CPU%u/re/NativeTbFinished", idCpu);
810# endif /* VBOX_WITH_STATISTICS */
811 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitReturnBreak, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
812 "Number of times the TB finished through the ReturnBreak label",
813 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak", idCpu);
814 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitReturnBreakFF, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
815 "Number of times the TB finished through the ReturnBreak label",
816 "/IEM/CPU%u/re/NativeTbExit/ReturnBreakFF", idCpu);
817 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitReturnWithFlags, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
818 "Number of times the TB finished through the ReturnWithFlags label",
819 "/IEM/CPU%u/re/NativeTbExit/ReturnWithFlags", idCpu);
820 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitReturnOtherStatus, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
821 "Number of times the TB finished with some other status value",
822 "/IEM/CPU%u/re/NativeTbExit/ReturnOtherStatus", idCpu);
823 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitLongJump, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
824 "Number of times the TB finished via long jump / throw",
825 "/IEM/CPU%u/re/NativeTbExit/LongJumps", idCpu);
826 /* These end up returning VINF_IEM_REEXEC_BREAK and are thus already counted under NativeTbExit/ReturnBreak: */
827 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitObsoleteTb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
828 "Number of times the TB finished through the ObsoleteTb label",
829 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/ObsoleteTb", idCpu);
830 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatCheckNeedCsLimChecking, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
831 "Number of times the TB finished through the NeedCsLimChecking label",
832 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/NeedCsLimChecking", idCpu);
833 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatCheckBranchMisses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
834 "Number of times the TB finished through the CheckBranchMiss label",
835 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/CheckBranchMiss", idCpu);
836 /* Raising stuff will either increment NativeTbExit/LongJumps or NativeTbExit/ReturnOtherStatus
837 depending on whether VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP is defined: */
838# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
839# define RAISE_PREFIX "/IEM/CPU%u/re/NativeTbExit/ReturnOtherStatus/"
840# else
841# define RAISE_PREFIX "/IEM/CPU%u/re/NativeTbExit/LongJumps/"
842# endif
843 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseDe, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
844 "Number of times the TB finished raising a #DE exception",
845 RAISE_PREFIX "RaiseDe", idCpu);
846 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseUd, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
847 "Number of times the TB finished raising a #UD exception",
848 RAISE_PREFIX "RaiseUd", idCpu);
849 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseSseRelated, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
850 "Number of times the TB finished raising a SSE related exception",
851 RAISE_PREFIX "RaiseSseRelated", idCpu);
852 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseAvxRelated, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
853 "Number of times the TB finished raising a AVX related exception",
854 RAISE_PREFIX "RaiseAvxRelated", idCpu);
855 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseSseAvxFpRelated, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
856 "Number of times the TB finished raising a SSE/AVX floating point related exception",
857 RAISE_PREFIX "RaiseSseAvxFpRelated", idCpu);
858 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseNm, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
859 "Number of times the TB finished raising a #NM exception",
860 RAISE_PREFIX "RaiseNm", idCpu);
861 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseGp0, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
862 "Number of times the TB finished raising a #GP(0) exception",
863 RAISE_PREFIX "RaiseGp0", idCpu);
864 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseMf, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
865 "Number of times the TB finished raising a #MF exception",
866 RAISE_PREFIX "RaiseMf", idCpu);
867 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitRaiseXf, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
868 "Number of times the TB finished raising a #XF exception",
869 RAISE_PREFIX "RaiseXf", idCpu);
870
871 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1Irq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
872 "Direct linking #1 with IRQ check succeeded",
873 "/IEM/CPU%u/re/NativeTbExit/DirectLinking1Irq", idCpu);
874 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1NoIrq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
875 "Direct linking #1 w/o IRQ check succeeded",
876 "/IEM/CPU%u/re/NativeTbExit/DirectLinking1NoIrq", idCpu);
877# ifdef VBOX_WITH_STATISTICS
878 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1NoTb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
879 "Direct linking #1 failed: No TB in lookup table",
880 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking1NoTb", idCpu);
881 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1MismatchGCPhysPc, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
882 "Direct linking #1 failed: GCPhysPc mismatch",
883 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking1MismatchGCPhysPc", idCpu);
884 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1MismatchFlags, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
885 "Direct linking #1 failed: TB flags mismatch",
886 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking1MismatchFlags", idCpu);
887 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking1PendingIrq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
888 "Direct linking #1 failed: IRQ or FF pending",
889 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking1PendingIrq", idCpu);
890# endif
891
892 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2Irq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
893 "Direct linking #2 with IRQ check succeeded",
894 "/IEM/CPU%u/re/NativeTbExit/DirectLinking2Irq", idCpu);
895 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2NoIrq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
896 "Direct linking #2 w/o IRQ check succeeded",
897 "/IEM/CPU%u/re/NativeTbExit/DirectLinking2NoIrq", idCpu);
898# ifdef VBOX_WITH_STATISTICS
899 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2NoTb, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
900 "Direct linking #2 failed: No TB in lookup table",
901 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking2NoTb", idCpu);
902 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2MismatchGCPhysPc, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
903 "Direct linking #2 failed: GCPhysPc mismatch",
904 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking2MismatchGCPhysPc", idCpu);
905 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2MismatchFlags, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
906 "Direct linking #2 failed: TB flags mismatch",
907 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking2MismatchFlags", idCpu);
908 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeTbExitDirectLinking2PendingIrq, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
909 "Direct linking #2 failed: IRQ or FF pending",
910 "/IEM/CPU%u/re/NativeTbExit/ReturnBreak/DirectLinking2PendingIrq", idCpu);
911# endif
912
913 RTStrPrintf(szPat, sizeof(szPat), "/IEM/CPU%u/re/NativeTbExit/*", idCpu); /* only immediate children, no sub folders */
914 STAMR3RegisterSum(pVM->pUVM, STAMVISIBILITY_ALWAYS, szPat,
915 "Number of times native TB execution finished before the end (not counting thrown memory++ exceptions)",
916 "/IEM/CPU%u/re/NativeTbExit", idCpu);
917
918
919# endif /* VBOX_WITH_IEM_NATIVE_RECOMPILER */
920
921
922# ifdef VBOX_WITH_STATISTICS
923 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatMemMapJmp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
924 "iemMemMapJmp calls", "/IEM/CPU%u/iemMemMapJmp", idCpu);
925 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatMemMapNoJmp, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
926 "iemMemMap calls", "/IEM/CPU%u/iemMemMapNoJmp", idCpu);
927 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatMemBounceBufferCrossPage, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
928 "iemMemBounceBufferMapCrossPage calls", "/IEM/CPU%u/iemMemMapBounceBufferCrossPage", idCpu);
929 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatMemBounceBufferMapPhys, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
930 "iemMemBounceBufferMapPhys calls", "/IEM/CPU%u/iemMemMapBounceBufferMapPhys", idCpu);
931# endif
932
933
934#endif /* VBOX_WITH_IEM_RECOMPILER */
935
936 for (uint32_t i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aStatXcpts); i++)
937 STAMR3RegisterF(pVM, &pVCpu->iem.s.aStatXcpts[i], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
938 "", "/IEM/CPU%u/Exceptions/%02x", idCpu, i);
939 for (uint32_t i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aStatInts); i++)
940 STAMR3RegisterF(pVM, &pVCpu->iem.s.aStatInts[i], STAMTYPE_U32_RESET, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
941 "", "/IEM/CPU%u/Interrupts/%02x", idCpu, i);
942
943# if !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_STATISTICS) && !defined(DOXYGEN_RUNNING)
944 /* Instruction statistics: */
945# define IEM_DO_INSTR_STAT(a_Name, a_szDesc) \
946 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatsRZ.a_Name, STAMTYPE_U32_RESET, STAMVISIBILITY_USED, \
947 STAMUNIT_COUNT, a_szDesc, "/IEM/CPU%u/instr-RZ/" #a_Name, idCpu); \
948 STAMR3RegisterF(pVM, &pVCpu->iem.s.StatsR3.a_Name, STAMTYPE_U32_RESET, STAMVISIBILITY_USED, \
949 STAMUNIT_COUNT, a_szDesc, "/IEM/CPU%u/instr-R3/" #a_Name, idCpu);
950# include "IEMInstructionStatisticsTmpl.h"
951# undef IEM_DO_INSTR_STAT
952# endif
953
954# if defined(VBOX_WITH_STATISTICS) && defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
955 /* Threaded function statistics: */
956 for (unsigned i = 1; i < (unsigned)kIemThreadedFunc_End; i++)
957 STAMR3RegisterF(pVM, &pVCpu->iem.s.acThreadedFuncStats[i], STAMTYPE_U32_RESET, STAMVISIBILITY_USED,
958 STAMUNIT_COUNT, NULL, "/IEM/CPU%u/ThrdFuncs/%s", idCpu, g_apszIemThreadedFunctionStats[i]);
959# endif
960
961#endif /* !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_NESTED_HWVIRT_VMX) - quick fix for stupid structure duplication non-sense */
962 }
963
964#if !defined(VBOX_VMM_TARGET_ARMV8) && defined(VBOX_WITH_NESTED_HWVIRT_VMX)
965 /*
966 * Register the per-VM VMX APIC-access page handler type.
967 */
968 if (pVM->cpum.ro.GuestFeatures.fVmx)
969 {
970 rc = PGMR3HandlerPhysicalTypeRegister(pVM, PGMPHYSHANDLERKIND_ALL, PGMPHYSHANDLER_F_NOT_IN_HM,
971 iemVmxApicAccessPageHandler,
972 "VMX APIC-access page", &pVM->iem.s.hVmxApicAccessPage);
973 AssertLogRelRCReturn(rc, rc);
974 }
975#endif
976
977 DBGFR3InfoRegisterInternalArgv(pVM, "itlb", "IEM instruction TLB", iemR3InfoITlb, DBGFINFO_FLAGS_RUN_ON_EMT);
978 DBGFR3InfoRegisterInternalArgv(pVM, "dtlb", "IEM data TLB", iemR3InfoDTlb, DBGFINFO_FLAGS_RUN_ON_EMT);
979#ifdef IEM_WITH_TLB_TRACE
980 DBGFR3InfoRegisterInternalArgv(pVM, "tlbtrace", "IEM TLB trace log", iemR3InfoTlbTrace, DBGFINFO_FLAGS_RUN_ON_EMT);
981#endif
982#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
983 DBGFR3InfoRegisterInternalArgv(pVM, "tb", "IEM translation block", iemR3InfoTb, DBGFINFO_FLAGS_RUN_ON_EMT);
984#endif
985#ifdef VBOX_WITH_DEBUGGER
986 iemR3RegisterDebuggerCommands();
987#endif
988
989 return VINF_SUCCESS;
990}
991
992
993VMMR3DECL(int) IEMR3Term(PVM pVM)
994{
995 NOREF(pVM);
996#ifdef IEM_WITH_TLB_TRACE
997 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
998 {
999 PVMCPU const pVCpu = pVM->apCpusR3[idCpu];
1000 RTMemPageFree(pVCpu->iem.s.paTlbTraceEntries,
1001 RT_BIT_Z(pVCpu->iem.s.cTlbTraceEntriesShift) * sizeof(*pVCpu->iem.s.paTlbTraceEntries));
1002 }
1003#endif
1004 return VINF_SUCCESS;
1005}
1006
1007
1008VMMR3DECL(void) IEMR3Relocate(PVM pVM)
1009{
1010 RT_NOREF(pVM);
1011}
1012
1013
1014/**
1015 * Gets the name of a generic IEM exit code.
1016 *
1017 * @returns Pointer to read only string if @a uExit is known, otherwise NULL.
1018 * @param uExit The IEM exit to name.
1019 */
1020VMMR3DECL(const char *) IEMR3GetExitName(uint32_t uExit)
1021{
1022 static const char * const s_apszNames[] =
1023 {
1024 /* external interrupts */
1025 "ExtInt 00h", "ExtInt 01h", "ExtInt 02h", "ExtInt 03h", "ExtInt 04h", "ExtInt 05h", "ExtInt 06h", "ExtInt 07h",
1026 "ExtInt 08h", "ExtInt 09h", "ExtInt 0ah", "ExtInt 0bh", "ExtInt 0ch", "ExtInt 0dh", "ExtInt 0eh", "ExtInt 0fh",
1027 "ExtInt 10h", "ExtInt 11h", "ExtInt 12h", "ExtInt 13h", "ExtInt 14h", "ExtInt 15h", "ExtInt 16h", "ExtInt 17h",
1028 "ExtInt 18h", "ExtInt 19h", "ExtInt 1ah", "ExtInt 1bh", "ExtInt 1ch", "ExtInt 1dh", "ExtInt 1eh", "ExtInt 1fh",
1029 "ExtInt 20h", "ExtInt 21h", "ExtInt 22h", "ExtInt 23h", "ExtInt 24h", "ExtInt 25h", "ExtInt 26h", "ExtInt 27h",
1030 "ExtInt 28h", "ExtInt 29h", "ExtInt 2ah", "ExtInt 2bh", "ExtInt 2ch", "ExtInt 2dh", "ExtInt 2eh", "ExtInt 2fh",
1031 "ExtInt 30h", "ExtInt 31h", "ExtInt 32h", "ExtInt 33h", "ExtInt 34h", "ExtInt 35h", "ExtInt 36h", "ExtInt 37h",
1032 "ExtInt 38h", "ExtInt 39h", "ExtInt 3ah", "ExtInt 3bh", "ExtInt 3ch", "ExtInt 3dh", "ExtInt 3eh", "ExtInt 3fh",
1033 "ExtInt 40h", "ExtInt 41h", "ExtInt 42h", "ExtInt 43h", "ExtInt 44h", "ExtInt 45h", "ExtInt 46h", "ExtInt 47h",
1034 "ExtInt 48h", "ExtInt 49h", "ExtInt 4ah", "ExtInt 4bh", "ExtInt 4ch", "ExtInt 4dh", "ExtInt 4eh", "ExtInt 4fh",
1035 "ExtInt 50h", "ExtInt 51h", "ExtInt 52h", "ExtInt 53h", "ExtInt 54h", "ExtInt 55h", "ExtInt 56h", "ExtInt 57h",
1036 "ExtInt 58h", "ExtInt 59h", "ExtInt 5ah", "ExtInt 5bh", "ExtInt 5ch", "ExtInt 5dh", "ExtInt 5eh", "ExtInt 5fh",
1037 "ExtInt 60h", "ExtInt 61h", "ExtInt 62h", "ExtInt 63h", "ExtInt 64h", "ExtInt 65h", "ExtInt 66h", "ExtInt 67h",
1038 "ExtInt 68h", "ExtInt 69h", "ExtInt 6ah", "ExtInt 6bh", "ExtInt 6ch", "ExtInt 6dh", "ExtInt 6eh", "ExtInt 6fh",
1039 "ExtInt 70h", "ExtInt 71h", "ExtInt 72h", "ExtInt 73h", "ExtInt 74h", "ExtInt 75h", "ExtInt 76h", "ExtInt 77h",
1040 "ExtInt 78h", "ExtInt 79h", "ExtInt 7ah", "ExtInt 7bh", "ExtInt 7ch", "ExtInt 7dh", "ExtInt 7eh", "ExtInt 7fh",
1041 "ExtInt 80h", "ExtInt 81h", "ExtInt 82h", "ExtInt 83h", "ExtInt 84h", "ExtInt 85h", "ExtInt 86h", "ExtInt 87h",
1042 "ExtInt 88h", "ExtInt 89h", "ExtInt 8ah", "ExtInt 8bh", "ExtInt 8ch", "ExtInt 8dh", "ExtInt 8eh", "ExtInt 8fh",
1043 "ExtInt 90h", "ExtInt 91h", "ExtInt 92h", "ExtInt 93h", "ExtInt 94h", "ExtInt 95h", "ExtInt 96h", "ExtInt 97h",
1044 "ExtInt 98h", "ExtInt 99h", "ExtInt 9ah", "ExtInt 9bh", "ExtInt 9ch", "ExtInt 9dh", "ExtInt 9eh", "ExtInt 9fh",
1045 "ExtInt a0h", "ExtInt a1h", "ExtInt a2h", "ExtInt a3h", "ExtInt a4h", "ExtInt a5h", "ExtInt a6h", "ExtInt a7h",
1046 "ExtInt a8h", "ExtInt a9h", "ExtInt aah", "ExtInt abh", "ExtInt ach", "ExtInt adh", "ExtInt aeh", "ExtInt afh",
1047 "ExtInt b0h", "ExtInt b1h", "ExtInt b2h", "ExtInt b3h", "ExtInt b4h", "ExtInt b5h", "ExtInt b6h", "ExtInt b7h",
1048 "ExtInt b8h", "ExtInt b9h", "ExtInt bah", "ExtInt bbh", "ExtInt bch", "ExtInt bdh", "ExtInt beh", "ExtInt bfh",
1049 "ExtInt c0h", "ExtInt c1h", "ExtInt c2h", "ExtInt c3h", "ExtInt c4h", "ExtInt c5h", "ExtInt c6h", "ExtInt c7h",
1050 "ExtInt c8h", "ExtInt c9h", "ExtInt cah", "ExtInt cbh", "ExtInt cch", "ExtInt cdh", "ExtInt ceh", "ExtInt cfh",
1051 "ExtInt d0h", "ExtInt d1h", "ExtInt d2h", "ExtInt d3h", "ExtInt d4h", "ExtInt d5h", "ExtInt d6h", "ExtInt d7h",
1052 "ExtInt d8h", "ExtInt d9h", "ExtInt dah", "ExtInt dbh", "ExtInt dch", "ExtInt ddh", "ExtInt deh", "ExtInt dfh",
1053 "ExtInt e0h", "ExtInt e1h", "ExtInt e2h", "ExtInt e3h", "ExtInt e4h", "ExtInt e5h", "ExtInt e6h", "ExtInt e7h",
1054 "ExtInt e8h", "ExtInt e9h", "ExtInt eah", "ExtInt ebh", "ExtInt ech", "ExtInt edh", "ExtInt eeh", "ExtInt efh",
1055 "ExtInt f0h", "ExtInt f1h", "ExtInt f2h", "ExtInt f3h", "ExtInt f4h", "ExtInt f5h", "ExtInt f6h", "ExtInt f7h",
1056 "ExtInt f8h", "ExtInt f9h", "ExtInt fah", "ExtInt fbh", "ExtInt fch", "ExtInt fdh", "ExtInt feh", "ExtInt ffh",
1057 /* software interrupts */
1058 "SoftInt 00h", "SoftInt 01h", "SoftInt 02h", "SoftInt 03h", "SoftInt 04h", "SoftInt 05h", "SoftInt 06h", "SoftInt 07h",
1059 "SoftInt 08h", "SoftInt 09h", "SoftInt 0ah", "SoftInt 0bh", "SoftInt 0ch", "SoftInt 0dh", "SoftInt 0eh", "SoftInt 0fh",
1060 "SoftInt 10h", "SoftInt 11h", "SoftInt 12h", "SoftInt 13h", "SoftInt 14h", "SoftInt 15h", "SoftInt 16h", "SoftInt 17h",
1061 "SoftInt 18h", "SoftInt 19h", "SoftInt 1ah", "SoftInt 1bh", "SoftInt 1ch", "SoftInt 1dh", "SoftInt 1eh", "SoftInt 1fh",
1062 "SoftInt 20h", "SoftInt 21h", "SoftInt 22h", "SoftInt 23h", "SoftInt 24h", "SoftInt 25h", "SoftInt 26h", "SoftInt 27h",
1063 "SoftInt 28h", "SoftInt 29h", "SoftInt 2ah", "SoftInt 2bh", "SoftInt 2ch", "SoftInt 2dh", "SoftInt 2eh", "SoftInt 2fh",
1064 "SoftInt 30h", "SoftInt 31h", "SoftInt 32h", "SoftInt 33h", "SoftInt 34h", "SoftInt 35h", "SoftInt 36h", "SoftInt 37h",
1065 "SoftInt 38h", "SoftInt 39h", "SoftInt 3ah", "SoftInt 3bh", "SoftInt 3ch", "SoftInt 3dh", "SoftInt 3eh", "SoftInt 3fh",
1066 "SoftInt 40h", "SoftInt 41h", "SoftInt 42h", "SoftInt 43h", "SoftInt 44h", "SoftInt 45h", "SoftInt 46h", "SoftInt 47h",
1067 "SoftInt 48h", "SoftInt 49h", "SoftInt 4ah", "SoftInt 4bh", "SoftInt 4ch", "SoftInt 4dh", "SoftInt 4eh", "SoftInt 4fh",
1068 "SoftInt 50h", "SoftInt 51h", "SoftInt 52h", "SoftInt 53h", "SoftInt 54h", "SoftInt 55h", "SoftInt 56h", "SoftInt 57h",
1069 "SoftInt 58h", "SoftInt 59h", "SoftInt 5ah", "SoftInt 5bh", "SoftInt 5ch", "SoftInt 5dh", "SoftInt 5eh", "SoftInt 5fh",
1070 "SoftInt 60h", "SoftInt 61h", "SoftInt 62h", "SoftInt 63h", "SoftInt 64h", "SoftInt 65h", "SoftInt 66h", "SoftInt 67h",
1071 "SoftInt 68h", "SoftInt 69h", "SoftInt 6ah", "SoftInt 6bh", "SoftInt 6ch", "SoftInt 6dh", "SoftInt 6eh", "SoftInt 6fh",
1072 "SoftInt 70h", "SoftInt 71h", "SoftInt 72h", "SoftInt 73h", "SoftInt 74h", "SoftInt 75h", "SoftInt 76h", "SoftInt 77h",
1073 "SoftInt 78h", "SoftInt 79h", "SoftInt 7ah", "SoftInt 7bh", "SoftInt 7ch", "SoftInt 7dh", "SoftInt 7eh", "SoftInt 7fh",
1074 "SoftInt 80h", "SoftInt 81h", "SoftInt 82h", "SoftInt 83h", "SoftInt 84h", "SoftInt 85h", "SoftInt 86h", "SoftInt 87h",
1075 "SoftInt 88h", "SoftInt 89h", "SoftInt 8ah", "SoftInt 8bh", "SoftInt 8ch", "SoftInt 8dh", "SoftInt 8eh", "SoftInt 8fh",
1076 "SoftInt 90h", "SoftInt 91h", "SoftInt 92h", "SoftInt 93h", "SoftInt 94h", "SoftInt 95h", "SoftInt 96h", "SoftInt 97h",
1077 "SoftInt 98h", "SoftInt 99h", "SoftInt 9ah", "SoftInt 9bh", "SoftInt 9ch", "SoftInt 9dh", "SoftInt 9eh", "SoftInt 9fh",
1078 "SoftInt a0h", "SoftInt a1h", "SoftInt a2h", "SoftInt a3h", "SoftInt a4h", "SoftInt a5h", "SoftInt a6h", "SoftInt a7h",
1079 "SoftInt a8h", "SoftInt a9h", "SoftInt aah", "SoftInt abh", "SoftInt ach", "SoftInt adh", "SoftInt aeh", "SoftInt afh",
1080 "SoftInt b0h", "SoftInt b1h", "SoftInt b2h", "SoftInt b3h", "SoftInt b4h", "SoftInt b5h", "SoftInt b6h", "SoftInt b7h",
1081 "SoftInt b8h", "SoftInt b9h", "SoftInt bah", "SoftInt bbh", "SoftInt bch", "SoftInt bdh", "SoftInt beh", "SoftInt bfh",
1082 "SoftInt c0h", "SoftInt c1h", "SoftInt c2h", "SoftInt c3h", "SoftInt c4h", "SoftInt c5h", "SoftInt c6h", "SoftInt c7h",
1083 "SoftInt c8h", "SoftInt c9h", "SoftInt cah", "SoftInt cbh", "SoftInt cch", "SoftInt cdh", "SoftInt ceh", "SoftInt cfh",
1084 "SoftInt d0h", "SoftInt d1h", "SoftInt d2h", "SoftInt d3h", "SoftInt d4h", "SoftInt d5h", "SoftInt d6h", "SoftInt d7h",
1085 "SoftInt d8h", "SoftInt d9h", "SoftInt dah", "SoftInt dbh", "SoftInt dch", "SoftInt ddh", "SoftInt deh", "SoftInt dfh",
1086 "SoftInt e0h", "SoftInt e1h", "SoftInt e2h", "SoftInt e3h", "SoftInt e4h", "SoftInt e5h", "SoftInt e6h", "SoftInt e7h",
1087 "SoftInt e8h", "SoftInt e9h", "SoftInt eah", "SoftInt ebh", "SoftInt ech", "SoftInt edh", "SoftInt eeh", "SoftInt efh",
1088 "SoftInt f0h", "SoftInt f1h", "SoftInt f2h", "SoftInt f3h", "SoftInt f4h", "SoftInt f5h", "SoftInt f6h", "SoftInt f7h",
1089 "SoftInt f8h", "SoftInt f9h", "SoftInt fah", "SoftInt fbh", "SoftInt fch", "SoftInt fdh", "SoftInt feh", "SoftInt ffh",
1090 };
1091 if (uExit < RT_ELEMENTS(s_apszNames))
1092 return s_apszNames[uExit];
1093 return NULL;
1094}
1095
1096
1097/** Worker for iemR3InfoTlbPrintSlots and iemR3InfoTlbPrintAddress. */
1098static void iemR3InfoTlbPrintHeader(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb, bool *pfHeader)
1099{
1100 if (*pfHeader)
1101 return;
1102 pHlp->pfnPrintf(pHlp, "%cTLB for CPU %u:\n", &pVCpu->iem.s.CodeTlb == pTlb ? 'I' : 'D', pVCpu->idCpu);
1103 *pfHeader = true;
1104}
1105
1106
1107#define IEMR3INFOTLB_F_ONLY_VALID RT_BIT_32(0)
1108#define IEMR3INFOTLB_F_CHECK RT_BIT_32(1)
1109
1110/** Worker for iemR3InfoTlbPrintSlots and iemR3InfoTlbPrintAddress. */
1111static void iemR3InfoTlbPrintSlot(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb, IEMTLBENTRY const *pTlbe,
1112 uint32_t uSlot, uint32_t fFlags)
1113{
1114#ifndef VBOX_VMM_TARGET_ARMV8
1115 uint64_t const uTlbRevision = !(uSlot & 1) ? pTlb->uTlbRevision : pTlb->uTlbRevisionGlobal;
1116#else
1117 uint64_t const uTlbRevision = pTlb->uTlbRevision;
1118#endif
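    /* On x86 the TLB keeps each page in an even/odd slot pair: even slots hold
       non-global entries and are validated against uTlbRevision, odd slots hold
       global entries and use uTlbRevisionGlobal. Entries whose revision does not
       match are stale and get skipped when only valid entries were requested. */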
1119 if ((fFlags & IEMR3INFOTLB_F_ONLY_VALID) && (pTlbe->uTag & IEMTLB_REVISION_MASK) != uTlbRevision)
1120 return;
1121
1122 /* The address needs to be sign extended, thus the shifting fun here.*/
1123 RTGCPTR const GCPtr = (RTGCINTPTR)((pTlbe->uTag & ~IEMTLB_REVISION_MASK) << (64 - IEMTLB_TAG_ADDR_WIDTH))
1124 >> (64 - IEMTLB_TAG_ADDR_WIDTH - GUEST_PAGE_SHIFT);
1125 const char *pszValid = "";
1126#ifndef VBOX_VMM_TARGET_ARMV8
1127 char szTmp[128];
1128 if (fFlags & IEMR3INFOTLB_F_CHECK)
1129 {
1130 uint32_t const fInvSlotG = (uint32_t)!(uSlot & 1) << X86_PTE_BIT_G;
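    /* fInvSlotG is the inverted G bit expected for this slot: even (non-global) slots
       should only map pages with the global bit clear, so that bit is OR'ed into the
       expected inverted-flags value used in the comparisons below. */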
1131 PGMPTWALKFAST WalkFast;
1132 int rc = PGMGstQueryPageFast(pVCpu, GCPtr, 0 /*fFlags - don't check or modify anything */, &WalkFast);
1133 pszValid = szTmp;
1134 if (RT_FAILURE(rc))
1135 switch (rc)
1136 {
1137 case VERR_PAGE_TABLE_NOT_PRESENT:
1138 switch ((WalkFast.fFailed & PGM_WALKFAIL_LEVEL_MASK) >> PGM_WALKFAIL_LEVEL_SHIFT)
1139 {
1140 case 1: pszValid = " stale(page-not-present)"; break;
1141 case 2: pszValid = " stale(pd-entry-not-present)"; break;
1142 case 3: pszValid = " stale(pdptr-entry-not-present)"; break;
1143 case 4: pszValid = " stale(pml4-entry-not-present)"; break;
1144 case 5: pszValid = " stale(pml5-entry-not-present)"; break;
1145 default: pszValid = " stale(VERR_PAGE_TABLE_NOT_PRESENT)"; break;
1146 }
1147 break;
1148 default: RTStrPrintf(szTmp, sizeof(szTmp), " stale(rc=%d)", rc); break;
1149 }
1150 else if (WalkFast.GCPhys != pTlbe->GCPhys)
1151 RTStrPrintf(szTmp, sizeof(szTmp), " stale(GCPhys=%RGp)", WalkFast.GCPhys);
1152 else if ( (~WalkFast.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_G | X86_PTE_A | X86_PTE_D))
1153 == ( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1154 | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_ACCESSED))
1155 | fInvSlotG ) )
1156 pszValid = " still-valid";
1157 else if ( (~WalkFast.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_G))
1158 == ((pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER)) | fInvSlotG) )
1159 switch ( (~WalkFast.fEffective & (X86_PTE_A | X86_PTE_D))
1160 ^ (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_ACCESSED)) )
1161 {
1162 case X86_PTE_A:
1163 pszValid = WalkFast.fEffective & X86_PTE_A ? " still-valid(accessed-now)" : " still-valid(accessed-no-more)";
1164 break;
1165 case X86_PTE_D:
1166 pszValid = WalkFast.fEffective & X86_PTE_D ? " still-valid(dirty-now)" : " still-valid(dirty-no-more)";
1167 break;
1168 case X86_PTE_D | X86_PTE_A:
1169 RTStrPrintf(szTmp, sizeof(szTmp), " still-valid(%s%s)",
1170 (~WalkFast.fEffective & X86_PTE_D) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY) ? ""
1171 : WalkFast.fEffective & X86_PTE_D ? "dirty-now" : "dirty-no-more",
1172 (~WalkFast.fEffective & X86_PTE_A) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED) ? ""
1173 : WalkFast.fEffective & X86_PTE_A ? " accessed-now" : " accessed-no-more");
1174 break;
1175 default: AssertFailed(); break;
1176 }
1177 else
1178 RTStrPrintf(szTmp, sizeof(szTmp), " stale(%s%s%s%s%s)",
1179 (~WalkFast.fEffective & X86_PTE_RW) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE) ? ""
1180 : WalkFast.fEffective & X86_PTE_RW ? "writable-now" : "writable-no-more",
1181 (~WalkFast.fEffective & X86_PTE_US) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) ? ""
1182 : WalkFast.fEffective & X86_PTE_US ? " user-now" : " user-no-more",
1183 (~WalkFast.fEffective & X86_PTE_G) == fInvSlotG ? ""
1184 : WalkFast.fEffective & X86_PTE_G ? " global-now" : " global-no-more",
1185 (~WalkFast.fEffective & X86_PTE_D) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY) ? ""
1186 : WalkFast.fEffective & X86_PTE_D ? " dirty-now" : " dirty-no-more",
1187 (~WalkFast.fEffective & X86_PTE_A) == (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED) ? ""
1188 : WalkFast.fEffective & X86_PTE_A ? " accessed-now" : " accessed-no-more");
1189 }
1190#else
1191 RT_NOREF(pVCpu);
1192#endif
1193
1194 pHlp->pfnPrintf(pHlp, IEMTLB_SLOT_FMT ": %s %#018RX64 -> %RGp / %p / %#05x %s%s%s%s%s%s%s/%s%s%s%s/%s %s%s\n",
1195 uSlot,
1196 (pTlbe->uTag & IEMTLB_REVISION_MASK) == uTlbRevision ? "valid "
1197 : (pTlbe->uTag & IEMTLB_REVISION_MASK) == 0 ? "empty "
1198 : "expired",
1199 GCPtr, /* -> */
1200 pTlbe->GCPhys, /* / */ pTlbe->pbMappingR3,
1201 /* / */
1202 (uint32_t)(pTlbe->fFlagsAndPhysRev & ~IEMTLBE_F_PHYS_REV),
1203 /* */
1204 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE ? "R-" : "RW",
1205 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC ? "-" : "X",
1206 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_ACCESSED ? "-" : "A",
1207 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_DIRTY ? "-" : "D",
1208 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER ? "U" : "S",
1209 !(uSlot & 1) ? "-" : "G",
1210 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_LARGE_PAGE ? "2M" : "4K",
1211 /* / */
1212 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_WRITE ? "-" : "w",
1213 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? "-" : "r",
1214 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? "u" : "-",
1215 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_CODE_PAGE ? "c" : "-",
1216 /* / */
1217 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3 ? "N" : "M",
1218 (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pTlb->uTlbPhysRev ? "phys-valid"
1219 : (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == 0 ? "phys-empty" : "phys-expired",
1220 pszValid);
1221}
1222
1223
1224/** Displays one or more TLB slots. */
1225static void iemR3InfoTlbPrintSlots(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb,
1226 uint32_t uSlot, uint32_t cSlots, uint32_t fFlags, bool *pfHeader)
1227{
1228 if (uSlot < RT_ELEMENTS(pTlb->aEntries))
1229 {
1230 if (cSlots > RT_ELEMENTS(pTlb->aEntries))
1231 {
1232 pHlp->pfnPrintf(pHlp, "error: Too many slots given: %u, adjusting it down to the max (%u)\n",
1233 cSlots, RT_ELEMENTS(pTlb->aEntries));
1234 cSlots = RT_ELEMENTS(pTlb->aEntries);
1235 }
1236
1237 iemR3InfoTlbPrintHeader(pVCpu, pHlp, pTlb, pfHeader);
1238 while (cSlots-- > 0)
1239 {
1240 IEMTLBENTRY const Tlbe = pTlb->aEntries[uSlot];
1241 iemR3InfoTlbPrintSlot(pVCpu, pHlp, pTlb, &Tlbe, uSlot, fFlags);
1242 uSlot = (uSlot + 1) % RT_ELEMENTS(pTlb->aEntries);
1243 }
1244 }
1245 else
1246 pHlp->pfnPrintf(pHlp, "error: TLB slot is out of range: %u (%#x), max %u (%#x)\n",
1247 uSlot, uSlot, RT_ELEMENTS(pTlb->aEntries) - 1, RT_ELEMENTS(pTlb->aEntries) - 1);
1248}
1249
1250
1251/** Displays the TLB slot for the given address. */
1252static void iemR3InfoTlbPrintAddress(PVMCPU pVCpu, PCDBGFINFOHLP pHlp, IEMTLB const *pTlb,
1253 uint64_t uAddress, uint32_t fFlags, bool *pfHeader)
1254{
1255 iemR3InfoTlbPrintHeader(pVCpu, pHlp, pTlb, pfHeader);
1256
1257 uint64_t const uTag = IEMTLB_CALC_TAG_NO_REV(uAddress);
1258#ifdef IEMTLB_TAG_TO_EVEN_INDEX
1259 uint32_t const uSlot = IEMTLB_TAG_TO_EVEN_INDEX(uTag);
1260#else
1261 uint32_t const uSlot = IEMTLB_TAG_TO_INDEX(uTag);
1262#endif
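    /* When IEMTLB_TAG_TO_EVEN_INDEX is available the tag maps to the even (non-global)
       slot of a pair; on x86 the odd slot holding the global variant is fetched and
       printed as well. */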
1263 IEMTLBENTRY const TlbeL = pTlb->aEntries[uSlot];
1264#ifndef VBOX_VMM_TARGET_ARMV8
1265 IEMTLBENTRY const TlbeG = pTlb->aEntries[uSlot + 1];
1266#endif
1267 pHlp->pfnPrintf(pHlp, "Address %#RX64 -> slot %#x - %s\n", uAddress, uSlot,
1268 TlbeL.uTag == (uTag | pTlb->uTlbRevision) ? "match"
1269 : (TlbeL.uTag & ~IEMTLB_REVISION_MASK) == uTag ? "expired" : "mismatch");
1270 iemR3InfoTlbPrintSlot(pVCpu, pHlp, pTlb, &TlbeL, uSlot, fFlags);
1271
1272#ifndef VBOX_VMM_TARGET_ARMV8
1273 pHlp->pfnPrintf(pHlp, "Address %#RX64 -> slot %#x - %s\n", uAddress, uSlot + 1,
1274 TlbeG.uTag == (uTag | pTlb->uTlbRevisionGlobal) ? "match"
1275 : (TlbeG.uTag & ~IEMTLB_REVISION_MASK) == uTag ? "expired" : "mismatch");
1276 iemR3InfoTlbPrintSlot(pVCpu, pHlp, pTlb, &TlbeG, uSlot + 1, fFlags);
1277#endif
1278}
1279
1280
1281/** Common worker for iemR3InfoDTlb and iemR3InfoITlb. */
1282static void iemR3InfoTlbCommon(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs, bool fITlb)
1283{
1284 /*
1285 * This is entirely argument driven.
1286 */
1287 static RTGETOPTDEF const s_aOptions[] =
1288 {
1289 { "--cpu", 'c', RTGETOPT_REQ_UINT32 },
1290 { "--vcpu", 'c', RTGETOPT_REQ_UINT32 },
1291 { "--check", 'C', RTGETOPT_REQ_NOTHING },
1292 { "all", 'A', RTGETOPT_REQ_NOTHING },
1293 { "--all", 'A', RTGETOPT_REQ_NOTHING },
1294 { "--address", 'a', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1295 { "--range", 'r', RTGETOPT_REQ_UINT32_PAIR | RTGETOPT_FLAG_HEX },
1296 { "--slot", 's', RTGETOPT_REQ_UINT32 | RTGETOPT_FLAG_HEX },
1297 { "--only-valid", 'v', RTGETOPT_REQ_NOTHING },
1298 };
1299
1300 RTGETOPTSTATE State;
1301 int rc = RTGetOptInit(&State, cArgs, papszArgs, s_aOptions, RT_ELEMENTS(s_aOptions), 0 /*iFirst*/, 0 /*fFlags*/);
1302 AssertRCReturnVoid(rc);
1303
1304 uint32_t cActionArgs = 0;
1305 bool fNeedHeader = true;
1306 bool fAddressMode = true;
1307 uint32_t fFlags = 0;
1308 PVMCPU const pVCpuCall = VMMGetCpu(pVM);
1309 PVMCPU pVCpu = pVCpuCall;
1310 if (!pVCpu)
1311 pVCpu = VMMGetCpuById(pVM, 0);
1312
1313 RTGETOPTUNION ValueUnion;
1314 while ((rc = RTGetOpt(&State, &ValueUnion)) != 0)
1315 {
1316 switch (rc)
1317 {
1318 case 'c':
1319 if (ValueUnion.u32 >= pVM->cCpus)
1320 pHlp->pfnPrintf(pHlp, "error: Invalid CPU ID: %u\n", ValueUnion.u32);
1321 else if (!pVCpu || pVCpu->idCpu != ValueUnion.u32)
1322 {
1323 pVCpu = VMMGetCpuById(pVM, ValueUnion.u32);
1324 fNeedHeader = true;
1325 if (!pVCpuCall || pVCpuCall->idCpu != ValueUnion.u32)
1326 {
1327 pHlp->pfnPrintf(pHlp, "info: Can't check guest PTs when switching to a different VCpu! Targeting %u, on %u.\n",
1328 ValueUnion.u32, pVCpuCall->idCpu);
1329 fFlags &= ~IEMR3INFOTLB_F_CHECK;
1330 }
1331 }
1332 break;
1333
1334 case 'C':
1335 if (!pVCpuCall)
1336 pHlp->pfnPrintf(pHlp, "error: Can't check guest PT when not running on an EMT!\n");
1337 else if (pVCpu != pVCpuCall)
1338 pHlp->pfnPrintf(pHlp, "error: Can't check guest PTs when on a different EMT! Targeting %u, on %u.\n",
1339 pVCpu->idCpu, pVCpuCall->idCpu);
1340 else
1341 fFlags |= IEMR3INFOTLB_F_CHECK;
1342 break;
1343
1344 case 'a':
1345 iemR3InfoTlbPrintAddress(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1346 ValueUnion.u64, fFlags, &fNeedHeader);
1347 fAddressMode = true;
1348 cActionArgs++;
1349 break;
1350
1351 case 'A':
1352 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1353 0, RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries), fFlags, &fNeedHeader);
1354 cActionArgs++;
1355 break;
1356
1357 case 'r':
1358 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1359 ValueUnion.PairU32.uFirst, ValueUnion.PairU32.uSecond, fFlags, &fNeedHeader);
1360 fAddressMode = false;
1361 cActionArgs++;
1362 break;
1363
1364 case 's':
1365 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1366 ValueUnion.u32, 1, fFlags, &fNeedHeader);
1367 fAddressMode = false;
1368 cActionArgs++;
1369 break;
1370
1371 case 'v':
1372 fFlags |= IEMR3INFOTLB_F_ONLY_VALID;
1373 break;
1374
1375 case VINF_GETOPT_NOT_OPTION:
1376 if (fAddressMode)
1377 {
1378 uint64_t uAddr;
1379 rc = RTStrToUInt64Full(ValueUnion.psz, 16, &uAddr);
1380 if (RT_SUCCESS(rc) && rc != VWRN_NUMBER_TOO_BIG)
1381 iemR3InfoTlbPrintAddress(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1382 uAddr, fFlags, &fNeedHeader);
1383 else
1384 pHlp->pfnPrintf(pHlp, "error: Invalid or malformed guest address '%s': %Rrc\n", ValueUnion.psz, rc);
1385 }
1386 else
1387 {
1388 uint32_t uSlot;
1389 rc = RTStrToUInt32Full(ValueUnion.psz, 16, &uSlot);
1390 if (RT_SUCCESS(rc) && rc != VWRN_NUMBER_TOO_BIG)
1391 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1392 uSlot, 1, fFlags, &fNeedHeader);
1393 else
1394 pHlp->pfnPrintf(pHlp, "error: Invalid or malformed TLB slot number '%s': %Rrc\n", ValueUnion.psz, rc);
1395 }
1396 cActionArgs++;
1397 break;
1398
1399 case 'h':
1400 pHlp->pfnPrintf(pHlp,
1401 "Usage: info %ctlb [options]\n"
1402 "\n"
1403 "Options:\n"
1404 " -c<n>, --cpu=<n>, --vcpu=<n>\n"
1405 " Selects the CPU which TLBs we're looking at. Default: Caller / 0\n"
1406 " -C,--check\n"
1407 " Check valid entries against guest PTs.\n"
1408 " -A, --all, all\n"
1409 " Display all the TLB entries (default if no other args).\n"
1410 " -a<virt>, --address=<virt>\n"
1411 " Shows the TLB entry for the specified guest virtual address.\n"
1412 " -r<slot:count>, --range=<slot:count>\n"
1413 " Shows the TLB entries for the specified slot range.\n"
1414 " -s<slot>,--slot=<slot>\n"
1415 " Shows the given TLB slot.\n"
1416 " -v,--only-valid\n"
1417 " Only show valid TLB entries (TAG, not phys)\n"
1418 "\n"
1419 "Non-options are interpreted according to the last -a, -r or -s option,\n"
1420 "defaulting to addresses if not preceeded by any of those options.\n"
1421 , fITlb ? 'i' : 'd');
1422 return;
1423
1424 default:
1425 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
1426 return;
1427 }
1428 }
1429
1430 /*
1431 * If no action taken, we display all (-A) by default.
1432 */
1433 if (!cActionArgs)
1434 iemR3InfoTlbPrintSlots(pVCpu, pHlp, fITlb ? &pVCpu->iem.s.CodeTlb : &pVCpu->iem.s.DataTlb,
1435 0, RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries), fFlags, &fNeedHeader);
1436}
1437
1438
1439/**
1440 * @callback_method_impl{FNDBGFINFOARGVINT, itlb}
1441 */
1442static DECLCALLBACK(void) iemR3InfoITlb(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
1443{
1444 return iemR3InfoTlbCommon(pVM, pHlp, cArgs, papszArgs, true /*fITlb*/);
1445}
1446
1447
1448/**
1449 * @callback_method_impl{FNDBGFINFOARGVINT, dtlb}
1450 */
1451static DECLCALLBACK(void) iemR3InfoDTlb(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
1452{
1453 return iemR3InfoTlbCommon(pVM, pHlp, cArgs, papszArgs, false /*fITlb*/);
1454}
1455
1456
1457#ifdef IEM_WITH_TLB_TRACE
1458/**
1459 * @callback_method_impl{FNDBGFINFOARGVINT, tlbtrace}
1460 */
1461static DECLCALLBACK(void) iemR3InfoTlbTrace(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
1462{
1463 /*
1464 * Parse arguments.
1465 */
1466 static RTGETOPTDEF const s_aOptions[] =
1467 {
1468 { "--cpu", 'c', RTGETOPT_REQ_UINT32 },
1469 { "--vcpu", 'c', RTGETOPT_REQ_UINT32 },
1470 { "--last", 'l', RTGETOPT_REQ_UINT32 },
1471 { "--limit", 'l', RTGETOPT_REQ_UINT32 },
1472 { "--stop-at-global-flush", 'g', RTGETOPT_REQ_NOTHING },
1473 { "--resolve-rip", 'r', RTGETOPT_REQ_NOTHING },
1474 };
1475
1476 RTGETOPTSTATE State;
1477 int rc = RTGetOptInit(&State, cArgs, papszArgs, s_aOptions, RT_ELEMENTS(s_aOptions), 0 /*iFirst*/, 0 /*fFlags*/);
1478 AssertRCReturnVoid(rc);
1479
1480 uint32_t cLimit = UINT32_MAX;
1481 bool fStopAtGlobalFlush = false;
1482 bool fResolveRip = false;
1483 PVMCPU const pVCpuCall = VMMGetCpu(pVM);
1484 PVMCPU pVCpu = pVCpuCall;
1485 if (!pVCpu)
1486 pVCpu = VMMGetCpuById(pVM, 0);
1487
1488 RTGETOPTUNION ValueUnion;
1489 while ((rc = RTGetOpt(&State, &ValueUnion)) != 0)
1490 {
1491 switch (rc)
1492 {
1493 case 'c':
1494 if (ValueUnion.u32 >= pVM->cCpus)
1495 pHlp->pfnPrintf(pHlp, "error: Invalid CPU ID: %u\n", ValueUnion.u32);
1496 else if (!pVCpu || pVCpu->idCpu != ValueUnion.u32)
1497 pVCpu = VMMGetCpuById(pVM, ValueUnion.u32);
1498 break;
1499
1500 case 'l':
1501 cLimit = ValueUnion.u32;
1502 break;
1503
1504 case 'g':
1505 fStopAtGlobalFlush = true;
1506 break;
1507
1508 case 'r':
1509 fResolveRip = true;
1510 break;
1511
1512 case 'h':
1513 pHlp->pfnPrintf(pHlp,
1514 "Usage: info tlbtrace [options] [n]\n"
1515 "\n"
1516 "Options:\n"
1517 " -c<n>, --cpu=<n>, --vcpu=<n>\n"
1518 " Selects the CPU which TLB trace we're looking at. Default: Caller / 0\n"
1519 " [n], -l<n>, --last=<n>\n"
1520 " Limit display to the last N entries. Default: all\n"
1521 " -g, --stop-at-global-flush\n"
1522 " Stop after the first global flush entry.\n"
1523 " -r, --resolve-rip\n"
1524 " Resolve symbols for the flattened RIP addresses.\n"
1525 );
1526 return;
1527
1528 case VINF_GETOPT_NOT_OPTION:
1529 rc = RTStrToUInt32Full(ValueUnion.psz, 0, &cLimit);
1530 if (RT_SUCCESS(rc))
1531 break;
1532 pHlp->pfnPrintf(pHlp, "error: failed to convert '%s' to a number: %Rrc\n", ValueUnion.psz, rc);
1533 return;
1534
1535 default:
1536 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
1537 return;
1538 }
1539 }
1540
1541 /*
1542 * Get the details.
1543 */
1544 AssertReturnVoid(pVCpu);
1545 Assert(pVCpu->iem.s.cTlbTraceEntriesShift <= 28);
1546 uint32_t idx = pVCpu->iem.s.idxTlbTraceEntry;
1547 uint32_t const cShift = RT_MIN(pVCpu->iem.s.cTlbTraceEntriesShift, 28);
1548 uint32_t const fMask = RT_BIT_32(cShift) - 1;
1549 uint32_t cLeft = RT_MIN(RT_MIN(idx, RT_BIT_32(cShift)), cLimit);
1550 PCIEMTLBTRACEENTRY paEntries = pVCpu->iem.s.paTlbTraceEntries;
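    /* The trace log is a power-of-two sized ring buffer: idxTlbTraceEntry is a
       monotonically increasing write counter, so masking it with fMask gives the slot
       and at most min(entries written, buffer size, user limit) entries can be shown,
       walking backwards from the most recent one. */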
1551 if (cLeft && paEntries)
1552 {
1553 /*
1554 * Display the entries.
1555 */
1556 pHlp->pfnPrintf(pHlp, "TLB Trace for CPU %u:\n", pVCpu->idCpu);
1557 while (cLeft-- > 0)
1558 {
1559 PCIEMTLBTRACEENTRY const pCur = &paEntries[--idx & fMask];
1560 const char *pszSymbol = "";
1561 union
1562 {
1563 RTDBGSYMBOL Symbol;
1564 char ach[sizeof(RTDBGSYMBOL) + 32];
1565 } uBuf;
1566 if (fResolveRip)
1567 {
1568 RTGCINTPTR offDisp = 0;
1569 DBGFADDRESS Addr;
1570 rc = DBGFR3AsSymbolByAddr(pVM->pUVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pVM->pUVM, &Addr, pCur->rip),
1571 RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL
1572 | RTDBGSYMADDR_FLAGS_SKIP_ABS
1573 | RTDBGSYMADDR_FLAGS_SKIP_ABS_IN_DEFERRED,
1574 &offDisp, &uBuf.Symbol, NULL);
1575 if (RT_SUCCESS(rc))
1576 {
1577 /* Add displacement. */
1578 if (offDisp)
1579 {
1580 size_t const cchName = strlen(uBuf.Symbol.szName);
1581 char * const pszEndName = &uBuf.Symbol.szName[cchName];
1582 size_t const cbLeft = sizeof(uBuf) - sizeof(uBuf.Symbol) + sizeof(uBuf.Symbol.szName) - cchName;
1583 if (offDisp > 0)
1584 RTStrPrintf(pszEndName, cbLeft, "+%#1RGv", offDisp);
1585 else
1586 RTStrPrintf(pszEndName, cbLeft, "-%#1RGv", -offDisp);
1587 }
1588
1589 /* Put a space before it. */
1590 AssertCompile(RTASSERT_OFFSET_OF(RTDBGSYMBOL, szName) > 0);
1591 char *pszName = uBuf.Symbol.szName;
1592 *--pszName = ' ';
1593 pszSymbol = pszName;
1594 }
1595 }
1596 static const char *s_apszTlbType[2] = { "code", "data" };
1597 static const char *s_apszScanType[4] = { "skipped", "global", "non-global", "both" };
1598 switch (pCur->enmType)
1599 {
1600 case kIemTlbTraceType_InvlPg:
1601 pHlp->pfnPrintf(pHlp, "%u: %016RX64 invlpg %RGv slot=" IEMTLB_SLOT_FMT "%s\n", idx, pCur->rip,
1602 pCur->u64Param, (uint32_t)IEMTLB_ADDR_TO_EVEN_INDEX(pCur->u64Param), pszSymbol);
1603 break;
1604 case kIemTlbTraceType_EvictSlot:
1605 pHlp->pfnPrintf(pHlp, "%u: %016RX64 evict %s slot=" IEMTLB_SLOT_FMT " %RGv (%#RX64) gcphys=%RGp%s\n",
1606 idx, pCur->rip, s_apszTlbType[pCur->bParam & 1], pCur->u32Param,
1607 (RTGCINTPTR)((pCur->u64Param & ~IEMTLB_REVISION_MASK) << (64 - IEMTLB_TAG_ADDR_WIDTH))
1608 >> (64 - IEMTLB_TAG_ADDR_WIDTH - GUEST_PAGE_SHIFT), pCur->u64Param,
1609 pCur->u64Param2, pszSymbol);
1610 break;
1611 case kIemTlbTraceType_LargeEvictSlot:
1612 pHlp->pfnPrintf(pHlp, "%u: %016RX64 large evict %s slot=" IEMTLB_SLOT_FMT " %RGv (%#RX64) gcphys=%RGp%s\n",
1613 idx, pCur->rip, s_apszTlbType[pCur->bParam & 1], pCur->u32Param,
1614 (RTGCINTPTR)((pCur->u64Param & ~IEMTLB_REVISION_MASK) << (64 - IEMTLB_TAG_ADDR_WIDTH))
1615 >> (64 - IEMTLB_TAG_ADDR_WIDTH - GUEST_PAGE_SHIFT), pCur->u64Param,
1616 pCur->u64Param2, pszSymbol);
1617 break;
1618 case kIemTlbTraceType_LargeScan:
1619 pHlp->pfnPrintf(pHlp, "%u: %016RX64 large scan %s %s%s\n", idx, pCur->rip, s_apszTlbType[pCur->bParam & 1],
1620 s_apszScanType[pCur->u32Param & 3], pszSymbol);
1621 break;
1622
1623 case kIemTlbTraceType_Flush:
1624 pHlp->pfnPrintf(pHlp, "%u: %016RX64 flush %s rev=%#RX64%s\n", idx, pCur->rip,
1625 s_apszTlbType[pCur->bParam & 1], pCur->u64Param, pszSymbol);
1626 break;
1627 case kIemTlbTraceType_FlushGlobal:
1628 pHlp->pfnPrintf(pHlp, "%u: %016RX64 flush %s rev=%#RX64 grev=%#RX64%s\n", idx, pCur->rip,
1629 s_apszTlbType[pCur->bParam & 1], pCur->u64Param, pCur->u64Param2, pszSymbol);
1630 if (fStopAtGlobalFlush)
1631 return;
1632 break;
1633 case kIemTlbTraceType_Load:
1634 case kIemTlbTraceType_LoadGlobal:
1635 pHlp->pfnPrintf(pHlp, "%u: %016RX64 %cload %s %RGv slot=" IEMTLB_SLOT_FMT " gcphys=%RGp fTlb=%#RX32%s\n",
1636 idx, pCur->rip,
1637 pCur->enmType == kIemTlbTraceType_LoadGlobal ? 'g' : 'l', s_apszTlbType[pCur->bParam & 1],
1638 pCur->u64Param,
1639 (uint32_t)IEMTLB_ADDR_TO_EVEN_INDEX(pCur->u64Param)
1640 | (pCur->enmType == kIemTlbTraceType_LoadGlobal),
1641 (RTGCPTR)pCur->u64Param2, pCur->u32Param, pszSymbol);
1642 break;
1643
1644 case kIemTlbTraceType_Load_Cr0:
1645 pHlp->pfnPrintf(pHlp, "%u: %016RX64 load cr0 %08RX64 (was %08RX64)%s\n",
1646 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pszSymbol);
1647 break;
1648 case kIemTlbTraceType_Load_Cr3:
1649 pHlp->pfnPrintf(pHlp, "%u: %016RX64 load cr3 %016RX64 (was %016RX64)%s\n",
1650 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pszSymbol);
1651 break;
1652 case kIemTlbTraceType_Load_Cr4:
1653 pHlp->pfnPrintf(pHlp, "%u: %016RX64 load cr4 %08RX64 (was %08RX64)%s\n",
1654 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pszSymbol);
1655 break;
1656 case kIemTlbTraceType_Load_Efer:
1657 pHlp->pfnPrintf(pHlp, "%u: %016RX64 load efer %016RX64 (was %016RX64)%s\n",
1658 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pszSymbol);
1659 break;
1660
1661 case kIemTlbTraceType_Irq:
1662 pHlp->pfnPrintf(pHlp, "%u: %016RX64 irq %#04x flags=%#x eflboth=%#RX64%s\n",
1663 idx, pCur->rip, pCur->bParam, pCur->u32Param,
1664 pCur->u64Param & ((RT_BIT_64(CPUMX86EFLAGS_HW_BITS) - 1) | CPUMX86EFLAGS_INT_MASK_64),
1665 pszSymbol);
1666 break;
1667 case kIemTlbTraceType_Xcpt:
1668 if (pCur->u32Param & IEM_XCPT_FLAGS_CR2)
1669 pHlp->pfnPrintf(pHlp, "%u: %016RX64 xcpt %#04x flags=%#x errcd=%#x cr2=%RX64%s\n",
1670 idx, pCur->rip, pCur->bParam, pCur->u32Param, pCur->u64Param, pCur->u64Param2, pszSymbol);
1671 else if (pCur->u32Param & IEM_XCPT_FLAGS_ERR)
1672 pHlp->pfnPrintf(pHlp, "%u: %016RX64 xcpt %#04x flags=%#x errcd=%#x%s\n",
1673 idx, pCur->rip, pCur->bParam, pCur->u32Param, pCur->u64Param, pszSymbol);
1674 else
1675 pHlp->pfnPrintf(pHlp, "%u: %016RX64 xcpt %#04x flags=%#x%s\n",
1676 idx, pCur->rip, pCur->bParam, pCur->u32Param, pszSymbol);
1677 break;
1678 case kIemTlbTraceType_IRet:
1679 pHlp->pfnPrintf(pHlp, "%u: %016RX64 iret cs:rip=%04x:%016RX64 efl=%08RX32%s\n",
1680 idx, pCur->rip, pCur->u32Param, pCur->u64Param, (uint32_t)pCur->u64Param2, pszSymbol);
1681 break;
1682
1683 case kIemTlbTraceType_Tb_Compile:
1684 pHlp->pfnPrintf(pHlp, "%u: %016RX64 tb comp GCPhysPc=%012RX64%s\n",
1685 idx, pCur->rip, pCur->u64Param, pszSymbol);
1686 break;
1687 case kIemTlbTraceType_Tb_Exec_Threaded:
1688 pHlp->pfnPrintf(pHlp, "%u: %016RX64 tb thrd GCPhysPc=%012RX64 tb=%p used=%u%s\n",
1689 idx, pCur->rip, pCur->u64Param, (uintptr_t)pCur->u64Param2, pCur->u32Param, pszSymbol);
1690 break;
1691 case kIemTlbTraceType_Tb_Exec_Native:
1692 pHlp->pfnPrintf(pHlp, "%u: %016RX64 tb n8ve GCPhysPc=%012RX64 tb=%p used=%u%s\n",
1693 idx, pCur->rip, pCur->u64Param, (uintptr_t)pCur->u64Param2, pCur->u32Param, pszSymbol);
1694 break;
1695
1696 case kIemTlbTraceType_User0:
1697 pHlp->pfnPrintf(pHlp, "%u: %016RX64 user0 %016RX64 %016RX64 %08RX32 %02RX8%s\n",
1698 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pCur->u32Param, pCur->bParam, pszSymbol);
1699 break;
1700 case kIemTlbTraceType_User1:
1701 pHlp->pfnPrintf(pHlp, "%u: %016RX64 user1 %016RX64 %016RX64 %08RX32 %02RX8%s\n",
1702 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pCur->u32Param, pCur->bParam, pszSymbol);
1703 break;
1704 case kIemTlbTraceType_User2:
1705 pHlp->pfnPrintf(pHlp, "%u: %016RX64 user2 %016RX64 %016RX64 %08RX32 %02RX8%s\n",
1706 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pCur->u32Param, pCur->bParam, pszSymbol);
1707 break;
1708 case kIemTlbTraceType_User3:
1709 pHlp->pfnPrintf(pHlp, "%u: %016RX64 user3 %016RX64 %016RX64 %08RX32 %02RX8%s\n",
1710 idx, pCur->rip, pCur->u64Param, pCur->u64Param2, pCur->u32Param, pCur->bParam, pszSymbol);
1711 break;
1712
1713 case kIemTlbTraceType_Invalid:
1714 pHlp->pfnPrintf(pHlp, "%u: Invalid!\n", idx);
1715 break;
1716 }
1717 }
1718 }
1719 else
1720 pHlp->pfnPrintf(pHlp, "No trace entries to display\n");
1721}
1722#endif /* IEM_WITH_TLB_TRACE */
1723
1724#if defined(VBOX_WITH_IEM_RECOMPILER) && !defined(VBOX_VMM_TARGET_ARMV8)
1725/**
1726 * @callback_method_impl{FNDBGFINFOARGVINT, tb}
1727 */
1728static DECLCALLBACK(void) iemR3InfoTb(PVM pVM, PCDBGFINFOHLP pHlp, int cArgs, char **papszArgs)
1729{
1730 /*
1731 * Parse arguments.
1732 */
1733 static RTGETOPTDEF const s_aOptions[] =
1734 {
1735 { "--cpu", 'c', RTGETOPT_REQ_UINT32 },
1736 { "--vcpu", 'c', RTGETOPT_REQ_UINT32 },
1737 { "--addr", 'a', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1738 { "--address", 'a', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1739 { "--phys", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1740 { "--physical", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1741 { "--phys-addr", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1742 { "--phys-address", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1743 { "--physical-address", 'p', RTGETOPT_REQ_UINT64 | RTGETOPT_FLAG_HEX },
1744 { "--flags", 'f', RTGETOPT_REQ_UINT32 | RTGETOPT_FLAG_HEX },
1745 };
1746
1747 RTGETOPTSTATE State;
1748 int rc = RTGetOptInit(&State, cArgs, papszArgs, s_aOptions, RT_ELEMENTS(s_aOptions), 0 /*iFirst*/, 0 /*fFlags*/);
1749 AssertRCReturnVoid(rc);
1750
1751 PVMCPU const pVCpuThis = VMMGetCpu(pVM);
1752 PVMCPU pVCpu = pVCpuThis ? pVCpuThis : VMMGetCpuById(pVM, 0);
1753 RTGCPHYS GCPhysPc = NIL_RTGCPHYS;
1754 RTGCPTR GCVirt = NIL_RTGCPTR;
1755 uint32_t fFlags = UINT32_MAX;
1756
1757 RTGETOPTUNION ValueUnion;
1758 while ((rc = RTGetOpt(&State, &ValueUnion)) != 0)
1759 {
1760 switch (rc)
1761 {
1762 case 'c':
1763 if (ValueUnion.u32 >= pVM->cCpus)
1764 pHlp->pfnPrintf(pHlp, "error: Invalid CPU ID: %u\n", ValueUnion.u32);
1765 else if (!pVCpu || pVCpu->idCpu != ValueUnion.u32)
1766 pVCpu = VMMGetCpuById(pVM, ValueUnion.u32);
1767 break;
1768
1769 case 'a':
1770 GCVirt = ValueUnion.u64;
1771 GCPhysPc = NIL_RTGCPHYS;
1772 break;
1773
1774 case 'p':
1775 GCVirt = NIL_RTGCPTR;
1776 GCPhysPc = ValueUnion.u64;
1777 break;
1778
1779 case 'f':
1780 fFlags = ValueUnion.u32;
1781 break;
1782
1783 case 'h':
1784 pHlp->pfnPrintf(pHlp,
1785 "Usage: info tb [options]\n"
1786 "\n"
1787 "Options:\n"
1788 " -c<n>, --cpu=<n>, --vcpu=<n>\n"
1789 " Selects the CPU which TBs we're looking at. Default: Caller / 0\n"
1790 " -a<virt>, --address=<virt>\n"
1791 " Shows the TB for the specified guest virtual address.\n"
1792 " -p<phys>, --phys=<phys>, --phys-addr=<phys>\n"
1793 " Shows the TB for the specified guest physical address.\n"
1794 " -f<flags>,--flags=<flags>\n"
1795 " The TB flags value (hex) to use when looking up the TB.\n"
1796 "\n"
1797 "The default is to use CS:RIP and derive flags from the CPU mode.\n");
1798 return;
1799
1800 default:
1801 pHlp->pfnGetOptError(pHlp, rc, &ValueUnion, &State);
1802 return;
1803 }
1804 }
1805
1806 /* Currently, only do work on the same EMT. */
1807 if (pVCpu != pVCpuThis)
1808 {
1809 pHlp->pfnPrintf(pHlp, "TODO: Cross EMT calling not supported yet: targeting %u, caller on %d\n",
1810 pVCpu->idCpu, pVCpuThis ? (int)pVCpuThis->idCpu : -1);
1811 return;
1812 }
1813
1814 /*
1815 * Defaults.
1816 */
1817 if (GCPhysPc == NIL_RTGCPHYS)
1818 {
1819 if (GCVirt == NIL_RTGCPTR)
1820 GCVirt = CPUMGetGuestFlatPC(pVCpu);
1821 rc = PGMPhysGCPtr2GCPhys(pVCpu, GCVirt, &GCPhysPc);
1822 if (RT_FAILURE(rc))
1823 {
1824 pHlp->pfnPrintf(pHlp, "Failed to convert %RGv to a guest physical address: %Rrc\n", GCVirt, rc);
1825 return;
1826 }
1827 }
1828 if (fFlags == UINT32_MAX)
1829 {
1830 /* Note! This is duplicating code in IEMAllThrdRecompiler. */
1831 fFlags = iemCalcExecFlags(pVCpu);
1832 if (pVM->cCpus == 1)
1833 fFlags |= IEM_F_X86_DISREGARD_LOCK;
1834 if (CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx))
1835 fFlags |= IEMTB_F_INHIBIT_SHADOW;
1836 if (CPUMAreInterruptsInhibitedByNmiEx(&pVCpu->cpum.GstCtx))
1837 fFlags |= IEMTB_F_INHIBIT_NMI;
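        /* Outside 64-bit mode, request CS limit checks whenever the current page (plus
           the longest possible instruction) could run past the CS limit. */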
1838 if ((IEM_F_MODE_CPUMODE_MASK & fFlags) != IEMMODE_64BIT)
1839 {
1840 int64_t const offFromLim = (int64_t)pVCpu->cpum.GstCtx.cs.u32Limit - (int64_t)pVCpu->cpum.GstCtx.eip;
1841 if (offFromLim < X86_PAGE_SIZE + 16 - (int32_t)(pVCpu->cpum.GstCtx.cs.u64Base & GUEST_PAGE_OFFSET_MASK))
1842 fFlags |= IEMTB_F_CS_LIM_CHECKS;
1843 }
1844 }
1845
1846 /*
1847 * Do the lookup...
1848 *
1849 * Note! This is also duplicating code in IEMAllThrdRecompiler. We don't
1850 * have much choice since we don't want to increase use counters and
1851 * trigger native recompilation.
1852 */
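    /* Only the key bits of the flags take part in the cache hash and in the flag
       compare below. */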
1853 fFlags &= IEMTB_F_KEY_MASK;
1854 IEMTBCACHE const * const pTbCache = pVCpu->iem.s.pTbCacheR3;
1855 uint32_t const idxHash = IEMTBCACHE_HASH(pTbCache, fFlags, GCPhysPc);
1856 PCIEMTB pTb = IEMTBCACHE_PTR_GET_TB(pTbCache->apHash[idxHash]);
1857 while (pTb)
1858 {
1859 if (pTb->GCPhysPc == GCPhysPc)
1860 {
1861 if ((pTb->fFlags & IEMTB_F_KEY_MASK) == fFlags)
1862 {
1863 /// @todo if (pTb->x86.fAttr == (uint16_t)pVCpu->cpum.GstCtx.cs.Attr.u)
1864 break;
1865 }
1866 }
1867 pTb = pTb->pNext;
1868 }
1869 if (!pTb)
1870 pHlp->pfnPrintf(pHlp, "PC=%RGp fFlags=%#x - no TB found on #%u\n", GCPhysPc, fFlags, pVCpu->idCpu);
1871 else
1872 {
1873 /*
1874 * Disassemble according to type.
1875 */
1876 switch (pTb->fFlags & IEMTB_F_TYPE_MASK)
1877 {
1878# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER
1879 case IEMTB_F_TYPE_NATIVE:
1880 pHlp->pfnPrintf(pHlp, "PC=%RGp fFlags=%#x on #%u: %p - native\n", GCPhysPc, fFlags, pVCpu->idCpu, pTb);
1881 iemNativeDisassembleTb(pVCpu, pTb, pHlp);
1882 break;
1883# endif
1884
1885 case IEMTB_F_TYPE_THREADED:
1886 pHlp->pfnPrintf(pHlp, "PC=%RGp fFlags=%#x on #%u: %p - threaded\n", GCPhysPc, fFlags, pVCpu->idCpu, pTb);
1887 iemThreadedDisassembleTb(pTb, pHlp);
1888 break;
1889
1890 default:
1891 pHlp->pfnPrintf(pHlp, "PC=%RGp fFlags=%#x on #%u: %p - ??? %#x\n",
1892 GCPhysPc, fFlags, pVCpu->idCpu, pTb, pTb->fFlags);
1893 break;
1894 }
1895 }
1896}
1897#endif /* VBOX_WITH_IEM_RECOMPILER && !VBOX_VMM_TARGET_ARMV8 */
1898
1899
1900#ifdef VBOX_WITH_DEBUGGER
1901
1902/** @callback_method_impl{FNDBGCCMD,
1903 * Implements the '.iemflushtlb' command. }
1904 */
1905static DECLCALLBACK(int) iemR3DbgFlushTlbs(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
1906{
1907 VMCPUID idCpu = DBGCCmdHlpGetCurrentCpu(pCmdHlp);
1908 PVMCPU pVCpu = VMMR3GetCpuByIdU(pUVM, idCpu);
1909 if (pVCpu)
1910 {
1911 VMR3ReqPriorityCallVoidWaitU(pUVM, idCpu, (PFNRT)IEMTlbInvalidateAllGlobal, 1, pVCpu);
1912 return VINF_SUCCESS;
1913 }
1914 RT_NOREF(paArgs, cArgs);
1915 return DBGCCmdHlpFail(pCmdHlp, pCmd, "failed to get the PVMCPU for the current CPU");
1916}
1917
1918
1919/**
1920 * Called by IEMR3Init to register debugger commands.
1921 */
1922static void iemR3RegisterDebuggerCommands(void)
1923{
1924 /*
1925 * Register debugger commands.
1926 */
1927 static DBGCCMD const s_aCmds[] =
1928 {
1929 {
1930 /* .pszCmd = */ "iemflushtlb",
1931 /* .cArgsMin = */ 0,
1932 /* .cArgsMax = */ 0,
1933 /* .paArgDescs = */ NULL,
1934 /* .cArgDescs = */ 0,
1935 /* .fFlags = */ 0,
1936 /* .pfnHandler = */ iemR3DbgFlushTlbs,
1937 /* .pszSyntax = */ "",
1938 /* .pszDescription = */ "Flushes the code and data TLBs"
1939 },
1940 };
1941
1942 int rc = DBGCRegisterCommands(&s_aCmds[0], RT_ELEMENTS(s_aCmds));
1943 AssertLogRelRC(rc);
1944}
1945
1946#endif /* VBOX_WITH_DEBUGGER */
1947