VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllThrdPython.py@103214

Last change on this file since 103214 was 103214, checked in by vboxsync, 14 months ago

VMM/IEMAllInst*: Liveness analysis, part 7: Flag input & modification annotations checks. bugref:10372

  • Property svn:eol-style set to LF
  • Property svn:executable set to *
  • Property svn:keywords set to Author Date Id Revision
File size: 158.9 KB
 
1#!/usr/bin/env python
2# -*- coding: utf-8 -*-
3# $Id: IEMAllThrdPython.py 103214 2024-02-06 02:03:41Z vboxsync $
4# pylint: disable=invalid-name
5
6"""
7Annotates and generates threaded functions from IEMAllInst*.cpp.h.
8"""
9
10from __future__ import print_function;
11
12__copyright__ = \
13"""
14Copyright (C) 2023 Oracle and/or its affiliates.
15
16This file is part of VirtualBox base platform packages, as
17available from https://www.alldomusa.eu.org.
18
19This program is free software; you can redistribute it and/or
20modify it under the terms of the GNU General Public License
21as published by the Free Software Foundation, in version 3 of the
22License.
23
24This program is distributed in the hope that it will be useful, but
25WITHOUT ANY WARRANTY; without even the implied warranty of
26MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
27General Public License for more details.
28
29You should have received a copy of the GNU General Public License
30along with this program; if not, see <https://www.gnu.org/licenses>.
31
32SPDX-License-Identifier: GPL-3.0-only
33"""
34__version__ = "$Revision: 103214 $"
35
36# Standard python imports.
37import copy;
38import datetime;
39import os;
40import re;
41import sys;
42import argparse;
43from typing import Dict, List;
44
45import IEMAllInstPython as iai;
46import IEMAllN8vePython as ian;
47
48
49# Python 3 hacks:
50if sys.version_info[0] >= 3:
51 long = int; # pylint: disable=redefined-builtin,invalid-name
52
53## Number of generic parameters for the thread functions.
54g_kcThreadedParams = 3;
55
56g_kdTypeInfo = {
57 # type name: (cBits, fSigned, C-type )
58 'int8_t': ( 8, True, 'int8_t', ),
59 'int16_t': ( 16, True, 'int16_t', ),
60 'int32_t': ( 32, True, 'int32_t', ),
61 'int64_t': ( 64, True, 'int64_t', ),
62 'uint4_t': ( 4, False, 'uint8_t', ),
63 'uint8_t': ( 8, False, 'uint8_t', ),
64 'uint16_t': ( 16, False, 'uint16_t', ),
65 'uint32_t': ( 32, False, 'uint32_t', ),
66 'uint64_t': ( 64, False, 'uint64_t', ),
67 'uintptr_t': ( 64, False, 'uintptr_t',), # ASSUMES 64-bit host pointer size.
68 'bool': ( 1, False, 'bool', ),
69 'IEMMODE': ( 2, False, 'IEMMODE', ),
70};
71
72# Only for getTypeBitCount/variables.
73g_kdTypeInfo2 = {
74 'RTFLOAT32U': ( 32, False, 'RTFLOAT32U', ),
75 'RTFLOAT64U': ( 64, False, 'RTFLOAT64U', ),
76 'RTUINT64U': ( 64, False, 'RTUINT64U', ),
77 'RTGCPTR': ( 64, False, 'RTGCPTR', ),
78 'RTPBCD80U': ( 80, False, 'RTPBCD80U', ),
79 'RTFLOAT80U': ( 80, False, 'RTFLOAT80U', ),
80 'IEMFPURESULT': (80+16, False, 'IEMFPURESULT', ),
81 'IEMFPURESULTTWO': (80+16+80,False, 'IEMFPURESULTTWO', ),
82 'RTUINT128U': ( 128, False, 'RTUINT128U', ),
83 'X86XMMREG': ( 128, False, 'X86XMMREG', ),
84 'IEMSSERESULT': ( 128+32, False, 'IEMSSERESULT', ),
85 'IEMMEDIAF2XMMSRC': ( 256, False, 'IEMMEDIAF2XMMSRC',),
86 'RTUINT256U': ( 256, False, 'RTUINT256U', ),
87 'IEMPCMPISTRXSRC': ( 256, False, 'IEMPCMPISTRXSRC', ),
88 'IEMPCMPESTRXSRC': ( 384, False, 'IEMPCMPESTRXSRC', ),
89}; #| g_kdTypeInfo; - requires 3.9
90g_kdTypeInfo2.update(g_kdTypeInfo);
91
92def getTypeBitCount(sType):
93 """
94 Translate a type to size in bits
95 """
96 if sType in g_kdTypeInfo2:
97 return g_kdTypeInfo2[sType][0];
98 if '*' in sType or sType[0] == 'P':
99 return 64;
100 #raise Exception('Unknown type: %s' % (sType,));
101 print('error: Unknown type: %s' % (sType,));
102 return 64;
103
104g_kdIemFieldToType = {
105 # Illegal ones:
106 'offInstrNextByte': ( None, ),
107 'cbInstrBuf': ( None, ),
108 'pbInstrBuf': ( None, ),
109 'uInstrBufPc': ( None, ),
110 'cbInstrBufTotal': ( None, ),
111 'offCurInstrStart': ( None, ),
112 'cbOpcode': ( None, ),
113 'offOpcode': ( None, ),
114 'offModRm': ( None, ),
115 # Okay ones.
116 'fPrefixes': ( 'uint32_t', ),
117 'uRexReg': ( 'uint8_t', ),
118 'uRexB': ( 'uint8_t', ),
119 'uRexIndex': ( 'uint8_t', ),
120 'iEffSeg': ( 'uint8_t', ),
121 'enmEffOpSize': ( 'IEMMODE', ),
122 'enmDefAddrMode': ( 'IEMMODE', ),
123 'enmEffAddrMode': ( 'IEMMODE', ),
124 'enmDefOpSize': ( 'IEMMODE', ),
125 'idxPrefix': ( 'uint8_t', ),
126 'uVex3rdReg': ( 'uint8_t', ),
127 'uVexLength': ( 'uint8_t', ),
128 'fEvexStuff': ( 'uint8_t', ),
129 'uFpuOpcode': ( 'uint16_t', ),
130};
131
132## @name McStmtCond.oIfBranchAnnotation/McStmtCond.oElseBranchAnnotation values
133## @{
134g_ksFinishAnnotation_Advance = 'Advance';
135g_ksFinishAnnotation_RelJmp = 'RelJmp';
136g_ksFinishAnnotation_SetJmp = 'SetJmp';
137g_ksFinishAnnotation_DeferToCImpl = 'DeferToCImpl';
138## @}
139
140
141class ThreadedParamRef(object):
142 """
143 A parameter reference for a threaded function.
144 """
145
146 def __init__(self, sOrgRef, sType, oStmt, iParam = None, offParam = 0, sStdRef = None):
147 ## The name / reference in the original code.
148 self.sOrgRef = sOrgRef;
149 ## Normalized name to deal with spaces in macro invocations and such.
150 self.sStdRef = sStdRef if sStdRef else ''.join(sOrgRef.split());
151 ## Indicates that sOrgRef may not match the parameter.
152 self.fCustomRef = sStdRef is not None;
153 ## The type (typically derived).
154 self.sType = sType;
155 ## The statement making the reference.
156 self.oStmt = oStmt;
157 ## The parameter containing the references. None if implicit.
158 self.iParam = iParam;
159 ## The offset in the parameter of the reference.
160 self.offParam = offParam;
161
162 ## The variable name in the threaded function.
163 self.sNewName = 'x';
164 ## The parameter this is packed into.
165 self.iNewParam = 99;
166 ## The bit offset in iNewParam.
167 self.offNewParam = 1024;
168
169
170class ThreadedFunctionVariation(object):
171 """ Threaded function variation. """
172
173 ## @name Variations.
174 ## These variations will match translation block selection/distinctions as well.
175 ## @{
176 # pylint: disable=line-too-long
177 ksVariation_Default = ''; ##< No variations - only used by IEM_MC_DEFER_TO_CIMPL_X_RET.
178 ksVariation_16 = '_16'; ##< 16-bit mode code (386+).
179 ksVariation_16f = '_16f'; ##< 16-bit mode code (386+), check+clear eflags.
180 ksVariation_16_Jmp = '_16_Jmp'; ##< 16-bit mode code (386+), conditional jump taken.
181 ksVariation_16f_Jmp = '_16f_Jmp'; ##< 16-bit mode code (386+), check+clear eflags, conditional jump taken.
182 ksVariation_16_NoJmp = '_16_NoJmp'; ##< 16-bit mode code (386+), conditional jump not taken.
183 ksVariation_16f_NoJmp = '_16f_NoJmp'; ##< 16-bit mode code (386+), check+clear eflags, conditional jump not taken.
184 ksVariation_16_Addr32 = '_16_Addr32'; ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing.
185 ksVariation_16f_Addr32 = '_16f_Addr32'; ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing, eflags.
186 ksVariation_16_Pre386 = '_16_Pre386'; ##< 16-bit mode code, pre-386 CPU target.
187 ksVariation_16f_Pre386 = '_16f_Pre386'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags.
188 ksVariation_16_Pre386_Jmp = '_16_Pre386_Jmp'; ##< 16-bit mode code, pre-386 CPU target, conditional jump taken.
189 ksVariation_16f_Pre386_Jmp = '_16f_Pre386_Jmp'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags, conditional jump taken.
190 ksVariation_16_Pre386_NoJmp = '_16_Pre386_NoJmp'; ##< 16-bit mode code, pre-386 CPU target, conditional jump not taken.
191 ksVariation_16f_Pre386_NoJmp = '_16f_Pre386_NoJmp'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags, conditional jump not taken.
192 ksVariation_32 = '_32'; ##< 32-bit mode code (386+).
193 ksVariation_32f = '_32f'; ##< 32-bit mode code (386+), check+clear eflags.
194 ksVariation_32_Jmp = '_32_Jmp'; ##< 32-bit mode code (386+), conditional jump taken.
195 ksVariation_32f_Jmp = '_32f_Jmp'; ##< 32-bit mode code (386+), check+clear eflags, conditional jump taken.
196 ksVariation_32_NoJmp = '_32_NoJmp'; ##< 32-bit mode code (386+), conditional jump not taken.
197 ksVariation_32f_NoJmp = '_32f_NoJmp'; ##< 32-bit mode code (386+), check+clear eflags, conditional jump not taken.
198 ksVariation_32_Flat = '_32_Flat'; ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide.
199 ksVariation_32f_Flat = '_32f_Flat'; ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide, eflags.
200 ksVariation_32_Addr16 = '_32_Addr16'; ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing.
201 ksVariation_32f_Addr16 = '_32f_Addr16'; ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing, eflags.
202 ksVariation_64 = '_64'; ##< 64-bit mode code.
203 ksVariation_64f = '_64f'; ##< 64-bit mode code, check+clear eflags.
204 ksVariation_64_Jmp = '_64_Jmp'; ##< 64-bit mode code, conditional jump taken.
205 ksVariation_64f_Jmp = '_64f_Jmp'; ##< 64-bit mode code, check+clear eflags, conditional jump taken.
206 ksVariation_64_NoJmp = '_64_NoJmp'; ##< 64-bit mode code, conditional jump not taken.
207 ksVariation_64f_NoJmp = '_64f_NoJmp'; ##< 64-bit mode code, check+clear eflags, conditional jump not taken.
208 ksVariation_64_FsGs = '_64_FsGs'; ##< 64-bit mode code, with memory accesses via FS or GS.
209 ksVariation_64f_FsGs = '_64f_FsGs'; ##< 64-bit mode code, with memory accesses via FS or GS, check+clear eflags.
210 ksVariation_64_Addr32 = '_64_Addr32'; ##< 64-bit mode code, address size prefixed to 32-bit addressing.
211 ksVariation_64f_Addr32 = '_64f_Addr32'; ##< 64-bit mode code, address size prefixed to 32-bit addressing, c+c eflags.
212 # pylint: enable=line-too-long
213 kasVariations = (
214 ksVariation_Default,
215 ksVariation_16,
216 ksVariation_16f,
217 ksVariation_16_Jmp,
218 ksVariation_16f_Jmp,
219 ksVariation_16_NoJmp,
220 ksVariation_16f_NoJmp,
221 ksVariation_16_Addr32,
222 ksVariation_16f_Addr32,
223 ksVariation_16_Pre386,
224 ksVariation_16f_Pre386,
225 ksVariation_16_Pre386_Jmp,
226 ksVariation_16f_Pre386_Jmp,
227 ksVariation_16_Pre386_NoJmp,
228 ksVariation_16f_Pre386_NoJmp,
229 ksVariation_32,
230 ksVariation_32f,
231 ksVariation_32_Jmp,
232 ksVariation_32f_Jmp,
233 ksVariation_32_NoJmp,
234 ksVariation_32f_NoJmp,
235 ksVariation_32_Flat,
236 ksVariation_32f_Flat,
237 ksVariation_32_Addr16,
238 ksVariation_32f_Addr16,
239 ksVariation_64,
240 ksVariation_64f,
241 ksVariation_64_Jmp,
242 ksVariation_64f_Jmp,
243 ksVariation_64_NoJmp,
244 ksVariation_64f_NoJmp,
245 ksVariation_64_FsGs,
246 ksVariation_64f_FsGs,
247 ksVariation_64_Addr32,
248 ksVariation_64f_Addr32,
249 );
250 kasVariationsWithoutAddress = (
251 ksVariation_16,
252 ksVariation_16f,
253 ksVariation_16_Pre386,
254 ksVariation_16f_Pre386,
255 ksVariation_32,
256 ksVariation_32f,
257 ksVariation_64,
258 ksVariation_64f,
259 );
260 kasVariationsWithoutAddressNot286 = (
261 ksVariation_16,
262 ksVariation_16f,
263 ksVariation_32,
264 ksVariation_32f,
265 ksVariation_64,
266 ksVariation_64f,
267 );
268 kasVariationsWithoutAddressNot286Not64 = (
269 ksVariation_16,
270 ksVariation_16f,
271 ksVariation_32,
272 ksVariation_32f,
273 );
274 kasVariationsWithoutAddressNot64 = (
275 ksVariation_16,
276 ksVariation_16f,
277 ksVariation_16_Pre386,
278 ksVariation_16f_Pre386,
279 ksVariation_32,
280 ksVariation_32f,
281 );
282 kasVariationsWithoutAddressOnly64 = (
283 ksVariation_64,
284 ksVariation_64f,
285 );
286 kasVariationsWithAddress = (
287 ksVariation_16,
288 ksVariation_16f,
289 ksVariation_16_Addr32,
290 ksVariation_16f_Addr32,
291 ksVariation_16_Pre386,
292 ksVariation_16f_Pre386,
293 ksVariation_32,
294 ksVariation_32f,
295 ksVariation_32_Flat,
296 ksVariation_32f_Flat,
297 ksVariation_32_Addr16,
298 ksVariation_32f_Addr16,
299 ksVariation_64,
300 ksVariation_64f,
301 ksVariation_64_FsGs,
302 ksVariation_64f_FsGs,
303 ksVariation_64_Addr32,
304 ksVariation_64f_Addr32,
305 );
306 kasVariationsWithAddressNot286 = (
307 ksVariation_16,
308 ksVariation_16f,
309 ksVariation_16_Addr32,
310 ksVariation_16f_Addr32,
311 ksVariation_32,
312 ksVariation_32f,
313 ksVariation_32_Flat,
314 ksVariation_32f_Flat,
315 ksVariation_32_Addr16,
316 ksVariation_32f_Addr16,
317 ksVariation_64,
318 ksVariation_64f,
319 ksVariation_64_FsGs,
320 ksVariation_64f_FsGs,
321 ksVariation_64_Addr32,
322 ksVariation_64f_Addr32,
323 );
324 kasVariationsWithAddressNot286Not64 = (
325 ksVariation_16,
326 ksVariation_16f,
327 ksVariation_16_Addr32,
328 ksVariation_16f_Addr32,
329 ksVariation_32,
330 ksVariation_32f,
331 ksVariation_32_Flat,
332 ksVariation_32f_Flat,
333 ksVariation_32_Addr16,
334 ksVariation_32f_Addr16,
335 );
336 kasVariationsWithAddressNot64 = (
337 ksVariation_16,
338 ksVariation_16f,
339 ksVariation_16_Addr32,
340 ksVariation_16f_Addr32,
341 ksVariation_16_Pre386,
342 ksVariation_16f_Pre386,
343 ksVariation_32,
344 ksVariation_32f,
345 ksVariation_32_Flat,
346 ksVariation_32f_Flat,
347 ksVariation_32_Addr16,
348 ksVariation_32f_Addr16,
349 );
350 kasVariationsWithAddressOnly64 = (
351 ksVariation_64,
352 ksVariation_64f,
353 ksVariation_64_FsGs,
354 ksVariation_64f_FsGs,
355 ksVariation_64_Addr32,
356 ksVariation_64f_Addr32,
357 );
358 kasVariationsOnlyPre386 = (
359 ksVariation_16_Pre386,
360 ksVariation_16f_Pre386,
361 );
362 kasVariationsEmitOrder = (
363 ksVariation_Default,
364 ksVariation_64,
365 ksVariation_64f,
366 ksVariation_64_Jmp,
367 ksVariation_64f_Jmp,
368 ksVariation_64_NoJmp,
369 ksVariation_64f_NoJmp,
370 ksVariation_64_FsGs,
371 ksVariation_64f_FsGs,
372 ksVariation_32_Flat,
373 ksVariation_32f_Flat,
374 ksVariation_32,
375 ksVariation_32f,
376 ksVariation_32_Jmp,
377 ksVariation_32f_Jmp,
378 ksVariation_32_NoJmp,
379 ksVariation_32f_NoJmp,
380 ksVariation_16,
381 ksVariation_16f,
382 ksVariation_16_Jmp,
383 ksVariation_16f_Jmp,
384 ksVariation_16_NoJmp,
385 ksVariation_16f_NoJmp,
386 ksVariation_16_Addr32,
387 ksVariation_16f_Addr32,
388 ksVariation_16_Pre386,
389 ksVariation_16f_Pre386,
390 ksVariation_16_Pre386_Jmp,
391 ksVariation_16f_Pre386_Jmp,
392 ksVariation_16_Pre386_NoJmp,
393 ksVariation_16f_Pre386_NoJmp,
394 ksVariation_32_Addr16,
395 ksVariation_32f_Addr16,
396 ksVariation_64_Addr32,
397 ksVariation_64f_Addr32,
398 );
399 kdVariationNames = {
400 ksVariation_Default: 'defer-to-cimpl',
401 ksVariation_16: '16-bit',
402 ksVariation_16f: '16-bit w/ eflag checking and clearing',
403 ksVariation_16_Jmp: '16-bit w/ conditional jump taken',
404 ksVariation_16f_Jmp: '16-bit w/ eflag checking and clearing and conditional jump taken',
405 ksVariation_16_NoJmp: '16-bit w/ conditional jump not taken',
406 ksVariation_16f_NoJmp: '16-bit w/ eflag checking and clearing and conditional jump not taken',
407 ksVariation_16_Addr32: '16-bit w/ address prefix (Addr32)',
408 ksVariation_16f_Addr32: '16-bit w/ address prefix (Addr32) and eflag checking and clearing',
409 ksVariation_16_Pre386: '16-bit on pre-386 CPU',
410 ksVariation_16f_Pre386: '16-bit on pre-386 CPU w/ eflag checking and clearing',
411 ksVariation_16_Pre386_Jmp: '16-bit on pre-386 CPU w/ conditional jump taken',
412 ksVariation_16f_Pre386_Jmp: '16-bit on pre-386 CPU w/ eflag checking and clearing and conditional jump taken',
413 ksVariation_16_Pre386_NoJmp: '16-bit on pre-386 CPU w/ conditional jump not taken',
414 ksVariation_16f_Pre386_NoJmp: '16-bit on pre-386 CPU w/ eflag checking and clearing and conditional jump not taken',
415 ksVariation_32: '32-bit',
416 ksVariation_32f: '32-bit w/ eflag checking and clearing',
417 ksVariation_32_Jmp: '32-bit w/ conditional jump taken',
418 ksVariation_32f_Jmp: '32-bit w/ eflag checking and clearing and conditional jump taken',
419 ksVariation_32_NoJmp: '32-bit w/ conditional jump not taken',
420 ksVariation_32f_NoJmp: '32-bit w/ eflag checking and clearing and conditional jump not taken',
421 ksVariation_32_Flat: '32-bit flat and wide open CS, SS, DS and ES',
422 ksVariation_32f_Flat: '32-bit flat and wide open CS, SS, DS and ES w/ eflag checking and clearing',
423 ksVariation_32_Addr16: '32-bit w/ address prefix (Addr16)',
424 ksVariation_32f_Addr16: '32-bit w/ address prefix (Addr16) and eflag checking and clearing',
425 ksVariation_64: '64-bit',
426 ksVariation_64f: '64-bit w/ eflag checking and clearing',
427 ksVariation_64_Jmp: '64-bit w/ conditional jump taken',
428 ksVariation_64f_Jmp: '64-bit w/ eflag checking and clearing and conditional jump taken',
429 ksVariation_64_NoJmp: '64-bit w/ conditional jump not taken',
430 ksVariation_64f_NoJmp: '64-bit w/ eflag checking and clearing and conditional jump not taken',
431 ksVariation_64_FsGs: '64-bit with memory accessed via FS or GS',
432 ksVariation_64f_FsGs: '64-bit with memory accessed via FS or GS and eflag checking and clearing',
433 ksVariation_64_Addr32: '64-bit w/ address prefix (Addr32)',
434 ksVariation_64f_Addr32: '64-bit w/ address prefix (Addr32) and eflag checking and clearing',
435 };
436 kdVariationsWithEflagsCheckingAndClearing = {
437 ksVariation_16f: True,
438 ksVariation_16f_Jmp: True,
439 ksVariation_16f_NoJmp: True,
440 ksVariation_16f_Addr32: True,
441 ksVariation_16f_Pre386: True,
442 ksVariation_16f_Pre386_Jmp: True,
443 ksVariation_16f_Pre386_NoJmp: True,
444 ksVariation_32f: True,
445 ksVariation_32f_Jmp: True,
446 ksVariation_32f_NoJmp: True,
447 ksVariation_32f_Flat: True,
448 ksVariation_32f_Addr16: True,
449 ksVariation_64f: True,
450 ksVariation_64f_Jmp: True,
451 ksVariation_64f_NoJmp: True,
452 ksVariation_64f_FsGs: True,
453 ksVariation_64f_Addr32: True,
454 };
455 kdVariationsOnly64NoFlags = {
456 ksVariation_64: True,
457 ksVariation_64_Jmp: True,
458 ksVariation_64_NoJmp: True,
459 ksVariation_64_FsGs: True,
460 ksVariation_64_Addr32: True,
461 };
462 kdVariationsOnly64WithFlags = {
463 ksVariation_64f: True,
464 ksVariation_64f_Jmp: True,
465 ksVariation_64f_NoJmp: True,
466 ksVariation_64f_FsGs: True,
467 ksVariation_64f_Addr32: True,
468 };
469 kdVariationsOnlyPre386NoFlags = {
470 ksVariation_16_Pre386: True,
471 ksVariation_16_Pre386_Jmp: True,
472 ksVariation_16_Pre386_NoJmp: True,
473 };
474 kdVariationsOnlyPre386WithFlags = {
475 ksVariation_16f_Pre386: True,
476 ksVariation_16f_Pre386_Jmp: True,
477 ksVariation_16f_Pre386_NoJmp: True,
478 };
479 kdVariationsWithFlatAddress = {
480 ksVariation_32_Flat: True,
481 ksVariation_32f_Flat: True,
482 ksVariation_64: True,
483 ksVariation_64f: True,
484 ksVariation_64_Addr32: True,
485 ksVariation_64f_Addr32: True,
486 };
487 kdVariationsWithFlatStackAddress = {
488 ksVariation_32_Flat: True,
489 ksVariation_32f_Flat: True,
490 ksVariation_64: True,
491 ksVariation_64f: True,
492 ksVariation_64_FsGs: True,
493 ksVariation_64f_FsGs: True,
494 ksVariation_64_Addr32: True,
495 ksVariation_64f_Addr32: True,
496 };
497 kdVariationsWithFlat64StackAddress = {
498 ksVariation_64: True,
499 ksVariation_64f: True,
500 ksVariation_64_FsGs: True,
501 ksVariation_64f_FsGs: True,
502 ksVariation_64_Addr32: True,
503 ksVariation_64f_Addr32: True,
504 };
505 kdVariationsWithFlatAddr16 = {
506 ksVariation_16: True,
507 ksVariation_16f: True,
508 ksVariation_16_Pre386: True,
509 ksVariation_16f_Pre386: True,
510 ksVariation_32_Addr16: True,
511 ksVariation_32f_Addr16: True,
512 };
513 kdVariationsWithFlatAddr32No64 = {
514 ksVariation_16_Addr32: True,
515 ksVariation_16f_Addr32: True,
516 ksVariation_32: True,
517 ksVariation_32f: True,
518 ksVariation_32_Flat: True,
519 ksVariation_32f_Flat: True,
520 };
521 kdVariationsWithAddressOnly64 = {
522 ksVariation_64: True,
523 ksVariation_64f: True,
524 ksVariation_64_FsGs: True,
525 ksVariation_64f_FsGs: True,
526 ksVariation_64_Addr32: True,
527 ksVariation_64f_Addr32: True,
528 };
529 kdVariationsWithConditional = {
530 ksVariation_16_Jmp: True,
531 ksVariation_16_NoJmp: True,
532 ksVariation_16_Pre386_Jmp: True,
533 ksVariation_16_Pre386_NoJmp: True,
534 ksVariation_32_Jmp: True,
535 ksVariation_32_NoJmp: True,
536 ksVariation_64_Jmp: True,
537 ksVariation_64_NoJmp: True,
538 ksVariation_16f_Jmp: True,
539 ksVariation_16f_NoJmp: True,
540 ksVariation_16f_Pre386_Jmp: True,
541 ksVariation_16f_Pre386_NoJmp: True,
542 ksVariation_32f_Jmp: True,
543 ksVariation_32f_NoJmp: True,
544 ksVariation_64f_Jmp: True,
545 ksVariation_64f_NoJmp: True,
546 };
547 kdVariationsWithConditionalNoJmp = {
548 ksVariation_16_NoJmp: True,
549 ksVariation_16_Pre386_NoJmp: True,
550 ksVariation_32_NoJmp: True,
551 ksVariation_64_NoJmp: True,
552 ksVariation_16f_NoJmp: True,
553 ksVariation_16f_Pre386_NoJmp: True,
554 ksVariation_32f_NoJmp: True,
555 ksVariation_64f_NoJmp: True,
556 };
557 kdVariationsOnlyPre386 = {
558 ksVariation_16_Pre386: True,
559 ksVariation_16f_Pre386: True,
560 ksVariation_16_Pre386_Jmp: True,
561 ksVariation_16f_Pre386_Jmp: True,
562 ksVariation_16_Pre386_NoJmp: True,
563 ksVariation_16f_Pre386_NoJmp: True,
564 };
565 ## @}
566
567 ## IEM_CIMPL_F_XXX flags that we know.
568 ## The value indicates whether it terminates the TB or not. The goal is to
569 ## improve the recompiler so all but END_TB will be False.
570 ##
571 ## @note iemThreadedRecompilerMcDeferToCImpl0 duplicates info found here.
572 kdCImplFlags = {
573 'IEM_CIMPL_F_MODE': False,
574 'IEM_CIMPL_F_BRANCH_DIRECT': False,
575 'IEM_CIMPL_F_BRANCH_INDIRECT': False,
576 'IEM_CIMPL_F_BRANCH_RELATIVE': False,
577 'IEM_CIMPL_F_BRANCH_FAR': True,
578 'IEM_CIMPL_F_BRANCH_CONDITIONAL': False,
579 # IEM_CIMPL_F_BRANCH_ANY should only be used for testing, so not included here.
580 'IEM_CIMPL_F_BRANCH_STACK': False,
581 'IEM_CIMPL_F_BRANCH_STACK_FAR': False,
582 'IEM_CIMPL_F_RFLAGS': False,
583 'IEM_CIMPL_F_INHIBIT_SHADOW': False,
584 'IEM_CIMPL_F_CHECK_IRQ_AFTER': False,
585 'IEM_CIMPL_F_CHECK_IRQ_BEFORE': False,
586 'IEM_CIMPL_F_CHECK_IRQ_BEFORE_AND_AFTER': False, # (ignore)
587 'IEM_CIMPL_F_STATUS_FLAGS': False,
588 'IEM_CIMPL_F_VMEXIT': False,
589 'IEM_CIMPL_F_FPU': False,
590 'IEM_CIMPL_F_REP': False,
591 'IEM_CIMPL_F_IO': False,
592 'IEM_CIMPL_F_END_TB': True,
593 'IEM_CIMPL_F_XCPT': True,
594 'IEM_CIMPL_F_CALLS_CIMPL': False,
595 'IEM_CIMPL_F_CALLS_AIMPL': False,
596 'IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE': False,
597 };
598
599 def __init__(self, oThreadedFunction, sVariation = ksVariation_Default):
600 self.oParent = oThreadedFunction # type: ThreadedFunction
601 ##< ksVariation_Xxxx.
602 self.sVariation = sVariation
603
604 ## Threaded function parameter references.
605 self.aoParamRefs = [] # type: List[ThreadedParamRef]
606 ## Unique parameter references.
607 self.dParamRefs = {} # type: Dict[str, List[ThreadedParamRef]]
608 ## Minimum number of parameters to the threaded function.
609 self.cMinParams = 0;
610
611 ## List/tree of statements for the threaded function.
612 self.aoStmtsForThreadedFunction = [] # type: List[McStmt]
613
614 ## Function enum number, for verification. Set by generateThreadedFunctionsHeader.
615 self.iEnumValue = -1;
616
617 ## Native recompilation details for this variation.
618 self.oNativeRecomp = None;
619
620 def getIndexName(self):
621 sName = self.oParent.oMcBlock.sFunction;
622 if sName.startswith('iemOp_'):
623 sName = sName[len('iemOp_'):];
624 if self.oParent.oMcBlock.iInFunction == 0:
625 return 'kIemThreadedFunc_%s%s' % ( sName, self.sVariation, );
626 return 'kIemThreadedFunc_%s_%s%s' % ( sName, self.oParent.oMcBlock.iInFunction, self.sVariation, );
627
628 def getThreadedFunctionName(self):
629 sName = self.oParent.oMcBlock.sFunction;
630 if sName.startswith('iemOp_'):
631 sName = sName[len('iemOp_'):];
632 if self.oParent.oMcBlock.iInFunction == 0:
633 return 'iemThreadedFunc_%s%s' % ( sName, self.sVariation, );
634 return 'iemThreadedFunc_%s_%s%s' % ( sName, self.oParent.oMcBlock.iInFunction, self.sVariation, );
635
636 def getNativeFunctionName(self):
637 return 'iemNativeRecompFunc_' + self.getThreadedFunctionName()[len('iemThreadedFunc_'):];
638
639 def getLivenessFunctionName(self):
640 return 'iemNativeLivenessFunc_' + self.getThreadedFunctionName()[len('iemThreadedFunc_'):];
641
642 def getShortName(self):
643 sName = self.oParent.oMcBlock.sFunction;
644 if sName.startswith('iemOp_'):
645 sName = sName[len('iemOp_'):];
646 if self.oParent.oMcBlock.iInFunction == 0:
647 return '%s%s' % ( sName, self.sVariation, );
648 return '%s_%s%s' % ( sName, self.oParent.oMcBlock.iInFunction, self.sVariation, );
649
650 def isWithFlagsCheckingAndClearingVariation(self):
651 """
652 Checks if this is a variation that checks and clears EFLAGS.
653 """
654 return self.sVariation in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing;
655
656 #
657 # Analysis and code morphing.
658 #
659
660 def raiseProblem(self, sMessage):
661 """ Raises a problem. """
662 self.oParent.raiseProblem(sMessage);
663
664 def warning(self, sMessage):
665 """ Emits a warning. """
666 self.oParent.warning(sMessage);
667
668 def analyzeReferenceToType(self, sRef):
669 """
670 Translates a variable or structure reference to a type.
671 Returns type name.
672 Raises exception if unable to figure it out.
673 """
674 ch0 = sRef[0];
675 if ch0 == 'u':
676 if sRef.startswith('u32'):
677 return 'uint32_t';
678 if sRef.startswith('u8') or sRef == 'uReg':
679 return 'uint8_t';
680 if sRef.startswith('u64'):
681 return 'uint64_t';
682 if sRef.startswith('u16'):
683 return 'uint16_t';
684 elif ch0 == 'b':
685 return 'uint8_t';
686 elif ch0 == 'f':
687 return 'bool';
688 elif ch0 == 'i':
689 if sRef.startswith('i8'):
690 return 'int8_t';
691 if sRef.startswith('i16'):
692 return 'int16_t';
693 if sRef.startswith('i32'):
694 return 'int32_t';
695 if sRef.startswith('i64'):
696 return 'int64_t';
697 if sRef in ('iReg', 'iFixedReg', 'iGReg', 'iSegReg', 'iSrcReg', 'iDstReg', 'iCrReg'):
698 return 'uint8_t';
699 elif ch0 == 'p':
700 if sRef.find('-') < 0:
701 return 'uintptr_t';
702 if sRef.startswith('pVCpu->iem.s.'):
703 sField = sRef[len('pVCpu->iem.s.') : ];
704 if sField in g_kdIemFieldToType:
705 if g_kdIemFieldToType[sField][0]:
706 return g_kdIemFieldToType[sField][0];
707 elif ch0 == 'G' and sRef.startswith('GCPtr'):
708 return 'uint64_t';
709 elif ch0 == 'e':
710 if sRef == 'enmEffOpSize':
711 return 'IEMMODE';
712 elif ch0 == 'o':
713 if sRef.startswith('off32'):
714 return 'uint32_t';
715 elif sRef == 'cbFrame': # enter
716 return 'uint16_t';
717 elif sRef == 'cShift': ## @todo risky
718 return 'uint8_t';
719
720 self.raiseProblem('Unknown reference: %s' % (sRef,));
721 return None; # Shut up pylint 2.16.2.
722
723 def analyzeCallToType(self, sFnRef):
724 """
725 Determines the type of an indirect function call.
726 """
727 assert sFnRef[0] == 'p';
728
729 #
730 # Simple?
731 #
732 if sFnRef.find('-') < 0:
733 oDecoderFunction = self.oParent.oMcBlock.oFunction;
734
735 # Try the argument list of the function definition macro invocation first.
736 iArg = 2;
737 while iArg < len(oDecoderFunction.asDefArgs):
738 if sFnRef == oDecoderFunction.asDefArgs[iArg]:
739 return oDecoderFunction.asDefArgs[iArg - 1];
740 iArg += 1;
741
742 # Then check for a line that includes the word and looks like a variable declaration.
743 oRe = re.compile(' +(P[A-Z0-9_]+|const +IEMOP[A-Z0-9_]+ *[*]) +(const |) *' + sFnRef + ' *(;|=)');
744 for sLine in oDecoderFunction.asLines:
745 oMatch = oRe.match(sLine);
746 if oMatch:
747 if not oMatch.group(1).startswith('const'):
748 return oMatch.group(1);
749 return 'PC' + oMatch.group(1)[len('const ') : -1].strip();
750
751 #
752 # Deal with the pImpl->pfnXxx:
753 #
754 elif sFnRef.startswith('pImpl->pfn'):
755 sMember = sFnRef[len('pImpl->') : ];
756 sBaseType = self.analyzeCallToType('pImpl');
757 offBits = sMember.rfind('U') + 1;
758 if sBaseType == 'PCIEMOPBINSIZES': return 'PFNIEMAIMPLBINU' + sMember[offBits:];
759 if sBaseType == 'PCIEMOPUNARYSIZES': return 'PFNIEMAIMPLUNARYU' + sMember[offBits:];
760 if sBaseType == 'PCIEMOPSHIFTSIZES': return 'PFNIEMAIMPLSHIFTU' + sMember[offBits:];
761 if sBaseType == 'PCIEMOPSHIFTDBLSIZES': return 'PFNIEMAIMPLSHIFTDBLU' + sMember[offBits:];
762 if sBaseType == 'PCIEMOPMULDIVSIZES': return 'PFNIEMAIMPLMULDIVU' + sMember[offBits:];
763 if sBaseType == 'PCIEMOPMEDIAF3': return 'PFNIEMAIMPLMEDIAF3U' + sMember[offBits:];
764 if sBaseType == 'PCIEMOPMEDIAOPTF3': return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:];
765 if sBaseType == 'PCIEMOPMEDIAOPTF2': return 'PFNIEMAIMPLMEDIAOPTF2U' + sMember[offBits:];
766 if sBaseType == 'PCIEMOPMEDIAOPTF3IMM8': return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:] + 'IMM8';
767 if sBaseType == 'PCIEMOPBLENDOP': return 'PFNIEMAIMPLAVXBLENDU' + sMember[offBits:];
768
769 self.raiseProblem('Unknown call reference: %s::%s (%s)' % (sBaseType, sMember, sFnRef,));
770
771 self.raiseProblem('Unknown call reference: %s' % (sFnRef,));
772 return None; # Shut up pylint 2.16.2.
773
774 def analyze8BitGRegStmt(self, oStmt):
775 """
776 Gets the 8-bit general purpose register access details of the given statement.
777 ASSUMES the statement is one accessing an 8-bit GREG.
778 """
779 idxReg = 0;
780 if ( oStmt.sName.find('_FETCH_') > 0
781 or oStmt.sName.find('_REF_') > 0
782 or oStmt.sName.find('_TO_LOCAL') > 0):
783 idxReg = 1;
784
785 sRegRef = oStmt.asParams[idxReg];
786 if sRegRef.startswith('IEM_GET_MODRM_RM') or sRegRef.startswith('IEM_GET_MODRM_REG'):
787 asBits = [sBit.strip() for sBit in sRegRef.replace('(', ',').replace(')', '').split(',')];
788 if len(asBits) != 3 or asBits[1] != 'pVCpu' or (asBits[0] != 'IEM_GET_MODRM_RM' and asBits[0] != 'IEM_GET_MODRM_REG'):
789 self.raiseProblem('Unexpected reference: %s (asBits=%s)' % (sRegRef, asBits));
790 sOrgExpr = asBits[0] + '_EX8(pVCpu, ' + asBits[2] + ')';
791 else:
792 sOrgExpr = '((%s) < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX) ? (%s) : (%s) + 12)' % (sRegRef, sRegRef, sRegRef);
793
794 if sRegRef.find('IEM_GET_MODRM_RM') >= 0: sStdRef = 'bRmRm8Ex';
795 elif sRegRef.find('IEM_GET_MODRM_REG') >= 0: sStdRef = 'bRmReg8Ex';
796 elif sRegRef == 'X86_GREG_xAX': sStdRef = 'bGregXAx8Ex';
797 elif sRegRef == 'X86_GREG_xCX': sStdRef = 'bGregXCx8Ex';
798 elif sRegRef == 'X86_GREG_xSP': sStdRef = 'bGregXSp8Ex';
799 elif sRegRef == 'iFixedReg': sStdRef = 'bFixedReg8Ex';
800 else:
801 self.warning('analyze8BitGRegStmt: sRegRef=%s -> bOther8Ex; %s %s; sOrgExpr=%s'
802 % (sRegRef, oStmt.sName, oStmt.asParams, sOrgExpr,));
803 sStdRef = 'bOther8Ex';
804
805 #print('analyze8BitGRegStmt: %s %s; sRegRef=%s\n -> idxReg=%s sOrgExpr=%s sStdRef=%s'
806 # % (oStmt.sName, oStmt.asParams, sRegRef, idxReg, sOrgExpr, sStdRef));
807 return (idxReg, sOrgExpr, sStdRef);
808
809
810 ## Maps memory related MCs to info for FLAT conversion.
811 ## This is used in 64-bit and flat 32-bit variants to skip the unnecessary
812 ## segmentation checking for every memory access. Only applied to access
813 ## via ES, DS and SS. FS, GS and CS get the full segmentation treatment;
814 ## the latter (CS) is just to keep things simple (we could safely fetch via
815 ## it, but only in 64-bit mode could we safely write via it, IIRC).
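## Each entry: ( index of the iEffSeg parameter that gets dropped, name of the FLAT MC variant to use ).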
816 kdMemMcToFlatInfo = {
817 'IEM_MC_FETCH_MEM_U8': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8' ),
818 'IEM_MC_FETCH_MEM16_U8': ( 1, 'IEM_MC_FETCH_MEM16_FLAT_U8' ),
819 'IEM_MC_FETCH_MEM32_U8': ( 1, 'IEM_MC_FETCH_MEM32_FLAT_U8' ),
820 'IEM_MC_FETCH_MEM_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16' ),
821 'IEM_MC_FETCH_MEM_U16_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_DISP' ),
822 'IEM_MC_FETCH_MEM_I16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I16' ),
823 'IEM_MC_FETCH_MEM_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32' ),
824 'IEM_MC_FETCH_MEM_U32_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_DISP' ),
825 'IEM_MC_FETCH_MEM_I32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I32' ),
826 'IEM_MC_FETCH_MEM_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64' ),
827 'IEM_MC_FETCH_MEM_U64_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_DISP' ),
828 'IEM_MC_FETCH_MEM_U64_ALIGN_U128': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_ALIGN_U128' ),
829 'IEM_MC_FETCH_MEM_I64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I64' ),
830 'IEM_MC_FETCH_MEM_R32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R32' ),
831 'IEM_MC_FETCH_MEM_R64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R64' ),
832 'IEM_MC_FETCH_MEM_R80': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R80' ),
833 'IEM_MC_FETCH_MEM_D80': ( 1, 'IEM_MC_FETCH_MEM_FLAT_D80' ),
834 'IEM_MC_FETCH_MEM_U128': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128' ),
835 'IEM_MC_FETCH_MEM_U128_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_NO_AC' ),
836 'IEM_MC_FETCH_MEM_U128_ALIGN_SSE': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_ALIGN_SSE' ),
837 'IEM_MC_FETCH_MEM_XMM': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM' ),
838 'IEM_MC_FETCH_MEM_XMM_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_NO_AC' ),
839 'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE' ),
840 'IEM_MC_FETCH_MEM_XMM_U32': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32' ),
841 'IEM_MC_FETCH_MEM_XMM_U64': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64' ),
842 'IEM_MC_FETCH_MEM_U256': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256' ),
843 'IEM_MC_FETCH_MEM_U256_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_NO_AC' ),
844 'IEM_MC_FETCH_MEM_U256_ALIGN_AVX': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_ALIGN_AVX' ),
845 'IEM_MC_FETCH_MEM_YMM': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM' ),
846 'IEM_MC_FETCH_MEM_YMM_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_NO_AC' ),
847 'IEM_MC_FETCH_MEM_YMM_ALIGN_AVX': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX' ),
848 'IEM_MC_FETCH_MEM_U8_ZX_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U16' ),
849 'IEM_MC_FETCH_MEM_U8_ZX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U32' ),
850 'IEM_MC_FETCH_MEM_U8_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U64' ),
851 'IEM_MC_FETCH_MEM_U16_ZX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U32' ),
852 'IEM_MC_FETCH_MEM_U16_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U64' ),
853 'IEM_MC_FETCH_MEM_U32_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_ZX_U64' ),
854 'IEM_MC_FETCH_MEM_U8_SX_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U16' ),
855 'IEM_MC_FETCH_MEM_U8_SX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U32' ),
856 'IEM_MC_FETCH_MEM_U8_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U64' ),
857 'IEM_MC_FETCH_MEM_U16_SX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U32' ),
858 'IEM_MC_FETCH_MEM_U16_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U64' ),
859 'IEM_MC_FETCH_MEM_U32_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_SX_U64' ),
860 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128': ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128' ),
861 'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE_AND_XREG_XMM' ),
862 'IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM': ( 3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32_AND_XREG_XMM' ),
863 'IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM': ( 3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64_AND_XREG_XMM' ),
864 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64':
865 ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_RAX_RDX_U64' ),
866 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64':
867 ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64' ),
868 'IEM_MC_STORE_MEM_U8': ( 0, 'IEM_MC_STORE_MEM_FLAT_U8' ),
869 'IEM_MC_STORE_MEM_U16': ( 0, 'IEM_MC_STORE_MEM_FLAT_U16' ),
870 'IEM_MC_STORE_MEM_U32': ( 0, 'IEM_MC_STORE_MEM_FLAT_U32' ),
871 'IEM_MC_STORE_MEM_U64': ( 0, 'IEM_MC_STORE_MEM_FLAT_U64' ),
872 'IEM_MC_STORE_MEM_U8_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U8_CONST' ),
873 'IEM_MC_STORE_MEM_U16_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U16_CONST' ),
874 'IEM_MC_STORE_MEM_U32_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U32_CONST' ),
875 'IEM_MC_STORE_MEM_U64_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U64_CONST' ),
876 'IEM_MC_STORE_MEM_U128': ( 0, 'IEM_MC_STORE_MEM_FLAT_U128' ),
877 'IEM_MC_STORE_MEM_U128_ALIGN_SSE': ( 0, 'IEM_MC_STORE_MEM_FLAT_U128_ALIGN_SSE' ),
878 'IEM_MC_STORE_MEM_U256': ( 0, 'IEM_MC_STORE_MEM_FLAT_U256' ),
879 'IEM_MC_STORE_MEM_U256_ALIGN_AVX': ( 0, 'IEM_MC_STORE_MEM_FLAT_U256_ALIGN_AVX' ),
880 'IEM_MC_MEM_MAP_D80_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_D80_WO' ),
881 'IEM_MC_MEM_MAP_I16_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_I16_WO' ),
882 'IEM_MC_MEM_MAP_I32_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_I32_WO' ),
883 'IEM_MC_MEM_MAP_I64_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_I64_WO' ),
884 'IEM_MC_MEM_MAP_R32_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_R32_WO' ),
885 'IEM_MC_MEM_MAP_R64_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_R64_WO' ),
886 'IEM_MC_MEM_MAP_R80_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_R80_WO' ),
887 'IEM_MC_MEM_MAP_U8_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_ATOMIC' ),
888 'IEM_MC_MEM_MAP_U8_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RW' ),
889 'IEM_MC_MEM_MAP_U8_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RO' ),
890 'IEM_MC_MEM_MAP_U8_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_WO' ),
891 'IEM_MC_MEM_MAP_U16_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_ATOMIC' ),
892 'IEM_MC_MEM_MAP_U16_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RW' ),
893 'IEM_MC_MEM_MAP_U16_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RO' ),
894 'IEM_MC_MEM_MAP_U16_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_WO' ),
895 'IEM_MC_MEM_MAP_U32_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_ATOMIC' ),
896 'IEM_MC_MEM_MAP_U32_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RW' ),
897 'IEM_MC_MEM_MAP_U32_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RO' ),
898 'IEM_MC_MEM_MAP_U32_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_WO' ),
899 'IEM_MC_MEM_MAP_U64_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_ATOMIC' ),
900 'IEM_MC_MEM_MAP_U64_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RW' ),
901 'IEM_MC_MEM_MAP_U64_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RO' ),
902 'IEM_MC_MEM_MAP_U64_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_WO' ),
903 'IEM_MC_MEM_MAP_U128_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_ATOMIC' ),
904 'IEM_MC_MEM_MAP_U128_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_RW' ),
905 'IEM_MC_MEM_MAP_U128_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_RO' ),
906 'IEM_MC_MEM_MAP_U128_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_WO' ),
907 'IEM_MC_MEM_MAP_EX': ( 3, 'IEM_MC_MEM_FLAT_MAP_EX' ),
908 };
909
910 kdMemMcToFlatInfoStack = {
911 'IEM_MC_PUSH_U16': ( 'IEM_MC_FLAT32_PUSH_U16', 'IEM_MC_FLAT64_PUSH_U16', ),
912 'IEM_MC_PUSH_U32': ( 'IEM_MC_FLAT32_PUSH_U32', 'IEM_MC_PUSH_U32', ),
913 'IEM_MC_PUSH_U64': ( 'IEM_MC_PUSH_U64', 'IEM_MC_FLAT64_PUSH_U64', ),
914 'IEM_MC_PUSH_U32_SREG': ( 'IEM_MC_FLAT32_PUSH_U32_SREG', 'IEM_MC_PUSH_U32_SREG' ),
915 'IEM_MC_POP_GREG_U16': ( 'IEM_MC_FLAT32_POP_GREG_U16', 'IEM_MC_FLAT64_POP_GREG_U16', ),
916 'IEM_MC_POP_GREG_U32': ( 'IEM_MC_FLAT32_POP_GREG_U32', 'IEM_MC_POP_GREG_U32', ),
917 'IEM_MC_POP_GREG_U64': ( 'IEM_MC_POP_GREG_U64', 'IEM_MC_FLAT64_POP_GREG_U64', ),
918 };
919
920 kdThreadedCalcRmEffAddrMcByVariation = {
921 ksVariation_16: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
922 ksVariation_16f: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
923 ksVariation_16_Pre386: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
924 ksVariation_16f_Pre386: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
925 ksVariation_32_Addr16: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
926 ksVariation_32f_Addr16: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
927 ksVariation_16_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
928 ksVariation_16f_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
929 ksVariation_32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
930 ksVariation_32f: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
931 ksVariation_32_Flat: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
932 ksVariation_32f_Flat: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
933 ksVariation_64: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
934 ksVariation_64f: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
935 ksVariation_64_FsGs: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
936 ksVariation_64f_FsGs: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
937 ksVariation_64_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32', ## @todo How did this work again...
938 ksVariation_64f_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32',
939 };
940
941 def analyzeMorphStmtForThreaded(self, aoStmts, iParamRef = 0):
942 """
943 Transforms (copy) the statements into those for the threaded function.
944
945 Returns list/tree of statements (aoStmts is not modified) and the new
946 iParamRef value.
947 """
948 #
949 # We'll be traversing aoParamRefs in parallel to the statements, so we
950 # must match the traversal in analyzeFindThreadedParamRefs exactly.
951 #
952 #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
953 aoThreadedStmts = [];
954 for oStmt in aoStmts:
955 # Skip C++ statements that are purely related to decoding.
956 if not oStmt.isCppStmt() or not oStmt.fDecode:
957 # Copy the statement. Make a deep copy to make sure we've got our own
958 # copies of all instance variables, even if a bit overkill at the moment.
959 oNewStmt = copy.deepcopy(oStmt);
960 aoThreadedStmts.append(oNewStmt);
961 #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));
962
963 # If the statement has parameter references, process the relevant parameters.
964 # We grab the references relevant to this statement and apply them in reverse order.
965 if iParamRef < len(self.aoParamRefs) and self.aoParamRefs[iParamRef].oStmt == oStmt:
966 iParamRefFirst = iParamRef;
967 while True:
968 iParamRef += 1;
969 if iParamRef >= len(self.aoParamRefs) or self.aoParamRefs[iParamRef].oStmt != oStmt:
970 break;
971
972 #print('iParamRefFirst=%s iParamRef=%s' % (iParamRefFirst, iParamRef));
973 for iCurRef in range(iParamRef - 1, iParamRefFirst - 1, -1):
974 oCurRef = self.aoParamRefs[iCurRef];
975 if oCurRef.iParam is not None:
976 assert oCurRef.oStmt == oStmt;
977 #print('iCurRef=%s iParam=%s sOrgRef=%s' % (iCurRef, oCurRef.iParam, oCurRef.sOrgRef));
978 sSrcParam = oNewStmt.asParams[oCurRef.iParam];
979 assert ( sSrcParam[oCurRef.offParam : oCurRef.offParam + len(oCurRef.sOrgRef)] == oCurRef.sOrgRef
980 or oCurRef.fCustomRef), \
981 'offParam=%s sOrgRef=%s iParam=%s oStmt.sName=%s sSrcParam=%s<eos>' \
982 % (oCurRef.offParam, oCurRef.sOrgRef, oCurRef.iParam, oStmt.sName, sSrcParam);
983 oNewStmt.asParams[oCurRef.iParam] = sSrcParam[0 : oCurRef.offParam] \
984 + oCurRef.sNewName \
985 + sSrcParam[oCurRef.offParam + len(oCurRef.sOrgRef) : ];
986
987 # Morph IEM_MC_CALC_RM_EFF_ADDR into IEM_MC_CALC_RM_EFF_ADDR_THREADED ...
988 if oNewStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
989 oNewStmt.sName = self.kdThreadedCalcRmEffAddrMcByVariation[self.sVariation];
990 assert len(oNewStmt.asParams) == 3;
991
992 if self.sVariation in self.kdVariationsWithFlatAddr16:
993 oNewStmt.asParams = [
994 oNewStmt.asParams[0], oNewStmt.asParams[1], self.dParamRefs['u16Disp'][0].sNewName,
995 ];
996 else:
997 sSibAndMore = self.dParamRefs['bSib'][0].sNewName; # Merge bSib and 2nd part of cbImmAndRspOffset.
998 if oStmt.asParams[2] not in ('0', '1', '2', '4'):
999 sSibAndMore = '(%s) | ((%s) & 0x0f00)' % (self.dParamRefs['bSib'][0].sNewName, oStmt.asParams[2]);
1000
1001 if self.sVariation in self.kdVariationsWithFlatAddr32No64:
1002 oNewStmt.asParams = [
1003 oNewStmt.asParams[0], oNewStmt.asParams[1], sSibAndMore, self.dParamRefs['u32Disp'][0].sNewName,
1004 ];
1005 else:
1006 oNewStmt.asParams = [
1007 oNewStmt.asParams[0], self.dParamRefs['bRmEx'][0].sNewName, sSibAndMore,
1008 self.dParamRefs['u32Disp'][0].sNewName, self.dParamRefs['cbInstr'][0].sNewName,
1009 ];
1010 # ... and IEM_MC_ADVANCE_RIP_AND_FINISH into *_THREADED_PCxx[_WITH_FLAGS] ...
1011 elif ( oNewStmt.sName
1012 in ('IEM_MC_ADVANCE_RIP_AND_FINISH',
1013 'IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH',
1014 'IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH', 'IEM_MC_SET_RIP_U64_AND_FINISH', )):
1015 if oNewStmt.sName not in ('IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH',
1016 'IEM_MC_SET_RIP_U64_AND_FINISH', ):
1017 oNewStmt.asParams.append(self.dParamRefs['cbInstr'][0].sNewName);
1018 if ( oNewStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH', )
1019 and self.sVariation not in self.kdVariationsOnlyPre386):
1020 oNewStmt.asParams.append(self.dParamRefs['pVCpu->iem.s.enmEffOpSize'][0].sNewName);
1021 oNewStmt.sName += '_THREADED';
1022 if self.sVariation in self.kdVariationsOnly64NoFlags:
1023 oNewStmt.sName += '_PC64';
1024 elif self.sVariation in self.kdVariationsOnly64WithFlags:
1025 oNewStmt.sName += '_PC64_WITH_FLAGS';
1026 elif self.sVariation in self.kdVariationsOnlyPre386NoFlags:
1027 oNewStmt.sName += '_PC16';
1028 elif self.sVariation in self.kdVariationsOnlyPre386WithFlags:
1029 oNewStmt.sName += '_PC16_WITH_FLAGS';
1030 elif self.sVariation not in self.kdVariationsWithEflagsCheckingAndClearing:
1031 assert self.sVariation != self.ksVariation_Default;
1032 oNewStmt.sName += '_PC32';
1033 else:
1034 oNewStmt.sName += '_PC32_WITH_FLAGS';
1035
1036 # This is making the wrong branch of conditionals break out of the TB.
1037 if (oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH',
1038 'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH')):
1039 sExitTbStatus = 'VINF_SUCCESS';
1040 if self.sVariation in self.kdVariationsWithConditional:
1041 if self.sVariation in self.kdVariationsWithConditionalNoJmp:
1042 if oStmt.sName != 'IEM_MC_ADVANCE_RIP_AND_FINISH':
1043 sExitTbStatus = 'VINF_IEM_REEXEC_BREAK';
1044 elif oStmt.sName == 'IEM_MC_ADVANCE_RIP_AND_FINISH':
1045 sExitTbStatus = 'VINF_IEM_REEXEC_BREAK';
1046 oNewStmt.asParams.append(sExitTbStatus);
1047
1048 # ... and IEM_MC_*_GREG_U8 into *_THREADED w/ reworked index taking REX into account
1049 elif oNewStmt.sName.startswith('IEM_MC_') and oNewStmt.sName.find('_GREG_U8') > 0:
1050 (idxReg, _, sStdRef) = self.analyze8BitGRegStmt(oStmt); # Don't use oNewStmt as it has been modified!
1051 oNewStmt.asParams[idxReg] = self.dParamRefs[sStdRef][0].sNewName;
1052 oNewStmt.sName += '_THREADED';
1053
1054 # ... and IEM_MC_CALL_CIMPL_[0-5] and IEM_MC_DEFER_TO_CIMPL_[0-5]_RET into *_THREADED ...
1055 elif oNewStmt.sName.startswith('IEM_MC_CALL_CIMPL_') or oNewStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_'):
1056 oNewStmt.sName += '_THREADED';
1057 oNewStmt.idxFn += 1;
1058 oNewStmt.idxParams += 1;
1059 oNewStmt.asParams.insert(0, self.dParamRefs['cbInstr'][0].sNewName);
1060
1061 # ... and in FLAT modes we must morph memory access into FLAT accesses ...
1062 elif ( self.sVariation in self.kdVariationsWithFlatAddress
1063 and ( oNewStmt.sName.startswith('IEM_MC_FETCH_MEM')
1064 or (oNewStmt.sName.startswith('IEM_MC_STORE_MEM_') and oNewStmt.sName.find('_BY_REF') < 0)
1065 or oNewStmt.sName.startswith('IEM_MC_MEM_MAP') )):
1066 idxEffSeg = self.kdMemMcToFlatInfo[oNewStmt.sName][0];
1067 if idxEffSeg != -1:
1068 if ( oNewStmt.asParams[idxEffSeg].find('iEffSeg') < 0
1069 and oNewStmt.asParams[idxEffSeg] not in ('X86_SREG_ES', ) ):
1070 self.raiseProblem('Expected iEffSeg as param #%d to %s: %s'
1071 % (idxEffSeg + 1, oNewStmt.sName, oNewStmt.asParams[idxEffSeg],));
1072 oNewStmt.asParams.pop(idxEffSeg);
1073 oNewStmt.sName = self.kdMemMcToFlatInfo[oNewStmt.sName][1];
1074
1075 # ... PUSH and POP also need flat variants, but these differ a little.
1076 elif ( self.sVariation in self.kdVariationsWithFlatStackAddress
1077 and ( (oNewStmt.sName.startswith('IEM_MC_PUSH') and oNewStmt.sName.find('_FPU') < 0)
1078 or oNewStmt.sName.startswith('IEM_MC_POP'))):
1079 oNewStmt.sName = self.kdMemMcToFlatInfoStack[oNewStmt.sName][int(self.sVariation in
1080 self.kdVariationsWithFlat64StackAddress)];
1081
1082
1083 # Process branches of conditionals recursively.
1084 if isinstance(oStmt, iai.McStmtCond):
1085 (oNewStmt.aoIfBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoIfBranch, iParamRef);
1086 if oStmt.aoElseBranch:
1087 (oNewStmt.aoElseBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoElseBranch, iParamRef);
1088
1089 return (aoThreadedStmts, iParamRef);
1090
1091
1092 def analyzeConsolidateThreadedParamRefs(self):
1093 """
1094 Consolidate threaded function parameter references into a dictionary
1095 with lists of the references to each variable/field.
1096 """
1097 # Gather unique parameters.
1098 self.dParamRefs = {};
1099 for oRef in self.aoParamRefs:
1100 if oRef.sStdRef not in self.dParamRefs:
1101 self.dParamRefs[oRef.sStdRef] = [oRef,];
1102 else:
1103 self.dParamRefs[oRef.sStdRef].append(oRef);
1104
1105 # Generate names for them for use in the threaded function.
1106 dParamNames = {};
1107 for sName, aoRefs in self.dParamRefs.items():
1108 # Morph the reference expression into a name.
1109 if sName.startswith('IEM_GET_MODRM_REG'): sName = 'bModRmRegP';
1110 elif sName.startswith('IEM_GET_MODRM_RM'): sName = 'bModRmRmP';
1111 elif sName.startswith('IEM_GET_MODRM_REG_8'): sName = 'bModRmReg8P';
1112 elif sName.startswith('IEM_GET_MODRM_RM_8'): sName = 'bModRmRm8P';
1113 elif sName.startswith('IEM_GET_EFFECTIVE_VVVV'): sName = 'bEffVvvvP';
1114 elif sName.find('.') >= 0 or sName.find('->') >= 0:
1115 sName = sName[max(sName.rfind('.'), sName.rfind('>')) + 1 : ] + 'P';
1116 else:
1117 sName += 'P';
1118
1119 # Ensure it's unique.
1120 if sName in dParamNames:
1121 for i in range(10):
1122 if sName + str(i) not in dParamNames:
1123 sName += str(i);
1124 break;
1125 dParamNames[sName] = True;
1126
1127 # Update all the references.
1128 for oRef in aoRefs:
1129 oRef.sNewName = sName;
1130
1131 # Organize them by size too for the purpose of optimizing them.
1132 dBySize = {} # type: Dict[int, List[str]]
1133 for sStdRef, aoRefs in self.dParamRefs.items():
1134 if aoRefs[0].sType[0] != 'P':
1135 cBits = g_kdTypeInfo[aoRefs[0].sType][0];
1136 assert(cBits <= 64);
1137 else:
1138 cBits = 64;
1139
1140 if cBits not in dBySize:
1141 dBySize[cBits] = [sStdRef,]
1142 else:
1143 dBySize[cBits].append(sStdRef);
1144
1145 # Pack the parameters as best as we can, starting with the largest ones
1146 # and ASSUMING a 64-bit parameter size.
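# (E.g. one 32-bit, one 16-bit and two 8-bit references all fit into a single 64-bit parameter.)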
1147 self.cMinParams = 0;
1148 offNewParam = 0;
1149 for cBits in sorted(dBySize.keys(), reverse = True):
1150 for sStdRef in dBySize[cBits]:
1151 if offNewParam == 0 or offNewParam + cBits > 64:
1152 self.cMinParams += 1;
1153 offNewParam = cBits;
1154 else:
1155 offNewParam += cBits;
1156 assert(offNewParam <= 64);
1157
1158 for oRef in self.dParamRefs[sStdRef]:
1159 oRef.iNewParam = self.cMinParams - 1;
1160 oRef.offNewParam = offNewParam - cBits;
1161
1162 # Currently there are a few that require 4 parameters; list these so we can figure out why:
1163 if self.cMinParams >= 4:
1164 print('debug: cMinParams=%s cRawParams=%s - %s:%d'
1165 % (self.cMinParams, len(self.dParamRefs), self.oParent.oMcBlock.sSrcFile, self.oParent.oMcBlock.iBeginLine,));
1166
1167 return True;
1168
1169 ksHexDigits = '0123456789abcdefABCDEF';
1170
1171 def analyzeFindThreadedParamRefs(self, aoStmts): # pylint: disable=too-many-statements
1172 """
1173 Scans the statements for things that have to be passed on to the threaded
1174 function (populates self.aoParamRefs).
1175 """
1176 for oStmt in aoStmts:
1177 # Some statements we can skip altogether.
1178 if isinstance(oStmt, iai.McCppPreProc):
1179 continue;
1180 if oStmt.isCppStmt() and oStmt.fDecode:
1181 continue;
1182 if oStmt.sName in ('IEM_MC_BEGIN',):
1183 continue;
1184
1185 if isinstance(oStmt, iai.McStmtVar):
1186 if oStmt.sValue is None:
1187 continue;
1188 aiSkipParams = { 0: True, 1: True, 3: True };
1189 else:
1190 aiSkipParams = {};
1191
1192 # Several statements have implicit parameters and some have different parameters.
1193 if oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH',
1194 'IEM_MC_REL_JMP_S32_AND_FINISH', 'IEM_MC_CALL_CIMPL_0', 'IEM_MC_CALL_CIMPL_1',
1195 'IEM_MC_CALL_CIMPL_2', 'IEM_MC_CALL_CIMPL_3', 'IEM_MC_CALL_CIMPL_4', 'IEM_MC_CALL_CIMPL_5',
1196 'IEM_MC_DEFER_TO_CIMPL_0_RET', 'IEM_MC_DEFER_TO_CIMPL_1_RET', 'IEM_MC_DEFER_TO_CIMPL_2_RET',
1197 'IEM_MC_DEFER_TO_CIMPL_3_RET', 'IEM_MC_DEFER_TO_CIMPL_4_RET', 'IEM_MC_DEFER_TO_CIMPL_5_RET', ):
1198 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)', 'uint4_t', oStmt, sStdRef = 'cbInstr'));
1199
1200 if ( oStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH',)
1201 and self.sVariation not in self.kdVariationsOnlyPre386):
1202 self.aoParamRefs.append(ThreadedParamRef('pVCpu->iem.s.enmEffOpSize', 'IEMMODE', oStmt));
1203
1204 if oStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
1205 # This is being pretty presumptive about bRm always being the RM byte...
1206 assert len(oStmt.asParams) == 3;
1207 assert oStmt.asParams[1] == 'bRm';
1208
1209 if self.sVariation in self.kdVariationsWithFlatAddr16:
1210 self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
1211 self.aoParamRefs.append(ThreadedParamRef('(uint16_t)uEffAddrInfo' ,
1212 'uint16_t', oStmt, sStdRef = 'u16Disp'));
1213 elif self.sVariation in self.kdVariationsWithFlatAddr32No64:
1214 self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
1215 self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
1216 'uint8_t', oStmt, sStdRef = 'bSib'));
1217 self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
1218 'uint32_t', oStmt, sStdRef = 'u32Disp'));
1219 else:
1220 assert self.sVariation in self.kdVariationsWithAddressOnly64;
1221 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_MODRM_EX(pVCpu, bRm)',
1222 'uint8_t', oStmt, sStdRef = 'bRmEx'));
1223 self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
1224 'uint8_t', oStmt, sStdRef = 'bSib'));
1225 self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
1226 'uint32_t', oStmt, sStdRef = 'u32Disp'));
1227 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)',
1228 'uint4_t', oStmt, sStdRef = 'cbInstr'));
1229 aiSkipParams[1] = True; # Skip the bRm parameter as it is being replaced by bRmEx.
1230
1231 # 8-bit register accesses need to have their index argument reworked to take REX into account.
1232 if oStmt.sName.startswith('IEM_MC_') and oStmt.sName.find('_GREG_U8') > 0:
1233 (idxReg, sOrgRef, sStdRef) = self.analyze8BitGRegStmt(oStmt);
1234 self.aoParamRefs.append(ThreadedParamRef(sOrgRef, 'uint16_t', oStmt, idxReg, sStdRef = sStdRef));
1235 aiSkipParams[idxReg] = True; # Skip the parameter below.
1236
1237 # If in flat mode variation, ignore the effective segment parameter to memory MCs.
1238 if ( self.sVariation in self.kdVariationsWithFlatAddress
1239 and oStmt.sName in self.kdMemMcToFlatInfo
1240 and self.kdMemMcToFlatInfo[oStmt.sName][0] != -1):
1241 aiSkipParams[self.kdMemMcToFlatInfo[oStmt.sName][0]] = True;
1242
1243 # Inspect the target of calls to see if we need to pass down a
1244 # function pointer or function table pointer for it to work.
1245 if isinstance(oStmt, iai.McStmtCall):
1246 if oStmt.sFn[0] == 'p':
1247 self.aoParamRefs.append(ThreadedParamRef(oStmt.sFn, self.analyzeCallToType(oStmt.sFn), oStmt, oStmt.idxFn));
1248 elif ( oStmt.sFn[0] != 'i'
1249 and not oStmt.sFn.startswith('IEMTARGETCPU_EFL_BEHAVIOR_SELECT')
1250 and not oStmt.sFn.startswith('IEM_SELECT_HOST_OR_FALLBACK') ):
1251 self.raiseProblem('Bogus function name in %s: %s' % (oStmt.sName, oStmt.sFn,));
1252 aiSkipParams[oStmt.idxFn] = True;
1253
1254 # Skip the hint parameter (first) for IEM_MC_CALL_CIMPL_X.
1255 if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
1256 assert oStmt.idxFn == 2;
1257 aiSkipParams[0] = True;
1258
1259
1260 # Check all the parameters for bogus references.
1261 for iParam, sParam in enumerate(oStmt.asParams):
1262 if iParam not in aiSkipParams and sParam not in self.oParent.dVariables:
1263 # The parameter may contain a C expression, so we have to try
1264 # to extract the relevant bits, i.e. variables and fields, while
1265 # ignoring operators and parentheses.
1266 offParam = 0;
1267 while offParam < len(sParam):
1268 # Is it the start of a C identifier? If so, find the end, but don't stop on field separators (->, .).
1269 ch = sParam[offParam];
1270 if ch.isalpha() or ch == '_':
1271 offStart = offParam;
1272 offParam += 1;
1273 while offParam < len(sParam):
1274 ch = sParam[offParam];
1275 if not ch.isalnum() and ch != '_' and ch != '.':
1276 if ch != '-' or sParam[offParam + 1] != '>':
1277 # Special hack for the 'CTX_SUFF(pVM)' bit in pVCpu->CTX_SUFF(pVM)->xxxx:
1278 if ( ch == '('
1279 and sParam[offStart : offParam + len('(pVM)->')] == 'pVCpu->CTX_SUFF(pVM)->'):
1280 offParam += len('(pVM)->') - 1;
1281 else:
1282 break;
1283 offParam += 1;
1284 offParam += 1;
1285 sRef = sParam[offStart : offParam];
1286
1287 # For register references, we pass the full register indexes instead, as macros
1288 # like IEM_GET_MODRM_REG implicitly reference pVCpu->iem.s.uRexReg, and the
1289 # threaded function will be more efficient if we just pass the register index
1290 # as a 4-bit param.
1291 if ( sRef.startswith('IEM_GET_MODRM')
1292 or sRef.startswith('IEM_GET_EFFECTIVE_VVVV') ):
1293 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1294 if sParam[offParam] != '(':
1295 self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
1296 (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
1297 if asMacroParams is None:
1298 self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
1299 offParam = offCloseParam + 1;
1300 self.aoParamRefs.append(ThreadedParamRef(sParam[offStart : offParam], 'uint8_t',
1301 oStmt, iParam, offStart));
1302
1303 # We can skip known variables.
1304 elif sRef in self.oParent.dVariables:
1305 pass;
1306
1307 # Skip certain macro invocations.
1308 elif sRef in ('IEM_GET_HOST_CPU_FEATURES',
1309 'IEM_GET_GUEST_CPU_FEATURES',
1310 'IEM_IS_GUEST_CPU_AMD',
1311 'IEM_IS_16BIT_CODE',
1312 'IEM_IS_32BIT_CODE',
1313 'IEM_IS_64BIT_CODE',
1314 ):
1315 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1316 if sParam[offParam] != '(':
1317 self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
1318 (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
1319 if asMacroParams is None:
1320 self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
1321 offParam = offCloseParam + 1;
1322
1323 # Skip any dereference following it, unless it's a predicate like IEM_IS_GUEST_CPU_AMD.
1324 if sRef not in ('IEM_IS_GUEST_CPU_AMD',
1325 'IEM_IS_16BIT_CODE',
1326 'IEM_IS_32BIT_CODE',
1327 'IEM_IS_64BIT_CODE',
1328 ):
1329 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1330 if offParam + 2 <= len(sParam) and sParam[offParam : offParam + 2] == '->':
1331 offParam = iai.McBlock.skipSpacesAt(sParam, offParam + 2, len(sParam));
1332 while offParam < len(sParam) and (sParam[offParam].isalnum() or sParam[offParam] in '_.'):
1333 offParam += 1;
1334
1335 # Skip constants, globals, types (casts), sizeof and macros.
1336 elif ( sRef.startswith('IEM_OP_PRF_')
1337 or sRef.startswith('IEM_ACCESS_')
1338 or sRef.startswith('IEMINT_')
1339 or sRef.startswith('X86_GREG_')
1340 or sRef.startswith('X86_SREG_')
1341 or sRef.startswith('X86_EFL_')
1342 or sRef.startswith('X86_FSW_')
1343 or sRef.startswith('X86_FCW_')
1344 or sRef.startswith('X86_XCPT_')
1345 or sRef.startswith('IEMMODE_')
1346 or sRef.startswith('IEM_F_')
1347 or sRef.startswith('IEM_CIMPL_F_')
1348 or sRef.startswith('g_')
1349 or sRef.startswith('iemAImpl_')
1350 or sRef.startswith('kIemNativeGstReg_')
1351 or sRef in ( 'int8_t', 'int16_t', 'int32_t', 'int64_t',
1352 'INT8_C', 'INT16_C', 'INT32_C', 'INT64_C',
1353 'UINT8_C', 'UINT16_C', 'UINT32_C', 'UINT64_C',
1354 'UINT8_MAX', 'UINT16_MAX', 'UINT32_MAX', 'UINT64_MAX',
1355 'INT8_MAX', 'INT16_MAX', 'INT32_MAX', 'INT64_MAX',
1356 'INT8_MIN', 'INT16_MIN', 'INT32_MIN', 'INT64_MIN',
1357 'sizeof', 'NOREF', 'RT_NOREF', 'IEMMODE_64BIT',
1358 'RT_BIT_32', 'RT_BIT_64', 'true', 'false',
1359 'NIL_RTGCPTR',) ):
1360 pass;
1361
1362 # Whatever is left needs to be passed to the threaded function:
1363 # any variable (non-field) and decoder fields in IEMCPU will need to be parameterized.
1364 elif ( ( '.' not in sRef
1365 and '-' not in sRef
1366 and sRef not in ('pVCpu', ) )
1367 or iai.McBlock.koReIemDecoderVars.search(sRef) is not None):
1368 self.aoParamRefs.append(ThreadedParamRef(sRef, self.analyzeReferenceToType(sRef),
1369 oStmt, iParam, offStart));
1370 # Number.
1371 elif ch.isdigit():
1372 if ( ch == '0'
1373 and offParam + 2 <= len(sParam)
1374 and sParam[offParam + 1] in 'xX'
1375 and sParam[offParam + 2] in self.ksHexDigits ):
1376 offParam += 2;
1377 while offParam < len(sParam) and sParam[offParam] in self.ksHexDigits:
1378 offParam += 1;
1379 else:
1380 while offParam < len(sParam) and sParam[offParam].isdigit():
1381 offParam += 1;
1382 # Comment?
1383 elif ( ch == '/'
1384 and offParam + 4 <= len(sParam)
1385 and sParam[offParam + 1] == '*'):
1386 offParam += 2;
1387 offNext = sParam.find('*/', offParam);
1388 if offNext < offParam:
1389 self.raiseProblem('Unable to find "*/" in "%s" ("%s")' % (sRef, oStmt.renderCode(),));
1390 offParam = offNext + 2;
1391 # Whatever else.
1392 else:
1393 offParam += 1;
1394
1395 # Traverse the branches of conditionals.
1396 if isinstance(oStmt, iai.McStmtCond):
1397 self.analyzeFindThreadedParamRefs(oStmt.aoIfBranch);
1398 self.analyzeFindThreadedParamRefs(oStmt.aoElseBranch);
1399 return True;
1400
1401 def analyzeVariation(self, aoStmts):
1402 """
1403 2nd part of the analysis, done on each variation.
1404
1405 The variations may differ in parameter requirements and will end up with
1406 slightly different MC sequences. Thus this is done on each individually.
1407
1408 Returns dummy True - raises exception on trouble.
1409 """
1410 # Now scan the code for variables and field references that need to
1411 # be passed to the threaded function because they are related to the
1412 # instruction decoding.
1413 self.analyzeFindThreadedParamRefs(aoStmts);
1414 self.analyzeConsolidateThreadedParamRefs();
1415
1416 # Morph the statement stream for the block into what we'll be using in the threaded function.
1417 (self.aoStmtsForThreadedFunction, iParamRef) = self.analyzeMorphStmtForThreaded(aoStmts);
1418 if iParamRef != len(self.aoParamRefs):
1419 raise Exception('iParamRef=%s, expected %s!' % (iParamRef, len(self.aoParamRefs),));
1420
1421 return True;
1422
1423 def emitThreadedCallStmts(self, cchIndent, sCallVarNm = None):
1424 """
1425 Produces generic C++ statements that emit a call to the threaded function
1426 variation and any subsequent checks that may be necessary after that.
1427
1428 The sCallVarNm is the name of the variable with the threaded function
1429 to call. This is for the case where all the variations have the same
1430 parameters and only the threaded function number differs.
1431 """
1432 aoStmts = [
1433 iai.McCppCall('IEM_MC2_BEGIN_EMIT_CALLS',
1434 ['1' if 'IEM_CIMPL_F_CHECK_IRQ_BEFORE' in self.oParent.dsCImplFlags else '0'],
1435 cchIndent = cchIndent), # Scope and a hook for various stuff.
1436 ];
1437
1438 # The call to the threaded function.
1439 asCallArgs = [ self.getIndexName() if not sCallVarNm else sCallVarNm, ];
1440 for iParam in range(self.cMinParams):
1441 asFrags = [];
1442 for aoRefs in self.dParamRefs.values():
1443 oRef = aoRefs[0];
1444 if oRef.iNewParam == iParam:
1445 sCast = '(uint64_t)'
1446 if oRef.sType in ('int8_t', 'int16_t', 'int32_t'): # Make sure these don't get sign-extended.
1447 sCast = '(uint64_t)(u' + oRef.sType + ')';
1448 if oRef.offNewParam == 0:
1449 asFrags.append(sCast + '(' + oRef.sOrgRef + ')');
1450 else:
1451 asFrags.append('(%s(%s) << %s)' % (sCast, oRef.sOrgRef, oRef.offNewParam));
1452 assert asFrags;
1453 asCallArgs.append(' | '.join(asFrags));
1454
1455 aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_%s' % (len(asCallArgs) - 1,), asCallArgs, cchIndent = cchIndent));
1456
1457 # 2023-11-28: This has to be done AFTER the CIMPL call, so we have to
1458 # emit this mode check from the compilation loop. On the
1459 # plus side, this means we eliminate an unnecessary call at the
1460 # end of the TB. :-)
1461 ## For CIMPL stuff, we need to consult the associated IEM_CIMPL_F_XXX
1462 ## mask and maybe emit additional checks.
1463 #if ( 'IEM_CIMPL_F_MODE' in self.oParent.dsCImplFlags
1464 # or 'IEM_CIMPL_F_XCPT' in self.oParent.dsCImplFlags
1465 # or 'IEM_CIMPL_F_VMEXIT' in self.oParent.dsCImplFlags):
1466 # aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_1', ( 'kIemThreadedFunc_BltIn_CheckMode', 'pVCpu->iem.s.fExec', ),
1467 # cchIndent = cchIndent));
1468
1469 sCImplFlags = ' | '.join(self.oParent.dsCImplFlags.keys());
1470 if not sCImplFlags:
1471 sCImplFlags = '0'
1472 aoStmts.append(iai.McCppCall('IEM_MC2_END_EMIT_CALLS', ( sCImplFlags, ), cchIndent = cchIndent)); # For closing the scope.
1473
1474 # Emit fEndTb = true or fTbBranched = true if any of the CIMPL flags
1475 # indicates we should do so.
1476 # Note! iemThreadedRecompilerMcDeferToCImpl0 duplicates work done here.
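# E.g. (illustrative) a taken conditional relative jump variation typically ends up emitting
#     iemThreadedSetBranched(pVCpu, IEMBRANCHED_F_RELATIVE | IEMBRANCHED_F_CONDITIONAL);
# since the IEM_CIMPL_F_BRANCH prefix is rewritten to IEMBRANCHED_F below.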
1477 asEndTbFlags = [];
1478 asTbBranchedFlags = [];
1479 for sFlag in self.oParent.dsCImplFlags:
1480 if self.kdCImplFlags[sFlag] is True:
1481 asEndTbFlags.append(sFlag);
1482 elif sFlag.startswith('IEM_CIMPL_F_BRANCH_'):
1483 asTbBranchedFlags.append(sFlag);
1484 if ( asTbBranchedFlags
1485 and ( 'IEM_CIMPL_F_BRANCH_CONDITIONAL' not in asTbBranchedFlags
1486 or self.sVariation not in self.kdVariationsWithConditionalNoJmp)):
1487 aoStmts.append(iai.McCppGeneric('iemThreadedSetBranched(pVCpu, %s);'
1488 % ((' | '.join(asTbBranchedFlags)).replace('IEM_CIMPL_F_BRANCH', 'IEMBRANCHED_F'),),
1489 cchIndent = cchIndent)); # Inline fn saves ~2 seconds for gcc 13/dbg (1m13s vs 1m15s).
1490 if asEndTbFlags:
1491 aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.fEndTb = true; /* %s */' % (','.join(asEndTbFlags),),
1492 cchIndent = cchIndent));
1493
1494 if 'IEM_CIMPL_F_CHECK_IRQ_AFTER' in self.oParent.dsCImplFlags:
1495 aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.cInstrTillIrqCheck = 0;', cchIndent = cchIndent));
1496
1497 return aoStmts;
1498
1499
1500class ThreadedFunction(object):
1501 """
1502 A threaded function.
1503 """
1504
1505 def __init__(self, oMcBlock: iai.McBlock) -> None:
1506 self.oMcBlock = oMcBlock # type: iai.McBlock
1507 # The remaining fields are only useful after analyze() has been called:
1508 ## Variations for this block. There is at least one.
1509 self.aoVariations = [] # type: List[ThreadedFunctionVariation]
1510 ## Variation dictionary containing the same as aoVariations.
1511 self.dVariations = {} # type: Dict[str, ThreadedFunctionVariation]
1512 ## Dictionary of local variables (IEM_MC_LOCAL[_CONST]) and call arguments (IEM_MC_ARG*).
1513 self.dVariables = {} # type: Dict[str, iai.McStmtVar]
1514 ## Dictionary with any IEM_CIMPL_F_XXX flags explicitly advertised in the code block
1515 ## and those determined by analyzeCodeOperation().
1516 self.dsCImplFlags = {} # type: Dict[str, bool]
1517
1518 @staticmethod
1519 def dummyInstance():
1520 """ Gets a dummy instance. """
1521 return ThreadedFunction(iai.McBlock('null', 999999999, 999999999,
1522 iai.DecoderFunction('null', 999999999, 'nil', ('','')), 999999999));
1523
1524 def hasWithFlagsCheckingAndClearingVariation(self):
1525 """
1526 Check if there is one or more with flags checking and clearing
1527 variations for this threaded function.
1528 """
1529 for sVarWithFlags in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
1530 if sVarWithFlags in self.dVariations:
1531 return True;
1532 return False;
1533
1534 #
1535 # Analysis and code morphing.
1536 #
1537
1538 def raiseProblem(self, sMessage):
1539 """ Raises a problem. """
1540 raise Exception('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1541
1542 def error(self, sMessage, oGenerator):
1543 """ Emits an error via the generator object, causing it to fail. """
1544 oGenerator.rawError('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1545
1546 def warning(self, sMessage):
1547 """ Emits a warning. """
1548 print('%s:%s: warning: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1549
1550 def analyzeFindVariablesAndCallArgs(self, aoStmts: List[iai.McStmt]) -> bool:
1551 """ Scans the statements for MC variables and call arguments. """
1552 for oStmt in aoStmts:
1553 if isinstance(oStmt, iai.McStmtVar):
1554 if oStmt.sVarName in self.dVariables:
1555 raise Exception('Variable %s is defined more than once!' % (oStmt.sVarName,));
1556 self.dVariables[oStmt.sVarName] = oStmt;
1557
1558 # There shouldn't be any variables or arguments declared inside if/
1559 # else blocks, but scan them too to be on the safe side.
1560 if isinstance(oStmt, iai.McStmtCond):
1561 #cBefore = len(self.dVariables);
1562 self.analyzeFindVariablesAndCallArgs(oStmt.aoIfBranch);
1563 self.analyzeFindVariablesAndCallArgs(oStmt.aoElseBranch);
1564 #if len(self.dVariables) != cBefore:
1565 # raise Exception('Variables/arguments defined in conditional branches!');
1566 return True;
1567
1568 def analyzeCodeOperation(self, aoStmts: List[iai.McStmt], dEflStmts, fSeenConditional = False) -> bool:
1569 """
1570 Analyzes the code, looking for clues as to additional side-effects.
1571
1572 Currently this is simply looking for branching and adding the relevant
1573 branch flags to dsCImplFlags. ASSUMES the caller pre-populates the
1574 dictionary with a copy of self.oMcBlock.dsCImplFlags.
1575
1576 This also sets McStmtCond.oIfBranchAnnotation & McStmtCond.oElseBranchAnnotation.
1577
1578 Returns the g_ksFinishAnnotation_XXX value for the return style, or None.
1579 """
1580 sAnnotation = None;
1581 for oStmt in aoStmts:
1582 # Set IEM_CIMPL_F_BRANCH_XXX if we see any branching MCs.
1583 if oStmt.sName.startswith('IEM_MC_SET_RIP'):
1584 assert not fSeenConditional;
1585 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_INDIRECT'] = True;
1586 elif oStmt.sName.startswith('IEM_MC_REL_JMP'):
1587 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_RELATIVE'] = True;
1588 if fSeenConditional:
1589 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_CONDITIONAL'] = True;
1590
1591 # Check for CIMPL and AIMPL calls.
1592 if oStmt.sName.startswith('IEM_MC_CALL_'):
1593 if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
1594 self.dsCImplFlags['IEM_CIMPL_F_CALLS_CIMPL'] = True;
1595 elif ( oStmt.sName.startswith('IEM_MC_CALL_VOID_AIMPL_')
1596 or oStmt.sName.startswith('IEM_MC_CALL_AIMPL_')
1597 or oStmt.sName.startswith('IEM_MC_CALL_AVX_AIMPL_')):
1598 self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL'] = True;
1599 elif ( oStmt.sName.startswith('IEM_MC_CALL_SSE_AIMPL_')
1600 or oStmt.sName.startswith('IEM_MC_CALL_MMX_AIMPL_')
1601 or oStmt.sName.startswith('IEM_MC_CALL_FPU_AIMPL_')):
1602 self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE'] = True;
1603 else:
1604 raise Exception('Unknown IEM_MC_CALL_* statement: %s' % (oStmt.sName,));
1605
1606 # Check for return statements.
1607 if oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH',):
1608 assert sAnnotation is None;
1609 sAnnotation = g_ksFinishAnnotation_Advance;
1610 elif oStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH',
1611 'IEM_MC_REL_JMP_S32_AND_FINISH',):
1612 assert sAnnotation is None;
1613 sAnnotation = g_ksFinishAnnotation_RelJmp;
1614 elif oStmt.sName in ('IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH',
1615 'IEM_MC_SET_RIP_U64_AND_FINISH',):
1616 assert sAnnotation is None;
1617 sAnnotation = g_ksFinishAnnotation_SetJmp;
1618 elif oStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_'):
1619 assert sAnnotation is None;
1620 sAnnotation = g_ksFinishAnnotation_DeferToCImpl;
1621
1622 # Collect MCs working on EFLAGS. Caller will check this.
1623 if oStmt.sName in ('IEM_MC_FETCH_EFLAGS', 'IEM_MC_FETCH_EFLAGS_U8', 'IEM_MC_COMMIT_EFLAGS', 'IEM_MC_REF_EFLAGS',
1624 'IEM_MC_ARG_LOCAL_EFLAGS', ):
1625 dEflStmts[oStmt.sName] = oStmt;
1626 elif isinstance(oStmt, iai.McStmtCall):
1627 if oStmt.sName in ('IEM_MC_CALL_CIMPL_0', 'IEM_MC_CALL_CIMPL_1', 'IEM_MC_CALL_CIMPL_2',
1628 'IEM_MC_CALL_CIMPL_3', 'IEM_MC_CALL_CIMPL_4', 'IEM_MC_CALL_CIMPL_5',):
1629 if ( oStmt.asParams[0].find('IEM_CIMPL_F_RFLAGS') >= 0
1630 or oStmt.asParams[0].find('IEM_CIMPL_F_STATUS_FLAGS') >= 0):
1631 dEflStmts[oStmt.sName] = oStmt;
1632
1633 # Process branches of conditionals recursively.
1634 if isinstance(oStmt, iai.McStmtCond):
1635 oStmt.oIfBranchAnnotation = self.analyzeCodeOperation(oStmt.aoIfBranch, dEflStmts, True);
1636 if oStmt.aoElseBranch:
1637 oStmt.oElseBranchAnnotation = self.analyzeCodeOperation(oStmt.aoElseBranch, dEflStmts, True);
1638
1639 return sAnnotation;
1640
1641 def analyze(self, oGenerator):
1642 """
1643 Analyzes the code, identifying the number of parameters it requires and such.
1644
1645 Returns dummy True - raises exception on trouble.
1646 """
1647
1648 # Check the block for errors before we proceed (will decode it).
1649 asErrors = self.oMcBlock.check();
1650 if asErrors:
1651 raise Exception('\n'.join(['%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sError, )
1652 for sError in asErrors]));
1653
1654 # Decode the block into a list/tree of McStmt objects.
1655 aoStmts = self.oMcBlock.decode();
1656
1657 # Scan the statements for local variables and call arguments (self.dVariables).
1658 self.analyzeFindVariablesAndCallArgs(aoStmts);
1659
1660 # Scan the code for IEM_CIMPL_F_ and other clues.
1661 self.dsCImplFlags = self.oMcBlock.dsCImplFlags.copy();
1662 dEflStmts = {};
1663 self.analyzeCodeOperation(aoStmts, dEflStmts);
1664 if ( ('IEM_CIMPL_F_CALLS_CIMPL' in self.dsCImplFlags)
1665 + ('IEM_CIMPL_F_CALLS_AIMPL' in self.dsCImplFlags)
1666 + ('IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE' in self.dsCImplFlags) > 1):
1667 self.error('Mixing CIMPL/AIMPL/AIMPL_WITH_FXSTATE calls', oGenerator);
1668
1669 # Analyze EFLAGS-related MCs and @opflmodify and friends.
1670 if dEflStmts:
1671 oInstruction = self.oMcBlock.oInstruction; # iai.Instruction
1672 if ( oInstruction is None
1673 or (oInstruction.asFlTest is None and oInstruction.asFlModify is None)):
1674 sMcNames = '+'.join(dEflStmts.keys());
1675 if len(dEflStmts) != 1 or not sMcNames.startswith('IEM_MC_CALL_CIMPL_'): # Hack for far calls
1676 self.error('Uses %s but has no @opflmodify, @opfltest or @opflclass with details!' % (sMcNames,), oGenerator);
1677 elif 'IEM_MC_COMMIT_EFLAGS' in dEflStmts:
1678 if not oInstruction.asFlModify:
1679 if oInstruction.sMnemonic not in [ 'not', ]:
1680 self.error('Uses IEM_MC_COMMIT_EFLAGS but has no flags in @opflmodify!', oGenerator);
1681 elif ( 'IEM_MC_CALL_CIMPL_0' in dEflStmts
1682 or 'IEM_MC_CALL_CIMPL_1' in dEflStmts
1683 or 'IEM_MC_CALL_CIMPL_2' in dEflStmts
1684 or 'IEM_MC_CALL_CIMPL_3' in dEflStmts
1685 or 'IEM_MC_CALL_CIMPL_4' in dEflStmts
1686 or 'IEM_MC_CALL_CIMPL_5' in dEflStmts ):
1687 if not oInstruction.asFlModify:
1688 self.error('Uses IEM_MC_CALL_CIMPL_x or IEM_MC_DEFER_TO_CIMPL_5_RET with IEM_CIMPL_F_STATUS_FLAGS '
1689 'or IEM_CIMPL_F_RFLAGS but has no flags in @opflmodify!', oGenerator);
1690 elif 'IEM_MC_REF_EFLAGS' not in dEflStmts:
1691 if not oInstruction.asFlTest:
1692 if oInstruction.sMnemonic not in [ 'not', ]:
1693 self.error('Expected @opfltest!', oGenerator);
1694 if oInstruction and oInstruction.asFlSet:
1695 for sFlag in oInstruction.asFlSet:
1696 if sFlag not in oInstruction.asFlModify:
1697 self.error('"%s" in @opflset but missing from @opflmodify (%s)!'
1698 % (sFlag, ', '.join(oInstruction.asFlModify)), oGenerator);
1699 if oInstruction and oInstruction.asFlClear:
1700 for sFlag in oInstruction.asFlClear:
1701 if sFlag not in oInstruction.asFlModify:
1702 self.error('"%s" in @opflclear but missing from @opflmodify (%s)!'
1703 % (sFlag, ', '.join(oInstruction.asFlModify)), oGenerator);
1704
1705 # Create variations as needed.
1706 if iai.McStmt.findStmtByNames(aoStmts,
1707 { 'IEM_MC_DEFER_TO_CIMPL_0_RET': True,
1708 'IEM_MC_DEFER_TO_CIMPL_1_RET': True,
1709 'IEM_MC_DEFER_TO_CIMPL_2_RET': True,
1710 'IEM_MC_DEFER_TO_CIMPL_3_RET': True, }):
1711 asVariations = (ThreadedFunctionVariation.ksVariation_Default,);
1712
1713 elif iai.McStmt.findStmtByNames(aoStmts, { 'IEM_MC_CALC_RM_EFF_ADDR' : True,
1714 'IEM_MC_FETCH_MEM_U8' : True, # mov_AL_Ob ++
1715 'IEM_MC_FETCH_MEM_U16' : True, # mov_rAX_Ov ++
1716 'IEM_MC_FETCH_MEM_U32' : True,
1717 'IEM_MC_FETCH_MEM_U64' : True,
1718 'IEM_MC_STORE_MEM_U8' : True, # mov_Ob_AL ++
1719 'IEM_MC_STORE_MEM_U16' : True, # mov_Ov_rAX ++
1720 'IEM_MC_STORE_MEM_U32' : True,
1721 'IEM_MC_STORE_MEM_U64' : True, }):
1722 if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
1723 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressOnly64;
1724 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
1725 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286Not64;
1726 elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
1727 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286;
1728 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
1729 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot64;
1730 elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
1731 asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
1732 else:
1733 asVariations = ThreadedFunctionVariation.kasVariationsWithAddress;
1734 else:
1735 if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
1736 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressOnly64;
1737 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
1738 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286Not64;
1739 elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
1740 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286;
1741 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
1742 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot64;
1743 elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
1744 asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
1745 else:
1746 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddress;
1747
1748 if ( 'IEM_CIMPL_F_BRANCH_CONDITIONAL' in self.dsCImplFlags
1749 and 'IEM_CIMPL_F_BRANCH_RELATIVE' in self.dsCImplFlags): # (latter to avoid iemOp_into)
1750 assert set(asVariations).issubset(ThreadedFunctionVariation.kasVariationsWithoutAddress), \
1751 '%s: vars=%s McFlags=%s' % (self.oMcBlock.oFunction.sName, asVariations, self.oMcBlock.dsMcFlags);
1752 asVariationsBase = asVariations;
1753 asVariations = [];
1754 for sVariation in asVariationsBase:
1755 asVariations.extend([sVariation + '_Jmp', sVariation + '_NoJmp']);
1756 assert set(asVariations).issubset(ThreadedFunctionVariation.kdVariationsWithConditional);
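# For instance, ksVariation_16 was just expanded into ksVariation_16_Jmp and ksVariation_16_NoJmp above.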
1757
1758 if not iai.McStmt.findStmtByNames(aoStmts,
1759 { 'IEM_MC_ADVANCE_RIP_AND_FINISH': True,
1760 'IEM_MC_REL_JMP_S8_AND_FINISH': True,
1761 'IEM_MC_REL_JMP_S16_AND_FINISH': True,
1762 'IEM_MC_REL_JMP_S32_AND_FINISH': True,
1763 'IEM_MC_SET_RIP_U16_AND_FINISH': True,
1764 'IEM_MC_SET_RIP_U32_AND_FINISH': True,
1765 'IEM_MC_SET_RIP_U64_AND_FINISH': True,
1766 }):
1767 asVariations = [sVariation for sVariation in asVariations
1768 if sVariation not in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing];
1769
1770 self.aoVariations = [ThreadedFunctionVariation(self, sVar) for sVar in asVariations];
1771
1772 # Dictionary variant of the list.
1773 self.dVariations = { oVar.sVariation: oVar for oVar in self.aoVariations };
1774
1775 # Continue the analysis on each variation.
1776 for oVariation in self.aoVariations:
1777 oVariation.analyzeVariation(aoStmts);
1778
1779 return True;
1780
1781 ## Used by emitThreadedCallStmts.
1782 kdVariationsWithNeedForPrefixCheck = {
1783 ThreadedFunctionVariation.ksVariation_64_Addr32: True,
1784 ThreadedFunctionVariation.ksVariation_64f_Addr32: True,
1785 ThreadedFunctionVariation.ksVariation_64_FsGs: True,
1786 ThreadedFunctionVariation.ksVariation_64f_FsGs: True,
1787 ThreadedFunctionVariation.ksVariation_32_Addr16: True,
1788 ThreadedFunctionVariation.ksVariation_32f_Addr16: True,
1789 ThreadedFunctionVariation.ksVariation_32_Flat: True,
1790 ThreadedFunctionVariation.ksVariation_32f_Flat: True,
1791 ThreadedFunctionVariation.ksVariation_16_Addr32: True,
1792 ThreadedFunctionVariation.ksVariation_16f_Addr32: True,
1793 };
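# For these variations the chosen case additionally depends on the effective address
# size and/or effective segment, which is why emitThreadedCallStmts() below folds the
# extra 8/16 bits into the switch value when any of them are present.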
1794
1795 def emitThreadedCallStmts(self, sBranch = None): # pylint: disable=too-many-statements
1796 """
1797 Worker for morphInputCode that returns a list of statements that emits
1798 the call to the threaded functions for the block.
1799
1800 The sBranch parameter is used with conditional branches where we'll emit
1801 different threaded calls depending on whether we're in the jump-taken or
1802 no-jump code path.
1803 """
1804 # Special case for only default variation:
1805 if len(self.aoVariations) == 1 and self.aoVariations[0].sVariation == ThreadedFunctionVariation.ksVariation_Default:
1806 assert not sBranch;
1807 return self.aoVariations[0].emitThreadedCallStmts(0);
1808
1809 #
1810 # Case statement sub-class.
1811 #
1812 dByVari = self.dVariations;
1813 #fDbg = self.oMcBlock.sFunction == 'iemOpCommonPushSReg';
1814 class Case:
1815 def __init__(self, sCond, sVarNm = None):
1816 self.sCond = sCond;
1817 self.sVarNm = sVarNm;
1818 self.oVar = dByVari[sVarNm] if sVarNm else None;
1819 self.aoBody = self.oVar.emitThreadedCallStmts(8) if sVarNm else None;
1820
1821 def toCode(self):
1822 aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
1823 if self.aoBody:
1824 aoStmts.extend(self.aoBody);
1825 aoStmts.append(iai.McCppGeneric('break;', cchIndent = 8));
1826 return aoStmts;
1827
1828 def toFunctionAssignment(self):
1829 aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
1830 if self.aoBody:
1831 aoStmts.extend([
1832 iai.McCppGeneric('enmFunction = %s;' % (self.oVar.getIndexName(),), cchIndent = 8),
1833 iai.McCppGeneric('break;', cchIndent = 8),
1834 ]);
1835 return aoStmts;
1836
1837 def isSame(self, oThat):
1838 if not self.aoBody: # fall thru always matches.
1839 return True;
1840 if len(self.aoBody) != len(oThat.aoBody):
1841 #if fDbg: print('dbg: body len diff: %s vs %s' % (len(self.aoBody), len(oThat.aoBody),));
1842 return False;
1843 for iStmt, oStmt in enumerate(self.aoBody):
1844 oThatStmt = oThat.aoBody[iStmt] # type: iai.McStmt
1845 assert isinstance(oStmt, iai.McCppGeneric);
1846 assert not isinstance(oStmt, iai.McStmtCond);
1847 if isinstance(oStmt, iai.McStmtCond):
1848 return False;
1849 if oStmt.sName != oThatStmt.sName:
1850 #if fDbg: print('dbg: stmt #%s name: %s vs %s' % (iStmt, oStmt.sName, oThatStmt.sName,));
1851 return False;
1852 if len(oStmt.asParams) != len(oThatStmt.asParams):
1853 #if fDbg: print('dbg: stmt #%s param count: %s vs %s'
1854 # % (iStmt, len(oStmt.asParams), len(oThatStmt.asParams),));
1855 return False;
1856 for iParam, sParam in enumerate(oStmt.asParams):
1857 if ( sParam != oThatStmt.asParams[iParam]
1858 and ( iParam != 1
1859 or not isinstance(oStmt, iai.McCppCall)
1860 or not oStmt.asParams[0].startswith('IEM_MC2_EMIT_CALL_')
1861 or sParam != self.oVar.getIndexName()
1862 or oThatStmt.asParams[iParam] != oThat.oVar.getIndexName() )):
1863 #if fDbg: print('dbg: stmt #%s, param #%s: %s vs %s'
1864 # % (iStmt, iParam, sParam, oThatStmt.asParams[iParam],));
1865 return False;
1866 return True;
1867
1868 #
1869 # Determine what we're switch on.
1870 # This ASSUMES that (IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7!
1871 #
1872 fSimple = True;
1873 sSwitchValue = '(pVCpu->iem.s.fExec & (IEM_F_MODE_CPUMODE_MASK | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK))';
1874 if dByVari.keys() & self.kdVariationsWithNeedForPrefixCheck.keys():
1875 sSwitchValue += ' | (pVCpu->iem.s.enmEffAddrMode == (pVCpu->iem.s.fExec & IEM_F_MODE_CPUMODE_MASK) ? 0 : 8)';
1876 # Accesses via FS, GS and CS go through the non-FLAT functions. (CS
1877 # is not writable in 32-bit mode (at least), thus the penalty mode
1878 # for any accesses via it (simpler this way).)
1879 sSwitchValue += ' | (pVCpu->iem.s.iEffSeg < X86_SREG_FS && pVCpu->iem.s.iEffSeg != X86_SREG_CS ? 0 : 16)';
1880 fSimple = False; # threaded functions.
1881 if dByVari.keys() & ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
1882 sSwitchValue += ' | ((pVCpu->iem.s.fTbPrevInstr & (IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_INHIBIT_SHADOW)) || ' \
1883 + '(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_MASK) ? 32 : 0)';
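# Rough layout of the resulting switch value (see the ASSUMES above; bits 3-5 are
# only ORed in when the corresponding variations are present):
#     bits 0-2: IEM_F_MODE_CPUMODE_MASK | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK
#     bit 3: effective address size differs from the CPU mode (prefix seen)
#     bit 4: effective segment is FS, GS or CS (non-flat penalty)
#     bit 5: eflags checking & clearing variation required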
1884
1885 #
1886 # Generate the case statements.
1887 #
1888 # pylintx: disable=x
1889 aoCases = [];
1890 if ThreadedFunctionVariation.ksVariation_64_Addr32 in dByVari:
1891 assert not fSimple and not sBranch;
1892 aoCases.extend([
1893 Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64),
1894 Case('IEMMODE_64BIT | 16', ThrdFnVar.ksVariation_64_FsGs),
1895 Case('IEMMODE_64BIT | 8 | 16', None), # fall thru
1896 Case('IEMMODE_64BIT | 8', ThrdFnVar.ksVariation_64_Addr32),
1897 ]);
1898 if ThreadedFunctionVariation.ksVariation_64f_Addr32 in dByVari:
1899 aoCases.extend([
1900 Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f),
1901 Case('IEMMODE_64BIT | 32 | 16', ThrdFnVar.ksVariation_64f_FsGs),
1902 Case('IEMMODE_64BIT | 32 | 8 | 16', None), # fall thru
1903 Case('IEMMODE_64BIT | 32 | 8', ThrdFnVar.ksVariation_64f_Addr32),
1904 ]);
1905 elif ThrdFnVar.ksVariation_64 in dByVari:
1906 assert fSimple and not sBranch;
1907 aoCases.append(Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64));
1908 if ThreadedFunctionVariation.ksVariation_64f in dByVari:
1909 aoCases.append(Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f));
1910 elif ThrdFnVar.ksVariation_64_Jmp in dByVari:
1911 assert fSimple and sBranch;
1912 aoCases.append(Case('IEMMODE_64BIT',
1913 ThrdFnVar.ksVariation_64_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64_NoJmp));
1914 if ThreadedFunctionVariation.ksVariation_64f_Jmp in dByVari:
1915 aoCases.append(Case('IEMMODE_64BIT | 32',
1916 ThrdFnVar.ksVariation_64f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64f_NoJmp));
1917
1918 if ThrdFnVar.ksVariation_32_Addr16 in dByVari:
1919 assert not fSimple and not sBranch;
1920 aoCases.extend([
1921 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_32_Flat),
1922 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None), # fall thru
1923 Case('IEMMODE_32BIT | 16', None), # fall thru
1924 Case('IEMMODE_32BIT', ThrdFnVar.ksVariation_32),
1925 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8', None), # fall thru
1926 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8 | 16',None), # fall thru
1927 Case('IEMMODE_32BIT | 8 | 16',None), # fall thru
1928 Case('IEMMODE_32BIT | 8', ThrdFnVar.ksVariation_32_Addr16),
1929 ]);
1930 if ThrdFnVar.ksVariation_32f_Addr16 in dByVari:
1931 aoCases.extend([
1932 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_32f_Flat),
1933 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None), # fall thru
1934 Case('IEMMODE_32BIT | 32 | 16', None), # fall thru
1935 Case('IEMMODE_32BIT | 32', ThrdFnVar.ksVariation_32f),
1936 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8', None), # fall thru
1937 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8 | 16',None), # fall thru
1938 Case('IEMMODE_32BIT | 32 | 8 | 16',None), # fall thru
1939 Case('IEMMODE_32BIT | 32 | 8', ThrdFnVar.ksVariation_32f_Addr16),
1940 ]);
1941 elif ThrdFnVar.ksVariation_32 in dByVari:
1942 assert fSimple and not sBranch;
1943 aoCases.extend([
1944 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', None), # fall thru
1945 Case('IEMMODE_32BIT', ThrdFnVar.ksVariation_32),
1946 ]);
1947 if ThrdFnVar.ksVariation_32f in dByVari:
1948 aoCases.extend([
1949 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', None), # fall thru
1950 Case('IEMMODE_32BIT | 32', ThrdFnVar.ksVariation_32f),
1951 ]);
1952 elif ThrdFnVar.ksVariation_32_Jmp in dByVari:
1953 assert fSimple and sBranch;
1954 aoCases.extend([
1955 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', None), # fall thru
1956 Case('IEMMODE_32BIT',
1957 ThrdFnVar.ksVariation_32_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32_NoJmp),
1958 ]);
1959 if ThrdFnVar.ksVariation_32f_Jmp in dByVari:
1960 aoCases.extend([
1961 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', None), # fall thru
1962 Case('IEMMODE_32BIT | 32',
1963 ThrdFnVar.ksVariation_32f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32f_NoJmp),
1964 ]);
1965
1966 if ThrdFnVar.ksVariation_16_Addr32 in dByVari:
1967 assert not fSimple and not sBranch;
1968 aoCases.extend([
1969 Case('IEMMODE_16BIT | 16', None), # fall thru
1970 Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16),
1971 Case('IEMMODE_16BIT | 8 | 16', None), # fall thru
1972 Case('IEMMODE_16BIT | 8', ThrdFnVar.ksVariation_16_Addr32),
1973 ]);
1974 if ThrdFnVar.ksVariation_16f_Addr32 in dByVari:
1975 aoCases.extend([
1976 Case('IEMMODE_16BIT | 32 | 16', None), # fall thru
1977 Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f),
1978 Case('IEMMODE_16BIT | 32 | 8 | 16', None), # fall thru
1979 Case('IEMMODE_16BIT | 32 | 8', ThrdFnVar.ksVariation_16f_Addr32),
1980 ]);
1981 elif ThrdFnVar.ksVariation_16 in dByVari:
1982 assert fSimple and not sBranch;
1983 aoCases.append(Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16));
1984 if ThrdFnVar.ksVariation_16f in dByVari:
1985 aoCases.append(Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f));
1986 elif ThrdFnVar.ksVariation_16_Jmp in dByVari:
1987 assert fSimple and sBranch;
1988 aoCases.append(Case('IEMMODE_16BIT',
1989 ThrdFnVar.ksVariation_16_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_16_NoJmp));
1990 if ThrdFnVar.ksVariation_16f_Jmp in dByVari:
1991 aoCases.append(Case('IEMMODE_16BIT | 32',
1992 ThrdFnVar.ksVariation_16f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_16f_NoJmp));
1993
1994
1995 if ThrdFnVar.ksVariation_16_Pre386 in dByVari:
1996 if not fSimple:
1997 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None)); # fall thru
1998 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_16_Pre386));
1999 if ThrdFnVar.ksVariation_16f_Pre386 in dByVari: # should be nested under previous if, but line too long.
2000 if not fSimple:
2001 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None)); # fall thru
2002 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_16f_Pre386));
2003
2004 if ThrdFnVar.ksVariation_16_Pre386_Jmp in dByVari:
2005 assert fSimple and sBranch;
2006 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK',
2007 ThrdFnVar.ksVariation_16_Pre386_Jmp if sBranch == 'Jmp'
2008 else ThrdFnVar.ksVariation_16_Pre386_NoJmp));
2009 if ThrdFnVar.ksVariation_16f_Pre386_Jmp in dByVari:
2010 assert fSimple and sBranch;
2011 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32',
2012 ThrdFnVar.ksVariation_16f_Pre386_Jmp if sBranch == 'Jmp'
2013 else ThrdFnVar.ksVariation_16f_Pre386_NoJmp));
2014
2015 #
2016 # If the case bodies are all the same, except for the function called,
2017 # we can reduce the code size and hopefully compile time.
2018 #
2019 iFirstCaseWithBody = 0;
2020 while not aoCases[iFirstCaseWithBody].aoBody:
2021 iFirstCaseWithBody += 1
2022 fAllSameCases = True
2023 for iCase in range(iFirstCaseWithBody + 1, len(aoCases)):
2024 fAllSameCases = fAllSameCases and aoCases[iCase].isSame(aoCases[iFirstCaseWithBody]);
2025 #if fDbg: print('fAllSameCases=%s %s' % (fAllSameCases, self.oMcBlock.sFunction,));
2026 if fAllSameCases:
2027 aoStmts = [
2028 iai.McCppGeneric('IEMTHREADEDFUNCS enmFunction;'),
2029 iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
2030 iai.McCppGeneric('{'),
2031 ];
2032 for oCase in aoCases:
2033 aoStmts.extend(oCase.toFunctionAssignment());
2034 aoStmts.extend([
2035 iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
2036 iai.McCppGeneric('}'),
2037 ]);
2038 aoStmts.extend(dByVari[aoCases[iFirstCaseWithBody].sVarNm].emitThreadedCallStmts(0, 'enmFunction'));
2039
2040 else:
2041 #
2042 # Generate the generic switch statement.
2043 #
2044 aoStmts = [
2045 iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
2046 iai.McCppGeneric('{'),
2047 ];
2048 for oCase in aoCases:
2049 aoStmts.extend(oCase.toCode());
2050 aoStmts.extend([
2051 iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
2052 iai.McCppGeneric('}'),
2053 ]);
2054
2055 return aoStmts;
2056
2057 def morphInputCode(self, aoStmts, fIsConditional = False, fCallEmitted = False, cDepth = 0, sBranchAnnotation = None):
2058 """
2059 Adjusts (& copies) the statements for the input/decoder so it will emit
2060 calls to the right threaded functions for each block.
2061
2062 Returns list/tree of statements (aoStmts is not modified) and updated
2063 fCallEmitted status.
2064 """
2065 #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
2066 aoDecoderStmts = [];
2067
2068 for iStmt, oStmt in enumerate(aoStmts):
2069 # Copy the statement. Make a deep copy to make sure we've got our own
2070 # copies of all instance variables, even if a bit overkill at the moment.
2071 oNewStmt = copy.deepcopy(oStmt);
2072 aoDecoderStmts.append(oNewStmt);
2073 #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));
2074 if oNewStmt.sName == 'IEM_MC_BEGIN' and self.dsCImplFlags:
2075 oNewStmt.asParams[3] = ' | '.join(sorted(self.dsCImplFlags.keys()));
2076
2077 # If we haven't emitted the threaded function call yet, look for
2078 # statements which it would naturally follow or precede.
2079 if not fCallEmitted:
2080 if not oStmt.isCppStmt():
2081 if ( oStmt.sName.startswith('IEM_MC_MAYBE_RAISE_') \
2082 or (oStmt.sName.endswith('_AND_FINISH') and oStmt.sName.startswith('IEM_MC_'))
2083 or oStmt.sName.startswith('IEM_MC_CALL_CIMPL_')
2084 or oStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_')
2085 or oStmt.sName in ('IEM_MC_RAISE_DIVIDE_ERROR',)):
2086 aoDecoderStmts.pop();
2087 if not fIsConditional:
2088 aoDecoderStmts.extend(self.emitThreadedCallStmts());
2089 elif oStmt.sName == 'IEM_MC_ADVANCE_RIP_AND_FINISH':
2090 aoDecoderStmts.extend(self.emitThreadedCallStmts('NoJmp'));
2091 else:
2092 assert oStmt.sName in { 'IEM_MC_REL_JMP_S8_AND_FINISH': True,
2093 'IEM_MC_REL_JMP_S16_AND_FINISH': True,
2094 'IEM_MC_REL_JMP_S32_AND_FINISH': True, };
2095 aoDecoderStmts.extend(self.emitThreadedCallStmts('Jmp'));
2096 aoDecoderStmts.append(oNewStmt);
2097 fCallEmitted = True;
2098
2099 elif iai.g_dMcStmtParsers[oStmt.sName][2]:
2100 # This is for Jmp/NoJmp with loopne and friends which modifies state other than RIP.
2101 if not sBranchAnnotation:
2102 self.raiseProblem('Modifying state before emitting calls! %s' % (oStmt.sName,));
2103 assert fIsConditional;
2104 aoDecoderStmts.pop();
2105 if sBranchAnnotation == g_ksFinishAnnotation_Advance:
2106 assert iai.McStmt.findStmtByNames(aoStmts[iStmt:], {'IEM_MC_ADVANCE_RIP_AND_FINISH':1,})
2107 aoDecoderStmts.extend(self.emitThreadedCallStmts('NoJmp'));
2108 elif sBranchAnnotation == g_ksFinishAnnotation_RelJmp:
2109 assert iai.McStmt.findStmtByNames(aoStmts[iStmt:],
2110 { 'IEM_MC_REL_JMP_S8_AND_FINISH': 1,
2111 'IEM_MC_REL_JMP_S16_AND_FINISH': 1,
2112 'IEM_MC_REL_JMP_S32_AND_FINISH': 1, });
2113 aoDecoderStmts.extend(self.emitThreadedCallStmts('Jmp'));
2114 else:
2115 self.raiseProblem('Modifying state before emitting calls! %s' % (oStmt.sName,));
2116 aoDecoderStmts.append(oNewStmt);
2117 fCallEmitted = True;
2118
2119 elif ( not fIsConditional
2120 and oStmt.fDecode
2121 and ( oStmt.asParams[0].find('IEMOP_HLP_DONE_') >= 0
2122 or oStmt.asParams[0].find('IEMOP_HLP_DECODED_') >= 0)):
2123 aoDecoderStmts.extend(self.emitThreadedCallStmts());
2124 fCallEmitted = True;
2125
2126 # Process branches of conditionals recursively.
2127 if isinstance(oStmt, iai.McStmtCond):
2128 (oNewStmt.aoIfBranch, fCallEmitted1) = self.morphInputCode(oStmt.aoIfBranch, fIsConditional,
2129 fCallEmitted, cDepth + 1, oStmt.oIfBranchAnnotation);
2130 if oStmt.aoElseBranch:
2131 (oNewStmt.aoElseBranch, fCallEmitted2) = self.morphInputCode(oStmt.aoElseBranch, fIsConditional,
2132 fCallEmitted, cDepth + 1,
2133 oStmt.oElseBranchAnnotation);
2134 else:
2135 fCallEmitted2 = False;
2136 fCallEmitted = fCallEmitted or (fCallEmitted1 and fCallEmitted2);
2137
2138 if not fCallEmitted and cDepth == 0:
2139 self.raiseProblem('Unable to insert call to threaded function.');
2140
2141 return (aoDecoderStmts, fCallEmitted);
2142
2143
2144 def generateInputCode(self):
2145 """
2146 Modifies the input code.
2147 """
2148 cchIndent = (self.oMcBlock.cchIndent + 3) // 4 * 4;
2149
2150 if len(self.oMcBlock.aoStmts) == 1:
2151 # IEM_MC_DEFER_TO_CIMPL_X_RET - need to wrap in {} to make it safe to insert into random code.
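# The generated replacement is roughly (illustrative):
#     {
#         pVCpu->iem.s.fTbCurInstr = <CIMPL flags or 0>;
#         <morphed statements emitting the threaded call> /* gen */
#         ...
#     }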
2152 sCode = ' ' * cchIndent + 'pVCpu->iem.s.fTbCurInstr = ';
2153 if self.dsCImplFlags:
2154 sCode += ' | '.join(sorted(self.dsCImplFlags.keys())) + ';\n';
2155 else:
2156 sCode += '0;\n';
2157 sCode += iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts)[0],
2158 cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
2159 sIndent = ' ' * (min(cchIndent, 2) - 2);
2160 sCode = sIndent + '{\n' + sCode + sIndent + '}\n';
2161 return sCode;
2162
2163 # IEM_MC_BEGIN/END block
2164 assert len(self.oMcBlock.asLines) > 2, "asLines=%s" % (self.oMcBlock.asLines,);
2165 fIsConditional = ( 'IEM_CIMPL_F_BRANCH_CONDITIONAL' in self.dsCImplFlags
2166 and 'IEM_CIMPL_F_BRANCH_RELATIVE' in self.dsCImplFlags); # (latter to avoid iemOp_into)
2167 return iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts, fIsConditional)[0],
2168 cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
2169
2170# Short alias for ThreadedFunctionVariation.
2171ThrdFnVar = ThreadedFunctionVariation;
2172
2173
2174class IEMThreadedGenerator(object):
2175 """
2176 The threaded code generator & annotator.
2177 """
2178
2179 def __init__(self):
2180 self.aoThreadedFuncs = [] # type: List[ThreadedFunction]
2181 self.oOptions = None # type: argparse.Namespace
2182 self.aoParsers = [] # type: List[IEMAllInstPython.SimpleParser]
2183 self.aidxFirstFunctions = [] # type: List[int] ##< Runs parallel to aoParsers giving the index of the first function.
2184 self.cErrors = 0;
2185
2186 #
2187 # Error reporting.
2188 #
2189
2190 def rawError(self, sCompleteMessage):
2191 """ Output a raw error and increment the error counter. """
2192 print(sCompleteMessage, file = sys.stderr);
2193 self.cErrors += 1;
2194 return False;
2195
2196 #
2197 # Processing.
2198 #
2199
2200 def processInputFiles(self, sHostArch, fNativeRecompilerEnabled):
2201 """
2202 Process the input files.
2203 """
2204
2205 # Parse the files.
2206 self.aoParsers = iai.parseFiles(self.oOptions.asInFiles, sHostArch);
2207
2208 # Create threaded functions for the MC blocks.
2209 self.aoThreadedFuncs = [ThreadedFunction(oMcBlock) for oMcBlock in iai.g_aoMcBlocks];
2210
2211 # Analyze the threaded functions.
2212 dRawParamCounts = {};
2213 dMinParamCounts = {};
2214 for oThreadedFunction in self.aoThreadedFuncs:
2215 oThreadedFunction.analyze(self);
2216 for oVariation in oThreadedFunction.aoVariations:
2217 dRawParamCounts[len(oVariation.dParamRefs)] = dRawParamCounts.get(len(oVariation.dParamRefs), 0) + 1;
2218 dMinParamCounts[oVariation.cMinParams] = dMinParamCounts.get(oVariation.cMinParams, 0) + 1;
2219 print('debug: param count distribution, raw and optimized:', file = sys.stderr);
2220 for cCount in sorted({cBits: True for cBits in list(dRawParamCounts.keys()) + list(dMinParamCounts.keys())}.keys()):
2221 print('debug: %s params: %4s raw, %4s min'
2222 % (cCount, dRawParamCounts.get(cCount, 0), dMinParamCounts.get(cCount, 0)),
2223 file = sys.stderr);
2224
2225 # Populate aidxFirstFunctions. This is ASSUMING that
2226 # g_aoMcBlocks/self.aoThreadedFuncs are in self.aoParsers order.
2227 iThreadedFunction = 0;
2228 oThreadedFunction = self.getThreadedFunctionByIndex(0);
2229 self.aidxFirstFunctions = [];
2230 for oParser in self.aoParsers:
2231 self.aidxFirstFunctions.append(iThreadedFunction);
2232
2233 while oThreadedFunction.oMcBlock.sSrcFile == oParser.sSrcFile:
2234 iThreadedFunction += 1;
2235 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
2236
2237 # Analyze the threaded functions and their variations for native recompilation.
2238 if fNativeRecompilerEnabled:
2239 ian.displayStatistics(self.aoThreadedFuncs, sHostArch);
2240
2241 # Gather arguments + variable statistics for the MC blocks.
2242 cMaxArgs = 0;
2243 cMaxVars = 0;
2244 cMaxVarsAndArgs = 0;
2245 cbMaxArgs = 0;
2246 cbMaxVars = 0;
2247 cbMaxVarsAndArgs = 0;
2248 for oThreadedFunction in self.aoThreadedFuncs:
2249 if oThreadedFunction.oMcBlock.cLocals >= 0:
2250 # Counts.
2251 assert oThreadedFunction.oMcBlock.cArgs >= 0;
2252 cMaxVars = max(cMaxVars, oThreadedFunction.oMcBlock.cLocals);
2253 cMaxArgs = max(cMaxArgs, oThreadedFunction.oMcBlock.cArgs);
2254 cMaxVarsAndArgs = max(cMaxVarsAndArgs, oThreadedFunction.oMcBlock.cLocals + oThreadedFunction.oMcBlock.cArgs);
2255 if cMaxVarsAndArgs > 9:
2256 raise Exception('%s potentially uses too many variables / args: %u, max 10 - %u vars and %u args'
2257 % (oThreadedFunction.oMcBlock.oFunction.sName, cMaxVarsAndArgs,
2258 oThreadedFunction.oMcBlock.cLocals, oThreadedFunction.oMcBlock.cArgs));
2259 # Calc stack allocation size:
2260 cbArgs = 0;
2261 for oArg in oThreadedFunction.oMcBlock.aoArgs:
2262 cbArgs += (getTypeBitCount(oArg.sType) + 63) // 64 * 8;
2263 cbVars = 0;
2264 for oVar in oThreadedFunction.oMcBlock.aoLocals:
2265 cbVars += (getTypeBitCount(oVar.sType) + 63) // 64 * 8;
2266 cbMaxVars = max(cbMaxVars, cbVars);
2267 cbMaxArgs = max(cbMaxArgs, cbArgs);
2268 cbMaxVarsAndArgs = max(cbMaxVarsAndArgs, cbVars + cbArgs);
2269 if cbMaxVarsAndArgs >= 0xc0:
2270 raise Exception('%s potentially uses too much stack: cbMaxVars=%#x cbMaxArgs=%#x'
2271 % (oThreadedFunction.oMcBlock.oFunction.sName, cbMaxVars, cbMaxArgs,));
2272
2273 print('debug: max vars+args: %u bytes / %u; max vars: %u bytes / %u; max args: %u bytes / %u'
2274 % (cbMaxVarsAndArgs, cMaxVarsAndArgs, cbMaxVars, cMaxVars, cbMaxArgs, cMaxArgs,), file = sys.stderr);
2275
2276 if self.cErrors > 0:
2277 print('fatal error: %u error%s during processing. Details above.'
2278 % (self.cErrors, 's' if self.cErrors > 1 else '',), file = sys.stderr);
2279 return False;
2280 return True;
2281
2282 #
2283 # Output
2284 #
2285
2286 def generateLicenseHeader(self):
2287 """
2288 Returns the lines for a license header.
2289 """
2290 return [
2291 '/*',
2292 ' * Autogenerated by $Id: IEMAllThrdPython.py 103214 2024-02-06 02:03:41Z vboxsync $ ',
2293 ' * Do not edit!',
2294 ' */',
2295 '',
2296 '/*',
2297 ' * Copyright (C) 2023-' + str(datetime.date.today().year) + ' Oracle and/or its affiliates.',
2298 ' *',
2299 ' * This file is part of VirtualBox base platform packages, as',
2300 ' * available from https://www.alldomusa.eu.org.',
2301 ' *',
2302 ' * This program is free software; you can redistribute it and/or',
2303 ' * modify it under the terms of the GNU General Public License',
2304 ' * as published by the Free Software Foundation, in version 3 of the',
2305 ' * License.',
2306 ' *',
2307 ' * This program is distributed in the hope that it will be useful, but',
2308 ' * WITHOUT ANY WARRANTY; without even the implied warranty of',
2309 ' * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU',
2310 ' * General Public License for more details.',
2311 ' *',
2312 ' * You should have received a copy of the GNU General Public License',
2313 ' * along with this program; if not, see <https://www.gnu.org/licenses>.',
2314 ' *',
2315 ' * The contents of this file may alternatively be used under the terms',
2316 ' * of the Common Development and Distribution License Version 1.0',
2317 ' * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included',
2318 ' * in the VirtualBox distribution, in which case the provisions of the',
2319 ' * CDDL are applicable instead of those of the GPL.',
2320 ' *',
2321 ' * You may elect to license modified versions of this file under the',
2322 ' * terms and conditions of either the GPL or the CDDL or both.',
2323 ' *',
2324 ' * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0',
2325 ' */',
2326 '',
2327 '',
2328 '',
2329 ];
2330
2331 ## List of built-in threaded functions with user argument counts and
2332 ## whether it has a native recompiler implementation.
2333 katBltIns = (
2334 ( 'Nop', 0, True ),
2335 ( 'LogCpuState', 0, True ),
2336
2337 ( 'DeferToCImpl0', 2, True ),
2338 ( 'CheckIrq', 0, True ),
2339 ( 'CheckMode', 1, True ),
2340 ( 'CheckHwInstrBps', 0, False ),
2341 ( 'CheckCsLim', 1, True ),
2342
2343 ( 'CheckCsLimAndOpcodes', 3, True ),
2344 ( 'CheckOpcodes', 3, True ),
2345 ( 'CheckOpcodesConsiderCsLim', 3, True ),
2346
2347 ( 'CheckCsLimAndPcAndOpcodes', 3, True ),
2348 ( 'CheckPcAndOpcodes', 3, True ),
2349 ( 'CheckPcAndOpcodesConsiderCsLim', 3, True ),
2350
2351 ( 'CheckCsLimAndOpcodesAcrossPageLoadingTlb', 3, True ),
2352 ( 'CheckOpcodesAcrossPageLoadingTlb', 3, True ),
2353 ( 'CheckOpcodesAcrossPageLoadingTlbConsiderCsLim', 2, True ),
2354
2355 ( 'CheckCsLimAndOpcodesLoadingTlb', 3, True ),
2356 ( 'CheckOpcodesLoadingTlb', 3, True ),
2357 ( 'CheckOpcodesLoadingTlbConsiderCsLim', 3, True ),
2358
2359 ( 'CheckCsLimAndOpcodesOnNextPageLoadingTlb', 2, True ),
2360 ( 'CheckOpcodesOnNextPageLoadingTlb', 2, True ),
2361 ( 'CheckOpcodesOnNextPageLoadingTlbConsiderCsLim', 2, True ),
2362
2363 ( 'CheckCsLimAndOpcodesOnNewPageLoadingTlb', 2, True ),
2364 ( 'CheckOpcodesOnNewPageLoadingTlb', 2, True ),
2365 ( 'CheckOpcodesOnNewPageLoadingTlbConsiderCsLim', 2, True ),
2366 );
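# Each entry above expands into a kIemThreadedFunc_BltIn_<name> enum value, an
# iemThreadedFunc_BltIn_<name> function table entry, a "BltIn_<name>" name string and an
# argument count in the generators below; entries with the last element set to True also
# get iemNativeRecompFunc_BltIn_<name> / iemNativeLivenessFunc_BltIn_<name> prototypes.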
2367
2368 def generateThreadedFunctionsHeader(self, oOut):
2369 """
2370 Generates the threaded functions header file.
2371 Returns success indicator.
2372 """
2373
2374 asLines = self.generateLicenseHeader();
2375
2376 # Generate the threaded function table indexes.
2377 asLines += [
2378 'typedef enum IEMTHREADEDFUNCS',
2379 '{',
2380 ' kIemThreadedFunc_Invalid = 0,',
2381 '',
2382 ' /*',
2383 ' * Predefined',
2384 ' */',
2385 ];
2386 asLines += [' kIemThreadedFunc_BltIn_%s,' % (sFuncNm,) for sFuncNm, _, _ in self.katBltIns];
2387
2388 iThreadedFunction = 1 + len(self.katBltIns);
2389 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2390 asLines += [
2391 '',
2392 ' /*',
2393 ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '',
2394 ' */',
2395 ];
2396 for oThreadedFunction in self.aoThreadedFuncs:
2397 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2398 if oVariation:
2399 iThreadedFunction += 1;
2400 oVariation.iEnumValue = iThreadedFunction;
2401 asLines.append(' ' + oVariation.getIndexName() + ',');
2402 asLines += [
2403 ' kIemThreadedFunc_End',
2404 '} IEMTHREADEDFUNCS;',
2405 '',
2406 ];
2407
2408 # Prototype the function table.
2409 asLines += [
2410 'extern const PFNIEMTHREADEDFUNC g_apfnIemThreadedFunctions[kIemThreadedFunc_End];',
2411 '#if defined(IN_RING3) || defined(LOG_ENABLED)',
2412 'extern const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End];',
2413 '#endif',
2414 'extern uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End];',
2415 ];
2416
2417 oOut.write('\n'.join(asLines));
2418 return True;
2419
2420 ksBitsToIntMask = {
2421 1: "UINT64_C(0x1)",
2422 2: "UINT64_C(0x3)",
2423 4: "UINT64_C(0xf)",
2424 8: "UINT64_C(0xff)",
2425 16: "UINT64_C(0xffff)",
2426 32: "UINT64_C(0xffffffff)",
2427 };
2428
2429 def generateFunctionParameterUnpacking(self, oVariation, oOut, asParams):
2430 """
2431 Outputs code for unpacking parameters.
2432 This is shared by the threaded and native code generators.
2433 """
2434 aasVars = [];
2435 for aoRefs in oVariation.dParamRefs.values():
2436 oRef = aoRefs[0];
2437 if oRef.sType[0] != 'P':
2438 cBits = g_kdTypeInfo[oRef.sType][0];
2439 sType = g_kdTypeInfo[oRef.sType][2];
2440 else:
2441 cBits = 64;
2442 sType = oRef.sType;
2443
2444 sTypeDecl = sType + ' const';
2445
2446 if cBits == 64:
2447 assert oRef.offNewParam == 0;
2448 if sType == 'uint64_t':
2449 sUnpack = '%s;' % (asParams[oRef.iNewParam],);
2450 else:
2451 sUnpack = '(%s)%s;' % (sType, asParams[oRef.iNewParam],);
2452 elif oRef.offNewParam == 0:
2453 sUnpack = '(%s)(%s & %s);' % (sType, asParams[oRef.iNewParam], self.ksBitsToIntMask[cBits]);
2454 else:
2455 sUnpack = '(%s)((%s >> %s) & %s);' \
2456 % (sType, asParams[oRef.iNewParam], oRef.offNewParam, self.ksBitsToIntMask[cBits]);
2457
2458 sComment = '/* %s - %s ref%s */' % (oRef.sOrgRef, len(aoRefs), 's' if len(aoRefs) != 1 else '',);
2459
2460 aasVars.append([ '%s:%02u' % (oRef.iNewParam, oRef.offNewParam),
2461 sTypeDecl, oRef.sNewName, sUnpack, sComment ]);
2462 acchVars = [0, 0, 0, 0, 0];
2463 for asVar in aasVars:
2464 for iCol, sStr in enumerate(asVar):
2465 acchVars[iCol] = max(acchVars[iCol], len(sStr));
2466 sFmt = ' %%-%ss %%-%ss = %%-%ss %%s\n' % (acchVars[1], acchVars[2], acchVars[3]);
2467 for asVar in sorted(aasVars):
2468 oOut.write(sFmt % (asVar[1], asVar[2], asVar[3], asVar[4],));
2469 return True;
2470
2471 kasThreadedParamNames = ('uParam0', 'uParam1', 'uParam2');
2472 def generateThreadedFunctionsSource(self, oOut):
2473 """
2474 Generates the threaded functions source file.
2475 Returns success indicator.
2476 """
2477
2478 asLines = self.generateLicenseHeader();
2479 oOut.write('\n'.join(asLines));
2480
2481 #
2482 # Emit the function definitions.
2483 #
2484 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2485 sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
2486 oOut.write( '\n'
2487 + '\n'
2488 + '\n'
2489 + '\n'
2490 + '/*' + '*' * 128 + '\n'
2491 + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
2492 + '*' * 128 + '*/\n');
2493
2494 for oThreadedFunction in self.aoThreadedFuncs:
2495 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2496 if oVariation:
2497 oMcBlock = oThreadedFunction.oMcBlock;
2498
2499 # Function header
2500 oOut.write( '\n'
2501 + '\n'
2502 + '/**\n'
2503 + ' * #%u: %s at line %s offset %s in %s%s\n'
2504 % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
2505 os.path.split(oMcBlock.sSrcFile)[1],
2506 ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
2507 + ' */\n'
2508 + 'static IEM_DECL_IEMTHREADEDFUNC_DEF(' + oVariation.getThreadedFunctionName() + ')\n'
2509 + '{\n');
2510
2511 # Unpack parameters.
2512 self.generateFunctionParameterUnpacking(oVariation, oOut, self.kasThreadedParamNames);
2513
2514 # RT_NOREF for unused parameters.
2515 if oVariation.cMinParams < g_kcThreadedParams:
2516 oOut.write(' RT_NOREF(' + ', '.join(self.kasThreadedParamNames[oVariation.cMinParams:]) + ');\n');
2517
2518 # Now for the actual statements.
2519 oOut.write(iai.McStmt.renderCodeForList(oVariation.aoStmtsForThreadedFunction, cchIndent = 4));
2520
2521 oOut.write('}\n');
2522
2523
2524 #
2525 # Generate the output tables in parallel.
2526 #
2527 asFuncTable = [
2528 '/**',
2529 ' * Function pointer table.',
2530 ' */',
2531 'PFNIEMTHREADEDFUNC const g_apfnIemThreadedFunctions[kIemThreadedFunc_End] =',
2532 '{',
2533 ' /*Invalid*/ NULL,',
2534 ];
2535 asNameTable = [
2536 '/**',
2537 ' * Function name table.',
2538 ' */',
2539 'const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End] =',
2540 '{',
2541 ' "Invalid",',
2542 ];
2543 asArgCntTab = [
2544 '/**',
2545 ' * Argument count table.',
2546 ' */',
2547 'uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End] =',
2548 '{',
2549 ' 0, /*Invalid*/',
2550 ];
2551 aasTables = (asFuncTable, asNameTable, asArgCntTab,);
2552
2553 for asTable in aasTables:
2554 asTable.extend((
2555 '',
2556 ' /*',
2557 ' * Predefined.',
2558 ' */',
2559 ));
2560 for sFuncNm, cArgs, _ in self.katBltIns:
2561 asFuncTable.append(' iemThreadedFunc_BltIn_%s,' % (sFuncNm,));
2562 asNameTable.append(' "BltIn_%s",' % (sFuncNm,));
2563 asArgCntTab.append(' %d, /*BltIn_%s*/' % (cArgs, sFuncNm,));
2564
2565 iThreadedFunction = 1 + len(self.katBltIns);
2566 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2567 for asTable in aasTables:
2568 asTable.extend((
2569 '',
2570 ' /*',
2571 ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation],
2572 ' */',
2573 ));
2574 for oThreadedFunction in self.aoThreadedFuncs:
2575 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2576 if oVariation:
2577 iThreadedFunction += 1;
2578 assert oVariation.iEnumValue == iThreadedFunction;
2579 sName = oVariation.getThreadedFunctionName();
2580 asFuncTable.append(' /*%4u*/ %s,' % (iThreadedFunction, sName,));
2581 asNameTable.append(' /*%4u*/ "%s",' % (iThreadedFunction, sName,));
2582 asArgCntTab.append(' /*%4u*/ %d, /*%s*/' % (iThreadedFunction, oVariation.cMinParams, sName,));
2583
2584 for asTable in aasTables:
2585 asTable.append('};');
2586
2587 #
2588 # Output the tables.
2589 #
2590 oOut.write( '\n'
2591 + '\n');
2592 oOut.write('\n'.join(asFuncTable));
2593 oOut.write( '\n'
2594 + '\n'
2595 + '\n'
2596 + '#if defined(IN_RING3) || defined(LOG_ENABLED)\n');
2597 oOut.write('\n'.join(asNameTable));
2598 oOut.write( '\n'
2599 + '#endif /* IN_RING3 || LOG_ENABLED */\n'
2600 + '\n'
2601 + '\n');
2602 oOut.write('\n'.join(asArgCntTab));
2603 oOut.write('\n');
2604
2605 return True;
2606
2607 def generateNativeFunctionsHeader(self, oOut):
2608 """
2609 Generates the native recompiler functions header file.
2610 Returns success indicator.
2611 """
2612 if not self.oOptions.fNativeRecompilerEnabled:
2613 return True;
2614
2615 asLines = self.generateLicenseHeader();
2616
2617 # Prototype the function table.
2618 asLines += [
2619 'extern const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End];',
2620 'extern const PFNIEMNATIVELIVENESSFUNC g_apfnIemNativeLivenessFunctions[kIemThreadedFunc_End];',
2621 '',
2622 ];
2623
2624 # Emit indicators as to which of the builtin functions have a native
2625 # recompiler function and which do not. (We only really need this for
2626 # kIemThreadedFunc_BltIn_CheckMode, but do all just for simplicity.)
2627 for atBltIn in self.katBltIns:
2628 if atBltIn[1]:
2629 asLines.append('#define IEMNATIVE_WITH_BLTIN_' + atBltIn[0].upper());
2630 else:
2631 asLines.append('#define IEMNATIVE_WITHOUT_BLTIN_' + atBltIn[0].upper());
2632
2633 # Emit prototypes for the builtin functions we use in tables.
2634 asLines += [
2635 '',
2636 '/* Prototypes for built-in functions used in the above tables. */',
2637 ];
2638 for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
2639 if fHaveRecompFunc:
2640 asLines += [
2641 'IEM_DECL_IEMNATIVERECOMPFUNC_PROTO( iemNativeRecompFunc_BltIn_%s);' % (sFuncNm,),
2642 'IEM_DECL_IEMNATIVELIVENESSFUNC_PROTO(iemNativeLivenessFunc_BltIn_%s);' % (sFuncNm,),
2643 ];
2644
2645 oOut.write('\n'.join(asLines));
2646 return True;
2647
2648 def generateNativeFunctionsSource(self, oOut):
2649 """
2650 Generates the native recompiler functions source file.
2651 Returns success indicator.
2652 """
2653 if not self.oOptions.fNativeRecompilerEnabled:
2654 return True;
2655
2656 #
2657 # The file header.
2658 #
2659 oOut.write('\n'.join(self.generateLicenseHeader()));
2660
2661 #
2662 # Emit the functions.
2663 #
2664 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2665 sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
2666 oOut.write( '\n'
2667 + '\n'
2668 + '\n'
2669 + '\n'
2670 + '/*' + '*' * 128 + '\n'
2671 + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
2672 + '*' * 128 + '*/\n');
2673
2674 for oThreadedFunction in self.aoThreadedFuncs:
2675 oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
2676 if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
2677 oMcBlock = oThreadedFunction.oMcBlock;
2678
2679 # Function header
2680 oOut.write( '\n'
2681 + '\n'
2682 + '/**\n'
2683 + ' * #%u: %s at line %s offset %s in %s%s\n'
2684 % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
2685 os.path.split(oMcBlock.sSrcFile)[1],
2686 ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
2687 + ' */\n'
2688 + 'static IEM_DECL_IEMNATIVERECOMPFUNC_DEF(' + oVariation.getNativeFunctionName() + ')\n'
2689 + '{\n');
2690
2691 # Unpack parameters.
2692 self.generateFunctionParameterUnpacking(oVariation, oOut,
2693 ('pCallEntry->auParams[0]',
2694 'pCallEntry->auParams[1]',
2695 'pCallEntry->auParams[2]',));
2696
2697 # Now for the actual statements.
2698 oOut.write(oVariation.oNativeRecomp.renderCode(cchIndent = 4));
2699
2700 oOut.write('}\n');
2701
2702 #
2703 # Output the function table.
2704 #
2705 oOut.write( '\n'
2706 + '\n'
2707 + '/*\n'
2708 + ' * Function table running parallel to g_apfnIemThreadedFunctions and friends.\n'
2709 + ' */\n'
2710 + 'const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End] =\n'
2711 + '{\n'
2712 + ' /*Invalid*/ NULL,'
2713 + '\n'
2714 + ' /*\n'
2715 + ' * Predefined.\n'
2716 + ' */\n'
2717 );
2718 for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
2719 if fHaveRecompFunc:
2720 oOut.write(' iemNativeRecompFunc_BltIn_%s,\n' % (sFuncNm,))
2721 else:
2722 oOut.write(' NULL, /*BltIn_%s*/\n' % (sFuncNm,))
2723
2724 iThreadedFunction = 1 + len(self.katBltIns);
2725 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2726 oOut.write( ' /*\n'
2727 + ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '\n'
2728 + ' */\n');
2729 for oThreadedFunction in self.aoThreadedFuncs:
2730 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2731 if oVariation:
2732 iThreadedFunction += 1;
2733 assert oVariation.iEnumValue == iThreadedFunction;
2734 sName = oVariation.getNativeFunctionName();
2735 if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
2736 oOut.write(' /*%4u*/ %s,\n' % (iThreadedFunction, sName,));
2737 else:
2738 oOut.write(' /*%4u*/ NULL /*%s*/,\n' % (iThreadedFunction, sName,));
2739
2740 oOut.write( '};\n'
2741 + '\n');
2742 return True;
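
The table emitted above deliberately stays entry-for-entry compatible with g_apfnIemThreadedFunctions: variations without a usable native recompiler still occupy their slot, but as NULL, presumably so the consumer can fall back to the threaded function for that entry. Below is a small sketch of that NULL-padded emission pattern under those assumptions; the helper name, table name and entries are hypothetical.

# Hypothetical sketch: emit a NULL-padded dispatch table that stays index-compatible
# with the threaded-function table; names and layout here are illustrative only.
def emit_recomp_table(aoEntries):
    """aoEntries: iterable of (index, native function name or None)."""
    asLines = ['const PFNIEMNATIVERECOMPFUNC g_apfnExampleTable[] =', '{', '    /*Invalid*/ NULL,'];
    for idx, sNativeName in aoEntries:
        if sNativeName:
            asLines.append('    /*%4u*/ %s,' % (idx, sNativeName,));
        else:
            # NULL keeps the slot; presumably the caller then sticks with the threaded function.
            asLines.append('    /*%4u*/ NULL,' % (idx,));
    asLines.append('};');
    return '\n'.join(asLines) + '\n';

print(emit_recomp_table([(1, 'iemNativeRecompFunc_BltIn_CheckMode'), (2, None)]));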
2743
2744 def generateNativeLivenessSource(self, oOut):
2745 """
2746 Generates the native recompiler liveness analysis functions source file.
2747 Returns success indicator.
2748 """
2749 if not self.oOptions.fNativeRecompilerEnabled:
2750 return True;
2751
2752 #
2753 # The file header.
2754 #
2755 oOut.write('\n'.join(self.generateLicenseHeader()));
2756
2757 #
2758 # Emit the functions.
2759 #
2760 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2761 sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
2762 oOut.write( '\n'
2763 + '\n'
2764 + '\n'
2765 + '\n'
2766 + '/*' + '*' * 128 + '\n'
2767 + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
2768 + '*' * 128 + '*/\n');
2769
2770 for oThreadedFunction in self.aoThreadedFuncs:
2771 oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
2772 if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
2773 oMcBlock = oThreadedFunction.oMcBlock;
2774
2775 # Function header
2776 oOut.write( '\n'
2777 + '\n'
2778 + '/**\n'
2779 + ' * #%u: %s at line %s offset %s in %s%s\n'
2780 % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
2781 os.path.split(oMcBlock.sSrcFile)[1],
2782 ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
2783 + ' */\n'
2784 + 'static IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(' + oVariation.getLivenessFunctionName() + ')\n'
2785 + '{\n');
2786
2787 # Unpack parameters.
2788 self.generateFunctionParameterUnpacking(oVariation, oOut,
2789 ('pCallEntry->auParams[0]',
2790 'pCallEntry->auParams[1]',
2791 'pCallEntry->auParams[2]',));
2792 asNoRefs = []; #[ 'RT_NOREF_PV(pReNative);', ];
2793 for aoRefs in oVariation.dParamRefs.values():
2794 asNoRefs.append('RT_NOREF_PV(%s);' % (aoRefs[0].sNewName,));
2795 oOut.write(' %s\n' % (' '.join(asNoRefs),));
2796
2797 # Now for the actual statements.
2798 oOut.write(oVariation.oNativeRecomp.renderCode(cchIndent = 4));
2799
2800 oOut.write('}\n');
2801
2802 #
2803 # Output the function table.
2804 #
2805 oOut.write( '\n'
2806 + '\n'
2807 + '/*\n'
2808 + ' * Liveness analysis function table running parallel to g_apfnIemThreadedFunctions and friends.\n'
2809 + ' */\n'
2810 + 'const PFNIEMNATIVELIVENESSFUNC g_apfnIemNativeLivenessFunctions[kIemThreadedFunc_End] =\n'
2811 + '{\n'
2812 + ' /*Invalid*/ NULL,'
2813 + '\n'
2814 + ' /*\n'
2815 + ' * Predefined.\n'
2816 + ' */\n'
2817 );
2818 for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
2819 if fHaveRecompFunc:
2820 oOut.write(' iemNativeLivenessFunc_BltIn_%s,\n' % (sFuncNm,))
2821 else:
2822 oOut.write(' NULL, /*BltIn_%s*/\n' % (sFuncNm,))
2823
2824 iThreadedFunction = 1 + len(self.katBltIns);
2825 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2826 oOut.write( ' /*\n'
2827 + ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '\n'
2828 + ' */\n');
2829 for oThreadedFunction in self.aoThreadedFuncs:
2830 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2831 if oVariation:
2832 iThreadedFunction += 1;
2833 assert oVariation.iEnumValue == iThreadedFunction;
2834 sName = oVariation.getLivenessFunctionName();
2835 if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
2836 oOut.write(' /*%4u*/ %s,\n' % (iThreadedFunction, sName,));
2837 else:
2838 oOut.write(' /*%4u*/ NULL /*%s*/,\n' % (iThreadedFunction, sName,));
2839
2840 oOut.write( '};\n'
2841 + '\n');
2842 return True;
2843
2844
2845 def getThreadedFunctionByIndex(self, idx):
2846 """
2847 Returns a ThreadedFunction object for the given index. If the index is
2848 out of bounds, a dummy is returned.
2849 """
2850 if idx < len(self.aoThreadedFuncs):
2851 return self.aoThreadedFuncs[idx];
2852 return ThreadedFunction.dummyInstance();
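
getThreadedFunctionByIndex() never returns None: once the index runs past the last threaded function it hands back ThreadedFunction.dummyInstance(), so the splicing loop in generateModifiedInput() below can read oMcBlock.iBeginLine/iEndLine unconditionally. A generic null-object sketch of that idea follows; the class and field values are hypothetical, though a plausible reading is that the real dummy's MC block likewise never matches a real source line.

# Illustrative null-object sketch with hypothetical names.
class _DummyMcBlock:
    iBeginLine = -1;        # never equals a real, 1-based source line number
    iEndLine   = -1;
    sSrcFile   = '';

class _DummyThreadedFunction:
    oMcBlock = _DummyMcBlock();

def get_by_index(aoFuncs, idx, oDummy = _DummyThreadedFunction()):
    """Like getThreadedFunctionByIndex: never returns None, so callers skip bounds checks."""
    return aoFuncs[idx] if idx < len(aoFuncs) else oDummy;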
2853
2854 def generateModifiedInput(self, oOut, idxFile):
2855 """
2856 Generates the combined modified input source/header file.
2857 Returns success indicator.
2858 """
2859 #
2860 # File header and assert assumptions.
2861 #
2862 oOut.write('\n'.join(self.generateLicenseHeader()));
2863 oOut.write('AssertCompile((IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7);\n');
2864
2865 #
2866 # Iterate all parsers (input files) and output the ones related to the
2867 # file set given by idxFile.
2868 #
2869 for idxParser, oParser in enumerate(self.aoParsers): # type: int, IEMAllInstPython.SimpleParser
2870 # Is this included in the file set?
2871 sSrcBaseFile = os.path.basename(oParser.sSrcFile).lower();
2872 fInclude = -1;
2873 for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet:
2874 if sSrcBaseFile == aoInfo[0].lower():
2875 fInclude = aoInfo[2] in (-1, idxFile);
2876 break;
2877 if fInclude is not True:
2878 assert fInclude is False;
2879 continue;
2880
2881 # Output it.
2882 oOut.write("\n\n/* ****** BEGIN %s ******* */\n" % (oParser.sSrcFile,));
2883
2884 iThreadedFunction = self.aidxFirstFunctions[idxParser];
2885 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
2886 iLine = 0;
2887 while iLine < len(oParser.asLines):
2888 sLine = oParser.asLines[iLine];
2889 iLine += 1; # iBeginLine and iEndLine are 1-based.
2890
2891 # Can we pass it thru?
2892 if ( iLine not in [oThreadedFunction.oMcBlock.iBeginLine, oThreadedFunction.oMcBlock.iEndLine]
2893 or oThreadedFunction.oMcBlock.sSrcFile != oParser.sSrcFile):
2894 oOut.write(sLine);
2895 #
2896 # Single MC block. Just extract it and insert the replacement.
2897 #
2898 elif oThreadedFunction.oMcBlock.iBeginLine != oThreadedFunction.oMcBlock.iEndLine:
2899 assert ( (sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1)
2900 or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial), 'sLine="%s"' % (sLine,);
2901 oOut.write(sLine[:oThreadedFunction.oMcBlock.offBeginLine]);
2902 sModified = oThreadedFunction.generateInputCode().strip();
2903 oOut.write(sModified);
2904
2905 iLine = oThreadedFunction.oMcBlock.iEndLine;
2906 sLine = oParser.asLines[iLine - 1];
2907 assert ( sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1
2908 or len(oThreadedFunction.oMcBlock.aoStmts) == 1
2909 or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial);
2910 oOut.write(sLine[oThreadedFunction.oMcBlock.offAfterEnd : ]);
2911
2912 # Advance
2913 iThreadedFunction += 1;
2914 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
2915 #
2916 # Macro expansion line that has sublines and may contain multiple MC blocks.
2917 #
2918 else:
2919 offLine = 0;
2920 while iLine == oThreadedFunction.oMcBlock.iBeginLine:
2921 oOut.write(sLine[offLine : oThreadedFunction.oMcBlock.offBeginLine]);
2922
2923 sModified = oThreadedFunction.generateInputCode().strip();
2924 assert ( sModified.startswith('IEM_MC_BEGIN')
2925 or (sModified.find('IEM_MC_DEFER_TO_CIMPL_') > 0 and sModified.strip().startswith('{\n'))
2926 or sModified.startswith('pVCpu->iem.s.fEndTb = true')
2927 or sModified.startswith('pVCpu->iem.s.fTbCurInstr = ')
2928 ), 'sModified="%s"' % (sModified,);
2929 oOut.write(sModified);
2930
2931 offLine = oThreadedFunction.oMcBlock.offAfterEnd;
2932
2933 # Advance
2934 iThreadedFunction += 1;
2935 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
2936
2937 # Last line segment.
2938 if offLine < len(sLine):
2939 oOut.write(sLine[offLine : ]);
2940
2941 oOut.write("/* ****** END %s ******* */\n" % (oParser.sSrcFile,));
2942
2943 return True;
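
generateModifiedInput() splices the generated replacements into the original source purely by coordinates: each MC block records 1-based begin/end lines plus character offsets (offBeginLine, offAfterEnd), and everything outside those spans is copied through verbatim. The sketch below shows the multi-line single-block case in isolation, with a simplified (iBeginLine, offBeginLine, iEndLine, offAfterEnd) tuple standing in for an McBlock; names and the demo input are made up.

# Hypothetical sketch of coordinate-based splicing for one block spanning several lines.
def splice_block(asLines, tBlock, sReplacement):
    iBeginLine, offBegin, iEndLine, offAfterEnd = tBlock;   # line numbers 1-based, offsets 0-based
    asOut = [];
    asOut.extend(asLines[:iBeginLine - 1]);                 # untouched prefix lines
    asOut.append(asLines[iBeginLine - 1][:offBegin]         # text before the block on its first line
                 + sReplacement
                 + asLines[iEndLine - 1][offAfterEnd:]);     # text after the block on its last line
    asOut.extend(asLines[iEndLine:]);                        # untouched suffix lines
    return asOut;

# Example: replace from (line 2, offset 4) up to (line 3, offset 10) with a placeholder body.
asDemo = ['int a;\n', 'foo(IEM_MC_BEGIN\n', 'IEM_MC_END);\n', 'int b;\n'];
print(''.join(splice_block(asDemo, (2, 4, 3, 10), '<generated body>')));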
2944
2945 def generateModifiedInput1(self, oOut):
2946 """
2947 Generates the combined modified input source/header file, part 1.
2948 Returns success indicator.
2949 """
2950 return self.generateModifiedInput(oOut, 1);
2951
2952 def generateModifiedInput2(self, oOut):
2953 """
2954 Generates the combined modified input source/header file, part 2.
2955 Returns success indicator.
2956 """
2957 return self.generateModifiedInput(oOut, 2);
2958
2959 def generateModifiedInput3(self, oOut):
2960 """
2961 Generates the combined modified input source/header file, part 3.
2962 Returns success indicator.
2963 """
2964 return self.generateModifiedInput(oOut, 3);
2965
2966 def generateModifiedInput4(self, oOut):
2967 """
2968 Generates the combined modified input source/header file, part 4.
2969 Returns success indicator.
2970 """
2971 return self.generateModifiedInput(oOut, 4);
2972
2973
2974 #
2975 # Main
2976 #
2977
2978 def main(self, asArgs):
2979 """
2980 C-like main function.
2981 Returns exit code.
2982 """
2983
2984 #
2985 # Parse arguments
2986 #
2987 sScriptDir = os.path.dirname(__file__);
2988 oParser = argparse.ArgumentParser(add_help = False);
2989 oParser.add_argument('asInFiles',
2990 metavar = 'input.cpp.h',
2991 nargs = '*',
2992 default = [os.path.join(sScriptDir, aoInfo[0])
2993 for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet],
2994 help = "Selection of VMMAll/IEMAllInst*.cpp.h files to use as input.");
2995 oParser.add_argument('--host-arch',
2996 metavar = 'arch',
2997 dest = 'sHostArch',
2998 action = 'store',
2999 default = None,
3000 help = 'The host architecture.');
3001
3002 oParser.add_argument('--out-thrd-funcs-hdr',
3003 metavar = 'file-thrd-funcs.h',
3004 dest = 'sOutFileThrdFuncsHdr',
3005 action = 'store',
3006 default = '-',
3007 help = 'The output header file for the threaded functions.');
3008 oParser.add_argument('--out-thrd-funcs-cpp',
3009 metavar = 'file-thrd-funcs.cpp',
3010 dest = 'sOutFileThrdFuncsCpp',
3011 action = 'store',
3012 default = '-',
3013 help = 'The output C++ file for the threaded functions.');
3014 oParser.add_argument('--out-n8ve-funcs-hdr',
3015 metavar = 'file-n8tv-funcs.h',
3016 dest = 'sOutFileN8veFuncsHdr',
3017 action = 'store',
3018 default = '-',
3019 help = 'The output header file for the native recompiler functions.');
3020 oParser.add_argument('--out-n8ve-funcs-cpp',
3021 metavar = 'file-n8tv-funcs.cpp',
3022 dest = 'sOutFileN8veFuncsCpp',
3023 action = 'store',
3024 default = '-',
3025 help = 'The output C++ file for the native recompiler functions.');
3026 oParser.add_argument('--out-n8ve-liveness-cpp',
3027 metavar = 'file-n8tv-liveness.cpp',
3028 dest = 'sOutFileN8veLivenessCpp',
3029 action = 'store',
3030 default = '-',
3031 help = 'The output C++ file for the native recompiler liveness analysis functions.');
3032 oParser.add_argument('--native',
3033 dest = 'fNativeRecompilerEnabled',
3034 action = 'store_true',
3035 default = False,
3036 help = 'Enables generating the files related to native recompilation.');
3037 oParser.add_argument('--out-mod-input1',
3038 metavar = 'file-instr.cpp.h',
3039 dest = 'sOutFileModInput1',
3040 action = 'store',
3041 default = '-',
3042 help = 'The output C++/header file for modified input instruction files part 1.');
3043 oParser.add_argument('--out-mod-input2',
3044 metavar = 'file-instr.cpp.h',
3045 dest = 'sOutFileModInput2',
3046 action = 'store',
3047 default = '-',
3048 help = 'The output C++/header file for modified input instruction files part 2.');
3049 oParser.add_argument('--out-mod-input3',
3050 metavar = 'file-instr.cpp.h',
3051 dest = 'sOutFileModInput3',
3052 action = 'store',
3053 default = '-',
3054 help = 'The output C++/header file for modified input instruction files part 3.');
3055 oParser.add_argument('--out-mod-input4',
3056 metavar = 'file-instr.cpp.h',
3057 dest = 'sOutFileModInput4',
3058 action = 'store',
3059 default = '-',
3060 help = 'The output C++/header file for modified input instruction files part 4.');
3061 oParser.add_argument('--help', '-h', '-?',
3062 action = 'help',
3063 help = 'Display help and exit.');
3064 oParser.add_argument('--version', '-V',
3065 action = 'version',
3066 version = 'r%s (IEMAllThreadedPython.py), r%s (IEMAllInstPython.py)'
3067 % (__version__.split()[1], iai.__version__.split()[1],),
3068 help = 'Display the version/revision of the script and exit.');
3069 self.oOptions = oParser.parse_args(asArgs[1:]);
3070 print("oOptions=%s" % (self.oOptions,), file = sys.stderr);
3071
3072 #
3073 # Process the instructions specified in the IEM sources.
3074 #
3075 if self.processInputFiles(self.oOptions.sHostArch, self.oOptions.fNativeRecompilerEnabled):
3076 #
3077 # Generate the output files.
3078 #
3079 aaoOutputFiles = (
3080 ( self.oOptions.sOutFileThrdFuncsHdr, self.generateThreadedFunctionsHeader ),
3081 ( self.oOptions.sOutFileThrdFuncsCpp, self.generateThreadedFunctionsSource ),
3082 ( self.oOptions.sOutFileN8veFuncsHdr, self.generateNativeFunctionsHeader ),
3083 ( self.oOptions.sOutFileN8veFuncsCpp, self.generateNativeFunctionsSource ),
3084 ( self.oOptions.sOutFileN8veLivenessCpp, self.generateNativeLivenessSource ),
3085 ( self.oOptions.sOutFileModInput1, self.generateModifiedInput1 ),
3086 ( self.oOptions.sOutFileModInput2, self.generateModifiedInput2 ),
3087 ( self.oOptions.sOutFileModInput3, self.generateModifiedInput3 ),
3088 ( self.oOptions.sOutFileModInput4, self.generateModifiedInput4 ),
3089 );
3090 fRc = True;
3091 for sOutFile, fnGenMethod in aaoOutputFiles:
3092 if sOutFile == '-':
3093 fRc = fnGenMethod(sys.stdout) and fRc;
3094 else:
3095 try:
3096 oOut = open(sOutFile, 'w'); # pylint: disable=consider-using-with,unspecified-encoding
3097 except Exception as oXcpt:
3098 print('error! Failed to open "%s" for writing: %s' % (sOutFile, oXcpt,), file = sys.stderr);
3099 return 1;
3100 fRc = fnGenMethod(oOut) and fRc;
3101 oOut.close();
3102 if fRc:
3103 return 0;
3104
3105 return 1;
3106
3107
3108if __name__ == '__main__':
3109 sys.exit(IEMThreadedGenerator().main(sys.argv));
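
For reference, main() takes an argv-style list and parses everything after the script name, so the generator can also be driven programmatically. The snippet below is a hypothetical invocation: the option names are taken from the parser above, but the host-architecture value and output file names are made up, and it assumes this script and its sibling IEMAll*Python modules are importable from the current sys.path.

# Hypothetical programmatic invocation; options left at their default '-' write to stdout.
import sys
import IEMAllThrdPython as thrd    # assumption: the script is importable under its file name

asArgs = ['IEMAllThrdPython.py',
          '--host-arch', 'amd64',                              # illustrative value
          '--native',                                          # also emit the native recompiler files
          '--out-thrd-funcs-hdr', 'IEMThreadedFunctions.h',    # made-up output file names
          '--out-thrd-funcs-cpp', 'IEMThreadedFunctions.cpp'];
sys.exit(thrd.IEMThreadedGenerator().main(asArgs));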
3110