VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllThrdPython.py@103194

Last change on this file since 103194 was 103181, checked in by vboxsync, 14 months ago

VMM/IEM: Liveness analysis, part 1. bugref:10372

  • Property svn:eol-style set to LF
  • Property svn:executable set to *
  • Property svn:keywords set to Author Date Id Revision
File size: 154.9 KB
 
1#!/usr/bin/env python
2# -*- coding: utf-8 -*-
3# $Id: IEMAllThrdPython.py 103181 2024-02-03 02:13:06Z vboxsync $
4# pylint: disable=invalid-name
5
6"""
7Annotates and generates threaded functions from IEMAllInst*.cpp.h.
8"""
9
10from __future__ import print_function;
11
12__copyright__ = \
13"""
14Copyright (C) 2023 Oracle and/or its affiliates.
15
16This file is part of VirtualBox base platform packages, as
17 available from https://www.virtualbox.org.
18
19This program is free software; you can redistribute it and/or
20modify it under the terms of the GNU General Public License
21as published by the Free Software Foundation, in version 3 of the
22License.
23
24This program is distributed in the hope that it will be useful, but
25WITHOUT ANY WARRANTY; without even the implied warranty of
26MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
27General Public License for more details.
28
29You should have received a copy of the GNU General Public License
30along with this program; if not, see <https://www.gnu.org/licenses>.
31
32SPDX-License-Identifier: GPL-3.0-only
33"""
34__version__ = "$Revision: 103181 $"
35
36# Standard python imports.
37import copy;
38import datetime;
39import os;
40import re;
41import sys;
42import argparse;
43from typing import Dict, List;
44
45import IEMAllInstPython as iai;
46import IEMAllN8vePython as ian;
47
48
49# Python 3 hacks:
50if sys.version_info[0] >= 3:
51 long = int; # pylint: disable=redefined-builtin,invalid-name
52
53## Number of generic parameters for the threaded functions.
54g_kcThreadedParams = 3;
55
56g_kdTypeInfo = {
57 # type name: (cBits, fSigned, C-type )
58 'int8_t': ( 8, True, 'int8_t', ),
59 'int16_t': ( 16, True, 'int16_t', ),
60 'int32_t': ( 32, True, 'int32_t', ),
61 'int64_t': ( 64, True, 'int64_t', ),
62 'uint4_t': ( 4, False, 'uint8_t', ),
63 'uint8_t': ( 8, False, 'uint8_t', ),
64 'uint16_t': ( 16, False, 'uint16_t', ),
65 'uint32_t': ( 32, False, 'uint32_t', ),
66 'uint64_t': ( 64, False, 'uint64_t', ),
67 'uintptr_t': ( 64, False, 'uintptr_t',), # ASSUMES 64-bit host pointer size.
68 'bool': ( 1, False, 'bool', ),
69 'IEMMODE': ( 2, False, 'IEMMODE', ),
70};
71
72# Only for getTypeBitCount/variables.
73g_kdTypeInfo2 = {
74 'RTFLOAT32U': ( 32, False, 'RTFLOAT32U', ),
75 'RTFLOAT64U': ( 64, False, 'RTFLOAT64U', ),
76 'RTUINT64U': ( 64, False, 'RTUINT64U', ),
77 'RTGCPTR': ( 64, False, 'RTGCPTR', ),
78 'RTPBCD80U': ( 80, False, 'RTPBCD80U', ),
79 'RTFLOAT80U': ( 80, False, 'RTFLOAT80U', ),
80 'IEMFPURESULT': (80+16, False, 'IEMFPURESULT', ),
81 'IEMFPURESULTTWO': (80+16+80,False, 'IEMFPURESULTTWO', ),
82 'RTUINT128U': ( 128, False, 'RTUINT128U', ),
83 'X86XMMREG': ( 128, False, 'X86XMMREG', ),
84 'IEMSSERESULT': ( 128+32, False, 'IEMSSERESULT', ),
85 'IEMMEDIAF2XMMSRC': ( 256, False, 'IEMMEDIAF2XMMSRC',),
86 'RTUINT256U': ( 256, False, 'RTUINT256U', ),
87 'IEMPCMPISTRXSRC': ( 256, False, 'IEMPCMPISTRXSRC', ),
88 'IEMPCMPESTRXSRC': ( 384, False, 'IEMPCMPESTRXSRC', ),
89}; #| g_kdTypeInfo; - requires 3.9
90g_kdTypeInfo2.update(g_kdTypeInfo);
91
92def getTypeBitCount(sType):
93 """
94 Translate a type to size in bits
95 """
96 if sType in g_kdTypeInfo2:
97 return g_kdTypeInfo2[sType][0];
98 if '*' in sType or sType[0] == 'P':
99 return 64;
100 #raise Exception('Unknown type: %s' % (sType,));
101 print('error: Unknown type: %s' % (sType,));
102 return 64;
103
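# For illustration: getTypeBitCount('uint16_t') resolves to 16 via the tables above,
# while pointer-looking types such as 'PVMCPU' or 'uint8_t *' fall through to the
# 64-bit host-pointer default.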
104g_kdIemFieldToType = {
105 # Illegal ones:
106 'offInstrNextByte': ( None, ),
107 'cbInstrBuf': ( None, ),
108 'pbInstrBuf': ( None, ),
109 'uInstrBufPc': ( None, ),
110 'cbInstrBufTotal': ( None, ),
111 'offCurInstrStart': ( None, ),
112 'cbOpcode': ( None, ),
113 'offOpcode': ( None, ),
114 'offModRm': ( None, ),
115 # Okay ones.
116 'fPrefixes': ( 'uint32_t', ),
117 'uRexReg': ( 'uint8_t', ),
118 'uRexB': ( 'uint8_t', ),
119 'uRexIndex': ( 'uint8_t', ),
120 'iEffSeg': ( 'uint8_t', ),
121 'enmEffOpSize': ( 'IEMMODE', ),
122 'enmDefAddrMode': ( 'IEMMODE', ),
123 'enmEffAddrMode': ( 'IEMMODE', ),
124 'enmDefOpSize': ( 'IEMMODE', ),
125 'idxPrefix': ( 'uint8_t', ),
126 'uVex3rdReg': ( 'uint8_t', ),
127 'uVexLength': ( 'uint8_t', ),
128 'fEvexStuff': ( 'uint8_t', ),
129 'uFpuOpcode': ( 'uint16_t', ),
130};
131
132## @name McStmtCond.oIfBranchAnnotation/McStmtCond.oElseBranchAnnotation values
133## @{
134g_ksFinishAnnotation_Advance = 'Advance';
135g_ksFinishAnnotation_RelJmp = 'RelJmp';
136g_ksFinishAnnotation_SetJmp = 'SetJmp';
137g_ksFinishAnnotation_DeferToCImpl = 'DeferToCImpl';
138## @}
139
140
141class ThreadedParamRef(object):
142 """
143 A parameter reference for a threaded function.
144 """
145
146 def __init__(self, sOrgRef, sType, oStmt, iParam = None, offParam = 0, sStdRef = None):
147 ## The name / reference in the original code.
148 self.sOrgRef = sOrgRef;
149 ## Normalized name to deal with spaces in macro invocations and such.
150 self.sStdRef = sStdRef if sStdRef else ''.join(sOrgRef.split());
151 ## Indicates that sOrgRef may not match the parameter.
152 self.fCustomRef = sStdRef is not None;
153 ## The type (typically derived).
154 self.sType = sType;
155 ## The statement making the reference.
156 self.oStmt = oStmt;
157 ## The parameter containing the references. None if implicit.
158 self.iParam = iParam;
159 ## The offset in the parameter of the reference.
160 self.offParam = offParam;
161
162 ## The variable name in the threaded function.
163 self.sNewName = 'x';
164 ## The parameter index this is packed into.
165 self.iNewParam = 99;
166 ## The bit offset in iNewParam.
167 self.offNewParam = 1024
168
169
170class ThreadedFunctionVariation(object):
171 """ Threaded function variation. """
172
173 ## @name Variations.
174 ## These variations will match translation block selection/distinctions as well.
175 ## @{
176 # pylint: disable=line-too-long
177 ksVariation_Default = ''; ##< No variations - only used by IEM_MC_DEFER_TO_CIMPL_X_RET.
178 ksVariation_16 = '_16'; ##< 16-bit mode code (386+).
179 ksVariation_16f = '_16f'; ##< 16-bit mode code (386+), check+clear eflags.
180 ksVariation_16_Jmp = '_16_Jmp'; ##< 16-bit mode code (386+), conditional jump taken.
181 ksVariation_16f_Jmp = '_16f_Jmp'; ##< 16-bit mode code (386+), check+clear eflags, conditional jump taken.
182 ksVariation_16_NoJmp = '_16_NoJmp'; ##< 16-bit mode code (386+), conditional jump not taken.
183 ksVariation_16f_NoJmp = '_16f_NoJmp'; ##< 16-bit mode code (386+), check+clear eflags, conditional jump not taken.
184 ksVariation_16_Addr32 = '_16_Addr32'; ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing.
185 ksVariation_16f_Addr32 = '_16f_Addr32'; ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing, eflags.
186 ksVariation_16_Pre386 = '_16_Pre386'; ##< 16-bit mode code, pre-386 CPU target.
187 ksVariation_16f_Pre386 = '_16f_Pre386'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags.
188 ksVariation_16_Pre386_Jmp = '_16_Pre386_Jmp'; ##< 16-bit mode code, pre-386 CPU target, conditional jump taken.
189 ksVariation_16f_Pre386_Jmp = '_16f_Pre386_Jmp'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags, conditional jump taken.
190 ksVariation_16_Pre386_NoJmp = '_16_Pre386_NoJmp'; ##< 16-bit mode code, pre-386 CPU target, conditional jump not taken.
191 ksVariation_16f_Pre386_NoJmp = '_16f_Pre386_NoJmp'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags, conditional jump not taken.
192 ksVariation_32 = '_32'; ##< 32-bit mode code (386+).
193 ksVariation_32f = '_32f'; ##< 32-bit mode code (386+), check+clear eflags.
194 ksVariation_32_Jmp = '_32_Jmp'; ##< 32-bit mode code (386+), conditional jump taken.
195 ksVariation_32f_Jmp = '_32f_Jmp'; ##< 32-bit mode code (386+), check+clear eflags, conditional jump taken.
196 ksVariation_32_NoJmp = '_32_NoJmp'; ##< 32-bit mode code (386+), conditional jump not taken.
197 ksVariation_32f_NoJmp = '_32f_NoJmp'; ##< 32-bit mode code (386+), check+clear eflags, conditional jump not taken.
198 ksVariation_32_Flat = '_32_Flat'; ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide.
199 ksVariation_32f_Flat = '_32f_Flat'; ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide, eflags.
200 ksVariation_32_Addr16 = '_32_Addr16'; ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing.
201 ksVariation_32f_Addr16 = '_32f_Addr16'; ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing, eflags.
202 ksVariation_64 = '_64'; ##< 64-bit mode code.
203 ksVariation_64f = '_64f'; ##< 64-bit mode code, check+clear eflags.
204 ksVariation_64_Jmp = '_64_Jmp'; ##< 64-bit mode code, conditional jump taken.
205 ksVariation_64f_Jmp = '_64f_Jmp'; ##< 64-bit mode code, check+clear eflags, conditional jump taken.
206 ksVariation_64_NoJmp = '_64_NoJmp'; ##< 64-bit mode code, conditional jump not taken.
207 ksVariation_64f_NoJmp = '_64f_NoJmp'; ##< 64-bit mode code, check+clear eflags, conditional jump not taken.
208 ksVariation_64_FsGs = '_64_FsGs'; ##< 64-bit mode code, with memory accesses via FS or GS.
209 ksVariation_64f_FsGs = '_64f_FsGs'; ##< 64-bit mode code, with memory accesses via FS or GS, check+clear eflags.
210 ksVariation_64_Addr32 = '_64_Addr32'; ##< 64-bit mode code, address size prefixed to 32-bit addressing.
211 ksVariation_64f_Addr32 = '_64f_Addr32'; ##< 64-bit mode code, address size prefixed to 32-bit addressing, c+c eflags.
212 # pylint: enable=line-too-long
213 kasVariations = (
214 ksVariation_Default,
215 ksVariation_16,
216 ksVariation_16f,
217 ksVariation_16_Jmp,
218 ksVariation_16f_Jmp,
219 ksVariation_16_NoJmp,
220 ksVariation_16f_NoJmp,
221 ksVariation_16_Addr32,
222 ksVariation_16f_Addr32,
223 ksVariation_16_Pre386,
224 ksVariation_16f_Pre386,
225 ksVariation_16_Pre386_Jmp,
226 ksVariation_16f_Pre386_Jmp,
227 ksVariation_16_Pre386_NoJmp,
228 ksVariation_16f_Pre386_NoJmp,
229 ksVariation_32,
230 ksVariation_32f,
231 ksVariation_32_Jmp,
232 ksVariation_32f_Jmp,
233 ksVariation_32_NoJmp,
234 ksVariation_32f_NoJmp,
235 ksVariation_32_Flat,
236 ksVariation_32f_Flat,
237 ksVariation_32_Addr16,
238 ksVariation_32f_Addr16,
239 ksVariation_64,
240 ksVariation_64f,
241 ksVariation_64_Jmp,
242 ksVariation_64f_Jmp,
243 ksVariation_64_NoJmp,
244 ksVariation_64f_NoJmp,
245 ksVariation_64_FsGs,
246 ksVariation_64f_FsGs,
247 ksVariation_64_Addr32,
248 ksVariation_64f_Addr32,
249 );
250 kasVariationsWithoutAddress = (
251 ksVariation_16,
252 ksVariation_16f,
253 ksVariation_16_Pre386,
254 ksVariation_16f_Pre386,
255 ksVariation_32,
256 ksVariation_32f,
257 ksVariation_64,
258 ksVariation_64f,
259 );
260 kasVariationsWithoutAddressNot286 = (
261 ksVariation_16,
262 ksVariation_16f,
263 ksVariation_32,
264 ksVariation_32f,
265 ksVariation_64,
266 ksVariation_64f,
267 );
268 kasVariationsWithoutAddressNot286Not64 = (
269 ksVariation_16,
270 ksVariation_16f,
271 ksVariation_32,
272 ksVariation_32f,
273 );
274 kasVariationsWithoutAddressNot64 = (
275 ksVariation_16,
276 ksVariation_16f,
277 ksVariation_16_Pre386,
278 ksVariation_16f_Pre386,
279 ksVariation_32,
280 ksVariation_32f,
281 );
282 kasVariationsWithoutAddressOnly64 = (
283 ksVariation_64,
284 ksVariation_64f,
285 );
286 kasVariationsWithAddress = (
287 ksVariation_16,
288 ksVariation_16f,
289 ksVariation_16_Addr32,
290 ksVariation_16f_Addr32,
291 ksVariation_16_Pre386,
292 ksVariation_16f_Pre386,
293 ksVariation_32,
294 ksVariation_32f,
295 ksVariation_32_Flat,
296 ksVariation_32f_Flat,
297 ksVariation_32_Addr16,
298 ksVariation_32f_Addr16,
299 ksVariation_64,
300 ksVariation_64f,
301 ksVariation_64_FsGs,
302 ksVariation_64f_FsGs,
303 ksVariation_64_Addr32,
304 ksVariation_64f_Addr32,
305 );
306 kasVariationsWithAddressNot286 = (
307 ksVariation_16,
308 ksVariation_16f,
309 ksVariation_16_Addr32,
310 ksVariation_16f_Addr32,
311 ksVariation_32,
312 ksVariation_32f,
313 ksVariation_32_Flat,
314 ksVariation_32f_Flat,
315 ksVariation_32_Addr16,
316 ksVariation_32f_Addr16,
317 ksVariation_64,
318 ksVariation_64f,
319 ksVariation_64_FsGs,
320 ksVariation_64f_FsGs,
321 ksVariation_64_Addr32,
322 ksVariation_64f_Addr32,
323 );
324 kasVariationsWithAddressNot286Not64 = (
325 ksVariation_16,
326 ksVariation_16f,
327 ksVariation_16_Addr32,
328 ksVariation_16f_Addr32,
329 ksVariation_32,
330 ksVariation_32f,
331 ksVariation_32_Flat,
332 ksVariation_32f_Flat,
333 ksVariation_32_Addr16,
334 ksVariation_32f_Addr16,
335 );
336 kasVariationsWithAddressNot64 = (
337 ksVariation_16,
338 ksVariation_16f,
339 ksVariation_16_Addr32,
340 ksVariation_16f_Addr32,
341 ksVariation_16_Pre386,
342 ksVariation_16f_Pre386,
343 ksVariation_32,
344 ksVariation_32f,
345 ksVariation_32_Flat,
346 ksVariation_32f_Flat,
347 ksVariation_32_Addr16,
348 ksVariation_32f_Addr16,
349 );
350 kasVariationsWithAddressOnly64 = (
351 ksVariation_64,
352 ksVariation_64f,
353 ksVariation_64_FsGs,
354 ksVariation_64f_FsGs,
355 ksVariation_64_Addr32,
356 ksVariation_64f_Addr32,
357 );
358 kasVariationsOnlyPre386 = (
359 ksVariation_16_Pre386,
360 ksVariation_16f_Pre386,
361 );
362 kasVariationsEmitOrder = (
363 ksVariation_Default,
364 ksVariation_64,
365 ksVariation_64f,
366 ksVariation_64_Jmp,
367 ksVariation_64f_Jmp,
368 ksVariation_64_NoJmp,
369 ksVariation_64f_NoJmp,
370 ksVariation_64_FsGs,
371 ksVariation_64f_FsGs,
372 ksVariation_32_Flat,
373 ksVariation_32f_Flat,
374 ksVariation_32,
375 ksVariation_32f,
376 ksVariation_32_Jmp,
377 ksVariation_32f_Jmp,
378 ksVariation_32_NoJmp,
379 ksVariation_32f_NoJmp,
380 ksVariation_16,
381 ksVariation_16f,
382 ksVariation_16_Jmp,
383 ksVariation_16f_Jmp,
384 ksVariation_16_NoJmp,
385 ksVariation_16f_NoJmp,
386 ksVariation_16_Addr32,
387 ksVariation_16f_Addr32,
388 ksVariation_16_Pre386,
389 ksVariation_16f_Pre386,
390 ksVariation_16_Pre386_Jmp,
391 ksVariation_16f_Pre386_Jmp,
392 ksVariation_16_Pre386_NoJmp,
393 ksVariation_16f_Pre386_NoJmp,
394 ksVariation_32_Addr16,
395 ksVariation_32f_Addr16,
396 ksVariation_64_Addr32,
397 ksVariation_64f_Addr32,
398 );
399 kdVariationNames = {
400 ksVariation_Default: 'defer-to-cimpl',
401 ksVariation_16: '16-bit',
402 ksVariation_16f: '16-bit w/ eflag checking and clearing',
403 ksVariation_16_Jmp: '16-bit w/ conditional jump taken',
404 ksVariation_16f_Jmp: '16-bit w/ eflag checking and clearing and conditional jump taken',
405 ksVariation_16_NoJmp: '16-bit w/ conditional jump not taken',
406 ksVariation_16f_NoJmp: '16-bit w/ eflag checking and clearing and conditional jump not taken',
407 ksVariation_16_Addr32: '16-bit w/ address prefix (Addr32)',
408 ksVariation_16f_Addr32: '16-bit w/ address prefix (Addr32) and eflag checking and clearing',
409 ksVariation_16_Pre386: '16-bit on pre-386 CPU',
410 ksVariation_16f_Pre386: '16-bit on pre-386 CPU w/ eflag checking and clearing',
411 ksVariation_16_Pre386_Jmp: '16-bit on pre-386 CPU w/ conditional jump taken',
412 ksVariation_16f_Pre386_Jmp: '16-bit on pre-386 CPU w/ eflag checking and clearing and conditional jump taken',
413 ksVariation_16_Pre386_NoJmp: '16-bit on pre-386 CPU w/ conditional jump not taken',
414 ksVariation_16f_Pre386_NoJmp: '16-bit on pre-386 CPU w/ eflag checking and clearing and conditional jump not taken',
415 ksVariation_32: '32-bit',
416 ksVariation_32f: '32-bit w/ eflag checking and clearing',
417 ksVariation_32_Jmp: '32-bit w/ conditional jump taken',
418 ksVariation_32f_Jmp: '32-bit w/ eflag checking and clearing and conditional jump taken',
419 ksVariation_32_NoJmp: '32-bit w/ conditional jump not taken',
420 ksVariation_32f_NoJmp: '32-bit w/ eflag checking and clearing and conditional jump not taken',
421 ksVariation_32_Flat: '32-bit flat and wide open CS, SS, DS and ES',
422 ksVariation_32f_Flat: '32-bit flat and wide open CS, SS, DS and ES w/ eflag checking and clearing',
423 ksVariation_32_Addr16: '32-bit w/ address prefix (Addr16)',
424 ksVariation_32f_Addr16: '32-bit w/ address prefix (Addr16) and eflag checking and clearing',
425 ksVariation_64: '64-bit',
426 ksVariation_64f: '64-bit w/ eflag checking and clearing',
427 ksVariation_64_Jmp: '64-bit w/ conditional jump taken',
428 ksVariation_64f_Jmp: '64-bit w/ eflag checking and clearing and conditional jump taken',
429 ksVariation_64_NoJmp: '64-bit w/ conditional jump not taken',
430 ksVariation_64f_NoJmp: '64-bit w/ eflag checking and clearing and conditional jump not taken',
431 ksVariation_64_FsGs: '64-bit with memory accessed via FS or GS',
432 ksVariation_64f_FsGs: '64-bit with memory accessed via FS or GS and eflag checking and clearing',
433 ksVariation_64_Addr32: '64-bit w/ address prefix (Addr32)',
434 ksVariation_64f_Addr32: '64-bit w/ address prefix (Addr32) and eflag checking and clearing',
435 };
436 kdVariationsWithEflagsCheckingAndClearing = {
437 ksVariation_16f: True,
438 ksVariation_16f_Jmp: True,
439 ksVariation_16f_NoJmp: True,
440 ksVariation_16f_Addr32: True,
441 ksVariation_16f_Pre386: True,
442 ksVariation_16f_Pre386_Jmp: True,
443 ksVariation_16f_Pre386_NoJmp: True,
444 ksVariation_32f: True,
445 ksVariation_32f_Jmp: True,
446 ksVariation_32f_NoJmp: True,
447 ksVariation_32f_Flat: True,
448 ksVariation_32f_Addr16: True,
449 ksVariation_64f: True,
450 ksVariation_64f_Jmp: True,
451 ksVariation_64f_NoJmp: True,
452 ksVariation_64f_FsGs: True,
453 ksVariation_64f_Addr32: True,
454 };
455 kdVariationsOnly64NoFlags = {
456 ksVariation_64: True,
457 ksVariation_64_Jmp: True,
458 ksVariation_64_NoJmp: True,
459 ksVariation_64_FsGs: True,
460 ksVariation_64_Addr32: True,
461 };
462 kdVariationsOnly64WithFlags = {
463 ksVariation_64f: True,
464 ksVariation_64f_Jmp: True,
465 ksVariation_64f_NoJmp: True,
466 ksVariation_64f_FsGs: True,
467 ksVariation_64f_Addr32: True,
468 };
469 kdVariationsOnlyPre386NoFlags = {
470 ksVariation_16_Pre386: True,
471 ksVariation_16_Pre386_Jmp: True,
472 ksVariation_16_Pre386_NoJmp: True,
473 };
474 kdVariationsOnlyPre386WithFlags = {
475 ksVariation_16f_Pre386: True,
476 ksVariation_16f_Pre386_Jmp: True,
477 ksVariation_16f_Pre386_NoJmp: True,
478 };
479 kdVariationsWithFlatAddress = {
480 ksVariation_32_Flat: True,
481 ksVariation_32f_Flat: True,
482 ksVariation_64: True,
483 ksVariation_64f: True,
484 ksVariation_64_Addr32: True,
485 ksVariation_64f_Addr32: True,
486 };
487 kdVariationsWithFlatStackAddress = {
488 ksVariation_32_Flat: True,
489 ksVariation_32f_Flat: True,
490 ksVariation_64: True,
491 ksVariation_64f: True,
492 ksVariation_64_FsGs: True,
493 ksVariation_64f_FsGs: True,
494 ksVariation_64_Addr32: True,
495 ksVariation_64f_Addr32: True,
496 };
497 kdVariationsWithFlat64StackAddress = {
498 ksVariation_64: True,
499 ksVariation_64f: True,
500 ksVariation_64_FsGs: True,
501 ksVariation_64f_FsGs: True,
502 ksVariation_64_Addr32: True,
503 ksVariation_64f_Addr32: True,
504 };
505 kdVariationsWithFlatAddr16 = {
506 ksVariation_16: True,
507 ksVariation_16f: True,
508 ksVariation_16_Pre386: True,
509 ksVariation_16f_Pre386: True,
510 ksVariation_32_Addr16: True,
511 ksVariation_32f_Addr16: True,
512 };
513 kdVariationsWithFlatAddr32No64 = {
514 ksVariation_16_Addr32: True,
515 ksVariation_16f_Addr32: True,
516 ksVariation_32: True,
517 ksVariation_32f: True,
518 ksVariation_32_Flat: True,
519 ksVariation_32f_Flat: True,
520 };
521 kdVariationsWithAddressOnly64 = {
522 ksVariation_64: True,
523 ksVariation_64f: True,
524 ksVariation_64_FsGs: True,
525 ksVariation_64f_FsGs: True,
526 ksVariation_64_Addr32: True,
527 ksVariation_64f_Addr32: True,
528 };
529 kdVariationsWithConditional = {
530 ksVariation_16_Jmp: True,
531 ksVariation_16_NoJmp: True,
532 ksVariation_16_Pre386_Jmp: True,
533 ksVariation_16_Pre386_NoJmp: True,
534 ksVariation_32_Jmp: True,
535 ksVariation_32_NoJmp: True,
536 ksVariation_64_Jmp: True,
537 ksVariation_64_NoJmp: True,
538 ksVariation_16f_Jmp: True,
539 ksVariation_16f_NoJmp: True,
540 ksVariation_16f_Pre386_Jmp: True,
541 ksVariation_16f_Pre386_NoJmp: True,
542 ksVariation_32f_Jmp: True,
543 ksVariation_32f_NoJmp: True,
544 ksVariation_64f_Jmp: True,
545 ksVariation_64f_NoJmp: True,
546 };
547 kdVariationsWithConditionalNoJmp = {
548 ksVariation_16_NoJmp: True,
549 ksVariation_16_Pre386_NoJmp: True,
550 ksVariation_32_NoJmp: True,
551 ksVariation_64_NoJmp: True,
552 ksVariation_16f_NoJmp: True,
553 ksVariation_16f_Pre386_NoJmp: True,
554 ksVariation_32f_NoJmp: True,
555 ksVariation_64f_NoJmp: True,
556 };
557 kdVariationsOnlyPre386 = {
558 ksVariation_16_Pre386: True,
559 ksVariation_16f_Pre386: True,
560 ksVariation_16_Pre386_Jmp: True,
561 ksVariation_16f_Pre386_Jmp: True,
562 ksVariation_16_Pre386_NoJmp: True,
563 ksVariation_16f_Pre386_NoJmp: True,
564 };
565 ## @}
566
567 ## IEM_CIMPL_F_XXX flags that we know.
568 ## The value indicates whether it terminates the TB or not. The goal is to
569 ## improve the recompiler so all but END_TB will be False.
570 ##
571 ## @note iemThreadedRecompilerMcDeferToCImpl0 duplicates info found here.
572 kdCImplFlags = {
573 'IEM_CIMPL_F_MODE': False,
574 'IEM_CIMPL_F_BRANCH_DIRECT': False,
575 'IEM_CIMPL_F_BRANCH_INDIRECT': False,
576 'IEM_CIMPL_F_BRANCH_RELATIVE': False,
577 'IEM_CIMPL_F_BRANCH_FAR': True,
578 'IEM_CIMPL_F_BRANCH_CONDITIONAL': False,
579 # IEM_CIMPL_F_BRANCH_ANY should only be used for testing, so not included here.
580 'IEM_CIMPL_F_BRANCH_STACK': False,
581 'IEM_CIMPL_F_BRANCH_STACK_FAR': False,
582 'IEM_CIMPL_F_RFLAGS': False,
583 'IEM_CIMPL_F_INHIBIT_SHADOW': False,
584 'IEM_CIMPL_F_CHECK_IRQ_AFTER': False,
585 'IEM_CIMPL_F_CHECK_IRQ_BEFORE': False,
586 'IEM_CIMPL_F_CHECK_IRQ_BEFORE_AND_AFTER': False, # (ignore)
587 'IEM_CIMPL_F_STATUS_FLAGS': False,
588 'IEM_CIMPL_F_VMEXIT': False,
589 'IEM_CIMPL_F_FPU': False,
590 'IEM_CIMPL_F_REP': False,
591 'IEM_CIMPL_F_IO': False,
592 'IEM_CIMPL_F_END_TB': True,
593 'IEM_CIMPL_F_XCPT': True,
594 'IEM_CIMPL_F_CALLS_CIMPL': False,
595 'IEM_CIMPL_F_CALLS_AIMPL': False,
596 'IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE': False,
597 };
598
599 def __init__(self, oThreadedFunction, sVariation = ksVariation_Default):
600 self.oParent = oThreadedFunction # type: ThreadedFunction
601 ##< ksVariation_Xxxx.
602 self.sVariation = sVariation
603
604 ## Threaded function parameter references.
605 self.aoParamRefs = [] # type: List[ThreadedParamRef]
606 ## Unique parameter references.
607 self.dParamRefs = {} # type: Dict[str, List[ThreadedParamRef]]
608 ## Minimum number of parameters to the threaded function.
609 self.cMinParams = 0;
610
611 ## List/tree of statements for the threaded function.
612 self.aoStmtsForThreadedFunction = [] # type: List[McStmt]
613
614 ## Function enum number, for verification. Set by generateThreadedFunctionsHeader.
615 self.iEnumValue = -1;
616
617 ## Native recompilation details for this variation.
618 self.oNativeRecomp = None;
619
620 def getIndexName(self):
621 sName = self.oParent.oMcBlock.sFunction;
622 if sName.startswith('iemOp_'):
623 sName = sName[len('iemOp_'):];
624 if self.oParent.oMcBlock.iInFunction == 0:
625 return 'kIemThreadedFunc_%s%s' % ( sName, self.sVariation, );
626 return 'kIemThreadedFunc_%s_%s%s' % ( sName, self.oParent.oMcBlock.iInFunction, self.sVariation, );
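 # For illustration: an 'iemOp_xxx' decoder function (name is a placeholder) with
 # iInFunction == 0 and the '_64' variation maps to 'kIemThreadedFunc_xxx_64';
 # a non-zero iInFunction is inserted between the name and the variation suffix.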
627
628 def getThreadedFunctionName(self):
629 sName = self.oParent.oMcBlock.sFunction;
630 if sName.startswith('iemOp_'):
631 sName = sName[len('iemOp_'):];
632 if self.oParent.oMcBlock.iInFunction == 0:
633 return 'iemThreadedFunc_%s%s' % ( sName, self.sVariation, );
634 return 'iemThreadedFunc_%s_%s%s' % ( sName, self.oParent.oMcBlock.iInFunction, self.sVariation, );
635
636 def getNativeFunctionName(self):
637 return 'iemNativeRecompFunc_' + self.getThreadedFunctionName()[len('iemThreadedFunc_'):];
638
639 def getLivenessFunctionName(self):
640 return 'iemNativeLivenessFunc_' + self.getThreadedFunctionName()[len('iemThreadedFunc_'):];
641
642 def getShortName(self):
643 sName = self.oParent.oMcBlock.sFunction;
644 if sName.startswith('iemOp_'):
645 sName = sName[len('iemOp_'):];
646 if self.oParent.oMcBlock.iInFunction == 0:
647 return '%s%s' % ( sName, self.sVariation, );
648 return '%s_%s%s' % ( sName, self.oParent.oMcBlock.iInFunction, self.sVariation, );
649
650 def isWithFlagsCheckingAndClearingVariation(self):
651 """
652 Checks if this is a variation that checks and clears EFLAGS.
653 """
654 return self.sVariation in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing;
655
656 #
657 # Analysis and code morphing.
658 #
659
660 def raiseProblem(self, sMessage):
661 """ Raises a problem. """
662 self.oParent.raiseProblem(sMessage);
663
664 def warning(self, sMessage):
665 """ Emits a warning. """
666 self.oParent.warning(sMessage);
667
668 def analyzeReferenceToType(self, sRef):
669 """
670 Translates a variable or structure reference to a type.
671 Returns type name.
672 Raises exception if unable to figure it out.
673 """
674 ch0 = sRef[0];
675 if ch0 == 'u':
676 if sRef.startswith('u32'):
677 return 'uint32_t';
678 if sRef.startswith('u8') or sRef == 'uReg':
679 return 'uint8_t';
680 if sRef.startswith('u64'):
681 return 'uint64_t';
682 if sRef.startswith('u16'):
683 return 'uint16_t';
684 elif ch0 == 'b':
685 return 'uint8_t';
686 elif ch0 == 'f':
687 return 'bool';
688 elif ch0 == 'i':
689 if sRef.startswith('i8'):
690 return 'int8_t';
691 if sRef.startswith('i16'):
692 return 'int16_t';
693 if sRef.startswith('i32'):
694 return 'int32_t';
695 if sRef.startswith('i64'):
696 return 'int64_t';
697 if sRef in ('iReg', 'iFixedReg', 'iGReg', 'iSegReg', 'iSrcReg', 'iDstReg', 'iCrReg'):
698 return 'uint8_t';
699 elif ch0 == 'p':
700 if sRef.find('-') < 0:
701 return 'uintptr_t';
702 if sRef.startswith('pVCpu->iem.s.'):
703 sField = sRef[len('pVCpu->iem.s.') : ];
704 if sField in g_kdIemFieldToType:
705 if g_kdIemFieldToType[sField][0]:
706 return g_kdIemFieldToType[sField][0];
707 elif ch0 == 'G' and sRef.startswith('GCPtr'):
708 return 'uint64_t';
709 elif ch0 == 'e':
710 if sRef == 'enmEffOpSize':
711 return 'IEMMODE';
712 elif ch0 == 'o':
713 if sRef.startswith('off32'):
714 return 'uint32_t';
715 elif sRef == 'cbFrame': # enter
716 return 'uint16_t';
717 elif sRef == 'cShift': ## @todo risky
718 return 'uint8_t';
719
720 self.raiseProblem('Unknown reference: %s' % (sRef,));
721 return None; # Shut up pylint 2.16.2.
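 # For illustration: 'u16Disp' resolves to 'uint16_t', a hypothetical 'fCarry' to
 # 'bool' via the 'f' prefix, and 'pVCpu->iem.s.enmEffOpSize' to 'IEMMODE' through
 # the g_kdIemFieldToType table.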
722
723 def analyzeCallToType(self, sFnRef):
724 """
725 Determines the type of an indirect function call.
726 """
727 assert sFnRef[0] == 'p';
728
729 #
730 # Simple?
731 #
732 if sFnRef.find('-') < 0:
733 oDecoderFunction = self.oParent.oMcBlock.oFunction;
734
735 # Try the argument list of the function definition macro invocation first.
736 iArg = 2;
737 while iArg < len(oDecoderFunction.asDefArgs):
738 if sFnRef == oDecoderFunction.asDefArgs[iArg]:
739 return oDecoderFunction.asDefArgs[iArg - 1];
740 iArg += 1;
741
742 # Then check for a line that includes the word and looks like a variable declaration.
743 oRe = re.compile(' +(P[A-Z0-9_]+|const +IEMOP[A-Z0-9_]+ *[*]) +(const |) *' + sFnRef + ' *(;|=)');
744 for sLine in oDecoderFunction.asLines:
745 oMatch = oRe.match(sLine);
746 if oMatch:
747 if not oMatch.group(1).startswith('const'):
748 return oMatch.group(1);
749 return 'PC' + oMatch.group(1)[len('const ') : -1].strip();
750
751 #
752 # Deal with the pImpl->pfnXxx:
753 #
754 elif sFnRef.startswith('pImpl->pfn'):
755 sMember = sFnRef[len('pImpl->') : ];
756 sBaseType = self.analyzeCallToType('pImpl');
757 offBits = sMember.rfind('U') + 1;
758 if sBaseType == 'PCIEMOPBINSIZES': return 'PFNIEMAIMPLBINU' + sMember[offBits:];
759 if sBaseType == 'PCIEMOPUNARYSIZES': return 'PFNIEMAIMPLUNARYU' + sMember[offBits:];
760 if sBaseType == 'PCIEMOPSHIFTSIZES': return 'PFNIEMAIMPLSHIFTU' + sMember[offBits:];
761 if sBaseType == 'PCIEMOPSHIFTDBLSIZES': return 'PFNIEMAIMPLSHIFTDBLU' + sMember[offBits:];
762 if sBaseType == 'PCIEMOPMULDIVSIZES': return 'PFNIEMAIMPLMULDIVU' + sMember[offBits:];
763 if sBaseType == 'PCIEMOPMEDIAF3': return 'PFNIEMAIMPLMEDIAF3U' + sMember[offBits:];
764 if sBaseType == 'PCIEMOPMEDIAOPTF3': return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:];
765 if sBaseType == 'PCIEMOPMEDIAOPTF2': return 'PFNIEMAIMPLMEDIAOPTF2U' + sMember[offBits:];
766 if sBaseType == 'PCIEMOPMEDIAOPTF3IMM8': return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:] + 'IMM8';
767 if sBaseType == 'PCIEMOPBLENDOP': return 'PFNIEMAIMPLAVXBLENDU' + sMember[offBits:];
768
769 self.raiseProblem('Unknown call reference: %s::%s (%s)' % (sBaseType, sMember, sFnRef,));
770
771 self.raiseProblem('Unknown call reference: %s' % (sFnRef,));
772 return None; # Shut up pylint 2.16.2.
773
774 def analyze8BitGRegStmt(self, oStmt):
775 """
776 Gets the 8-bit general purpose register access details of the given statement.
777 ASSUMES the statement is one accessing an 8-bit GREG.
778 """
779 idxReg = 0;
780 if ( oStmt.sName.find('_FETCH_') > 0
781 or oStmt.sName.find('_REF_') > 0
782 or oStmt.sName.find('_TO_LOCAL') > 0):
783 idxReg = 1;
784
785 sRegRef = oStmt.asParams[idxReg];
786 if sRegRef.startswith('IEM_GET_MODRM_RM') or sRegRef.startswith('IEM_GET_MODRM_REG'):
787 asBits = [sBit.strip() for sBit in sRegRef.replace('(', ',').replace(')', '').split(',')];
788 if len(asBits) != 3 or asBits[1] != 'pVCpu' or (asBits[0] != 'IEM_GET_MODRM_RM' and asBits[0] != 'IEM_GET_MODRM_REG'):
789 self.raiseProblem('Unexpected reference: %s (asBits=%s)' % (sRegRef, asBits));
790 sOrgExpr = asBits[0] + '_EX8(pVCpu, ' + asBits[2] + ')';
791 else:
792 sOrgExpr = '((%s) < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX) ? (%s) : (%s) + 12)' % (sRegRef, sRegRef, sRegRef);
793
794 if sRegRef.find('IEM_GET_MODRM_RM') >= 0: sStdRef = 'bRmRm8Ex';
795 elif sRegRef.find('IEM_GET_MODRM_REG') >= 0: sStdRef = 'bRmReg8Ex';
796 elif sRegRef == 'X86_GREG_xAX': sStdRef = 'bGregXAx8Ex';
797 elif sRegRef == 'X86_GREG_xCX': sStdRef = 'bGregXCx8Ex';
798 elif sRegRef == 'X86_GREG_xSP': sStdRef = 'bGregXSp8Ex';
799 elif sRegRef == 'iFixedReg': sStdRef = 'bFixedReg8Ex';
800 else:
801 self.warning('analyze8BitGRegStmt: sRegRef=%s -> bOther8Ex; %s %s; sOrgExpr=%s'
802 % (sRegRef, oStmt.sName, oStmt.asParams, sOrgExpr,));
803 sStdRef = 'bOther8Ex';
804
805 #print('analyze8BitGRegStmt: %s %s; sRegRef=%s\n -> idxReg=%s sOrgExpr=%s sStdRef=%s'
806 # % (oStmt.sName, oStmt.asParams, sRegRef, idxReg, sOrgExpr, sStdRef));
807 return (idxReg, sOrgExpr, sStdRef);
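 # For illustration: a register reference of 'IEM_GET_MODRM_RM(pVCpu, bRm)' yields
 # sOrgExpr 'IEM_GET_MODRM_RM_EX8(pVCpu, bRm)' and sStdRef 'bRmRm8Ex', while other
 # expressions get the "< 4 || REX" adjustment wrapper built above.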
808
809
810 ## Maps memory related MCs to info for FLAT conversion.
811 ## This is used in 64-bit and flat 32-bit variants to skip the unnecessary
812 ## segmentation checking for every memory access. Only applied to access
813 ## via ES, DS and SS. FS, GS and CS gets the full segmentation threatment,
814 ## the latter (CS) is just to keep things simple (we could safely fetch via
815 ## it, but only in 64-bit mode could we safely write via it, IIRC).
816 kdMemMcToFlatInfo = {
817 'IEM_MC_FETCH_MEM_U8': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8' ),
818 'IEM_MC_FETCH_MEM16_U8': ( 1, 'IEM_MC_FETCH_MEM16_FLAT_U8' ),
819 'IEM_MC_FETCH_MEM32_U8': ( 1, 'IEM_MC_FETCH_MEM32_FLAT_U8' ),
820 'IEM_MC_FETCH_MEM_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16' ),
821 'IEM_MC_FETCH_MEM_U16_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_DISP' ),
822 'IEM_MC_FETCH_MEM_I16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I16' ),
823 'IEM_MC_FETCH_MEM_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32' ),
824 'IEM_MC_FETCH_MEM_U32_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_DISP' ),
825 'IEM_MC_FETCH_MEM_I32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I32' ),
826 'IEM_MC_FETCH_MEM_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64' ),
827 'IEM_MC_FETCH_MEM_U64_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_DISP' ),
828 'IEM_MC_FETCH_MEM_U64_ALIGN_U128': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_ALIGN_U128' ),
829 'IEM_MC_FETCH_MEM_I64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I64' ),
830 'IEM_MC_FETCH_MEM_R32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R32' ),
831 'IEM_MC_FETCH_MEM_R64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R64' ),
832 'IEM_MC_FETCH_MEM_R80': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R80' ),
833 'IEM_MC_FETCH_MEM_D80': ( 1, 'IEM_MC_FETCH_MEM_FLAT_D80' ),
834 'IEM_MC_FETCH_MEM_U128': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128' ),
835 'IEM_MC_FETCH_MEM_U128_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_NO_AC' ),
836 'IEM_MC_FETCH_MEM_U128_ALIGN_SSE': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_ALIGN_SSE' ),
837 'IEM_MC_FETCH_MEM_XMM': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM' ),
838 'IEM_MC_FETCH_MEM_XMM_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_NO_AC' ),
839 'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE' ),
840 'IEM_MC_FETCH_MEM_XMM_U32': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32' ),
841 'IEM_MC_FETCH_MEM_XMM_U64': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64' ),
842 'IEM_MC_FETCH_MEM_U256': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256' ),
843 'IEM_MC_FETCH_MEM_U256_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_NO_AC' ),
844 'IEM_MC_FETCH_MEM_U256_ALIGN_AVX': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_ALIGN_AVX' ),
845 'IEM_MC_FETCH_MEM_YMM': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM' ),
846 'IEM_MC_FETCH_MEM_YMM_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_NO_AC' ),
847 'IEM_MC_FETCH_MEM_YMM_ALIGN_AVX': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX' ),
848 'IEM_MC_FETCH_MEM_U8_ZX_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U16' ),
849 'IEM_MC_FETCH_MEM_U8_ZX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U32' ),
850 'IEM_MC_FETCH_MEM_U8_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U64' ),
851 'IEM_MC_FETCH_MEM_U16_ZX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U32' ),
852 'IEM_MC_FETCH_MEM_U16_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U64' ),
853 'IEM_MC_FETCH_MEM_U32_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_ZX_U64' ),
854 'IEM_MC_FETCH_MEM_U8_SX_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U16' ),
855 'IEM_MC_FETCH_MEM_U8_SX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U32' ),
856 'IEM_MC_FETCH_MEM_U8_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U64' ),
857 'IEM_MC_FETCH_MEM_U16_SX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U32' ),
858 'IEM_MC_FETCH_MEM_U16_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U64' ),
859 'IEM_MC_FETCH_MEM_U32_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_SX_U64' ),
860 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128': ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128' ),
861 'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE_AND_XREG_XMM' ),
862 'IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM': ( 3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32_AND_XREG_XMM' ),
863 'IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM': ( 3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64_AND_XREG_XMM' ),
864 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64':
865 ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_RAX_RDX_U64' ),
866 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64':
867 ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64' ),
868 'IEM_MC_STORE_MEM_U8': ( 0, 'IEM_MC_STORE_MEM_FLAT_U8' ),
869 'IEM_MC_STORE_MEM_U16': ( 0, 'IEM_MC_STORE_MEM_FLAT_U16' ),
870 'IEM_MC_STORE_MEM_U32': ( 0, 'IEM_MC_STORE_MEM_FLAT_U32' ),
871 'IEM_MC_STORE_MEM_U64': ( 0, 'IEM_MC_STORE_MEM_FLAT_U64' ),
872 'IEM_MC_STORE_MEM_U8_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U8_CONST' ),
873 'IEM_MC_STORE_MEM_U16_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U16_CONST' ),
874 'IEM_MC_STORE_MEM_U32_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U32_CONST' ),
875 'IEM_MC_STORE_MEM_U64_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U64_CONST' ),
876 'IEM_MC_STORE_MEM_U128': ( 0, 'IEM_MC_STORE_MEM_FLAT_U128' ),
877 'IEM_MC_STORE_MEM_U128_ALIGN_SSE': ( 0, 'IEM_MC_STORE_MEM_FLAT_U128_ALIGN_SSE' ),
878 'IEM_MC_STORE_MEM_U256': ( 0, 'IEM_MC_STORE_MEM_FLAT_U256' ),
879 'IEM_MC_STORE_MEM_U256_ALIGN_AVX': ( 0, 'IEM_MC_STORE_MEM_FLAT_U256_ALIGN_AVX' ),
880 'IEM_MC_MEM_MAP_D80_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_D80_WO' ),
881 'IEM_MC_MEM_MAP_I16_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_I16_WO' ),
882 'IEM_MC_MEM_MAP_I32_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_I32_WO' ),
883 'IEM_MC_MEM_MAP_I64_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_I64_WO' ),
884 'IEM_MC_MEM_MAP_R32_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_R32_WO' ),
885 'IEM_MC_MEM_MAP_R64_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_R64_WO' ),
886 'IEM_MC_MEM_MAP_R80_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_R80_WO' ),
887 'IEM_MC_MEM_MAP_U8_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_ATOMIC' ),
888 'IEM_MC_MEM_MAP_U8_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RW' ),
889 'IEM_MC_MEM_MAP_U8_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RO' ),
890 'IEM_MC_MEM_MAP_U8_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_WO' ),
891 'IEM_MC_MEM_MAP_U16_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_ATOMIC' ),
892 'IEM_MC_MEM_MAP_U16_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RW' ),
893 'IEM_MC_MEM_MAP_U16_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RO' ),
894 'IEM_MC_MEM_MAP_U16_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_WO' ),
895 'IEM_MC_MEM_MAP_U32_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_ATOMIC' ),
896 'IEM_MC_MEM_MAP_U32_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RW' ),
897 'IEM_MC_MEM_MAP_U32_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RO' ),
898 'IEM_MC_MEM_MAP_U32_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_WO' ),
899 'IEM_MC_MEM_MAP_U64_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_ATOMIC' ),
900 'IEM_MC_MEM_MAP_U64_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RW' ),
901 'IEM_MC_MEM_MAP_U64_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RO' ),
902 'IEM_MC_MEM_MAP_U64_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_WO' ),
903 'IEM_MC_MEM_MAP_U128_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_ATOMIC' ),
904 'IEM_MC_MEM_MAP_U128_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_RW' ),
905 'IEM_MC_MEM_MAP_U128_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_RO' ),
906 'IEM_MC_MEM_MAP_U128_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_WO' ),
907 'IEM_MC_MEM_MAP_EX': ( 3, 'IEM_MC_MEM_FLAT_MAP_EX' ),
908 };
909
910 kdMemMcToFlatInfoStack = {
911 'IEM_MC_PUSH_U16': ( 'IEM_MC_FLAT32_PUSH_U16', 'IEM_MC_FLAT64_PUSH_U16', ),
912 'IEM_MC_PUSH_U32': ( 'IEM_MC_FLAT32_PUSH_U32', 'IEM_MC_PUSH_U32', ),
913 'IEM_MC_PUSH_U64': ( 'IEM_MC_PUSH_U64', 'IEM_MC_FLAT64_PUSH_U64', ),
914 'IEM_MC_PUSH_U32_SREG': ( 'IEM_MC_FLAT32_PUSH_U32_SREG', 'IEM_MC_PUSH_U32_SREG' ),
915 'IEM_MC_POP_GREG_U16': ( 'IEM_MC_FLAT32_POP_GREG_U16', 'IEM_MC_FLAT64_POP_GREG_U16', ),
916 'IEM_MC_POP_GREG_U32': ( 'IEM_MC_FLAT32_POP_GREG_U32', 'IEM_MC_POP_GREG_U32', ),
917 'IEM_MC_POP_GREG_U64': ( 'IEM_MC_POP_GREG_U64', 'IEM_MC_FLAT64_POP_GREG_U64', ),
918 };
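 # The tuples above are indexed by whether the current variation has a flat 64-bit
 # stack (kdVariationsWithFlat64StackAddress): index 0 selects the form used for flat
 # 32-bit stacks, index 1 the one for flat 64-bit stacks (see analyzeMorphStmtForThreaded).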
919
920 kdThreadedCalcRmEffAddrMcByVariation = {
921 ksVariation_16: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
922 ksVariation_16f: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
923 ksVariation_16_Pre386: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
924 ksVariation_16f_Pre386: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
925 ksVariation_32_Addr16: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
926 ksVariation_32f_Addr16: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
927 ksVariation_16_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
928 ksVariation_16f_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
929 ksVariation_32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
930 ksVariation_32f: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
931 ksVariation_32_Flat: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
932 ksVariation_32f_Flat: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
933 ksVariation_64: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
934 ksVariation_64f: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
935 ksVariation_64_FsGs: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
936 ksVariation_64f_FsGs: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
937 ksVariation_64_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32', ## @todo How did this work again...
938 ksVariation_64f_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32',
939 };
940
941 def analyzeMorphStmtForThreaded(self, aoStmts, iParamRef = 0):
942 """
943 Transforms (copy) the statements into those for the threaded function.
944
945 Returns list/tree of statements (aoStmts is not modified) and the new
946 iParamRef value.
947 """
948 #
949 # We'll be traversing aoParamRefs in parallel to the statements, so we
950 # must match the traversal in analyzeFindThreadedParamRefs exactly.
951 #
952 #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
953 aoThreadedStmts = [];
954 for oStmt in aoStmts:
955 # Skip C++ statements that are purely related to decoding.
956 if not oStmt.isCppStmt() or not oStmt.fDecode:
957 # Copy the statement. Make a deep copy to make sure we've got our own
958 # copies of all instance variables, even if a bit overkill at the moment.
959 oNewStmt = copy.deepcopy(oStmt);
960 aoThreadedStmts.append(oNewStmt);
961 #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));
962
963 # If the statement has parameter references, process the relevant parameters.
964 # We grab the references relevant to this statement and apply them in reverse order.
965 if iParamRef < len(self.aoParamRefs) and self.aoParamRefs[iParamRef].oStmt == oStmt:
966 iParamRefFirst = iParamRef;
967 while True:
968 iParamRef += 1;
969 if iParamRef >= len(self.aoParamRefs) or self.aoParamRefs[iParamRef].oStmt != oStmt:
970 break;
971
972 #print('iParamRefFirst=%s iParamRef=%s' % (iParamRefFirst, iParamRef));
973 for iCurRef in range(iParamRef - 1, iParamRefFirst - 1, -1):
974 oCurRef = self.aoParamRefs[iCurRef];
975 if oCurRef.iParam is not None:
976 assert oCurRef.oStmt == oStmt;
977 #print('iCurRef=%s iParam=%s sOrgRef=%s' % (iCurRef, oCurRef.iParam, oCurRef.sOrgRef));
978 sSrcParam = oNewStmt.asParams[oCurRef.iParam];
979 assert ( sSrcParam[oCurRef.offParam : oCurRef.offParam + len(oCurRef.sOrgRef)] == oCurRef.sOrgRef
980 or oCurRef.fCustomRef), \
981 'offParam=%s sOrgRef=%s iParam=%s oStmt.sName=%s sSrcParam=%s<eos>' \
982 % (oCurRef.offParam, oCurRef.sOrgRef, oCurRef.iParam, oStmt.sName, sSrcParam);
983 oNewStmt.asParams[oCurRef.iParam] = sSrcParam[0 : oCurRef.offParam] \
984 + oCurRef.sNewName \
985 + sSrcParam[oCurRef.offParam + len(oCurRef.sOrgRef) : ];
986
987 # Morph IEM_MC_CALC_RM_EFF_ADDR into IEM_MC_CALC_RM_EFF_ADDR_THREADED ...
988 if oNewStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
989 oNewStmt.sName = self.kdThreadedCalcRmEffAddrMcByVariation[self.sVariation];
990 assert len(oNewStmt.asParams) == 3;
991
992 if self.sVariation in self.kdVariationsWithFlatAddr16:
993 oNewStmt.asParams = [
994 oNewStmt.asParams[0], oNewStmt.asParams[1], self.dParamRefs['u16Disp'][0].sNewName,
995 ];
996 else:
997 sSibAndMore = self.dParamRefs['bSib'][0].sNewName; # Merge bSib and 2nd part of cbImmAndRspOffset.
998 if oStmt.asParams[2] not in ('0', '1', '2', '4'):
999 sSibAndMore = '(%s) | ((%s) & 0x0f00)' % (self.dParamRefs['bSib'][0].sNewName, oStmt.asParams[2]);
1000
1001 if self.sVariation in self.kdVariationsWithFlatAddr32No64:
1002 oNewStmt.asParams = [
1003 oNewStmt.asParams[0], oNewStmt.asParams[1], sSibAndMore, self.dParamRefs['u32Disp'][0].sNewName,
1004 ];
1005 else:
1006 oNewStmt.asParams = [
1007 oNewStmt.asParams[0], self.dParamRefs['bRmEx'][0].sNewName, sSibAndMore,
1008 self.dParamRefs['u32Disp'][0].sNewName, self.dParamRefs['cbInstr'][0].sNewName,
1009 ];
1010 # ... and IEM_MC_ADVANCE_RIP_AND_FINISH into *_THREADED_PCxx[_WITH_FLAGS] ...
1011 elif ( oNewStmt.sName
1012 in ('IEM_MC_ADVANCE_RIP_AND_FINISH',
1013 'IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH',
1014 'IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH', 'IEM_MC_SET_RIP_U64_AND_FINISH', )):
1015 if oNewStmt.sName not in ('IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH',
1016 'IEM_MC_SET_RIP_U64_AND_FINISH', ):
1017 oNewStmt.asParams.append(self.dParamRefs['cbInstr'][0].sNewName);
1018 if ( oNewStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH', )
1019 and self.sVariation not in self.kdVariationsOnlyPre386):
1020 oNewStmt.asParams.append(self.dParamRefs['pVCpu->iem.s.enmEffOpSize'][0].sNewName);
1021 oNewStmt.sName += '_THREADED';
1022 if self.sVariation in self.kdVariationsOnly64NoFlags:
1023 oNewStmt.sName += '_PC64';
1024 elif self.sVariation in self.kdVariationsOnly64WithFlags:
1025 oNewStmt.sName += '_PC64_WITH_FLAGS';
1026 elif self.sVariation in self.kdVariationsOnlyPre386NoFlags:
1027 oNewStmt.sName += '_PC16';
1028 elif self.sVariation in self.kdVariationsOnlyPre386WithFlags:
1029 oNewStmt.sName += '_PC16_WITH_FLAGS';
1030 elif self.sVariation not in self.kdVariationsWithEflagsCheckingAndClearing:
1031 assert self.sVariation != self.ksVariation_Default;
1032 oNewStmt.sName += '_PC32';
1033 else:
1034 oNewStmt.sName += '_PC32_WITH_FLAGS';
1035
1036 # This is making the wrong branch of conditionals break out of the TB.
1037 if (oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH',
1038 'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH')):
1039 sExitTbStatus = 'VINF_SUCCESS';
1040 if self.sVariation in self.kdVariationsWithConditional:
1041 if self.sVariation in self.kdVariationsWithConditionalNoJmp:
1042 if oStmt.sName != 'IEM_MC_ADVANCE_RIP_AND_FINISH':
1043 sExitTbStatus = 'VINF_IEM_REEXEC_BREAK';
1044 elif oStmt.sName == 'IEM_MC_ADVANCE_RIP_AND_FINISH':
1045 sExitTbStatus = 'VINF_IEM_REEXEC_BREAK';
1046 oNewStmt.asParams.append(sExitTbStatus);
1047
1048 # ... and IEM_MC_*_GREG_U8 into *_THREADED w/ reworked index taking REX into account
1049 elif oNewStmt.sName.startswith('IEM_MC_') and oNewStmt.sName.find('_GREG_U8') > 0:
1050 (idxReg, _, sStdRef) = self.analyze8BitGRegStmt(oStmt); # Don't use oNewStmt as it has been modified!
1051 oNewStmt.asParams[idxReg] = self.dParamRefs[sStdRef][0].sNewName;
1052 oNewStmt.sName += '_THREADED';
1053
1054 # ... and IEM_MC_CALL_CIMPL_[0-5] and IEM_MC_DEFER_TO_CIMPL_[0-5]_RET into *_THREADED ...
1055 elif oNewStmt.sName.startswith('IEM_MC_CALL_CIMPL_') or oNewStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_'):
1056 oNewStmt.sName += '_THREADED';
1057 oNewStmt.idxFn += 1;
1058 oNewStmt.idxParams += 1;
1059 oNewStmt.asParams.insert(0, self.dParamRefs['cbInstr'][0].sNewName);
1060
1061 # ... and in FLAT modes we must morph memory access into FLAT accesses ...
1062 elif ( self.sVariation in self.kdVariationsWithFlatAddress
1063 and ( oNewStmt.sName.startswith('IEM_MC_FETCH_MEM')
1064 or (oNewStmt.sName.startswith('IEM_MC_STORE_MEM_') and oNewStmt.sName.find('_BY_REF') < 0)
1065 or oNewStmt.sName.startswith('IEM_MC_MEM_MAP') )):
1066 idxEffSeg = self.kdMemMcToFlatInfo[oNewStmt.sName][0];
1067 if idxEffSeg != -1:
1068 if ( oNewStmt.asParams[idxEffSeg].find('iEffSeg') < 0
1069 and oNewStmt.asParams[idxEffSeg] not in ('X86_SREG_ES', ) ):
1070 self.raiseProblem('Expected iEffSeg as param #%d to %s: %s'
1071 % (idxEffSeg + 1, oNewStmt.sName, oNewStmt.asParams[idxEffSeg],));
1072 oNewStmt.asParams.pop(idxEffSeg);
1073 oNewStmt.sName = self.kdMemMcToFlatInfo[oNewStmt.sName][1];
1074
1075 # ... PUSH and POP also need flat variants, but these differ a little.
1076 elif ( self.sVariation in self.kdVariationsWithFlatStackAddress
1077 and ( (oNewStmt.sName.startswith('IEM_MC_PUSH') and oNewStmt.sName.find('_FPU') < 0)
1078 or oNewStmt.sName.startswith('IEM_MC_POP'))):
1079 oNewStmt.sName = self.kdMemMcToFlatInfoStack[oNewStmt.sName][int(self.sVariation in
1080 self.kdVariationsWithFlat64StackAddress)];
1081
1082
1083 # Process branches of conditionals recursively.
1084 if isinstance(oStmt, iai.McStmtCond):
1085 (oNewStmt.aoIfBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoIfBranch, iParamRef);
1086 if oStmt.aoElseBranch:
1087 (oNewStmt.aoElseBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoElseBranch, iParamRef);
1088
1089 return (aoThreadedStmts, iParamRef);
1090
1091
1092 def analyzeConsolidateThreadedParamRefs(self):
1093 """
1094 Consolidate threaded function parameter references into a dictionary
1095 with lists of the references to each variable/field.
1096 """
1097 # Gather unique parameters.
1098 self.dParamRefs = {};
1099 for oRef in self.aoParamRefs:
1100 if oRef.sStdRef not in self.dParamRefs:
1101 self.dParamRefs[oRef.sStdRef] = [oRef,];
1102 else:
1103 self.dParamRefs[oRef.sStdRef].append(oRef);
1104
1105 # Generate names for them for use in the threaded function.
1106 dParamNames = {};
1107 for sName, aoRefs in self.dParamRefs.items():
1108 # Morph the reference expression into a name.
1109 if sName.startswith('IEM_GET_MODRM_REG'): sName = 'bModRmRegP';
1110 elif sName.startswith('IEM_GET_MODRM_RM'): sName = 'bModRmRmP';
1111 elif sName.startswith('IEM_GET_MODRM_REG_8'): sName = 'bModRmReg8P';
1112 elif sName.startswith('IEM_GET_MODRM_RM_8'): sName = 'bModRmRm8P';
1113 elif sName.startswith('IEM_GET_EFFECTIVE_VVVV'): sName = 'bEffVvvvP';
1114 elif sName.find('.') >= 0 or sName.find('->') >= 0:
1115 sName = sName[max(sName.rfind('.'), sName.rfind('>')) + 1 : ] + 'P';
1116 else:
1117 sName += 'P';
1118
1119 # Ensure it's unique.
1120 if sName in dParamNames:
1121 for i in range(10):
1122 if sName + str(i) not in dParamNames:
1123 sName += str(i);
1124 break;
1125 dParamNames[sName] = True;
1126
1127 # Update all the references.
1128 for oRef in aoRefs:
1129 oRef.sNewName = sName;
1130
1131 # Organize them by size too for the purpose of optimizing them.
1132 dBySize = {} # type: Dict[int, List[str]]
1133 for sStdRef, aoRefs in self.dParamRefs.items():
1134 if aoRefs[0].sType[0] != 'P':
1135 cBits = g_kdTypeInfo[aoRefs[0].sType][0];
1136 assert(cBits <= 64);
1137 else:
1138 cBits = 64;
1139
1140 if cBits not in dBySize:
1141 dBySize[cBits] = [sStdRef,]
1142 else:
1143 dBySize[cBits].append(sStdRef);
1144
1145 # Pack the parameters as best as we can, starting with the largest ones
1146 # and ASSUMING a 64-bit parameter size.
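 # For illustration: references of 32, 32, 16 and 4 bits pack greedily into two
 # 64-bit parameters: the two 32-bit values at bit offsets 0 and 32 of param #0,
 # the 16-bit and 4-bit values at offsets 0 and 16 of param #1.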
1147 self.cMinParams = 0;
1148 offNewParam = 0;
1149 for cBits in sorted(dBySize.keys(), reverse = True):
1150 for sStdRef in dBySize[cBits]:
1151 if offNewParam == 0 or offNewParam + cBits > 64:
1152 self.cMinParams += 1;
1153 offNewParam = cBits;
1154 else:
1155 offNewParam += cBits;
1156 assert(offNewParam <= 64);
1157
1158 for oRef in self.dParamRefs[sStdRef]:
1159 oRef.iNewParam = self.cMinParams - 1;
1160 oRef.offNewParam = offNewParam - cBits;
1161
1162 # Currently there are a few that require 4 parameters, list these so we can figure out why:
1163 if self.cMinParams >= 4:
1164 print('debug: cMinParams=%s cRawParams=%s - %s:%d'
1165 % (self.cMinParams, len(self.dParamRefs), self.oParent.oMcBlock.sSrcFile, self.oParent.oMcBlock.iBeginLine,));
1166
1167 return True;
1168
1169 ksHexDigits = '0123456789abcdefABCDEF';
1170
1171 def analyzeFindThreadedParamRefs(self, aoStmts): # pylint: disable=too-many-statements
1172 """
1173 Scans the statements for things that have to be passed on to the threaded
1174 function (populates self.aoParamRefs).
1175 """
1176 for oStmt in aoStmts:
1177 # Some statements we can skip altogether.
1178 if isinstance(oStmt, iai.McCppPreProc):
1179 continue;
1180 if oStmt.isCppStmt() and oStmt.fDecode:
1181 continue;
1182 if oStmt.sName in ('IEM_MC_BEGIN',):
1183 continue;
1184
1185 if isinstance(oStmt, iai.McStmtVar):
1186 if oStmt.sValue is None:
1187 continue;
1188 aiSkipParams = { 0: True, 1: True, 3: True };
1189 else:
1190 aiSkipParams = {};
1191
1192 # Several statements have implicit parameters and some have different parameters.
1193 if oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH',
1194 'IEM_MC_REL_JMP_S32_AND_FINISH', 'IEM_MC_CALL_CIMPL_0', 'IEM_MC_CALL_CIMPL_1',
1195 'IEM_MC_CALL_CIMPL_2', 'IEM_MC_CALL_CIMPL_3', 'IEM_MC_CALL_CIMPL_4', 'IEM_MC_CALL_CIMPL_5',
1196 'IEM_MC_DEFER_TO_CIMPL_0_RET', 'IEM_MC_DEFER_TO_CIMPL_1_RET', 'IEM_MC_DEFER_TO_CIMPL_2_RET',
1197 'IEM_MC_DEFER_TO_CIMPL_3_RET', 'IEM_MC_DEFER_TO_CIMPL_4_RET', 'IEM_MC_DEFER_TO_CIMPL_5_RET', ):
1198 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)', 'uint4_t', oStmt, sStdRef = 'cbInstr'));
1199
1200 if ( oStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH',)
1201 and self.sVariation not in self.kdVariationsOnlyPre386):
1202 self.aoParamRefs.append(ThreadedParamRef('pVCpu->iem.s.enmEffOpSize', 'IEMMODE', oStmt));
1203
1204 if oStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
1205 # This is being pretty presumptive about bRm always being the RM byte...
1206 assert len(oStmt.asParams) == 3;
1207 assert oStmt.asParams[1] == 'bRm';
1208
1209 if self.sVariation in self.kdVariationsWithFlatAddr16:
1210 self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
1211 self.aoParamRefs.append(ThreadedParamRef('(uint16_t)uEffAddrInfo' ,
1212 'uint16_t', oStmt, sStdRef = 'u16Disp'));
1213 elif self.sVariation in self.kdVariationsWithFlatAddr32No64:
1214 self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
1215 self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
1216 'uint8_t', oStmt, sStdRef = 'bSib'));
1217 self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
1218 'uint32_t', oStmt, sStdRef = 'u32Disp'));
1219 else:
1220 assert self.sVariation in self.kdVariationsWithAddressOnly64;
1221 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_MODRM_EX(pVCpu, bRm)',
1222 'uint8_t', oStmt, sStdRef = 'bRmEx'));
1223 self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
1224 'uint8_t', oStmt, sStdRef = 'bSib'));
1225 self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
1226 'uint32_t', oStmt, sStdRef = 'u32Disp'));
1227 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)',
1228 'uint4_t', oStmt, sStdRef = 'cbInstr'));
1229 aiSkipParams[1] = True; # Skip the bRm parameter as it is being replaced by bRmEx.
1230
1231 # 8-bit register accesses need to have their index argument reworked to take REX into account.
1232 if oStmt.sName.startswith('IEM_MC_') and oStmt.sName.find('_GREG_U8') > 0:
1233 (idxReg, sOrgRef, sStdRef) = self.analyze8BitGRegStmt(oStmt);
1234 self.aoParamRefs.append(ThreadedParamRef(sOrgRef, 'uint16_t', oStmt, idxReg, sStdRef = sStdRef));
1235 aiSkipParams[idxReg] = True; # Skip the parameter below.
1236
1237 # If in flat mode variation, ignore the effective segment parameter to memory MCs.
1238 if ( self.sVariation in self.kdVariationsWithFlatAddress
1239 and oStmt.sName in self.kdMemMcToFlatInfo
1240 and self.kdMemMcToFlatInfo[oStmt.sName][0] != -1):
1241 aiSkipParams[self.kdMemMcToFlatInfo[oStmt.sName][0]] = True;
1242
1243 # Inspect the target of calls to see if we need to pass down a
1244 # function pointer or function table pointer for it to work.
1245 if isinstance(oStmt, iai.McStmtCall):
1246 if oStmt.sFn[0] == 'p':
1247 self.aoParamRefs.append(ThreadedParamRef(oStmt.sFn, self.analyzeCallToType(oStmt.sFn), oStmt, oStmt.idxFn));
1248 elif ( oStmt.sFn[0] != 'i'
1249 and not oStmt.sFn.startswith('IEMTARGETCPU_EFL_BEHAVIOR_SELECT')
1250 and not oStmt.sFn.startswith('IEM_SELECT_HOST_OR_FALLBACK') ):
1251 self.raiseProblem('Bogus function name in %s: %s' % (oStmt.sName, oStmt.sFn,));
1252 aiSkipParams[oStmt.idxFn] = True;
1253
1254 # Skip the hint parameter (first) for IEM_MC_CALL_CIMPL_X.
1255 if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
1256 assert oStmt.idxFn == 2;
1257 aiSkipParams[0] = True;
1258
1259
1260 # Check all the parameters for bogus references.
1261 for iParam, sParam in enumerate(oStmt.asParams):
1262 if iParam not in aiSkipParams and sParam not in self.oParent.dVariables:
1263 # The parameter may contain a C expression, so we have to try to
1264 # extract the relevant bits, i.e. variables and fields while
1265 # ignoring operators and parentheses.
1266 offParam = 0;
1267 while offParam < len(sParam):
1268 # Is it the start of a C identifier? If so, find the end, but don't stop on field separators (->, .).
1269 ch = sParam[offParam];
1270 if ch.isalpha() or ch == '_':
1271 offStart = offParam;
1272 offParam += 1;
1273 while offParam < len(sParam):
1274 ch = sParam[offParam];
1275 if not ch.isalnum() and ch != '_' and ch != '.':
1276 if ch != '-' or sParam[offParam + 1] != '>':
1277 # Special hack for the 'CTX_SUFF(pVM)' bit in pVCpu->CTX_SUFF(pVM)->xxxx:
1278 if ( ch == '('
1279 and sParam[offStart : offParam + len('(pVM)->')] == 'pVCpu->CTX_SUFF(pVM)->'):
1280 offParam += len('(pVM)->') - 1;
1281 else:
1282 break;
1283 offParam += 1;
1284 offParam += 1;
1285 sRef = sParam[offStart : offParam];
1286
1287 # For register references, we pass the full register index instead, since macros
1288 # like IEM_GET_MODRM_REG implicitly reference pVCpu->iem.s.uRexReg, and the
1289 # threaded function will be more efficient if we just pass the register index
1290 # as a 4-bit param.
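 # For illustration: the whole 'IEM_GET_MODRM_REG(pVCpu, bRm)' expression becomes a
 # single uint8_t parameter reference here, later renamed to 'bModRmRegP' by
 # analyzeConsolidateThreadedParamRefs.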
1291 if ( sRef.startswith('IEM_GET_MODRM')
1292 or sRef.startswith('IEM_GET_EFFECTIVE_VVVV') ):
1293 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1294 if sParam[offParam] != '(':
1295 self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
1296 (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
1297 if asMacroParams is None:
1298 self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
1299 offParam = offCloseParam + 1;
1300 self.aoParamRefs.append(ThreadedParamRef(sParam[offStart : offParam], 'uint8_t',
1301 oStmt, iParam, offStart));
1302
1303 # We can skip known variables.
1304 elif sRef in self.oParent.dVariables:
1305 pass;
1306
1307 # Skip certain macro invocations.
1308 elif sRef in ('IEM_GET_HOST_CPU_FEATURES',
1309 'IEM_GET_GUEST_CPU_FEATURES',
1310 'IEM_IS_GUEST_CPU_AMD',
1311 'IEM_IS_16BIT_CODE',
1312 'IEM_IS_32BIT_CODE',
1313 'IEM_IS_64BIT_CODE',
1314 ):
1315 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1316 if sParam[offParam] != '(':
1317 self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
1318 (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
1319 if asMacroParams is None:
1320 self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
1321 offParam = offCloseParam + 1;
1322
1323 # Skip any dereference following it, unless it's a predicate like IEM_IS_GUEST_CPU_AMD.
1324 if sRef not in ('IEM_IS_GUEST_CPU_AMD',
1325 'IEM_IS_16BIT_CODE',
1326 'IEM_IS_32BIT_CODE',
1327 'IEM_IS_64BIT_CODE',
1328 ):
1329 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1330 if offParam + 2 <= len(sParam) and sParam[offParam : offParam + 2] == '->':
1331 offParam = iai.McBlock.skipSpacesAt(sParam, offParam + 2, len(sParam));
1332 while offParam < len(sParam) and (sParam[offParam].isalnum() or sParam[offParam] in '_.'):
1333 offParam += 1;
1334
1335 # Skip constants, globals, types (casts), sizeof and macros.
1336 elif ( sRef.startswith('IEM_OP_PRF_')
1337 or sRef.startswith('IEM_ACCESS_')
1338 or sRef.startswith('IEMINT_')
1339 or sRef.startswith('X86_GREG_')
1340 or sRef.startswith('X86_SREG_')
1341 or sRef.startswith('X86_EFL_')
1342 or sRef.startswith('X86_FSW_')
1343 or sRef.startswith('X86_FCW_')
1344 or sRef.startswith('X86_XCPT_')
1345 or sRef.startswith('IEMMODE_')
1346 or sRef.startswith('IEM_F_')
1347 or sRef.startswith('IEM_CIMPL_F_')
1348 or sRef.startswith('g_')
1349 or sRef.startswith('iemAImpl_')
1350 or sRef.startswith('kIemNativeGstReg_')
1351 or sRef in ( 'int8_t', 'int16_t', 'int32_t', 'int64_t',
1352 'INT8_C', 'INT16_C', 'INT32_C', 'INT64_C',
1353 'UINT8_C', 'UINT16_C', 'UINT32_C', 'UINT64_C',
1354 'UINT8_MAX', 'UINT16_MAX', 'UINT32_MAX', 'UINT64_MAX',
1355 'INT8_MAX', 'INT16_MAX', 'INT32_MAX', 'INT64_MAX',
1356 'INT8_MIN', 'INT16_MIN', 'INT32_MIN', 'INT64_MIN',
1357 'sizeof', 'NOREF', 'RT_NOREF', 'IEMMODE_64BIT',
1358 'RT_BIT_32', 'RT_BIT_64', 'true', 'false',
1359 'NIL_RTGCPTR',) ):
1360 pass;
1361
1362                            # Whatever is left needs to be passed to the threaded function:
1363                            # any variable (non-field) and decoder fields in IEMCPU will need to be parameterized.
1364 elif ( ( '.' not in sRef
1365 and '-' not in sRef
1366 and sRef not in ('pVCpu', ) )
1367 or iai.McBlock.koReIemDecoderVars.search(sRef) is not None):
1368 self.aoParamRefs.append(ThreadedParamRef(sRef, self.analyzeReferenceToType(sRef),
1369 oStmt, iParam, offStart));
1370 # Number.
1371 elif ch.isdigit():
1372 if ( ch == '0'
1373 and offParam + 2 <= len(sParam)
1374 and sParam[offParam + 1] in 'xX'
1375 and sParam[offParam + 2] in self.ksHexDigits ):
1376 offParam += 2;
1377 while offParam < len(sParam) and sParam[offParam] in self.ksHexDigits:
1378 offParam += 1;
1379 else:
1380 while offParam < len(sParam) and sParam[offParam].isdigit():
1381 offParam += 1;
1382 # Comment?
1383 elif ( ch == '/'
1384 and offParam + 4 <= len(sParam)
1385 and sParam[offParam + 1] == '*'):
1386 offParam += 2;
1387 offNext = sParam.find('*/', offParam);
1388 if offNext < offParam:
1389 self.raiseProblem('Unable to find "*/" in "%s" ("%s")' % (sRef, oStmt.renderCode(),));
1390 offParam = offNext + 2;
1391 # Whatever else.
1392 else:
1393 offParam += 1;
1394
1395 # Traverse the branches of conditionals.
1396 if isinstance(oStmt, iai.McStmtCond):
1397 self.analyzeFindThreadedParamRefs(oStmt.aoIfBranch);
1398 self.analyzeFindThreadedParamRefs(oStmt.aoElseBranch);
1399 return True;
1400
1401 def analyzeVariation(self, aoStmts):
1402 """
1403 2nd part of the analysis, done on each variation.
1404
1405 The variations may differ in parameter requirements and will end up with
1406 slightly different MC sequences. Thus this is done on each individually.
1407
1408 Returns dummy True - raises exception on trouble.
1409 """
1410        # Now scan the code for variables and field references that need to
1411 # be passed to the threaded function because they are related to the
1412 # instruction decoding.
1413 self.analyzeFindThreadedParamRefs(aoStmts);
1414 self.analyzeConsolidateThreadedParamRefs();
1415
1416 # Morph the statement stream for the block into what we'll be using in the threaded function.
1417 (self.aoStmtsForThreadedFunction, iParamRef) = self.analyzeMorphStmtForThreaded(aoStmts);
1418 if iParamRef != len(self.aoParamRefs):
1419 raise Exception('iParamRef=%s, expected %s!' % (iParamRef, len(self.aoParamRefs),));
1420
1421 return True;
1422
1423 def emitThreadedCallStmts(self, cchIndent, sCallVarNm = None):
1424 """
1425        Produces generic C++ statements that emit a call to the threaded function
1426 variation and any subsequent checks that may be necessary after that.
1427
1428 The sCallVarNm is the name of the variable with the threaded function
1429 to call. This is for the case where all the variations have the same
1430 parameters and only the threaded function number differs.
1431 """
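        # Illustrative shape of the generated C++ for a variation with two packed
        # parameters (a sketch only; the actual argument expressions are assembled below):
        #     IEM_MC2_BEGIN_EMIT_CALLS(<1 if IEM_CIMPL_F_CHECK_IRQ_BEFORE else 0>);
        #     IEM_MC2_EMIT_CALL_2(<kIemThreadedFunc_xxx index>, <packed arg 0>, <packed arg 1>);
        #     IEM_MC2_END_EMIT_CALLS(<IEM_CIMPL_F_XXX mask or 0>);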
1432 aoStmts = [
1433 iai.McCppCall('IEM_MC2_BEGIN_EMIT_CALLS',
1434 ['1' if 'IEM_CIMPL_F_CHECK_IRQ_BEFORE' in self.oParent.dsCImplFlags else '0'],
1435 cchIndent = cchIndent), # Scope and a hook for various stuff.
1436 ];
1437
1438 # The call to the threaded function.
1439 asCallArgs = [ self.getIndexName() if not sCallVarNm else sCallVarNm, ];
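        # Each of the generic uint64_t parameters may carry several smaller references;
        # every fragment is cast to uint64_t (via its unsigned counterpart for narrow
        # signed types, to avoid sign-extension), shifted to its assigned bit offset and
        # OR'ed into the argument expression.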
1440 for iParam in range(self.cMinParams):
1441 asFrags = [];
1442 for aoRefs in self.dParamRefs.values():
1443 oRef = aoRefs[0];
1444 if oRef.iNewParam == iParam:
1445                    sCast = '(uint64_t)';
1446                    if oRef.sType in ('int8_t', 'int16_t', 'int32_t'): # Make sure these don't get sign-extended.
1447 sCast = '(uint64_t)(u' + oRef.sType + ')';
1448 if oRef.offNewParam == 0:
1449 asFrags.append(sCast + '(' + oRef.sOrgRef + ')');
1450 else:
1451 asFrags.append('(%s(%s) << %s)' % (sCast, oRef.sOrgRef, oRef.offNewParam));
1452 assert asFrags;
1453 asCallArgs.append(' | '.join(asFrags));
1454
1455 aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_%s' % (len(asCallArgs) - 1,), asCallArgs, cchIndent = cchIndent));
1456
1457 # 2023-11-28: This has to be done AFTER the CIMPL call, so we have to
1458 # emit this mode check from the compilation loop. On the
1459        #             plus side, this means we eliminate an unnecessary call at
1460        #             the end of the TB. :-)
1461 ## For CIMPL stuff, we need to consult the associated IEM_CIMPL_F_XXX
1462 ## mask and maybe emit additional checks.
1463 #if ( 'IEM_CIMPL_F_MODE' in self.oParent.dsCImplFlags
1464 # or 'IEM_CIMPL_F_XCPT' in self.oParent.dsCImplFlags
1465 # or 'IEM_CIMPL_F_VMEXIT' in self.oParent.dsCImplFlags):
1466 # aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_1', ( 'kIemThreadedFunc_BltIn_CheckMode', 'pVCpu->iem.s.fExec', ),
1467 # cchIndent = cchIndent));
1468
1469 sCImplFlags = ' | '.join(self.oParent.dsCImplFlags.keys());
1470 if not sCImplFlags:
1471            sCImplFlags = '0';
1472 aoStmts.append(iai.McCppCall('IEM_MC2_END_EMIT_CALLS', ( sCImplFlags, ), cchIndent = cchIndent)); # For closing the scope.
1473
1474 # Emit fEndTb = true or fTbBranched = true if any of the CIMPL flags
1475 # indicates we should do so.
1476 # Note! iemThreadedRecompilerMcDeferToCImpl0 duplicates work done here.
1477 asEndTbFlags = [];
1478 asTbBranchedFlags = [];
1479 for sFlag in self.oParent.dsCImplFlags:
1480 if self.kdCImplFlags[sFlag] is True:
1481 asEndTbFlags.append(sFlag);
1482 elif sFlag.startswith('IEM_CIMPL_F_BRANCH_'):
1483 asTbBranchedFlags.append(sFlag);
1484 if ( asTbBranchedFlags
1485 and ( 'IEM_CIMPL_F_BRANCH_CONDITIONAL' not in asTbBranchedFlags
1486 or self.sVariation not in self.kdVariationsWithConditionalNoJmp)):
1487 aoStmts.append(iai.McCppGeneric('iemThreadedSetBranched(pVCpu, %s);'
1488 % ((' | '.join(asTbBranchedFlags)).replace('IEM_CIMPL_F_BRANCH', 'IEMBRANCHED_F'),),
1489 cchIndent = cchIndent)); # Inline fn saves ~2 seconds for gcc 13/dbg (1m13s vs 1m15s).
1490 if asEndTbFlags:
1491 aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.fEndTb = true; /* %s */' % (','.join(asEndTbFlags),),
1492 cchIndent = cchIndent));
1493
1494 if 'IEM_CIMPL_F_CHECK_IRQ_AFTER' in self.oParent.dsCImplFlags:
1495 aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.cInstrTillIrqCheck = 0;', cchIndent = cchIndent));
1496
1497 return aoStmts;
1498
1499
1500class ThreadedFunction(object):
1501 """
1502 A threaded function.
1503 """
1504
1505 def __init__(self, oMcBlock: iai.McBlock) -> None:
1506 self.oMcBlock = oMcBlock # type: iai.McBlock
1507 # The remaining fields are only useful after analyze() has been called:
1508 ## Variations for this block. There is at least one.
1509 self.aoVariations = [] # type: List[ThreadedFunctionVariation]
1510 ## Variation dictionary containing the same as aoVariations.
1511 self.dVariations = {} # type: Dict[str, ThreadedFunctionVariation]
1512 ## Dictionary of local variables (IEM_MC_LOCAL[_CONST]) and call arguments (IEM_MC_ARG*).
1513 self.dVariables = {} # type: Dict[str, iai.McStmtVar]
1514 ## Dictionary with any IEM_CIMPL_F_XXX flags explicitly advertised in the code block
1515 ## and those determined by analyzeCodeOperation().
1516 self.dsCImplFlags = {} # type: Dict[str, bool]
1517
1518 @staticmethod
1519 def dummyInstance():
1520 """ Gets a dummy instance. """
1521 return ThreadedFunction(iai.McBlock('null', 999999999, 999999999,
1522 iai.DecoderFunction('null', 999999999, 'nil', ('','')), 999999999));
1523
1524 def hasWithFlagsCheckingAndClearingVariation(self):
1525 """
1526 Check if there is one or more with flags checking and clearing
1527 variations for this threaded function.
1528 """
1529 for sVarWithFlags in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
1530 if sVarWithFlags in self.dVariations:
1531 return True;
1532 return False;
1533
1534 #
1535 # Analysis and code morphing.
1536 #
1537
1538 def raiseProblem(self, sMessage):
1539 """ Raises a problem. """
1540 raise Exception('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1541
1542 def warning(self, sMessage):
1543 """ Emits a warning. """
1544 print('%s:%s: warning: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1545
1546 def analyzeFindVariablesAndCallArgs(self, aoStmts: List[iai.McStmt]) -> bool:
1547 """ Scans the statements for MC variables and call arguments. """
1548 for oStmt in aoStmts:
1549 if isinstance(oStmt, iai.McStmtVar):
1550 if oStmt.sVarName in self.dVariables:
1551 raise Exception('Variable %s is defined more than once!' % (oStmt.sVarName,));
1552 self.dVariables[oStmt.sVarName] = oStmt.sVarName;
1553
1554 # There shouldn't be any variables or arguments declared inside if/
1555 # else blocks, but scan them too to be on the safe side.
1556 if isinstance(oStmt, iai.McStmtCond):
1557 #cBefore = len(self.dVariables);
1558 self.analyzeFindVariablesAndCallArgs(oStmt.aoIfBranch);
1559 self.analyzeFindVariablesAndCallArgs(oStmt.aoElseBranch);
1560 #if len(self.dVariables) != cBefore:
1561 # raise Exception('Variables/arguments defined in conditional branches!');
1562 return True;
1563
1564 def analyzeCodeOperation(self, aoStmts: List[iai.McStmt], fSeenConditional = False) -> bool:
1565 """
1566        Analyzes the code looking for clues as to additional side-effects.
1567
1568 Currently this is simply looking for branching and adding the relevant
1569 branch flags to dsCImplFlags. ASSUMES the caller pre-populates the
1570 dictionary with a copy of self.oMcBlock.dsCImplFlags.
1571
1572 This also sets McStmtCond.oIfBranchAnnotation & McStmtCond.oElseBranchAnnotation.
1573
1574 Returns annotation on return style.
1575 """
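        # For example (illustrative): a block ending with IEM_MC_REL_JMP_S8_AND_FINISH is
        # annotated as g_ksFinishAnnotation_RelJmp, one ending with IEM_MC_ADVANCE_RIP_AND_FINISH
        # as g_ksFinishAnnotation_Advance, and an IEM_MC_DEFER_TO_CIMPL_* block as
        # g_ksFinishAnnotation_DeferToCImpl.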
1576 sAnnotation = None;
1577 for oStmt in aoStmts:
1578            # Set IEM_CIMPL_F_BRANCH_XXX if we see any branching MCs.
1579 if oStmt.sName.startswith('IEM_MC_SET_RIP'):
1580 assert not fSeenConditional;
1581 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_INDIRECT'] = True;
1582 elif oStmt.sName.startswith('IEM_MC_REL_JMP'):
1583 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_RELATIVE'] = True;
1584 if fSeenConditional:
1585 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_CONDITIONAL'] = True;
1586
1587 # Check for CIMPL and AIMPL calls.
1588 if oStmt.sName.startswith('IEM_MC_CALL_'):
1589 if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
1590 self.dsCImplFlags['IEM_CIMPL_F_CALLS_CIMPL'] = True;
1591 elif ( oStmt.sName.startswith('IEM_MC_CALL_VOID_AIMPL_')
1592 or oStmt.sName.startswith('IEM_MC_CALL_AIMPL_')
1593 or oStmt.sName.startswith('IEM_MC_CALL_AVX_AIMPL_')):
1594 self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL'] = True;
1595 elif ( oStmt.sName.startswith('IEM_MC_CALL_SSE_AIMPL_')
1596 or oStmt.sName.startswith('IEM_MC_CALL_MMX_AIMPL_')
1597 or oStmt.sName.startswith('IEM_MC_CALL_FPU_AIMPL_')):
1598 self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE'] = True;
1599 else:
1600 raise Exception('Unknown IEM_MC_CALL_* statement: %s' % (oStmt.sName,));
1601
1602 # Check for return statements.
1603 if oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH',):
1604 assert sAnnotation is None;
1605 sAnnotation = g_ksFinishAnnotation_Advance;
1606 elif oStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH',
1607 'IEM_MC_REL_JMP_S32_AND_FINISH',):
1608 assert sAnnotation is None;
1609 sAnnotation = g_ksFinishAnnotation_RelJmp;
1610 elif oStmt.sName in ('IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH',
1611 'IEM_MC_SET_RIP_U64_AND_FINISH',):
1612 assert sAnnotation is None;
1613 sAnnotation = g_ksFinishAnnotation_SetJmp;
1614 elif oStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_'):
1615 assert sAnnotation is None;
1616 sAnnotation = g_ksFinishAnnotation_DeferToCImpl;
1617
1618 # Process branches of conditionals recursively.
1619 if isinstance(oStmt, iai.McStmtCond):
1620 oStmt.oIfBranchAnnotation = self.analyzeCodeOperation(oStmt.aoIfBranch, True);
1621 if oStmt.aoElseBranch:
1622 oStmt.oElseBranchAnnotation = self.analyzeCodeOperation(oStmt.aoElseBranch, True);
1623
1624 return sAnnotation;
1625
1626 def analyze(self):
1627 """
1628 Analyzes the code, identifying the number of parameters it requires and such.
1629
1630 Returns dummy True - raises exception on trouble.
1631 """
1632
1633 # Check the block for errors before we proceed (will decode it).
1634 asErrors = self.oMcBlock.check();
1635 if asErrors:
1636 raise Exception('\n'.join(['%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sError, )
1637 for sError in asErrors]));
1638
1639 # Decode the block into a list/tree of McStmt objects.
1640 aoStmts = self.oMcBlock.decode();
1641
1642 # Scan the statements for local variables and call arguments (self.dVariables).
1643 self.analyzeFindVariablesAndCallArgs(aoStmts);
1644
1645 # Scan the code for IEM_CIMPL_F_ and other clues.
1646 self.dsCImplFlags = self.oMcBlock.dsCImplFlags.copy();
1647 self.analyzeCodeOperation(aoStmts);
1648 if ( ('IEM_CIMPL_F_CALLS_CIMPL' in self.dsCImplFlags)
1649 + ('IEM_CIMPL_F_CALLS_AIMPL' in self.dsCImplFlags)
1650 + ('IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE' in self.dsCImplFlags) > 1):
1651 self.raiseProblem('Mixing CIMPL/AIMPL/AIMPL_WITH_FXSTATE calls');
1652
1653 # Create variations as needed.
1654 if iai.McStmt.findStmtByNames(aoStmts,
1655 { 'IEM_MC_DEFER_TO_CIMPL_0_RET': True,
1656 'IEM_MC_DEFER_TO_CIMPL_1_RET': True,
1657 'IEM_MC_DEFER_TO_CIMPL_2_RET': True,
1658 'IEM_MC_DEFER_TO_CIMPL_3_RET': True, }):
1659 asVariations = (ThreadedFunctionVariation.ksVariation_Default,);
1660
1661 elif iai.McStmt.findStmtByNames(aoStmts, { 'IEM_MC_CALC_RM_EFF_ADDR' : True,
1662 'IEM_MC_FETCH_MEM_U8' : True, # mov_AL_Ob ++
1663 'IEM_MC_FETCH_MEM_U16' : True, # mov_rAX_Ov ++
1664 'IEM_MC_FETCH_MEM_U32' : True,
1665 'IEM_MC_FETCH_MEM_U64' : True,
1666 'IEM_MC_STORE_MEM_U8' : True, # mov_Ob_AL ++
1667 'IEM_MC_STORE_MEM_U16' : True, # mov_Ov_rAX ++
1668 'IEM_MC_STORE_MEM_U32' : True,
1669 'IEM_MC_STORE_MEM_U64' : True, }):
1670 if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
1671 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressOnly64;
1672 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
1673 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286Not64;
1674 elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
1675 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286;
1676 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
1677 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot64;
1678 elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
1679 asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
1680 else:
1681 asVariations = ThreadedFunctionVariation.kasVariationsWithAddress;
1682 else:
1683 if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
1684 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressOnly64;
1685 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
1686 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286Not64;
1687 elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
1688 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286;
1689 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
1690 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot64;
1691 elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
1692 asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
1693 else:
1694 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddress;
1695
1696 if ( 'IEM_CIMPL_F_BRANCH_CONDITIONAL' in self.dsCImplFlags
1697 and 'IEM_CIMPL_F_BRANCH_RELATIVE' in self.dsCImplFlags): # (latter to avoid iemOp_into)
1698 assert set(asVariations).issubset(ThreadedFunctionVariation.kasVariationsWithoutAddress), \
1699 '%s: vars=%s McFlags=%s' % (self.oMcBlock.oFunction.sName, asVariations, self.oMcBlock.dsMcFlags);
1700 asVariationsBase = asVariations;
1701 asVariations = [];
1702 for sVariation in asVariationsBase:
1703 asVariations.extend([sVariation + '_Jmp', sVariation + '_NoJmp']);
1704 assert set(asVariations).issubset(ThreadedFunctionVariation.kdVariationsWithConditional);
1705
1706 if not iai.McStmt.findStmtByNames(aoStmts,
1707 { 'IEM_MC_ADVANCE_RIP_AND_FINISH': True,
1708 'IEM_MC_REL_JMP_S8_AND_FINISH': True,
1709 'IEM_MC_REL_JMP_S16_AND_FINISH': True,
1710 'IEM_MC_REL_JMP_S32_AND_FINISH': True,
1711 'IEM_MC_SET_RIP_U16_AND_FINISH': True,
1712 'IEM_MC_SET_RIP_U32_AND_FINISH': True,
1713 'IEM_MC_SET_RIP_U64_AND_FINISH': True,
1714 }):
1715 asVariations = [sVariation for sVariation in asVariations
1716 if sVariation not in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing];
1717
1718 self.aoVariations = [ThreadedFunctionVariation(self, sVar) for sVar in asVariations];
1719
1720 # Dictionary variant of the list.
1721 self.dVariations = { oVar.sVariation: oVar for oVar in self.aoVariations };
1722
1723 # Continue the analysis on each variation.
1724 for oVariation in self.aoVariations:
1725 oVariation.analyzeVariation(aoStmts);
1726
1727 return True;
1728
1729 ## Used by emitThreadedCallStmts.
1730 kdVariationsWithNeedForPrefixCheck = {
1731 ThreadedFunctionVariation.ksVariation_64_Addr32: True,
1732 ThreadedFunctionVariation.ksVariation_64f_Addr32: True,
1733 ThreadedFunctionVariation.ksVariation_64_FsGs: True,
1734 ThreadedFunctionVariation.ksVariation_64f_FsGs: True,
1735 ThreadedFunctionVariation.ksVariation_32_Addr16: True,
1736 ThreadedFunctionVariation.ksVariation_32f_Addr16: True,
1737 ThreadedFunctionVariation.ksVariation_32_Flat: True,
1738 ThreadedFunctionVariation.ksVariation_32f_Flat: True,
1739 ThreadedFunctionVariation.ksVariation_16_Addr32: True,
1740 ThreadedFunctionVariation.ksVariation_16f_Addr32: True,
1741 };
1742
1743 def emitThreadedCallStmts(self, sBranch = None): # pylint: disable=too-many-statements
1744 """
1745 Worker for morphInputCode that returns a list of statements that emits
1746 the call to the threaded functions for the block.
1747
1748 The sBranch parameter is used with conditional branches where we'll emit
1749 different threaded calls depending on whether we're in the jump-taken or
1750 no-jump code path.
1751 """
1752 # Special case for only default variation:
1753 if len(self.aoVariations) == 1 and self.aoVariations[0].sVariation == ThreadedFunctionVariation.ksVariation_Default:
1754 assert not sBranch;
1755 return self.aoVariations[0].emitThreadedCallStmts(0);
1756
1757 #
1758        # Case statement helper class.
1759 #
1760 dByVari = self.dVariations;
1761 #fDbg = self.oMcBlock.sFunction == 'iemOpCommonPushSReg';
1762 class Case:
1763 def __init__(self, sCond, sVarNm = None):
1764 self.sCond = sCond;
1765 self.sVarNm = sVarNm;
1766 self.oVar = dByVari[sVarNm] if sVarNm else None;
1767 self.aoBody = self.oVar.emitThreadedCallStmts(8) if sVarNm else None;
1768
1769 def toCode(self):
1770 aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
1771 if self.aoBody:
1772 aoStmts.extend(self.aoBody);
1773 aoStmts.append(iai.McCppGeneric('break;', cchIndent = 8));
1774 return aoStmts;
1775
1776 def toFunctionAssignment(self):
1777 aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
1778 if self.aoBody:
1779 aoStmts.extend([
1780 iai.McCppGeneric('enmFunction = %s;' % (self.oVar.getIndexName(),), cchIndent = 8),
1781 iai.McCppGeneric('break;', cchIndent = 8),
1782 ]);
1783 return aoStmts;
1784
1785 def isSame(self, oThat):
1786 if not self.aoBody: # fall thru always matches.
1787 return True;
1788 if len(self.aoBody) != len(oThat.aoBody):
1789 #if fDbg: print('dbg: body len diff: %s vs %s' % (len(self.aoBody), len(oThat.aoBody),));
1790 return False;
1791 for iStmt, oStmt in enumerate(self.aoBody):
1792 oThatStmt = oThat.aoBody[iStmt] # type: iai.McStmt
1793 assert isinstance(oStmt, iai.McCppGeneric);
1794 assert not isinstance(oStmt, iai.McStmtCond);
1795 if isinstance(oStmt, iai.McStmtCond):
1796 return False;
1797 if oStmt.sName != oThatStmt.sName:
1798 #if fDbg: print('dbg: stmt #%s name: %s vs %s' % (iStmt, oStmt.sName, oThatStmt.sName,));
1799 return False;
1800 if len(oStmt.asParams) != len(oThatStmt.asParams):
1801 #if fDbg: print('dbg: stmt #%s param count: %s vs %s'
1802 # % (iStmt, len(oStmt.asParams), len(oThatStmt.asParams),));
1803 return False;
1804 for iParam, sParam in enumerate(oStmt.asParams):
1805 if ( sParam != oThatStmt.asParams[iParam]
1806 and ( iParam != 1
1807 or not isinstance(oStmt, iai.McCppCall)
1808 or not oStmt.asParams[0].startswith('IEM_MC2_EMIT_CALL_')
1809 or sParam != self.oVar.getIndexName()
1810 or oThatStmt.asParams[iParam] != oThat.oVar.getIndexName() )):
1811 #if fDbg: print('dbg: stmt #%s, param #%s: %s vs %s'
1812 # % (iStmt, iParam, sParam, oThatStmt.asParams[iParam],));
1813 return False;
1814 return True;
1815
1816 #
1817        # Determine what we're switching on.
1818 # This ASSUMES that (IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7!
1819 #
1820 fSimple = True;
1821 sSwitchValue = '(pVCpu->iem.s.fExec & (IEM_F_MODE_CPUMODE_MASK | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK))';
1822 if dByVari.keys() & self.kdVariationsWithNeedForPrefixCheck.keys():
1823 sSwitchValue += ' | (pVCpu->iem.s.enmEffAddrMode == (pVCpu->iem.s.fExec & IEM_F_MODE_CPUMODE_MASK) ? 0 : 8)';
1824            # Accesses via FS, GS and CS go through the non-FLAT functions. (CS
1825            # is not writable in 32-bit mode (at least), so we take the penalty
1826            # mode for any accesses via it; simpler this way.)
1827 sSwitchValue += ' | (pVCpu->iem.s.iEffSeg < X86_SREG_FS && pVCpu->iem.s.iEffSeg != X86_SREG_CS ? 0 : 16)';
1828 fSimple = False; # threaded functions.
1829 if dByVari.keys() & ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
1830 sSwitchValue += ' | ((pVCpu->iem.s.fTbPrevInstr & (IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_INHIBIT_SHADOW)) || ' \
1831 + '(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_MASK) ? 32 : 0)';
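        # Resulting switch value layout (derived from the masks and constants above):
        #   bits 0-2:   CPU mode / flat-or-pre-386 bits from fExec;
        #   bit 3 (8):  set when the effective address size differs from the CPU mode;
        #   bit 4 (16): set when the effective segment is FS, GS or CS (non-flat penalty);
        #   bit 5 (32): set when the EFLAGS checking and clearing variations must be used.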
1832
1833 #
1834 # Generate the case statements.
1835 #
1836 # pylintx: disable=x
1837 aoCases = [];
1838 if ThreadedFunctionVariation.ksVariation_64_Addr32 in dByVari:
1839 assert not fSimple and not sBranch;
1840 aoCases.extend([
1841 Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64),
1842 Case('IEMMODE_64BIT | 16', ThrdFnVar.ksVariation_64_FsGs),
1843 Case('IEMMODE_64BIT | 8 | 16', None), # fall thru
1844 Case('IEMMODE_64BIT | 8', ThrdFnVar.ksVariation_64_Addr32),
1845 ]);
1846 if ThreadedFunctionVariation.ksVariation_64f_Addr32 in dByVari:
1847 aoCases.extend([
1848 Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f),
1849 Case('IEMMODE_64BIT | 32 | 16', ThrdFnVar.ksVariation_64f_FsGs),
1850 Case('IEMMODE_64BIT | 32 | 8 | 16', None), # fall thru
1851 Case('IEMMODE_64BIT | 32 | 8', ThrdFnVar.ksVariation_64f_Addr32),
1852 ]);
1853 elif ThrdFnVar.ksVariation_64 in dByVari:
1854 assert fSimple and not sBranch;
1855 aoCases.append(Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64));
1856 if ThreadedFunctionVariation.ksVariation_64f in dByVari:
1857 aoCases.append(Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f));
1858 elif ThrdFnVar.ksVariation_64_Jmp in dByVari:
1859 assert fSimple and sBranch;
1860 aoCases.append(Case('IEMMODE_64BIT',
1861 ThrdFnVar.ksVariation_64_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64_NoJmp));
1862 if ThreadedFunctionVariation.ksVariation_64f_Jmp in dByVari:
1863 aoCases.append(Case('IEMMODE_64BIT | 32',
1864 ThrdFnVar.ksVariation_64f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64f_NoJmp));
1865
1866 if ThrdFnVar.ksVariation_32_Addr16 in dByVari:
1867 assert not fSimple and not sBranch;
1868 aoCases.extend([
1869 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_32_Flat),
1870 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None), # fall thru
1871 Case('IEMMODE_32BIT | 16', None), # fall thru
1872 Case('IEMMODE_32BIT', ThrdFnVar.ksVariation_32),
1873 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8', None), # fall thru
1874 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8 | 16',None), # fall thru
1875 Case('IEMMODE_32BIT | 8 | 16',None), # fall thru
1876 Case('IEMMODE_32BIT | 8', ThrdFnVar.ksVariation_32_Addr16),
1877 ]);
1878 if ThrdFnVar.ksVariation_32f_Addr16 in dByVari:
1879 aoCases.extend([
1880 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_32f_Flat),
1881 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None), # fall thru
1882 Case('IEMMODE_32BIT | 32 | 16', None), # fall thru
1883 Case('IEMMODE_32BIT | 32', ThrdFnVar.ksVariation_32f),
1884 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8', None), # fall thru
1885 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8 | 16',None), # fall thru
1886 Case('IEMMODE_32BIT | 32 | 8 | 16',None), # fall thru
1887 Case('IEMMODE_32BIT | 32 | 8', ThrdFnVar.ksVariation_32f_Addr16),
1888 ]);
1889 elif ThrdFnVar.ksVariation_32 in dByVari:
1890 assert fSimple and not sBranch;
1891 aoCases.extend([
1892 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', None), # fall thru
1893 Case('IEMMODE_32BIT', ThrdFnVar.ksVariation_32),
1894 ]);
1895 if ThrdFnVar.ksVariation_32f in dByVari:
1896 aoCases.extend([
1897 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', None), # fall thru
1898 Case('IEMMODE_32BIT | 32', ThrdFnVar.ksVariation_32f),
1899 ]);
1900 elif ThrdFnVar.ksVariation_32_Jmp in dByVari:
1901 assert fSimple and sBranch;
1902 aoCases.extend([
1903 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', None), # fall thru
1904 Case('IEMMODE_32BIT',
1905 ThrdFnVar.ksVariation_32_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32_NoJmp),
1906 ]);
1907 if ThrdFnVar.ksVariation_32f_Jmp in dByVari:
1908 aoCases.extend([
1909 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', None), # fall thru
1910 Case('IEMMODE_32BIT | 32',
1911 ThrdFnVar.ksVariation_32f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32f_NoJmp),
1912 ]);
1913
1914 if ThrdFnVar.ksVariation_16_Addr32 in dByVari:
1915 assert not fSimple and not sBranch;
1916 aoCases.extend([
1917 Case('IEMMODE_16BIT | 16', None), # fall thru
1918 Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16),
1919 Case('IEMMODE_16BIT | 8 | 16', None), # fall thru
1920 Case('IEMMODE_16BIT | 8', ThrdFnVar.ksVariation_16_Addr32),
1921 ]);
1922 if ThrdFnVar.ksVariation_16f_Addr32 in dByVari:
1923 aoCases.extend([
1924 Case('IEMMODE_16BIT | 32 | 16', None), # fall thru
1925 Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f),
1926 Case('IEMMODE_16BIT | 32 | 8 | 16', None), # fall thru
1927 Case('IEMMODE_16BIT | 32 | 8', ThrdFnVar.ksVariation_16f_Addr32),
1928 ]);
1929 elif ThrdFnVar.ksVariation_16 in dByVari:
1930 assert fSimple and not sBranch;
1931 aoCases.append(Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16));
1932 if ThrdFnVar.ksVariation_16f in dByVari:
1933 aoCases.append(Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f));
1934 elif ThrdFnVar.ksVariation_16_Jmp in dByVari:
1935 assert fSimple and sBranch;
1936 aoCases.append(Case('IEMMODE_16BIT',
1937 ThrdFnVar.ksVariation_16_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_16_NoJmp));
1938 if ThrdFnVar.ksVariation_16f_Jmp in dByVari:
1939 aoCases.append(Case('IEMMODE_16BIT | 32',
1940 ThrdFnVar.ksVariation_16f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_16f_NoJmp));
1941
1942
1943 if ThrdFnVar.ksVariation_16_Pre386 in dByVari:
1944 if not fSimple:
1945 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None)); # fall thru
1946 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_16_Pre386));
1947 if ThrdFnVar.ksVariation_16f_Pre386 in dByVari: # should be nested under previous if, but line too long.
1948 if not fSimple:
1949 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None)); # fall thru
1950 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_16f_Pre386));
1951
1952 if ThrdFnVar.ksVariation_16_Pre386_Jmp in dByVari:
1953 assert fSimple and sBranch;
1954 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK',
1955 ThrdFnVar.ksVariation_16_Pre386_Jmp if sBranch == 'Jmp'
1956 else ThrdFnVar.ksVariation_16_Pre386_NoJmp));
1957 if ThrdFnVar.ksVariation_16f_Pre386_Jmp in dByVari:
1958 assert fSimple and sBranch;
1959 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32',
1960 ThrdFnVar.ksVariation_16f_Pre386_Jmp if sBranch == 'Jmp'
1961 else ThrdFnVar.ksVariation_16f_Pre386_NoJmp));
1962
1963 #
1964 # If the case bodies are all the same, except for the function called,
1965 # we can reduce the code size and hopefully compile time.
1966 #
1967 iFirstCaseWithBody = 0;
1968 while not aoCases[iFirstCaseWithBody].aoBody:
1969            iFirstCaseWithBody += 1;
1970        fAllSameCases = True;
1971 for iCase in range(iFirstCaseWithBody + 1, len(aoCases)):
1972 fAllSameCases = fAllSameCases and aoCases[iCase].isSame(aoCases[iFirstCaseWithBody]);
1973 #if fDbg: print('fAllSameCases=%s %s' % (fAllSameCases, self.oMcBlock.sFunction,));
1974 if fAllSameCases:
1975 aoStmts = [
1976 iai.McCppGeneric('IEMTHREADEDFUNCS enmFunction;'),
1977 iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
1978 iai.McCppGeneric('{'),
1979 ];
1980 for oCase in aoCases:
1981 aoStmts.extend(oCase.toFunctionAssignment());
1982 aoStmts.extend([
1983 iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
1984 iai.McCppGeneric('}'),
1985 ]);
1986 aoStmts.extend(dByVari[aoCases[iFirstCaseWithBody].sVarNm].emitThreadedCallStmts(0, 'enmFunction'));
1987
1988 else:
1989 #
1990 # Generate the generic switch statement.
1991 #
1992 aoStmts = [
1993 iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
1994 iai.McCppGeneric('{'),
1995 ];
1996 for oCase in aoCases:
1997 aoStmts.extend(oCase.toCode());
1998 aoStmts.extend([
1999 iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
2000 iai.McCppGeneric('}'),
2001 ]);
2002
2003 return aoStmts;
2004
2005 def morphInputCode(self, aoStmts, fIsConditional = False, fCallEmitted = False, cDepth = 0, sBranchAnnotation = None):
2006 """
2007 Adjusts (& copies) the statements for the input/decoder so it will emit
2008 calls to the right threaded functions for each block.
2009
2010 Returns list/tree of statements (aoStmts is not modified) and updated
2011 fCallEmitted status.
2012 """
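        # Roughly speaking (an illustrative summary, not exact output): the statement list is
        # copied and the switch produced by emitThreadedCallStmts() is spliced in just before
        # the first finishing / exception-raising / CIMPL statement, or right after a decoder
        # helper (IEMOP_HLP_DONE_* / IEMOP_HLP_DECODED_*), whichever comes first; conditional
        # branches are processed recursively using their annotations.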
2013 #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
2014 aoDecoderStmts = [];
2015
2016 for iStmt, oStmt in enumerate(aoStmts):
2017 # Copy the statement. Make a deep copy to make sure we've got our own
2018 # copies of all instance variables, even if a bit overkill at the moment.
2019 oNewStmt = copy.deepcopy(oStmt);
2020 aoDecoderStmts.append(oNewStmt);
2021 #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));
2022 if oNewStmt.sName == 'IEM_MC_BEGIN' and self.dsCImplFlags:
2023 oNewStmt.asParams[3] = ' | '.join(sorted(self.dsCImplFlags.keys()));
2024
2025 # If we haven't emitted the threaded function call yet, look for
2026            # statements which it would naturally follow or precede.
2027 if not fCallEmitted:
2028 if not oStmt.isCppStmt():
2029 if ( oStmt.sName.startswith('IEM_MC_MAYBE_RAISE_') \
2030 or (oStmt.sName.endswith('_AND_FINISH') and oStmt.sName.startswith('IEM_MC_'))
2031 or oStmt.sName.startswith('IEM_MC_CALL_CIMPL_')
2032 or oStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_')
2033 or oStmt.sName in ('IEM_MC_RAISE_DIVIDE_ERROR',)):
2034 aoDecoderStmts.pop();
2035 if not fIsConditional:
2036 aoDecoderStmts.extend(self.emitThreadedCallStmts());
2037 elif oStmt.sName == 'IEM_MC_ADVANCE_RIP_AND_FINISH':
2038 aoDecoderStmts.extend(self.emitThreadedCallStmts('NoJmp'));
2039 else:
2040 assert oStmt.sName in { 'IEM_MC_REL_JMP_S8_AND_FINISH': True,
2041 'IEM_MC_REL_JMP_S16_AND_FINISH': True,
2042 'IEM_MC_REL_JMP_S32_AND_FINISH': True, };
2043 aoDecoderStmts.extend(self.emitThreadedCallStmts('Jmp'));
2044 aoDecoderStmts.append(oNewStmt);
2045 fCallEmitted = True;
2046
2047 elif iai.g_dMcStmtParsers[oStmt.sName][2]:
2048                    # This is for Jmp/NoJmp with loopne and friends which modify state other than RIP.
2049 if not sBranchAnnotation:
2050 self.raiseProblem('Modifying state before emitting calls! %s' % (oStmt.sName,));
2051 assert fIsConditional;
2052 aoDecoderStmts.pop();
2053 if sBranchAnnotation == g_ksFinishAnnotation_Advance:
2054 assert iai.McStmt.findStmtByNames(aoStmts[iStmt:], {'IEM_MC_ADVANCE_RIP_AND_FINISH':1,})
2055 aoDecoderStmts.extend(self.emitThreadedCallStmts('NoJmp'));
2056 elif sBranchAnnotation == g_ksFinishAnnotation_RelJmp:
2057 assert iai.McStmt.findStmtByNames(aoStmts[iStmt:],
2058 { 'IEM_MC_REL_JMP_S8_AND_FINISH': 1,
2059 'IEM_MC_REL_JMP_S16_AND_FINISH': 1,
2060 'IEM_MC_REL_JMP_S32_AND_FINISH': 1, });
2061 aoDecoderStmts.extend(self.emitThreadedCallStmts('Jmp'));
2062 else:
2063 self.raiseProblem('Modifying state before emitting calls! %s' % (oStmt.sName,));
2064 aoDecoderStmts.append(oNewStmt);
2065 fCallEmitted = True;
2066
2067 elif ( not fIsConditional
2068 and oStmt.fDecode
2069 and ( oStmt.asParams[0].find('IEMOP_HLP_DONE_') >= 0
2070 or oStmt.asParams[0].find('IEMOP_HLP_DECODED_') >= 0)):
2071 aoDecoderStmts.extend(self.emitThreadedCallStmts());
2072 fCallEmitted = True;
2073
2074 # Process branches of conditionals recursively.
2075 if isinstance(oStmt, iai.McStmtCond):
2076 (oNewStmt.aoIfBranch, fCallEmitted1) = self.morphInputCode(oStmt.aoIfBranch, fIsConditional,
2077 fCallEmitted, cDepth + 1, oStmt.oIfBranchAnnotation);
2078 if oStmt.aoElseBranch:
2079 (oNewStmt.aoElseBranch, fCallEmitted2) = self.morphInputCode(oStmt.aoElseBranch, fIsConditional,
2080 fCallEmitted, cDepth + 1,
2081 oStmt.oElseBranchAnnotation);
2082 else:
2083 fCallEmitted2 = False;
2084 fCallEmitted = fCallEmitted or (fCallEmitted1 and fCallEmitted2);
2085
2086 if not fCallEmitted and cDepth == 0:
2087 self.raiseProblem('Unable to insert call to threaded function.');
2088
2089 return (aoDecoderStmts, fCallEmitted);
2090
2091
2092 def generateInputCode(self):
2093 """
2094 Modifies the input code.
2095 """
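        # Round the block indentation up to a multiple of four columns for the generated code.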
2096 cchIndent = (self.oMcBlock.cchIndent + 3) // 4 * 4;
2097
2098 if len(self.oMcBlock.aoStmts) == 1:
2099 # IEM_MC_DEFER_TO_CIMPL_X_RET - need to wrap in {} to make it safe to insert into random code.
2100 sCode = ' ' * cchIndent + 'pVCpu->iem.s.fTbCurInstr = ';
2101 if self.dsCImplFlags:
2102 sCode += ' | '.join(sorted(self.dsCImplFlags.keys())) + ';\n';
2103 else:
2104 sCode += '0;\n';
2105 sCode += iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts)[0],
2106 cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
2107 sIndent = ' ' * (min(cchIndent, 2) - 2);
2108 sCode = sIndent + '{\n' + sCode + sIndent + '}\n';
2109 return sCode;
2110
2111 # IEM_MC_BEGIN/END block
2112 assert len(self.oMcBlock.asLines) > 2, "asLines=%s" % (self.oMcBlock.asLines,);
2113 fIsConditional = ( 'IEM_CIMPL_F_BRANCH_CONDITIONAL' in self.dsCImplFlags
2114 and 'IEM_CIMPL_F_BRANCH_RELATIVE' in self.dsCImplFlags); # (latter to avoid iemOp_into)
2115 return iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts, fIsConditional)[0],
2116 cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
2117
2118# Short alias for ThreadedFunctionVariation.
2119ThrdFnVar = ThreadedFunctionVariation;
2120
2121
2122class IEMThreadedGenerator(object):
2123 """
2124 The threaded code generator & annotator.
2125 """
2126
2127 def __init__(self):
2128 self.aoThreadedFuncs = [] # type: List[ThreadedFunction]
2129 self.oOptions = None # type: argparse.Namespace
2130 self.aoParsers = [] # type: List[IEMAllInstPython.SimpleParser]
2131        self.aidxFirstFunctions = [] # type: List[int] ##< Runs parallel to aoParsers giving the index of the first function.
2132
2133 #
2134 # Processing.
2135 #
2136
2137 def processInputFiles(self, sHostArch, fNativeRecompilerEnabled):
2138 """
2139 Process the input files.
2140 """
2141
2142 # Parse the files.
2143 self.aoParsers = iai.parseFiles(self.oOptions.asInFiles, sHostArch);
2144
2145 # Create threaded functions for the MC blocks.
2146 self.aoThreadedFuncs = [ThreadedFunction(oMcBlock) for oMcBlock in iai.g_aoMcBlocks];
2147
2148 # Analyze the threaded functions.
2149 dRawParamCounts = {};
2150 dMinParamCounts = {};
2151 for oThreadedFunction in self.aoThreadedFuncs:
2152 oThreadedFunction.analyze();
2153 for oVariation in oThreadedFunction.aoVariations:
2154 dRawParamCounts[len(oVariation.dParamRefs)] = dRawParamCounts.get(len(oVariation.dParamRefs), 0) + 1;
2155 dMinParamCounts[oVariation.cMinParams] = dMinParamCounts.get(oVariation.cMinParams, 0) + 1;
2156 print('debug: param count distribution, raw and optimized:', file = sys.stderr);
2157 for cCount in sorted({cBits: True for cBits in list(dRawParamCounts.keys()) + list(dMinParamCounts.keys())}.keys()):
2158 print('debug: %s params: %4s raw, %4s min'
2159 % (cCount, dRawParamCounts.get(cCount, 0), dMinParamCounts.get(cCount, 0)),
2160 file = sys.stderr);
2161
2162 # Populate aidxFirstFunctions. This is ASSUMING that
2163 # g_aoMcBlocks/self.aoThreadedFuncs are in self.aoParsers order.
2164 iThreadedFunction = 0;
2165 oThreadedFunction = self.getThreadedFunctionByIndex(0);
2166 self.aidxFirstFunctions = [];
2167 for oParser in self.aoParsers:
2168 self.aidxFirstFunctions.append(iThreadedFunction);
2169
2170 while oThreadedFunction.oMcBlock.sSrcFile == oParser.sSrcFile:
2171 iThreadedFunction += 1;
2172 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
2173
2174 # Analyze the threaded functions and their variations for native recompilation.
2175 if fNativeRecompilerEnabled:
2176 ian.displayStatistics(self.aoThreadedFuncs, sHostArch);
2177
2178 # Gather arguments + variable statistics for the MC blocks.
2179 cMaxArgs = 0;
2180 cMaxVars = 0;
2181 cMaxVarsAndArgs = 0;
2182 cbMaxArgs = 0;
2183 cbMaxVars = 0;
2184 cbMaxVarsAndArgs = 0;
2185 for oThreadedFunction in self.aoThreadedFuncs:
2186 if oThreadedFunction.oMcBlock.cLocals >= 0:
2187 # Counts.
2188 assert oThreadedFunction.oMcBlock.cArgs >= 0;
2189 cMaxVars = max(cMaxVars, oThreadedFunction.oMcBlock.cLocals);
2190 cMaxArgs = max(cMaxArgs, oThreadedFunction.oMcBlock.cArgs);
2191 cMaxVarsAndArgs = max(cMaxVarsAndArgs, oThreadedFunction.oMcBlock.cLocals + oThreadedFunction.oMcBlock.cArgs);
2192 if cMaxVarsAndArgs > 9:
2193 raise Exception('%s potentially uses too many variables / args: %u, max 10 - %u vars and %u args'
2194 % (oThreadedFunction.oMcBlock.oFunction.sName, cMaxVarsAndArgs,
2195 oThreadedFunction.oMcBlock.cLocals, oThreadedFunction.oMcBlock.cArgs));
2196 # Calc stack allocation size:
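                # (Each argument and local is rounded up to a whole 8-byte / 64-bit stack slot.)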
2197 cbArgs = 0;
2198 for oArg in oThreadedFunction.oMcBlock.aoArgs:
2199 cbArgs += (getTypeBitCount(oArg.sType) + 63) // 64 * 8;
2200 cbVars = 0;
2201 for oVar in oThreadedFunction.oMcBlock.aoLocals:
2202 cbVars += (getTypeBitCount(oVar.sType) + 63) // 64 * 8;
2203 cbMaxVars = max(cbMaxVars, cbVars);
2204 cbMaxArgs = max(cbMaxArgs, cbArgs);
2205 cbMaxVarsAndArgs = max(cbMaxVarsAndArgs, cbVars + cbArgs);
2206 if cbMaxVarsAndArgs >= 0xc0:
2207 raise Exception('%s potentially uses too much stack: cbMaxVars=%#x cbMaxArgs=%#x'
2208 % (oThreadedFunction.oMcBlock.oFunction.sName, cbMaxVars, cbMaxArgs,));
2209
2210 print('debug: max vars+args: %u bytes / %u; max vars: %u bytes / %u; max args: %u bytes / %u'
2211 % (cbMaxVarsAndArgs, cMaxVarsAndArgs, cbMaxVars, cMaxVars, cbMaxArgs, cMaxArgs,), file = sys.stderr);
2212
2213 return True;
2214
2215 #
2216 # Output
2217 #
2218
2219 def generateLicenseHeader(self):
2220 """
2221 Returns the lines for a license header.
2222 """
2223 return [
2224 '/*',
2225 ' * Autogenerated by $Id: IEMAllThrdPython.py 103181 2024-02-03 02:13:06Z vboxsync $ ',
2226 ' * Do not edit!',
2227 ' */',
2228 '',
2229 '/*',
2230 ' * Copyright (C) 2023-' + str(datetime.date.today().year) + ' Oracle and/or its affiliates.',
2231 ' *',
2232 ' * This file is part of VirtualBox base platform packages, as',
2233 ' * available from https://www.alldomusa.eu.org.',
2234 ' *',
2235 ' * This program is free software; you can redistribute it and/or',
2236 ' * modify it under the terms of the GNU General Public License',
2237 ' * as published by the Free Software Foundation, in version 3 of the',
2238 ' * License.',
2239 ' *',
2240 ' * This program is distributed in the hope that it will be useful, but',
2241 ' * WITHOUT ANY WARRANTY; without even the implied warranty of',
2242 ' * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU',
2243 ' * General Public License for more details.',
2244 ' *',
2245 ' * You should have received a copy of the GNU General Public License',
2246 ' * along with this program; if not, see <https://www.gnu.org/licenses>.',
2247 ' *',
2248 ' * The contents of this file may alternatively be used under the terms',
2249 ' * of the Common Development and Distribution License Version 1.0',
2250 ' * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included',
2251 ' * in the VirtualBox distribution, in which case the provisions of the',
2252 ' * CDDL are applicable instead of those of the GPL.',
2253 ' *',
2254 ' * You may elect to license modified versions of this file under the',
2255 ' * terms and conditions of either the GPL or the CDDL or both.',
2256 ' *',
2257 ' * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0',
2258 ' */',
2259 '',
2260 '',
2261 '',
2262 ];
2263
2264 ## List of built-in threaded functions with user argument counts and
2265    ## whether they have a native recompiler implementation.
2266 katBltIns = (
2267 ( 'Nop', 0, True ),
2268 ( 'LogCpuState', 0, True ),
2269
2270 ( 'DeferToCImpl0', 2, True ),
2271 ( 'CheckIrq', 0, True ),
2272 ( 'CheckMode', 1, True ),
2273 ( 'CheckHwInstrBps', 0, False ),
2274 ( 'CheckCsLim', 1, True ),
2275
2276 ( 'CheckCsLimAndOpcodes', 3, True ),
2277 ( 'CheckOpcodes', 3, True ),
2278 ( 'CheckOpcodesConsiderCsLim', 3, True ),
2279
2280 ( 'CheckCsLimAndPcAndOpcodes', 3, True ),
2281 ( 'CheckPcAndOpcodes', 3, True ),
2282 ( 'CheckPcAndOpcodesConsiderCsLim', 3, True ),
2283
2284 ( 'CheckCsLimAndOpcodesAcrossPageLoadingTlb', 3, True ),
2285 ( 'CheckOpcodesAcrossPageLoadingTlb', 3, True ),
2286 ( 'CheckOpcodesAcrossPageLoadingTlbConsiderCsLim', 2, True ),
2287
2288 ( 'CheckCsLimAndOpcodesLoadingTlb', 3, True ),
2289 ( 'CheckOpcodesLoadingTlb', 3, True ),
2290 ( 'CheckOpcodesLoadingTlbConsiderCsLim', 3, True ),
2291
2292 ( 'CheckCsLimAndOpcodesOnNextPageLoadingTlb', 2, True ),
2293 ( 'CheckOpcodesOnNextPageLoadingTlb', 2, True ),
2294 ( 'CheckOpcodesOnNextPageLoadingTlbConsiderCsLim', 2, True ),
2295
2296 ( 'CheckCsLimAndOpcodesOnNewPageLoadingTlb', 2, True ),
2297 ( 'CheckOpcodesOnNewPageLoadingTlb', 2, True ),
2298 ( 'CheckOpcodesOnNewPageLoadingTlbConsiderCsLim', 2, True ),
2299 );
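    # Illustrative mapping (see the generators below): an entry like ( 'CheckMode', 1, True )
    # yields the enum value kIemThreadedFunc_BltIn_CheckMode, a g_apfnIemThreadedFunctions
    # entry pointing at iemThreadedFunc_BltIn_CheckMode, an argument count of 1 and - since
    # the third field is True - native recompiler/liveness table entries
    # (iemNativeRecompFunc_BltIn_CheckMode / iemNativeLivenessFunc_BltIn_CheckMode).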
2300
2301 def generateThreadedFunctionsHeader(self, oOut):
2302 """
2303 Generates the threaded functions header file.
2304 Returns success indicator.
2305 """
2306
2307 asLines = self.generateLicenseHeader();
2308
2309 # Generate the threaded function table indexes.
2310 asLines += [
2311 'typedef enum IEMTHREADEDFUNCS',
2312 '{',
2313 ' kIemThreadedFunc_Invalid = 0,',
2314 '',
2315 ' /*',
2316 ' * Predefined',
2317 ' */',
2318 ];
2319 asLines += [' kIemThreadedFunc_BltIn_%s,' % (sFuncNm,) for sFuncNm, _, _ in self.katBltIns];
2320
2321 iThreadedFunction = 1 + len(self.katBltIns);
2322 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2323 asLines += [
2324 '',
2325 ' /*',
2326 ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '',
2327 ' */',
2328 ];
2329 for oThreadedFunction in self.aoThreadedFuncs:
2330 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2331 if oVariation:
2332 iThreadedFunction += 1;
2333 oVariation.iEnumValue = iThreadedFunction;
2334 asLines.append(' ' + oVariation.getIndexName() + ',');
2335 asLines += [
2336 ' kIemThreadedFunc_End',
2337 '} IEMTHREADEDFUNCS;',
2338 '',
2339 ];
2340
2341 # Prototype the function table.
2342 asLines += [
2343 'extern const PFNIEMTHREADEDFUNC g_apfnIemThreadedFunctions[kIemThreadedFunc_End];',
2344 '#if defined(IN_RING3) || defined(LOG_ENABLED)',
2345 'extern const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End];',
2346 '#endif',
2347 'extern uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End];',
2348 ];
2349
2350 oOut.write('\n'.join(asLines));
2351 return True;
2352
2353 ksBitsToIntMask = {
2354 1: "UINT64_C(0x1)",
2355 2: "UINT64_C(0x3)",
2356 4: "UINT64_C(0xf)",
2357 8: "UINT64_C(0xff)",
2358 16: "UINT64_C(0xffff)",
2359 32: "UINT64_C(0xffffffff)",
2360 };
2361
2362 def generateFunctionParameterUnpacking(self, oVariation, oOut, asParams):
2363 """
2364 Outputs code for unpacking parameters.
2365 This is shared by the threaded and native code generators.
2366 """
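        # Illustrative output (a sketch only; 'bRm' is just an example name) for a uint8_t
        # reference packed at bit offset 8 of the first threaded parameter (uParam0):
        #     uint8_t const bRm = (uint8_t)((uParam0 >> 8) & UINT64_C(0xff)); /* bRm - 1 ref */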
2367 aasVars = [];
2368 for aoRefs in oVariation.dParamRefs.values():
2369 oRef = aoRefs[0];
2370 if oRef.sType[0] != 'P':
2371 cBits = g_kdTypeInfo[oRef.sType][0];
2372 sType = g_kdTypeInfo[oRef.sType][2];
2373 else:
2374 cBits = 64;
2375 sType = oRef.sType;
2376
2377 sTypeDecl = sType + ' const';
2378
2379 if cBits == 64:
2380 assert oRef.offNewParam == 0;
2381 if sType == 'uint64_t':
2382 sUnpack = '%s;' % (asParams[oRef.iNewParam],);
2383 else:
2384 sUnpack = '(%s)%s;' % (sType, asParams[oRef.iNewParam],);
2385 elif oRef.offNewParam == 0:
2386 sUnpack = '(%s)(%s & %s);' % (sType, asParams[oRef.iNewParam], self.ksBitsToIntMask[cBits]);
2387 else:
2388 sUnpack = '(%s)((%s >> %s) & %s);' \
2389 % (sType, asParams[oRef.iNewParam], oRef.offNewParam, self.ksBitsToIntMask[cBits]);
2390
2391 sComment = '/* %s - %s ref%s */' % (oRef.sOrgRef, len(aoRefs), 's' if len(aoRefs) != 1 else '',);
2392
2393 aasVars.append([ '%s:%02u' % (oRef.iNewParam, oRef.offNewParam),
2394 sTypeDecl, oRef.sNewName, sUnpack, sComment ]);
2395 acchVars = [0, 0, 0, 0, 0];
2396 for asVar in aasVars:
2397 for iCol, sStr in enumerate(asVar):
2398 acchVars[iCol] = max(acchVars[iCol], len(sStr));
2399 sFmt = ' %%-%ss %%-%ss = %%-%ss %%s\n' % (acchVars[1], acchVars[2], acchVars[3]);
2400 for asVar in sorted(aasVars):
2401 oOut.write(sFmt % (asVar[1], asVar[2], asVar[3], asVar[4],));
2402 return True;
2403
2404 kasThreadedParamNames = ('uParam0', 'uParam1', 'uParam2');
2405 def generateThreadedFunctionsSource(self, oOut):
2406 """
2407 Generates the threaded functions source file.
2408 Returns success indicator.
2409 """
2410
2411 asLines = self.generateLicenseHeader();
2412 oOut.write('\n'.join(asLines));
2413
2414 #
2415 # Emit the function definitions.
2416 #
2417 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2418 sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
2419 oOut.write( '\n'
2420 + '\n'
2421 + '\n'
2422 + '\n'
2423 + '/*' + '*' * 128 + '\n'
2424 + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
2425 + '*' * 128 + '*/\n');
2426
2427 for oThreadedFunction in self.aoThreadedFuncs:
2428 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2429 if oVariation:
2430 oMcBlock = oThreadedFunction.oMcBlock;
2431
2432 # Function header
2433 oOut.write( '\n'
2434 + '\n'
2435 + '/**\n'
2436 + ' * #%u: %s at line %s offset %s in %s%s\n'
2437 % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
2438 os.path.split(oMcBlock.sSrcFile)[1],
2439 ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
2440 + ' */\n'
2441 + 'static IEM_DECL_IEMTHREADEDFUNC_DEF(' + oVariation.getThreadedFunctionName() + ')\n'
2442 + '{\n');
2443
2444 # Unpack parameters.
2445 self.generateFunctionParameterUnpacking(oVariation, oOut, self.kasThreadedParamNames);
2446
2447 # RT_NOREF for unused parameters.
2448 if oVariation.cMinParams < g_kcThreadedParams:
2449 oOut.write(' RT_NOREF(' + ', '.join(self.kasThreadedParamNames[oVariation.cMinParams:]) + ');\n');
2450
2451 # Now for the actual statements.
2452 oOut.write(iai.McStmt.renderCodeForList(oVariation.aoStmtsForThreadedFunction, cchIndent = 4));
2453
2454 oOut.write('}\n');
2455
2456
2457 #
2458 # Generate the output tables in parallel.
2459 #
2460 asFuncTable = [
2461 '/**',
2462 ' * Function pointer table.',
2463 ' */',
2464 'PFNIEMTHREADEDFUNC const g_apfnIemThreadedFunctions[kIemThreadedFunc_End] =',
2465 '{',
2466 ' /*Invalid*/ NULL,',
2467 ];
2468 asNameTable = [
2469 '/**',
2470 ' * Function name table.',
2471 ' */',
2472 'const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End] =',
2473 '{',
2474 ' "Invalid",',
2475 ];
2476 asArgCntTab = [
2477 '/**',
2478 ' * Argument count table.',
2479 ' */',
2480 'uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End] =',
2481 '{',
2482 ' 0, /*Invalid*/',
2483 ];
2484 aasTables = (asFuncTable, asNameTable, asArgCntTab,);
2485
2486 for asTable in aasTables:
2487 asTable.extend((
2488 '',
2489 ' /*',
2490 ' * Predefined.',
2491 ' */',
2492 ));
2493 for sFuncNm, cArgs, _ in self.katBltIns:
2494 asFuncTable.append(' iemThreadedFunc_BltIn_%s,' % (sFuncNm,));
2495 asNameTable.append(' "BltIn_%s",' % (sFuncNm,));
2496 asArgCntTab.append(' %d, /*BltIn_%s*/' % (cArgs, sFuncNm,));
2497
2498 iThreadedFunction = 1 + len(self.katBltIns);
2499 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2500 for asTable in aasTables:
2501 asTable.extend((
2502 '',
2503 ' /*',
2504 ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation],
2505 ' */',
2506 ));
2507 for oThreadedFunction in self.aoThreadedFuncs:
2508 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2509 if oVariation:
2510 iThreadedFunction += 1;
2511 assert oVariation.iEnumValue == iThreadedFunction;
2512 sName = oVariation.getThreadedFunctionName();
2513 asFuncTable.append(' /*%4u*/ %s,' % (iThreadedFunction, sName,));
2514 asNameTable.append(' /*%4u*/ "%s",' % (iThreadedFunction, sName,));
2515 asArgCntTab.append(' /*%4u*/ %d, /*%s*/' % (iThreadedFunction, oVariation.cMinParams, sName,));
2516
2517 for asTable in aasTables:
2518 asTable.append('};');
2519
2520 #
2521 # Output the tables.
2522 #
2523 oOut.write( '\n'
2524 + '\n');
2525 oOut.write('\n'.join(asFuncTable));
2526 oOut.write( '\n'
2527 + '\n'
2528 + '\n'
2529 + '#if defined(IN_RING3) || defined(LOG_ENABLED)\n');
2530 oOut.write('\n'.join(asNameTable));
2531 oOut.write( '\n'
2532 + '#endif /* IN_RING3 || LOG_ENABLED */\n'
2533 + '\n'
2534 + '\n');
2535 oOut.write('\n'.join(asArgCntTab));
2536 oOut.write('\n');
2537
2538 return True;
2539
2540 def generateNativeFunctionsHeader(self, oOut):
2541 """
2542 Generates the native recompiler functions header file.
2543 Returns success indicator.
2544 """
2545 if not self.oOptions.fNativeRecompilerEnabled:
2546 return True;
2547
2548 asLines = self.generateLicenseHeader();
2549
2550 # Prototype the function table.
2551 asLines += [
2552 'extern const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End];',
2553 'extern const PFNIEMNATIVELIVENESSFUNC g_apfnIemNativeLivenessFunctions[kIemThreadedFunc_End];',
2554 '',
2555 ];
2556
2557        # Emit indicators as to which of the builtin functions have a native
2558        # recompiler function and which do not.  (We only really need this for
2559        # kIemThreadedFunc_BltIn_CheckMode, but do all just for simplicity.)
2560        for atBltIn in self.katBltIns:
2561            if atBltIn[2]:
2562                asLines.append('#define IEMNATIVE_WITH_BLTIN_' + atBltIn[0].upper());
2563            else:
2564                asLines.append('#define IEMNATIVE_WITHOUT_BLTIN_' + atBltIn[0].upper());
2565
2566 # Emit prototypes for the builtin functions we use in tables.
2567 asLines += [
2568 '',
2569 '/* Prototypes for built-in functions used in the above tables. */',
2570 ];
2571 for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
2572 if fHaveRecompFunc:
2573 asLines += [
2574 'IEM_DECL_IEMNATIVERECOMPFUNC_PROTO( iemNativeRecompFunc_BltIn_%s);' % (sFuncNm,),
2575 'IEM_DECL_IEMNATIVELIVENESSFUNC_PROTO(iemNativeLivenessFunc_BltIn_%s);' % (sFuncNm,),
2576 ];
2577
2578 oOut.write('\n'.join(asLines));
2579 return True;
2580
2581 def generateNativeFunctionsSource(self, oOut):
2582 """
2583 Generates the native recompiler functions source file.
2584 Returns success indicator.
2585 """
2586 if not self.oOptions.fNativeRecompilerEnabled:
2587 return True;
2588
2589 #
2590 # The file header.
2591 #
2592 oOut.write('\n'.join(self.generateLicenseHeader()));
2593
2594 #
2595 # Emit the functions.
2596 #
2597 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2598 sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
2599 oOut.write( '\n'
2600 + '\n'
2601 + '\n'
2602 + '\n'
2603 + '/*' + '*' * 128 + '\n'
2604 + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
2605 + '*' * 128 + '*/\n');
2606
2607 for oThreadedFunction in self.aoThreadedFuncs:
2608 oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
2609 if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
2610 oMcBlock = oThreadedFunction.oMcBlock;
2611
2612 # Function header
2613 oOut.write( '\n'
2614 + '\n'
2615 + '/**\n'
2616 + ' * #%u: %s at line %s offset %s in %s%s\n'
2617 % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
2618 os.path.split(oMcBlock.sSrcFile)[1],
2619 ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
2620 + ' */\n'
2621 + 'static IEM_DECL_IEMNATIVERECOMPFUNC_DEF(' + oVariation.getNativeFunctionName() + ')\n'
2622 + '{\n');
2623
2624 # Unpack parameters.
2625 self.generateFunctionParameterUnpacking(oVariation, oOut,
2626 ('pCallEntry->auParams[0]',
2627 'pCallEntry->auParams[1]',
2628 'pCallEntry->auParams[2]',));
2629
2630 # Now for the actual statements.
2631 oOut.write(oVariation.oNativeRecomp.renderCode(cchIndent = 4));
2632
2633 oOut.write('}\n');
2634
2635 #
2636 # Output the function table.
2637 #
2638 oOut.write( '\n'
2639 + '\n'
2640 + '/*\n'
2641 + ' * Function table running parallel to g_apfnIemThreadedFunctions and friends.\n'
2642 + ' */\n'
2643 + 'const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End] =\n'
2644 + '{\n'
2645 + ' /*Invalid*/ NULL,'
2646 + '\n'
2647 + ' /*\n'
2648 + ' * Predefined.\n'
2649 + ' */\n'
2650 );
2651 for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
2652 if fHaveRecompFunc:
2653 oOut.write(' iemNativeRecompFunc_BltIn_%s,\n' % (sFuncNm,))
2654 else:
2655 oOut.write(' NULL, /*BltIn_%s*/\n' % (sFuncNm,))
2656
2657 iThreadedFunction = 1 + len(self.katBltIns);
2658 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2659 oOut.write( ' /*\n'
2660 + ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '\n'
2661 + ' */\n');
2662 for oThreadedFunction in self.aoThreadedFuncs:
2663 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2664 if oVariation:
2665 iThreadedFunction += 1;
2666 assert oVariation.iEnumValue == iThreadedFunction;
2667 sName = oVariation.getNativeFunctionName();
2668 if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
2669 oOut.write(' /*%4u*/ %s,\n' % (iThreadedFunction, sName,));
2670 else:
2671 oOut.write(' /*%4u*/ NULL /*%s*/,\n' % (iThreadedFunction, sName,));
2672
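# Note: iThreadedFunction is the running table index; the assert above depends on
# the enum values in the threaded functions header being assigned in exactly the
# same variation/function order as the table entries are emitted here.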
2673 oOut.write( '};\n'
2674 + '\n');
2675 return True;
2676
2677 def generateNativeLivenessSource(self, oOut):
2678 """
2679 Generates the native recompiler liveness analysis functions source file.
2680 Returns success indicator.
2681 """
2682 if not self.oOptions.fNativeRecompilerEnabled:
2683 return True;
2684
2685 #
2686 # The file header.
2687 #
2688 oOut.write('\n'.join(self.generateLicenseHeader()));
2689
2690 #
2691 # Emit the functions.
2692 #
2693 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2694 sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
2695 oOut.write( '\n'
2696 + '\n'
2697 + '\n'
2698 + '\n'
2699 + '/*' + '*' * 128 + '\n'
2700 + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
2701 + '*' * 128 + '*/\n');
2702
2703 for oThreadedFunction in self.aoThreadedFuncs:
2704 oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
2705 if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
2706 oMcBlock = oThreadedFunction.oMcBlock;
2707
2708 # Function header
2709 oOut.write( '\n'
2710 + '\n'
2711 + '/**\n'
2712 + ' * #%u: %s at line %s offset %s in %s%s\n'
2713 % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
2714 os.path.split(oMcBlock.sSrcFile)[1],
2715 ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
2716 + ' */\n'
2717 + 'static IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(' + oVariation.getLivenessFunctionName() + ')\n'
2718 + '{\n');
2719
2720 # Unpack parameters.
2721 self.generateFunctionParameterUnpacking(oVariation, oOut,
2722 ('pCallEntry->auParams[0]',
2723 'pCallEntry->auParams[1]',
2724 'pCallEntry->auParams[2]',));
2725 asNoRefs = []; #[ 'RT_NOREF_PV(pReNative);', ];
2726 for aoRefs in oVariation.dParamRefs.values():
2727 asNoRefs.append('RT_NOREF_PV(%s);' % (aoRefs[0].sNewName,));
2728 oOut.write(' %s\n' % (' '.join(asNoRefs),));
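# Note: the RT_NOREF_PV() statements emitted above keep the compiler from warning
# about unpacked parameters the liveness code never touches; e.g. a hypothetical
# unpacked parameter 'bRm' would come out as 'RT_NOREF_PV(bRm);'.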
2729
2730 # Now for the actual statements.
2731 oOut.write(oVariation.oNativeRecomp.renderCode(cchIndent = 4));
2732
2733 oOut.write('}\n');
2734
2735 #
2736 # Output the function table.
2737 #
2738 oOut.write( '\n'
2739 + '\n'
2740 + '/*\n'
2741 + ' * Liveness analysis function table running parallel to g_apfnIemThreadedFunctions and friends.\n'
2742 + ' */\n'
2743 + 'const PFNIEMNATIVELIVENESSFUNC g_apfnIemNativeLivenessFunctions[kIemThreadedFunc_End] =\n'
2744 + '{\n'
2745 + ' /*Invalid*/ NULL,'
2746 + '\n'
2747 + ' /*\n'
2748 + ' * Predefined.\n'
2749 + ' */\n'
2750 );
2751 for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
2752 if fHaveRecompFunc:
2753 oOut.write(' iemNativeLivenessFunc_BltIn_%s,\n' % (sFuncNm,));
2754 else:
2755 oOut.write(' NULL, /*BltIn_%s*/\n' % (sFuncNm,));
2756
2757 iThreadedFunction = 1 + len(self.katBltIns);
2758 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2759 oOut.write( ' /*\n'
2760 + ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '\n'
2761 + ' */\n');
2762 for oThreadedFunction in self.aoThreadedFuncs:
2763 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2764 if oVariation:
2765 iThreadedFunction += 1;
2766 assert oVariation.iEnumValue == iThreadedFunction;
2767 sName = oVariation.getLivenessFunctionName();
2768 if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
2769 oOut.write(' /*%4u*/ %s,\n' % (iThreadedFunction, sName,));
2770 else:
2771 oOut.write(' /*%4u*/ NULL /*%s*/,\n' % (iThreadedFunction, sName,));
2772
2773 oOut.write( '};\n'
2774 + '\n');
2775 return True;
2776
2777
2778 def getThreadedFunctionByIndex(self, idx):
2779 """
2780 Returns a ThreadedFunction object for the given index. If the index is
2781 out of bounds, a dummy is returned.
2782 """
2783 if idx < len(self.aoThreadedFuncs):
2784 return self.aoThreadedFuncs[idx];
2785 return ThreadedFunction.dummyInstance();
2786
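# Note: the dummy instance presumably has an oMcBlock whose iBeginLine/iEndLine
# never match a real source line, so generateModifiedInput() below simply keeps
# passing lines through once all real MC blocks in a file have been consumed.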
2787 def generateModifiedInput(self, oOut, idxFile):
2788 """
2789 Generates the combined modified input source/header file.
2790 Returns success indicator.
2791 """
2792 #
2793 # File header and assert assumptions.
2794 #
2795 oOut.write('\n'.join(self.generateLicenseHeader()));
2796 oOut.write('AssertCompile((IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7);\n');
2797
2798 #
2799 # Iterate all parsers (input files) and output the ones related to the
2800 # file set given by idxFile.
2801 #
2802 for idxParser, oParser in enumerate(self.aoParsers): # type: int, IEMAllInstPython.SimpleParser
2803 # Is this included in the file set?
2804 sSrcBaseFile = os.path.basename(oParser.sSrcFile).lower();
2805 fInclude = -1;
2806 for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet:
2807 if sSrcBaseFile == aoInfo[0].lower():
2808 fInclude = aoInfo[2] in (-1, idxFile);
2809 break;
2810 if fInclude is not True:
2811 assert fInclude is False;
2812 continue;
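# Assumption for the check above: each g_aaoAllInstrFilesAndDefaultMapAndSet entry
# has the shape (sFilename, <default map>, iFileSet), where an iFileSet of -1
# means the file is included in every generated part.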
2813
2814 # Output it.
2815 oOut.write("\n\n/* ****** BEGIN %s ******* */\n" % (oParser.sSrcFile,));
2816
2817 iThreadedFunction = self.aidxFirstFunctions[idxParser];
2818 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
2819 iLine = 0;
2820 while iLine < len(oParser.asLines):
2821 sLine = oParser.asLines[iLine];
2822 iLine += 1; # iBeginLine and iEndLine are 1-based.
2823
2824 # Can we pass it thru?
2825 if ( iLine not in [oThreadedFunction.oMcBlock.iBeginLine, oThreadedFunction.oMcBlock.iEndLine]
2826 or oThreadedFunction.oMcBlock.sSrcFile != oParser.sSrcFile):
2827 oOut.write(sLine);
2828 #
2829 # Single MC block. Just extract it and insert the replacement.
2830 #
2831 elif oThreadedFunction.oMcBlock.iBeginLine != oThreadedFunction.oMcBlock.iEndLine:
2832 assert ( (sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1)
2833 or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial), 'sLine="%s"' % (sLine,);
2834 oOut.write(sLine[:oThreadedFunction.oMcBlock.offBeginLine]);
2835 sModified = oThreadedFunction.generateInputCode().strip();
2836 oOut.write(sModified);
2837
2838 iLine = oThreadedFunction.oMcBlock.iEndLine;
2839 sLine = oParser.asLines[iLine - 1];
2840 assert ( sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1
2841 or len(oThreadedFunction.oMcBlock.aoStmts) == 1
2842 or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial);
2843 oOut.write(sLine[oThreadedFunction.oMcBlock.offAfterEnd : ]);
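# Note: offBeginLine/offAfterEnd are column offsets into the first and last source
# lines, so any text before the MC block starts and after it ends is preserved
# around the generated replacement.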
2844
2845 # Advance
2846 iThreadedFunction += 1;
2847 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
2848 #
2849 # Macro expansion line that has sublines and may contain multiple MC blocks.
2850 #
2851 else:
2852 offLine = 0;
2853 while iLine == oThreadedFunction.oMcBlock.iBeginLine:
2854 oOut.write(sLine[offLine : oThreadedFunction.oMcBlock.offBeginLine]);
2855
2856 sModified = oThreadedFunction.generateInputCode().strip();
2857 assert ( sModified.startswith('IEM_MC_BEGIN')
2858 or (sModified.find('IEM_MC_DEFER_TO_CIMPL_') > 0 and sModified.strip().startswith('{\n'))
2859 or sModified.startswith('pVCpu->iem.s.fEndTb = true')
2860 or sModified.startswith('pVCpu->iem.s.fTbCurInstr = ')
2861 ), 'sModified="%s"' % (sModified,);
2862 oOut.write(sModified);
2863
2864 offLine = oThreadedFunction.oMcBlock.offAfterEnd;
2865
2866 # Advance
2867 iThreadedFunction += 1;
2868 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
2869
2870 # Last line segment.
2871 if offLine < len(sLine):
2872 oOut.write(sLine[offLine : ]);
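# Note: offLine tracks how much of this macro-expansion line has already been
# copied, so several MC blocks sharing one physical line are stitched back
# together left to right with their replacements in between.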
2873
2874 oOut.write("/* ****** END %s ******* */\n" % (oParser.sSrcFile,));
2875
2876 return True;
2877
2878 def generateModifiedInput1(self, oOut):
2879 """
2880 Generates the combined modified input source/header file, part 1.
2881 Returns success indicator.
2882 """
2883 return self.generateModifiedInput(oOut, 1);
2884
2885 def generateModifiedInput2(self, oOut):
2886 """
2887 Generates the combined modified input source/header file, part 2.
2888 Returns success indicator.
2889 """
2890 return self.generateModifiedInput(oOut, 2);
2891
2892 def generateModifiedInput3(self, oOut):
2893 """
2894 Generates the combined modified input source/header file, part 3.
2895 Returns success indicator.
2896 """
2897 return self.generateModifiedInput(oOut, 3);
2898
2899 def generateModifiedInput4(self, oOut):
2900 """
2901 Generates the combined modified input source/header file, part 4.
2902 Returns success indicator.
2903 """
2904 return self.generateModifiedInput(oOut, 4);
2905
2906
2907 #
2908 # Main
2909 #
2910
2911 def main(self, asArgs):
2912 """
2913 C-like main function.
2914 Returns exit code.
2915 """
2916
2917 #
2918 # Parse arguments
2919 #
2920 sScriptDir = os.path.dirname(__file__);
2921 oParser = argparse.ArgumentParser(add_help = False);
2922 oParser.add_argument('asInFiles',
2923 metavar = 'input.cpp.h',
2924 nargs = '*',
2925 default = [os.path.join(sScriptDir, aoInfo[0])
2926 for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet],
2927 help = "Selection of VMMAll/IEMAllInst*.cpp.h files to use as input.");
2928 oParser.add_argument('--host-arch',
2929 metavar = 'arch',
2930 dest = 'sHostArch',
2931 action = 'store',
2932 default = None,
2933 help = 'The host architecture.');
2934
2935 oParser.add_argument('--out-thrd-funcs-hdr',
2936 metavar = 'file-thrd-funcs.h',
2937 dest = 'sOutFileThrdFuncsHdr',
2938 action = 'store',
2939 default = '-',
2940 help = 'The output header file for the threaded functions.');
2941 oParser.add_argument('--out-thrd-funcs-cpp',
2942 metavar = 'file-thrd-funcs.cpp',
2943 dest = 'sOutFileThrdFuncsCpp',
2944 action = 'store',
2945 default = '-',
2946 help = 'The output C++ file for the threaded functions.');
2947 oParser.add_argument('--out-n8ve-funcs-hdr',
2948 metavar = 'file-n8tv-funcs.h',
2949 dest = 'sOutFileN8veFuncsHdr',
2950 action = 'store',
2951 default = '-',
2952 help = 'The output header file for the native recompiler functions.');
2953 oParser.add_argument('--out-n8ve-funcs-cpp',
2954 metavar = 'file-n8tv-funcs.cpp',
2955 dest = 'sOutFileN8veFuncsCpp',
2956 action = 'store',
2957 default = '-',
2958 help = 'The output C++ file for the native recompiler functions.');
2959 oParser.add_argument('--out-n8ve-liveness-cpp',
2960 metavar = 'file-n8tv-liveness.cpp',
2961 dest = 'sOutFileN8veLivenessCpp',
2962 action = 'store',
2963 default = '-',
2964 help = 'The output C++ file for the native recompiler liveness analysis functions.');
2965 oParser.add_argument('--native',
2966 dest = 'fNativeRecompilerEnabled',
2967 action = 'store_true',
2968 default = False,
2969 help = 'Enables generating the files related to native recompilation.');
2970 oParser.add_argument('--out-mod-input1',
2971 metavar = 'file-instr.cpp.h',
2972 dest = 'sOutFileModInput1',
2973 action = 'store',
2974 default = '-',
2975 help = 'The output C++/header file for modified input instruction files part 1.');
2976 oParser.add_argument('--out-mod-input2',
2977 metavar = 'file-instr.cpp.h',
2978 dest = 'sOutFileModInput2',
2979 action = 'store',
2980 default = '-',
2981 help = 'The output C++/header file for modified input instruction files part 2.');
2982 oParser.add_argument('--out-mod-input3',
2983 metavar = 'file-instr.cpp.h',
2984 dest = 'sOutFileModInput3',
2985 action = 'store',
2986 default = '-',
2987 help = 'The output C++/header file for modified input instruction files part 3.');
2988 oParser.add_argument('--out-mod-input4',
2989 metavar = 'file-instr.cpp.h',
2990 dest = 'sOutFileModInput4',
2991 action = 'store',
2992 default = '-',
2993 help = 'The output C++/header file for modified input instruction files part 4.');
2994 oParser.add_argument('--help', '-h', '-?',
2995 action = 'help',
2996 help = 'Display help and exit.');
2997 oParser.add_argument('--version', '-V',
2998 action = 'version',
2999 version = 'r%s (IEMAllThreadedPython.py), r%s (IEMAllInstPython.py)'
3000 % (__version__.split()[1], iai.__version__.split()[1],),
3001 help = 'Displays the version/revision of the script and exits.');
3002 self.oOptions = oParser.parse_args(asArgs[1:]);
3003 print("oOptions=%s" % (self.oOptions,), file = sys.stderr);
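# A typical invocation might look like this (argument values purely illustrative):
#     IEMAllThrdPython.py --host-arch amd64 --native \
#         --out-thrd-funcs-hdr IEMThreadedFunctions.h \
#         --out-n8ve-funcs-cpp IEMNativeFunctions.cpp
# Any output option left at its '-' default is written to stdout.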
3004
3005 #
3006 # Process the instructions specified in the IEM sources.
3007 #
3008 if self.processInputFiles(self.oOptions.sHostArch, self.oOptions.fNativeRecompilerEnabled):
3009 #
3010 # Generate the output files.
3011 #
3012 aaoOutputFiles = (
3013 ( self.oOptions.sOutFileThrdFuncsHdr, self.generateThreadedFunctionsHeader ),
3014 ( self.oOptions.sOutFileThrdFuncsCpp, self.generateThreadedFunctionsSource ),
3015 ( self.oOptions.sOutFileN8veFuncsHdr, self.generateNativeFunctionsHeader ),
3016 ( self.oOptions.sOutFileN8veFuncsCpp, self.generateNativeFunctionsSource ),
3017 ( self.oOptions.sOutFileN8veLivenessCpp, self.generateNativeLivenessSource ),
3018 ( self.oOptions.sOutFileModInput1, self.generateModifiedInput1 ),
3019 ( self.oOptions.sOutFileModInput2, self.generateModifiedInput2 ),
3020 ( self.oOptions.sOutFileModInput3, self.generateModifiedInput3 ),
3021 ( self.oOptions.sOutFileModInput4, self.generateModifiedInput4 ),
3022 );
3023 fRc = True;
3024 for sOutFile, fnGenMethod in aaoOutputFiles:
3025 if sOutFile == '-':
3026 fRc = fnGenMethod(sys.stdout) and fRc;
3027 else:
3028 try:
3029 oOut = open(sOutFile, 'w'); # pylint: disable=consider-using-with,unspecified-encoding
3030 except Exception as oXcpt:
3032 print('error! Failed to open "%s" for writing: %s' % (sOutFile, oXcpt,), file = sys.stderr);
3032 return 1;
3033 fRc = fnGenMethod(oOut) and fRc;
3034 oOut.close();
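# Note: the 'and fRc' accumulation means every requested output file is still
# attempted even after a generator fails, while main() ultimately returns 1 if
# any of them reported failure.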
3035 if fRc:
3036 return 0;
3037
3038 return 1;
3039
3040
3041if __name__ == '__main__':
3042 sys.exit(IEMThreadedGenerator().main(sys.argv));
3043