VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllThrdPython.py@103233

Last change on this file since 103233 was 103233, checked in by vboxsync, 14 months ago

VMM/IEM: Liveness analysis, part 8: Propagating EFLAGS annotations to the liveness code, asserting that flag modifications are within the annotations, gather some statistics on potential EFLAGS updating gains. bugref:10372 bugref:10375

  • Property svn:eol-style set to LF
  • Property svn:executable set to *
  • Property svn:keywords set to Author Date Id Revision
File size: 160.1 KB
 
1#!/usr/bin/env python
2# -*- coding: utf-8 -*-
3# $Id: IEMAllThrdPython.py 103233 2024-02-07 00:09:53Z vboxsync $
4# pylint: disable=invalid-name
5
6"""
7Annotates and generates threaded functions from IEMAllInst*.cpp.h.
8"""
9
10from __future__ import print_function;
11
12__copyright__ = \
13"""
14Copyright (C) 2023 Oracle and/or its affiliates.
15
16This file is part of VirtualBox base platform packages, as
18available from https://www.virtualbox.org.
18
19This program is free software; you can redistribute it and/or
20modify it under the terms of the GNU General Public License
21as published by the Free Software Foundation, in version 3 of the
22License.
23
24This program is distributed in the hope that it will be useful, but
25WITHOUT ANY WARRANTY; without even the implied warranty of
26MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
27General Public License for more details.
28
29You should have received a copy of the GNU General Public License
30along with this program; if not, see <https://www.gnu.org/licenses>.
31
32SPDX-License-Identifier: GPL-3.0-only
33"""
34__version__ = "$Revision: 103233 $"
35
36# Standard python imports.
37import copy;
38import datetime;
39import os;
40import re;
41import sys;
42import argparse;
43from typing import Dict, List;
44
45import IEMAllInstPython as iai;
46import IEMAllN8vePython as ian;
47
48
49# Python 3 hacks:
50if sys.version_info[0] >= 3:
51 long = int; # pylint: disable=redefined-builtin,invalid-name
52
53## Number of generic parameters for the thread functions.
54g_kcThreadedParams = 3;
55
56g_kdTypeInfo = {
57 # type name: (cBits, fSigned, C-type )
58 'int8_t': ( 8, True, 'int8_t', ),
59 'int16_t': ( 16, True, 'int16_t', ),
60 'int32_t': ( 32, True, 'int32_t', ),
61 'int64_t': ( 64, True, 'int64_t', ),
62 'uint4_t': ( 4, False, 'uint8_t', ),
63 'uint8_t': ( 8, False, 'uint8_t', ),
64 'uint16_t': ( 16, False, 'uint16_t', ),
65 'uint32_t': ( 32, False, 'uint32_t', ),
66 'uint64_t': ( 64, False, 'uint64_t', ),
67 'uintptr_t': ( 64, False, 'uintptr_t',), # ASSUMES 64-bit host pointer size.
68 'bool': ( 1, False, 'bool', ),
69 'IEMMODE': ( 2, False, 'IEMMODE', ),
70};
71
72# Only for getTypeBitCount/variables.
73g_kdTypeInfo2 = {
74 'RTFLOAT32U': ( 32, False, 'RTFLOAT32U', ),
75 'RTFLOAT64U': ( 64, False, 'RTFLOAT64U', ),
76 'RTUINT64U': ( 64, False, 'RTUINT64U', ),
77 'RTGCPTR': ( 64, False, 'RTGCPTR', ),
78 'RTPBCD80U': ( 80, False, 'RTPBCD80U', ),
79 'RTFLOAT80U': ( 80, False, 'RTFLOAT80U', ),
80 'IEMFPURESULT': (80+16, False, 'IEMFPURESULT', ),
81 'IEMFPURESULTTWO': (80+16+80,False, 'IEMFPURESULTTWO', ),
82 'RTUINT128U': ( 128, False, 'RTUINT128U', ),
83 'X86XMMREG': ( 128, False, 'X86XMMREG', ),
84 'IEMSSERESULT': ( 128+32, False, 'IEMSSERESULT', ),
85 'IEMMEDIAF2XMMSRC': ( 256, False, 'IEMMEDIAF2XMMSRC',),
86 'RTUINT256U': ( 256, False, 'RTUINT256U', ),
87 'IEMPCMPISTRXSRC': ( 256, False, 'IEMPCMPISTRXSRC', ),
88 'IEMPCMPESTRXSRC': ( 384, False, 'IEMPCMPESTRXSRC', ),
89}; #| g_kdTypeInfo; - requires 3.9
90g_kdTypeInfo2.update(g_kdTypeInfo);
91
92def getTypeBitCount(sType):
93 """
94 Translate a type to size in bits
95 """
96 if sType in g_kdTypeInfo2:
97 return g_kdTypeInfo2[sType][0];
98 if '*' in sType or sType[0] == 'P':
99 return 64;
100 #raise Exception('Unknown type: %s' % (sType,));
101 print('error: Unknown type: %s' % (sType,));
102 return 64;
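# Example (informal): getTypeBitCount('uint16_t') yields 16 via the tables above, while
# pointer types such as 'PVMCPU' or anything containing '*' fall back to the 64-bit host
# pointer assumption.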
103
104g_kdIemFieldToType = {
105 # Illegal ones:
106 'offInstrNextByte': ( None, ),
107 'cbInstrBuf': ( None, ),
108 'pbInstrBuf': ( None, ),
109 'uInstrBufPc': ( None, ),
110 'cbInstrBufTotal': ( None, ),
111 'offCurInstrStart': ( None, ),
112 'cbOpcode': ( None, ),
113 'offOpcode': ( None, ),
114 'offModRm': ( None, ),
115 # Okay ones.
116 'fPrefixes': ( 'uint32_t', ),
117 'uRexReg': ( 'uint8_t', ),
118 'uRexB': ( 'uint8_t', ),
119 'uRexIndex': ( 'uint8_t', ),
120 'iEffSeg': ( 'uint8_t', ),
121 'enmEffOpSize': ( 'IEMMODE', ),
122 'enmDefAddrMode': ( 'IEMMODE', ),
123 'enmEffAddrMode': ( 'IEMMODE', ),
124 'enmDefOpSize': ( 'IEMMODE', ),
125 'idxPrefix': ( 'uint8_t', ),
126 'uVex3rdReg': ( 'uint8_t', ),
127 'uVexLength': ( 'uint8_t', ),
128 'fEvexStuff': ( 'uint8_t', ),
129 'uFpuOpcode': ( 'uint16_t', ),
130};
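# Note: entries with a None type are decoder-internal fields that analyzeReferenceToType()
# below cannot map to a type, so a reference to one of them ends up in raiseProblem()
# instead of being forwarded to a threaded function.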
131
132## @name McStmtCond.oIfBranchAnnotation/McStmtCond.oElseBranchAnnotation values
133## @{
134g_ksFinishAnnotation_Advance = 'Advance';
135g_ksFinishAnnotation_RelJmp = 'RelJmp';
136g_ksFinishAnnotation_SetJmp = 'SetJmp';
137g_ksFinishAnnotation_DeferToCImpl = 'DeferToCImpl';
138## @}
139
140
141class ThreadedParamRef(object):
142 """
143 A parameter reference for a threaded function.
144 """
145
146 def __init__(self, sOrgRef, sType, oStmt, iParam = None, offParam = 0, sStdRef = None):
147 ## The name / reference in the original code.
148 self.sOrgRef = sOrgRef;
149 ## Normalized name to deal with spaces in macro invocations and such.
150 self.sStdRef = sStdRef if sStdRef else ''.join(sOrgRef.split());
151 ## Indicates that sOrgRef may not match the parameter.
152 self.fCustomRef = sStdRef is not None;
153 ## The type (typically derived).
154 self.sType = sType;
155 ## The statement making the reference.
156 self.oStmt = oStmt;
157 ## The parameter containing the references. None if implicit.
158 self.iParam = iParam;
159 ## The offset in the parameter of the reference.
160 self.offParam = offParam;
161
162 ## The variable name in the threaded function.
163 self.sNewName = 'x';
164 ## The parameter this is packed into.
165 self.iNewParam = 99;
166 ## The bit offset in iNewParam.
167 self.offNewParam = 1024;
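# Note: the values above ('x', 99, 1024) are only placeholders; they get overwritten by
# analyzeConsolidateThreadedParamRefs() once the parameter is named and packed.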
168
169
170class ThreadedFunctionVariation(object):
171 """ Threaded function variation. """
172
173 ## @name Variations.
174 ## These variations will match translation block selection/distinctions as well.
175 ## @{
176 # pylint: disable=line-too-long
177 ksVariation_Default = ''; ##< No variations - only used by IEM_MC_DEFER_TO_CIMPL_X_RET.
178 ksVariation_16 = '_16'; ##< 16-bit mode code (386+).
179 ksVariation_16f = '_16f'; ##< 16-bit mode code (386+), check+clear eflags.
180 ksVariation_16_Jmp = '_16_Jmp'; ##< 16-bit mode code (386+), conditional jump taken.
181 ksVariation_16f_Jmp = '_16f_Jmp'; ##< 16-bit mode code (386+), check+clear eflags, conditional jump taken.
182 ksVariation_16_NoJmp = '_16_NoJmp'; ##< 16-bit mode code (386+), conditional jump not taken.
183 ksVariation_16f_NoJmp = '_16f_NoJmp'; ##< 16-bit mode code (386+), check+clear eflags, conditional jump not taken.
184 ksVariation_16_Addr32 = '_16_Addr32'; ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing.
185 ksVariation_16f_Addr32 = '_16f_Addr32'; ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing, eflags.
186 ksVariation_16_Pre386 = '_16_Pre386'; ##< 16-bit mode code, pre-386 CPU target.
187 ksVariation_16f_Pre386 = '_16f_Pre386'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags.
188 ksVariation_16_Pre386_Jmp = '_16_Pre386_Jmp'; ##< 16-bit mode code, pre-386 CPU target, conditional jump taken.
189 ksVariation_16f_Pre386_Jmp = '_16f_Pre386_Jmp'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags, conditional jump taken.
190 ksVariation_16_Pre386_NoJmp = '_16_Pre386_NoJmp'; ##< 16-bit mode code, pre-386 CPU target, conditional jump not taken.
191 ksVariation_16f_Pre386_NoJmp = '_16f_Pre386_NoJmp'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags, conditional jump not taken.
192 ksVariation_32 = '_32'; ##< 32-bit mode code (386+).
193 ksVariation_32f = '_32f'; ##< 32-bit mode code (386+), check+clear eflags.
194 ksVariation_32_Jmp = '_32_Jmp'; ##< 32-bit mode code (386+), conditional jump taken.
195 ksVariation_32f_Jmp = '_32f_Jmp'; ##< 32-bit mode code (386+), check+clear eflags, conditional jump taken.
196 ksVariation_32_NoJmp = '_32_NoJmp'; ##< 32-bit mode code (386+), conditional jump not taken.
197 ksVariation_32f_NoJmp = '_32f_NoJmp'; ##< 32-bit mode code (386+), check+clear eflags, conditional jump not taken.
198 ksVariation_32_Flat = '_32_Flat'; ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide.
199 ksVariation_32f_Flat = '_32f_Flat'; ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide, eflags.
200 ksVariation_32_Addr16 = '_32_Addr16'; ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing.
201 ksVariation_32f_Addr16 = '_32f_Addr16'; ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing, eflags.
202 ksVariation_64 = '_64'; ##< 64-bit mode code.
203 ksVariation_64f = '_64f'; ##< 64-bit mode code, check+clear eflags.
204 ksVariation_64_Jmp = '_64_Jmp'; ##< 64-bit mode code, conditional jump taken.
205 ksVariation_64f_Jmp = '_64f_Jmp'; ##< 64-bit mode code, check+clear eflags, conditional jump taken.
206 ksVariation_64_NoJmp = '_64_NoJmp'; ##< 64-bit mode code, conditional jump not taken.
207 ksVariation_64f_NoJmp = '_64f_NoJmp'; ##< 64-bit mode code, check+clear eflags, conditional jump not taken.
208 ksVariation_64_FsGs = '_64_FsGs'; ##< 64-bit mode code, with memory accesses via FS or GS.
209 ksVariation_64f_FsGs = '_64f_FsGs'; ##< 64-bit mode code, with memory accesses via FS or GS, check+clear eflags.
210 ksVariation_64_Addr32 = '_64_Addr32'; ##< 64-bit mode code, address size prefixed to 32-bit addressing.
211 ksVariation_64f_Addr32 = '_64f_Addr32'; ##< 64-bit mode code, address size prefixed to 32-bit addressing, c+c eflags.
212 # pylint: enable=line-too-long
213 kasVariations = (
214 ksVariation_Default,
215 ksVariation_16,
216 ksVariation_16f,
217 ksVariation_16_Jmp,
218 ksVariation_16f_Jmp,
219 ksVariation_16_NoJmp,
220 ksVariation_16f_NoJmp,
221 ksVariation_16_Addr32,
222 ksVariation_16f_Addr32,
223 ksVariation_16_Pre386,
224 ksVariation_16f_Pre386,
225 ksVariation_16_Pre386_Jmp,
226 ksVariation_16f_Pre386_Jmp,
227 ksVariation_16_Pre386_NoJmp,
228 ksVariation_16f_Pre386_NoJmp,
229 ksVariation_32,
230 ksVariation_32f,
231 ksVariation_32_Jmp,
232 ksVariation_32f_Jmp,
233 ksVariation_32_NoJmp,
234 ksVariation_32f_NoJmp,
235 ksVariation_32_Flat,
236 ksVariation_32f_Flat,
237 ksVariation_32_Addr16,
238 ksVariation_32f_Addr16,
239 ksVariation_64,
240 ksVariation_64f,
241 ksVariation_64_Jmp,
242 ksVariation_64f_Jmp,
243 ksVariation_64_NoJmp,
244 ksVariation_64f_NoJmp,
245 ksVariation_64_FsGs,
246 ksVariation_64f_FsGs,
247 ksVariation_64_Addr32,
248 ksVariation_64f_Addr32,
249 );
250 kasVariationsWithoutAddress = (
251 ksVariation_16,
252 ksVariation_16f,
253 ksVariation_16_Pre386,
254 ksVariation_16f_Pre386,
255 ksVariation_32,
256 ksVariation_32f,
257 ksVariation_64,
258 ksVariation_64f,
259 );
260 kasVariationsWithoutAddressNot286 = (
261 ksVariation_16,
262 ksVariation_16f,
263 ksVariation_32,
264 ksVariation_32f,
265 ksVariation_64,
266 ksVariation_64f,
267 );
268 kasVariationsWithoutAddressNot286Not64 = (
269 ksVariation_16,
270 ksVariation_16f,
271 ksVariation_32,
272 ksVariation_32f,
273 );
274 kasVariationsWithoutAddressNot64 = (
275 ksVariation_16,
276 ksVariation_16f,
277 ksVariation_16_Pre386,
278 ksVariation_16f_Pre386,
279 ksVariation_32,
280 ksVariation_32f,
281 );
282 kasVariationsWithoutAddressOnly64 = (
283 ksVariation_64,
284 ksVariation_64f,
285 );
286 kasVariationsWithAddress = (
287 ksVariation_16,
288 ksVariation_16f,
289 ksVariation_16_Addr32,
290 ksVariation_16f_Addr32,
291 ksVariation_16_Pre386,
292 ksVariation_16f_Pre386,
293 ksVariation_32,
294 ksVariation_32f,
295 ksVariation_32_Flat,
296 ksVariation_32f_Flat,
297 ksVariation_32_Addr16,
298 ksVariation_32f_Addr16,
299 ksVariation_64,
300 ksVariation_64f,
301 ksVariation_64_FsGs,
302 ksVariation_64f_FsGs,
303 ksVariation_64_Addr32,
304 ksVariation_64f_Addr32,
305 );
306 kasVariationsWithAddressNot286 = (
307 ksVariation_16,
308 ksVariation_16f,
309 ksVariation_16_Addr32,
310 ksVariation_16f_Addr32,
311 ksVariation_32,
312 ksVariation_32f,
313 ksVariation_32_Flat,
314 ksVariation_32f_Flat,
315 ksVariation_32_Addr16,
316 ksVariation_32f_Addr16,
317 ksVariation_64,
318 ksVariation_64f,
319 ksVariation_64_FsGs,
320 ksVariation_64f_FsGs,
321 ksVariation_64_Addr32,
322 ksVariation_64f_Addr32,
323 );
324 kasVariationsWithAddressNot286Not64 = (
325 ksVariation_16,
326 ksVariation_16f,
327 ksVariation_16_Addr32,
328 ksVariation_16f_Addr32,
329 ksVariation_32,
330 ksVariation_32f,
331 ksVariation_32_Flat,
332 ksVariation_32f_Flat,
333 ksVariation_32_Addr16,
334 ksVariation_32f_Addr16,
335 );
336 kasVariationsWithAddressNot64 = (
337 ksVariation_16,
338 ksVariation_16f,
339 ksVariation_16_Addr32,
340 ksVariation_16f_Addr32,
341 ksVariation_16_Pre386,
342 ksVariation_16f_Pre386,
343 ksVariation_32,
344 ksVariation_32f,
345 ksVariation_32_Flat,
346 ksVariation_32f_Flat,
347 ksVariation_32_Addr16,
348 ksVariation_32f_Addr16,
349 );
350 kasVariationsWithAddressOnly64 = (
351 ksVariation_64,
352 ksVariation_64f,
353 ksVariation_64_FsGs,
354 ksVariation_64f_FsGs,
355 ksVariation_64_Addr32,
356 ksVariation_64f_Addr32,
357 );
358 kasVariationsOnlyPre386 = (
359 ksVariation_16_Pre386,
360 ksVariation_16f_Pre386,
361 );
362 kasVariationsEmitOrder = (
363 ksVariation_Default,
364 ksVariation_64,
365 ksVariation_64f,
366 ksVariation_64_Jmp,
367 ksVariation_64f_Jmp,
368 ksVariation_64_NoJmp,
369 ksVariation_64f_NoJmp,
370 ksVariation_64_FsGs,
371 ksVariation_64f_FsGs,
372 ksVariation_32_Flat,
373 ksVariation_32f_Flat,
374 ksVariation_32,
375 ksVariation_32f,
376 ksVariation_32_Jmp,
377 ksVariation_32f_Jmp,
378 ksVariation_32_NoJmp,
379 ksVariation_32f_NoJmp,
380 ksVariation_16,
381 ksVariation_16f,
382 ksVariation_16_Jmp,
383 ksVariation_16f_Jmp,
384 ksVariation_16_NoJmp,
385 ksVariation_16f_NoJmp,
386 ksVariation_16_Addr32,
387 ksVariation_16f_Addr32,
388 ksVariation_16_Pre386,
389 ksVariation_16f_Pre386,
390 ksVariation_16_Pre386_Jmp,
391 ksVariation_16f_Pre386_Jmp,
392 ksVariation_16_Pre386_NoJmp,
393 ksVariation_16f_Pre386_NoJmp,
394 ksVariation_32_Addr16,
395 ksVariation_32f_Addr16,
396 ksVariation_64_Addr32,
397 ksVariation_64f_Addr32,
398 );
399 kdVariationNames = {
400 ksVariation_Default: 'defer-to-cimpl',
401 ksVariation_16: '16-bit',
402 ksVariation_16f: '16-bit w/ eflag checking and clearing',
403 ksVariation_16_Jmp: '16-bit w/ conditional jump taken',
404 ksVariation_16f_Jmp: '16-bit w/ eflag checking and clearing and conditional jump taken',
405 ksVariation_16_NoJmp: '16-bit w/ conditional jump not taken',
406 ksVariation_16f_NoJmp: '16-bit w/ eflag checking and clearing and conditional jump not taken',
407 ksVariation_16_Addr32: '16-bit w/ address prefix (Addr32)',
408 ksVariation_16f_Addr32: '16-bit w/ address prefix (Addr32) and eflag checking and clearing',
409 ksVariation_16_Pre386: '16-bit on pre-386 CPU',
410 ksVariation_16f_Pre386: '16-bit on pre-386 CPU w/ eflag checking and clearing',
411 ksVariation_16_Pre386_Jmp: '16-bit on pre-386 CPU w/ conditional jump taken',
412 ksVariation_16f_Pre386_Jmp: '16-bit on pre-386 CPU w/ eflag checking and clearing and conditional jump taken',
413 ksVariation_16_Pre386_NoJmp: '16-bit on pre-386 CPU w/ conditional jump not taken',
414 ksVariation_16f_Pre386_NoJmp: '16-bit on pre-386 CPU w/ eflag checking and clearing and conditional jump not taken',
415 ksVariation_32: '32-bit',
416 ksVariation_32f: '32-bit w/ eflag checking and clearing',
417 ksVariation_32_Jmp: '32-bit w/ conditional jump taken',
418 ksVariation_32f_Jmp: '32-bit w/ eflag checking and clearing and conditional jump taken',
419 ksVariation_32_NoJmp: '32-bit w/ conditional jump not taken',
420 ksVariation_32f_NoJmp: '32-bit w/ eflag checking and clearing and conditional jump not taken',
421 ksVariation_32_Flat: '32-bit flat and wide open CS, SS, DS and ES',
422 ksVariation_32f_Flat: '32-bit flat and wide open CS, SS, DS and ES w/ eflag checking and clearing',
423 ksVariation_32_Addr16: '32-bit w/ address prefix (Addr16)',
424 ksVariation_32f_Addr16: '32-bit w/ address prefix (Addr16) and eflag checking and clearing',
425 ksVariation_64: '64-bit',
426 ksVariation_64f: '64-bit w/ eflag checking and clearing',
427 ksVariation_64_Jmp: '64-bit w/ conditional jump taken',
428 ksVariation_64f_Jmp: '64-bit w/ eflag checking and clearing and conditional jump taken',
429 ksVariation_64_NoJmp: '64-bit w/ conditional jump not taken',
430 ksVariation_64f_NoJmp: '64-bit w/ eflag checking and clearing and conditional jump not taken',
431 ksVariation_64_FsGs: '64-bit with memory accessed via FS or GS',
432 ksVariation_64f_FsGs: '64-bit with memory accessed via FS or GS and eflag checking and clearing',
433 ksVariation_64_Addr32: '64-bit w/ address prefix (Addr32)',
434 ksVariation_64f_Addr32: '64-bit w/ address prefix (Addr32) and eflag checking and clearing',
435 };
436 kdVariationsWithEflagsCheckingAndClearing = {
437 ksVariation_16f: True,
438 ksVariation_16f_Jmp: True,
439 ksVariation_16f_NoJmp: True,
440 ksVariation_16f_Addr32: True,
441 ksVariation_16f_Pre386: True,
442 ksVariation_16f_Pre386_Jmp: True,
443 ksVariation_16f_Pre386_NoJmp: True,
444 ksVariation_32f: True,
445 ksVariation_32f_Jmp: True,
446 ksVariation_32f_NoJmp: True,
447 ksVariation_32f_Flat: True,
448 ksVariation_32f_Addr16: True,
449 ksVariation_64f: True,
450 ksVariation_64f_Jmp: True,
451 ksVariation_64f_NoJmp: True,
452 ksVariation_64f_FsGs: True,
453 ksVariation_64f_Addr32: True,
454 };
455 kdVariationsOnly64NoFlags = {
456 ksVariation_64: True,
457 ksVariation_64_Jmp: True,
458 ksVariation_64_NoJmp: True,
459 ksVariation_64_FsGs: True,
460 ksVariation_64_Addr32: True,
461 };
462 kdVariationsOnly64WithFlags = {
463 ksVariation_64f: True,
464 ksVariation_64f_Jmp: True,
465 ksVariation_64f_NoJmp: True,
466 ksVariation_64f_FsGs: True,
467 ksVariation_64f_Addr32: True,
468 };
469 kdVariationsOnlyPre386NoFlags = {
470 ksVariation_16_Pre386: True,
471 ksVariation_16_Pre386_Jmp: True,
472 ksVariation_16_Pre386_NoJmp: True,
473 };
474 kdVariationsOnlyPre386WithFlags = {
475 ksVariation_16f_Pre386: True,
476 ksVariation_16f_Pre386_Jmp: True,
477 ksVariation_16f_Pre386_NoJmp: True,
478 };
479 kdVariationsWithFlatAddress = {
480 ksVariation_32_Flat: True,
481 ksVariation_32f_Flat: True,
482 ksVariation_64: True,
483 ksVariation_64f: True,
484 ksVariation_64_Addr32: True,
485 ksVariation_64f_Addr32: True,
486 };
487 kdVariationsWithFlatStackAddress = {
488 ksVariation_32_Flat: True,
489 ksVariation_32f_Flat: True,
490 ksVariation_64: True,
491 ksVariation_64f: True,
492 ksVariation_64_FsGs: True,
493 ksVariation_64f_FsGs: True,
494 ksVariation_64_Addr32: True,
495 ksVariation_64f_Addr32: True,
496 };
497 kdVariationsWithFlat64StackAddress = {
498 ksVariation_64: True,
499 ksVariation_64f: True,
500 ksVariation_64_FsGs: True,
501 ksVariation_64f_FsGs: True,
502 ksVariation_64_Addr32: True,
503 ksVariation_64f_Addr32: True,
504 };
505 kdVariationsWithFlatAddr16 = {
506 ksVariation_16: True,
507 ksVariation_16f: True,
508 ksVariation_16_Pre386: True,
509 ksVariation_16f_Pre386: True,
510 ksVariation_32_Addr16: True,
511 ksVariation_32f_Addr16: True,
512 };
513 kdVariationsWithFlatAddr32No64 = {
514 ksVariation_16_Addr32: True,
515 ksVariation_16f_Addr32: True,
516 ksVariation_32: True,
517 ksVariation_32f: True,
518 ksVariation_32_Flat: True,
519 ksVariation_32f_Flat: True,
520 };
521 kdVariationsWithAddressOnly64 = {
522 ksVariation_64: True,
523 ksVariation_64f: True,
524 ksVariation_64_FsGs: True,
525 ksVariation_64f_FsGs: True,
526 ksVariation_64_Addr32: True,
527 ksVariation_64f_Addr32: True,
528 };
529 kdVariationsWithConditional = {
530 ksVariation_16_Jmp: True,
531 ksVariation_16_NoJmp: True,
532 ksVariation_16_Pre386_Jmp: True,
533 ksVariation_16_Pre386_NoJmp: True,
534 ksVariation_32_Jmp: True,
535 ksVariation_32_NoJmp: True,
536 ksVariation_64_Jmp: True,
537 ksVariation_64_NoJmp: True,
538 ksVariation_16f_Jmp: True,
539 ksVariation_16f_NoJmp: True,
540 ksVariation_16f_Pre386_Jmp: True,
541 ksVariation_16f_Pre386_NoJmp: True,
542 ksVariation_32f_Jmp: True,
543 ksVariation_32f_NoJmp: True,
544 ksVariation_64f_Jmp: True,
545 ksVariation_64f_NoJmp: True,
546 };
547 kdVariationsWithConditionalNoJmp = {
548 ksVariation_16_NoJmp: True,
549 ksVariation_16_Pre386_NoJmp: True,
550 ksVariation_32_NoJmp: True,
551 ksVariation_64_NoJmp: True,
552 ksVariation_16f_NoJmp: True,
553 ksVariation_16f_Pre386_NoJmp: True,
554 ksVariation_32f_NoJmp: True,
555 ksVariation_64f_NoJmp: True,
556 };
557 kdVariationsOnlyPre386 = {
558 ksVariation_16_Pre386: True,
559 ksVariation_16f_Pre386: True,
560 ksVariation_16_Pre386_Jmp: True,
561 ksVariation_16f_Pre386_Jmp: True,
562 ksVariation_16_Pre386_NoJmp: True,
563 ksVariation_16f_Pre386_NoJmp: True,
564 };
565 ## @}
566
567 ## IEM_CIMPL_F_XXX flags that we know.
568 ## The value indicates whether it terminates the TB or not. The goal is to
569 ## improve the recompiler so all but END_TB will be False.
570 ##
571 ## @note iemThreadedRecompilerMcDeferToCImpl0 duplicates info found here.
572 kdCImplFlags = {
573 'IEM_CIMPL_F_MODE': False,
574 'IEM_CIMPL_F_BRANCH_DIRECT': False,
575 'IEM_CIMPL_F_BRANCH_INDIRECT': False,
576 'IEM_CIMPL_F_BRANCH_RELATIVE': False,
577 'IEM_CIMPL_F_BRANCH_FAR': True,
578 'IEM_CIMPL_F_BRANCH_CONDITIONAL': False,
579 # IEM_CIMPL_F_BRANCH_ANY should only be used for testing, so not included here.
580 'IEM_CIMPL_F_BRANCH_STACK': False,
581 'IEM_CIMPL_F_BRANCH_STACK_FAR': False,
582 'IEM_CIMPL_F_RFLAGS': False,
583 'IEM_CIMPL_F_INHIBIT_SHADOW': False,
584 'IEM_CIMPL_F_CHECK_IRQ_AFTER': False,
585 'IEM_CIMPL_F_CHECK_IRQ_BEFORE': False,
586 'IEM_CIMPL_F_CHECK_IRQ_BEFORE_AND_AFTER': False, # (ignore)
587 'IEM_CIMPL_F_STATUS_FLAGS': False,
588 'IEM_CIMPL_F_VMEXIT': False,
589 'IEM_CIMPL_F_FPU': False,
590 'IEM_CIMPL_F_REP': False,
591 'IEM_CIMPL_F_IO': False,
592 'IEM_CIMPL_F_END_TB': True,
593 'IEM_CIMPL_F_XCPT': True,
594 'IEM_CIMPL_F_CALLS_CIMPL': False,
595 'IEM_CIMPL_F_CALLS_AIMPL': False,
596 'IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE': False,
597 };
598
599 def __init__(self, oThreadedFunction, sVariation = ksVariation_Default):
600 self.oParent = oThreadedFunction # type: ThreadedFunction
601 ##< ksVariation_Xxxx.
602 self.sVariation = sVariation
603
604 ## Threaded function parameter references.
605 self.aoParamRefs = [] # type: List[ThreadedParamRef]
606 ## Unique parameter references.
607 self.dParamRefs = {} # type: Dict[str, List[ThreadedParamRef]]
608 ## Minimum number of parameters to the threaded function.
609 self.cMinParams = 0;
610
611 ## List/tree of statements for the threaded function.
612 self.aoStmtsForThreadedFunction = [] # type: List[McStmt]
613
614 ## Function enum number, for verification. Set by generateThreadedFunctionsHeader.
615 self.iEnumValue = -1;
616
617 ## Native recompilation details for this variation.
618 self.oNativeRecomp = None;
619
620 def getIndexName(self):
621 sName = self.oParent.oMcBlock.sFunction;
622 if sName.startswith('iemOp_'):
623 sName = sName[len('iemOp_'):];
624 if self.oParent.oMcBlock.iInFunction == 0:
625 return 'kIemThreadedFunc_%s%s' % ( sName, self.sVariation, );
626 return 'kIemThreadedFunc_%s_%s%s' % ( sName, self.oParent.oMcBlock.iInFunction, self.sVariation, );
627
628 def getThreadedFunctionName(self):
629 sName = self.oParent.oMcBlock.sFunction;
630 if sName.startswith('iemOp_'):
631 sName = sName[len('iemOp_'):];
632 if self.oParent.oMcBlock.iInFunction == 0:
633 return 'iemThreadedFunc_%s%s' % ( sName, self.sVariation, );
634 return 'iemThreadedFunc_%s_%s%s' % ( sName, self.oParent.oMcBlock.iInFunction, self.sVariation, );
635
636 def getNativeFunctionName(self):
637 return 'iemNativeRecompFunc_' + self.getThreadedFunctionName()[len('iemThreadedFunc_'):];
638
639 def getLivenessFunctionName(self):
640 return 'iemNativeLivenessFunc_' + self.getThreadedFunctionName()[len('iemThreadedFunc_'):];
641
642 def getShortName(self):
643 sName = self.oParent.oMcBlock.sFunction;
644 if sName.startswith('iemOp_'):
645 sName = sName[len('iemOp_'):];
646 if self.oParent.oMcBlock.iInFunction == 0:
647 return '%s%s' % ( sName, self.sVariation, );
648 return '%s_%s%s' % ( sName, self.oParent.oMcBlock.iInFunction, self.sVariation, );
649
650 def isWithFlagsCheckingAndClearingVariation(self):
651 """
652 Checks if this is a variation that checks and clears EFLAGS.
653 """
654 return self.sVariation in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing;
655
656 #
657 # Analysis and code morphing.
658 #
659
660 def raiseProblem(self, sMessage):
661 """ Raises a problem. """
662 self.oParent.raiseProblem(sMessage);
663
664 def warning(self, sMessage):
665 """ Emits a warning. """
666 self.oParent.warning(sMessage);
667
668 def analyzeReferenceToType(self, sRef):
669 """
670 Translates a variable or structure reference to a type.
671 Returns type name.
672 Raises exception if unable to figure it out.
673 """
674 ch0 = sRef[0];
675 if ch0 == 'u':
676 if sRef.startswith('u32'):
677 return 'uint32_t';
678 if sRef.startswith('u8') or sRef == 'uReg':
679 return 'uint8_t';
680 if sRef.startswith('u64'):
681 return 'uint64_t';
682 if sRef.startswith('u16'):
683 return 'uint16_t';
684 elif ch0 == 'b':
685 return 'uint8_t';
686 elif ch0 == 'f':
687 return 'bool';
688 elif ch0 == 'i':
689 if sRef.startswith('i8'):
690 return 'int8_t';
691 if sRef.startswith('i16'):
692 return 'int16_t';
693 if sRef.startswith('i32'):
694 return 'int32_t';
695 if sRef.startswith('i64'):
696 return 'int64_t';
697 if sRef in ('iReg', 'iFixedReg', 'iGReg', 'iSegReg', 'iSrcReg', 'iDstReg', 'iCrReg'):
698 return 'uint8_t';
699 elif ch0 == 'p':
700 if sRef.find('-') < 0:
701 return 'uintptr_t';
702 if sRef.startswith('pVCpu->iem.s.'):
703 sField = sRef[len('pVCpu->iem.s.') : ];
704 if sField in g_kdIemFieldToType:
705 if g_kdIemFieldToType[sField][0]:
706 return g_kdIemFieldToType[sField][0];
707 elif ch0 == 'G' and sRef.startswith('GCPtr'):
708 return 'uint64_t';
709 elif ch0 == 'e':
710 if sRef == 'enmEffOpSize':
711 return 'IEMMODE';
712 elif ch0 == 'o':
713 if sRef.startswith('off32'):
714 return 'uint32_t';
715 elif sRef == 'cbFrame': # enter
716 return 'uint16_t';
717 elif sRef == 'cShift': ## @todo risky
718 return 'uint8_t';
719
720 self.raiseProblem('Unknown reference: %s' % (sRef,));
721 return None; # Shut up pylint 2.16.2.
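# Examples (informal): 'u16Port' resolves to 'uint16_t', 'fReg' to 'bool', and
# 'pVCpu->iem.s.uFpuOpcode' to 'uint16_t' via g_kdIemFieldToType.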
722
723 def analyzeCallToType(self, sFnRef):
724 """
725 Determines the type of an indirect function call.
726 """
727 assert sFnRef[0] == 'p';
728
729 #
730 # Simple?
731 #
732 if sFnRef.find('-') < 0:
733 oDecoderFunction = self.oParent.oMcBlock.oFunction;
734
735 # Try the argument list of the function definition macro invocation first.
736 iArg = 2;
737 while iArg < len(oDecoderFunction.asDefArgs):
738 if sFnRef == oDecoderFunction.asDefArgs[iArg]:
739 return oDecoderFunction.asDefArgs[iArg - 1];
740 iArg += 1;
741
742 # Then check for a line that includes the word and looks like a variable declaration.
743 oRe = re.compile(' +(P[A-Z0-9_]+|const +IEMOP[A-Z0-9_]+ *[*]) +(const |) *' + sFnRef + ' *(;|=)');
744 for sLine in oDecoderFunction.asLines:
745 oMatch = oRe.match(sLine);
746 if oMatch:
747 if not oMatch.group(1).startswith('const'):
748 return oMatch.group(1);
749 return 'PC' + oMatch.group(1)[len('const ') : -1].strip();
750
751 #
752 # Deal with the pImpl->pfnXxx:
753 #
754 elif sFnRef.startswith('pImpl->pfn'):
755 sMember = sFnRef[len('pImpl->') : ];
756 sBaseType = self.analyzeCallToType('pImpl');
757 offBits = sMember.rfind('U') + 1;
758 if sBaseType == 'PCIEMOPBINSIZES': return 'PFNIEMAIMPLBINU' + sMember[offBits:];
759 if sBaseType == 'PCIEMOPUNARYSIZES': return 'PFNIEMAIMPLUNARYU' + sMember[offBits:];
760 if sBaseType == 'PCIEMOPSHIFTSIZES': return 'PFNIEMAIMPLSHIFTU' + sMember[offBits:];
761 if sBaseType == 'PCIEMOPSHIFTDBLSIZES': return 'PFNIEMAIMPLSHIFTDBLU' + sMember[offBits:];
762 if sBaseType == 'PCIEMOPMULDIVSIZES': return 'PFNIEMAIMPLMULDIVU' + sMember[offBits:];
763 if sBaseType == 'PCIEMOPMEDIAF3': return 'PFNIEMAIMPLMEDIAF3U' + sMember[offBits:];
764 if sBaseType == 'PCIEMOPMEDIAOPTF3': return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:];
765 if sBaseType == 'PCIEMOPMEDIAOPTF2': return 'PFNIEMAIMPLMEDIAOPTF2U' + sMember[offBits:];
766 if sBaseType == 'PCIEMOPMEDIAOPTF3IMM8': return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:] + 'IMM8';
767 if sBaseType == 'PCIEMOPBLENDOP': return 'PFNIEMAIMPLAVXBLENDU' + sMember[offBits:];
768
769 self.raiseProblem('Unknown call reference: %s::%s (%s)' % (sBaseType, sMember, sFnRef,));
770
771 self.raiseProblem('Unknown call reference: %s' % (sFnRef,));
772 return None; # Shut up pylint 2.16.2.
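# Example (informal sketch): for sFnRef = 'pImpl->pfnNormalU16', assuming 'pImpl' resolves
# to 'PCIEMOPBINSIZES', the member suffix after the last 'U' is '16' and the result is
# 'PFNIEMAIMPLBINU16'.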
773
774 def analyze8BitGRegStmt(self, oStmt):
775 """
776 Gets the 8-bit general purpose register access details of the given statement.
777 ASSUMES the statement is one accessing an 8-bit GREG.
778 """
779 idxReg = 0;
780 if ( oStmt.sName.find('_FETCH_') > 0
781 or oStmt.sName.find('_REF_') > 0
782 or oStmt.sName.find('_TO_LOCAL') > 0):
783 idxReg = 1;
784
785 sRegRef = oStmt.asParams[idxReg];
786 if sRegRef.startswith('IEM_GET_MODRM_RM') or sRegRef.startswith('IEM_GET_MODRM_REG'):
787 asBits = [sBit.strip() for sBit in sRegRef.replace('(', ',').replace(')', '').split(',')];
788 if len(asBits) != 3 or asBits[1] != 'pVCpu' or (asBits[0] != 'IEM_GET_MODRM_RM' and asBits[0] != 'IEM_GET_MODRM_REG'):
789 self.raiseProblem('Unexpected reference: %s (asBits=%s)' % (sRegRef, asBits));
790 sOrgExpr = asBits[0] + '_EX8(pVCpu, ' + asBits[2] + ')';
791 else:
792 sOrgExpr = '((%s) < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX) ? (%s) : (%s) + 12)' % (sRegRef, sRegRef, sRegRef);
793
794 if sRegRef.find('IEM_GET_MODRM_RM') >= 0: sStdRef = 'bRmRm8Ex';
795 elif sRegRef.find('IEM_GET_MODRM_REG') >= 0: sStdRef = 'bRmReg8Ex';
796 elif sRegRef == 'X86_GREG_xAX': sStdRef = 'bGregXAx8Ex';
797 elif sRegRef == 'X86_GREG_xCX': sStdRef = 'bGregXCx8Ex';
798 elif sRegRef == 'X86_GREG_xSP': sStdRef = 'bGregXSp8Ex';
799 elif sRegRef == 'iFixedReg': sStdRef = 'bFixedReg8Ex';
800 else:
801 self.warning('analyze8BitGRegStmt: sRegRef=%s -> bOther8Ex; %s %s; sOrgExpr=%s'
802 % (sRegRef, oStmt.sName, oStmt.asParams, sOrgExpr,));
803 sStdRef = 'bOther8Ex';
804
805 #print('analyze8BitGRegStmt: %s %s; sRegRef=%s\n -> idxReg=%s sOrgExpr=%s sStdRef=%s'
806 # % (oStmt.sName, oStmt.asParams, sRegRef, idxReg, sOrgExpr, sStdRef));
807 return (idxReg, sOrgExpr, sStdRef);
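# Worked example (informal): for IEM_MC_FETCH_GREG_U8(u8Value, IEM_GET_MODRM_RM(pVCpu, bRm))
# the register index is parameter 1, sOrgExpr becomes 'IEM_GET_MODRM_RM_EX8(pVCpu, bRm)' and
# sStdRef becomes 'bRmRm8Ex'.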
808
809
810 ## Maps memory related MCs to info for FLAT conversion.
811 ## This is used in 64-bit and flat 32-bit variants to skip the unnecessary
812 ## segmentation checking for every memory access. Only applied to access
813 ## via ES, DS and SS. FS, GS and CS gets the full segmentation threatment,
814 ## the latter (CS) is just to keep things simple (we could safely fetch via
815 ## it, but only in 64-bit mode could we safely write via it, IIRC).
816 kdMemMcToFlatInfo = {
817 'IEM_MC_FETCH_MEM_U8': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8' ),
818 'IEM_MC_FETCH_MEM16_U8': ( 1, 'IEM_MC_FETCH_MEM16_FLAT_U8' ),
819 'IEM_MC_FETCH_MEM32_U8': ( 1, 'IEM_MC_FETCH_MEM32_FLAT_U8' ),
820 'IEM_MC_FETCH_MEM_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16' ),
821 'IEM_MC_FETCH_MEM_U16_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_DISP' ),
822 'IEM_MC_FETCH_MEM_I16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I16' ),
823 'IEM_MC_FETCH_MEM_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32' ),
824 'IEM_MC_FETCH_MEM_U32_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_DISP' ),
825 'IEM_MC_FETCH_MEM_I32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I32' ),
826 'IEM_MC_FETCH_MEM_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64' ),
827 'IEM_MC_FETCH_MEM_U64_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_DISP' ),
828 'IEM_MC_FETCH_MEM_U64_ALIGN_U128': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_ALIGN_U128' ),
829 'IEM_MC_FETCH_MEM_I64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I64' ),
830 'IEM_MC_FETCH_MEM_R32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R32' ),
831 'IEM_MC_FETCH_MEM_R64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R64' ),
832 'IEM_MC_FETCH_MEM_R80': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R80' ),
833 'IEM_MC_FETCH_MEM_D80': ( 1, 'IEM_MC_FETCH_MEM_FLAT_D80' ),
834 'IEM_MC_FETCH_MEM_U128': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128' ),
835 'IEM_MC_FETCH_MEM_U128_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_NO_AC' ),
836 'IEM_MC_FETCH_MEM_U128_ALIGN_SSE': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_ALIGN_SSE' ),
837 'IEM_MC_FETCH_MEM_XMM': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM' ),
838 'IEM_MC_FETCH_MEM_XMM_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_NO_AC' ),
839 'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE' ),
840 'IEM_MC_FETCH_MEM_XMM_U32': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32' ),
841 'IEM_MC_FETCH_MEM_XMM_U64': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64' ),
842 'IEM_MC_FETCH_MEM_U256': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256' ),
843 'IEM_MC_FETCH_MEM_U256_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_NO_AC' ),
844 'IEM_MC_FETCH_MEM_U256_ALIGN_AVX': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_ALIGN_AVX' ),
845 'IEM_MC_FETCH_MEM_YMM': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM' ),
846 'IEM_MC_FETCH_MEM_YMM_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_NO_AC' ),
847 'IEM_MC_FETCH_MEM_YMM_ALIGN_AVX': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX' ),
848 'IEM_MC_FETCH_MEM_U8_ZX_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U16' ),
849 'IEM_MC_FETCH_MEM_U8_ZX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U32' ),
850 'IEM_MC_FETCH_MEM_U8_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U64' ),
851 'IEM_MC_FETCH_MEM_U16_ZX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U32' ),
852 'IEM_MC_FETCH_MEM_U16_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U64' ),
853 'IEM_MC_FETCH_MEM_U32_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_ZX_U64' ),
854 'IEM_MC_FETCH_MEM_U8_SX_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U16' ),
855 'IEM_MC_FETCH_MEM_U8_SX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U32' ),
856 'IEM_MC_FETCH_MEM_U8_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U64' ),
857 'IEM_MC_FETCH_MEM_U16_SX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U32' ),
858 'IEM_MC_FETCH_MEM_U16_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U64' ),
859 'IEM_MC_FETCH_MEM_U32_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_SX_U64' ),
860 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128': ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128' ),
861 'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE_AND_XREG_XMM' ),
862 'IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM': ( 3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32_AND_XREG_XMM' ),
863 'IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM': ( 3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64_AND_XREG_XMM' ),
864 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64':
865 ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_RAX_RDX_U64' ),
866 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64':
867 ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64' ),
868 'IEM_MC_STORE_MEM_U8': ( 0, 'IEM_MC_STORE_MEM_FLAT_U8' ),
869 'IEM_MC_STORE_MEM_U16': ( 0, 'IEM_MC_STORE_MEM_FLAT_U16' ),
870 'IEM_MC_STORE_MEM_U32': ( 0, 'IEM_MC_STORE_MEM_FLAT_U32' ),
871 'IEM_MC_STORE_MEM_U64': ( 0, 'IEM_MC_STORE_MEM_FLAT_U64' ),
872 'IEM_MC_STORE_MEM_U8_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U8_CONST' ),
873 'IEM_MC_STORE_MEM_U16_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U16_CONST' ),
874 'IEM_MC_STORE_MEM_U32_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U32_CONST' ),
875 'IEM_MC_STORE_MEM_U64_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U64_CONST' ),
876 'IEM_MC_STORE_MEM_U128': ( 0, 'IEM_MC_STORE_MEM_FLAT_U128' ),
877 'IEM_MC_STORE_MEM_U128_ALIGN_SSE': ( 0, 'IEM_MC_STORE_MEM_FLAT_U128_ALIGN_SSE' ),
878 'IEM_MC_STORE_MEM_U256': ( 0, 'IEM_MC_STORE_MEM_FLAT_U256' ),
879 'IEM_MC_STORE_MEM_U256_ALIGN_AVX': ( 0, 'IEM_MC_STORE_MEM_FLAT_U256_ALIGN_AVX' ),
880 'IEM_MC_MEM_MAP_D80_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_D80_WO' ),
881 'IEM_MC_MEM_MAP_I16_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_I16_WO' ),
882 'IEM_MC_MEM_MAP_I32_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_I32_WO' ),
883 'IEM_MC_MEM_MAP_I64_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_I64_WO' ),
884 'IEM_MC_MEM_MAP_R32_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_R32_WO' ),
885 'IEM_MC_MEM_MAP_R64_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_R64_WO' ),
886 'IEM_MC_MEM_MAP_R80_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_R80_WO' ),
887 'IEM_MC_MEM_MAP_U8_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_ATOMIC' ),
888 'IEM_MC_MEM_MAP_U8_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RW' ),
889 'IEM_MC_MEM_MAP_U8_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RO' ),
890 'IEM_MC_MEM_MAP_U8_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_WO' ),
891 'IEM_MC_MEM_MAP_U16_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_ATOMIC' ),
892 'IEM_MC_MEM_MAP_U16_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RW' ),
893 'IEM_MC_MEM_MAP_U16_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RO' ),
894 'IEM_MC_MEM_MAP_U16_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_WO' ),
895 'IEM_MC_MEM_MAP_U32_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_ATOMIC' ),
896 'IEM_MC_MEM_MAP_U32_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RW' ),
897 'IEM_MC_MEM_MAP_U32_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RO' ),
898 'IEM_MC_MEM_MAP_U32_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_WO' ),
899 'IEM_MC_MEM_MAP_U64_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_ATOMIC' ),
900 'IEM_MC_MEM_MAP_U64_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RW' ),
901 'IEM_MC_MEM_MAP_U64_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RO' ),
902 'IEM_MC_MEM_MAP_U64_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_WO' ),
903 'IEM_MC_MEM_MAP_U128_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_ATOMIC' ),
904 'IEM_MC_MEM_MAP_U128_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_RW' ),
905 'IEM_MC_MEM_MAP_U128_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_RO' ),
906 'IEM_MC_MEM_MAP_U128_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_WO' ),
907 'IEM_MC_MEM_MAP_EX': ( 3, 'IEM_MC_MEM_FLAT_MAP_EX' ),
908 };
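# The integer in each entry is the index of the effective segment parameter that gets
# dropped when morphing to the FLAT form (see analyzeMorphStmtForThreaded); -1 would mean
# the MC has no such parameter.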
909
910 kdMemMcToFlatInfoStack = {
911 'IEM_MC_PUSH_U16': ( 'IEM_MC_FLAT32_PUSH_U16', 'IEM_MC_FLAT64_PUSH_U16', ),
912 'IEM_MC_PUSH_U32': ( 'IEM_MC_FLAT32_PUSH_U32', 'IEM_MC_PUSH_U32', ),
913 'IEM_MC_PUSH_U64': ( 'IEM_MC_PUSH_U64', 'IEM_MC_FLAT64_PUSH_U64', ),
914 'IEM_MC_PUSH_U32_SREG': ( 'IEM_MC_FLAT32_PUSH_U32_SREG', 'IEM_MC_PUSH_U32_SREG' ),
915 'IEM_MC_POP_GREG_U16': ( 'IEM_MC_FLAT32_POP_GREG_U16', 'IEM_MC_FLAT64_POP_GREG_U16', ),
916 'IEM_MC_POP_GREG_U32': ( 'IEM_MC_FLAT32_POP_GREG_U32', 'IEM_MC_POP_GREG_U32', ),
917 'IEM_MC_POP_GREG_U64': ( 'IEM_MC_POP_GREG_U64', 'IEM_MC_FLAT64_POP_GREG_U64', ),
918 };
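# For the stack MCs the first entry is used by the flat 32-bit variations and the second by
# the flat 64-bit ones, selected in analyzeMorphStmtForThreaded via
# kdVariationsWithFlat64StackAddress.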
919
920 kdThreadedCalcRmEffAddrMcByVariation = {
921 ksVariation_16: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
922 ksVariation_16f: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
923 ksVariation_16_Pre386: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
924 ksVariation_16f_Pre386: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
925 ksVariation_32_Addr16: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
926 ksVariation_32f_Addr16: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
927 ksVariation_16_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
928 ksVariation_16f_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
929 ksVariation_32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
930 ksVariation_32f: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
931 ksVariation_32_Flat: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
932 ksVariation_32f_Flat: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
933 ksVariation_64: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
934 ksVariation_64f: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
935 ksVariation_64_FsGs: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
936 ksVariation_64f_FsGs: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
937 ksVariation_64_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32', ## @todo How did this work again...
938 ksVariation_64f_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32',
939 };
940
941 def analyzeMorphStmtForThreaded(self, aoStmts, dState, iParamRef = 0):
942 """
943 Transforms (copy) the statements into those for the threaded function.
944
945 Returns list/tree of statements (aoStmts is not modified) and the new
946 iParamRef value.
947 """
948 #
949 # We'll be traversing aoParamRefs in parallel to the statements, so we
950 # must match the traversal in analyzeFindThreadedParamRefs exactly.
951 #
952 #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
953 aoThreadedStmts = [];
954 for oStmt in aoStmts:
955 # Skip C++ statements that are purely related to decoding.
956 if not oStmt.isCppStmt() or not oStmt.fDecode:
957 # Copy the statement. Make a deep copy to make sure we've got our own
958 # copies of all instance variables, even if a bit overkill at the moment.
959 oNewStmt = copy.deepcopy(oStmt);
960 aoThreadedStmts.append(oNewStmt);
961 #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));
962
963 # If the statement has parameter references, process the relevant parameters.
964 # We grab the references relevant to this statement and apply them in reverse order.
965 if iParamRef < len(self.aoParamRefs) and self.aoParamRefs[iParamRef].oStmt == oStmt:
966 iParamRefFirst = iParamRef;
967 while True:
968 iParamRef += 1;
969 if iParamRef >= len(self.aoParamRefs) or self.aoParamRefs[iParamRef].oStmt != oStmt:
970 break;
971
972 #print('iParamRefFirst=%s iParamRef=%s' % (iParamRefFirst, iParamRef));
973 for iCurRef in range(iParamRef - 1, iParamRefFirst - 1, -1):
974 oCurRef = self.aoParamRefs[iCurRef];
975 if oCurRef.iParam is not None:
976 assert oCurRef.oStmt == oStmt;
977 #print('iCurRef=%s iParam=%s sOrgRef=%s' % (iCurRef, oCurRef.iParam, oCurRef.sOrgRef));
978 sSrcParam = oNewStmt.asParams[oCurRef.iParam];
979 assert ( sSrcParam[oCurRef.offParam : oCurRef.offParam + len(oCurRef.sOrgRef)] == oCurRef.sOrgRef
980 or oCurRef.fCustomRef), \
981 'offParam=%s sOrgRef=%s iParam=%s oStmt.sName=%s sSrcParam=%s<eos>' \
982 % (oCurRef.offParam, oCurRef.sOrgRef, oCurRef.iParam, oStmt.sName, sSrcParam);
983 oNewStmt.asParams[oCurRef.iParam] = sSrcParam[0 : oCurRef.offParam] \
984 + oCurRef.sNewName \
985 + sSrcParam[oCurRef.offParam + len(oCurRef.sOrgRef) : ];
986
987 # Morph IEM_MC_CALC_RM_EFF_ADDR into IEM_MC_CALC_RM_EFF_ADDR_THREADED ...
988 if oNewStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
989 oNewStmt.sName = self.kdThreadedCalcRmEffAddrMcByVariation[self.sVariation];
990 assert len(oNewStmt.asParams) == 3;
991
992 if self.sVariation in self.kdVariationsWithFlatAddr16:
993 oNewStmt.asParams = [
994 oNewStmt.asParams[0], oNewStmt.asParams[1], self.dParamRefs['u16Disp'][0].sNewName,
995 ];
996 else:
997 sSibAndMore = self.dParamRefs['bSib'][0].sNewName; # Merge bSib and 2nd part of cbImmAndRspOffset.
998 if oStmt.asParams[2] not in ('0', '1', '2', '4'):
999 sSibAndMore = '(%s) | ((%s) & 0x0f00)' % (self.dParamRefs['bSib'][0].sNewName, oStmt.asParams[2]);
1000
1001 if self.sVariation in self.kdVariationsWithFlatAddr32No64:
1002 oNewStmt.asParams = [
1003 oNewStmt.asParams[0], oNewStmt.asParams[1], sSibAndMore, self.dParamRefs['u32Disp'][0].sNewName,
1004 ];
1005 else:
1006 oNewStmt.asParams = [
1007 oNewStmt.asParams[0], self.dParamRefs['bRmEx'][0].sNewName, sSibAndMore,
1008 self.dParamRefs['u32Disp'][0].sNewName, self.dParamRefs['cbInstr'][0].sNewName,
1009 ];
1010 # ... and IEM_MC_ADVANCE_RIP_AND_FINISH into *_THREADED_PCxx[_WITH_FLAGS] ...
1011 elif ( oNewStmt.sName
1012 in ('IEM_MC_ADVANCE_RIP_AND_FINISH',
1013 'IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH',
1014 'IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH', 'IEM_MC_SET_RIP_U64_AND_FINISH', )):
1015 if oNewStmt.sName not in ('IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH',
1016 'IEM_MC_SET_RIP_U64_AND_FINISH', ):
1017 oNewStmt.asParams.append(self.dParamRefs['cbInstr'][0].sNewName);
1018 if ( oNewStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH', )
1019 and self.sVariation not in self.kdVariationsOnlyPre386):
1020 oNewStmt.asParams.append(self.dParamRefs['pVCpu->iem.s.enmEffOpSize'][0].sNewName);
1021 oNewStmt.sName += '_THREADED';
1022 if self.sVariation in self.kdVariationsOnly64NoFlags:
1023 oNewStmt.sName += '_PC64';
1024 elif self.sVariation in self.kdVariationsOnly64WithFlags:
1025 oNewStmt.sName += '_PC64_WITH_FLAGS';
1026 elif self.sVariation in self.kdVariationsOnlyPre386NoFlags:
1027 oNewStmt.sName += '_PC16';
1028 elif self.sVariation in self.kdVariationsOnlyPre386WithFlags:
1029 oNewStmt.sName += '_PC16_WITH_FLAGS';
1030 elif self.sVariation not in self.kdVariationsWithEflagsCheckingAndClearing:
1031 assert self.sVariation != self.ksVariation_Default;
1032 oNewStmt.sName += '_PC32';
1033 else:
1034 oNewStmt.sName += '_PC32_WITH_FLAGS';
1035
1036 # This is making the wrong branch of conditionals break out of the TB.
1037 if (oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH',
1038 'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH')):
1039 sExitTbStatus = 'VINF_SUCCESS';
1040 if self.sVariation in self.kdVariationsWithConditional:
1041 if self.sVariation in self.kdVariationsWithConditionalNoJmp:
1042 if oStmt.sName != 'IEM_MC_ADVANCE_RIP_AND_FINISH':
1043 sExitTbStatus = 'VINF_IEM_REEXEC_BREAK';
1044 elif oStmt.sName == 'IEM_MC_ADVANCE_RIP_AND_FINISH':
1045 sExitTbStatus = 'VINF_IEM_REEXEC_BREAK';
1046 oNewStmt.asParams.append(sExitTbStatus);
1047
1048 # Insert an MC so we can assert the correctness of modified flags annotations on IEM_MC_REF_EFLAGS.
1049 if 'IEM_MC_ASSERT_EFLAGS' in dState:
1050 aoThreadedStmts.insert(len(aoThreadedStmts) - 1,
1051 iai.McStmtAssertEFlags(self.oParent.oMcBlock.oInstruction));
1052
1053 # ... and IEM_MC_*_GREG_U8 into *_THREADED w/ reworked index taking REX into account
1054 elif oNewStmt.sName.startswith('IEM_MC_') and oNewStmt.sName.find('_GREG_U8') > 0:
1055 (idxReg, _, sStdRef) = self.analyze8BitGRegStmt(oStmt); # Don't use oNewStmt as it has been modified!
1056 oNewStmt.asParams[idxReg] = self.dParamRefs[sStdRef][0].sNewName;
1057 oNewStmt.sName += '_THREADED';
1058
1059 # ... and IEM_MC_CALL_CIMPL_[0-5] and IEM_MC_DEFER_TO_CIMPL_[0-5]_RET into *_THREADED ...
1060 elif oNewStmt.sName.startswith('IEM_MC_CALL_CIMPL_') or oNewStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_'):
1061 oNewStmt.sName += '_THREADED';
1062 oNewStmt.idxFn += 1;
1063 oNewStmt.idxParams += 1;
1064 oNewStmt.asParams.insert(0, self.dParamRefs['cbInstr'][0].sNewName);
1065
1066 # ... and in FLAT modes we must morph memory access into FLAT accesses ...
1067 elif ( self.sVariation in self.kdVariationsWithFlatAddress
1068 and ( oNewStmt.sName.startswith('IEM_MC_FETCH_MEM')
1069 or (oNewStmt.sName.startswith('IEM_MC_STORE_MEM_') and oNewStmt.sName.find('_BY_REF') < 0)
1070 or oNewStmt.sName.startswith('IEM_MC_MEM_MAP') )):
1071 idxEffSeg = self.kdMemMcToFlatInfo[oNewStmt.sName][0];
1072 if idxEffSeg != -1:
1073 if ( oNewStmt.asParams[idxEffSeg].find('iEffSeg') < 0
1074 and oNewStmt.asParams[idxEffSeg] not in ('X86_SREG_ES', ) ):
1075 self.raiseProblem('Expected iEffSeg as param #%d to %s: %s'
1076 % (idxEffSeg + 1, oNewStmt.sName, oNewStmt.asParams[idxEffSeg],));
1077 oNewStmt.asParams.pop(idxEffSeg);
1078 oNewStmt.sName = self.kdMemMcToFlatInfo[oNewStmt.sName][1];
1079
1080 # ... PUSH and POP also needs flat variants, but these differ a little.
1081 elif ( self.sVariation in self.kdVariationsWithFlatStackAddress
1082 and ( (oNewStmt.sName.startswith('IEM_MC_PUSH') and oNewStmt.sName.find('_FPU') < 0)
1083 or oNewStmt.sName.startswith('IEM_MC_POP'))):
1084 oNewStmt.sName = self.kdMemMcToFlatInfoStack[oNewStmt.sName][int(self.sVariation in
1085 self.kdVariationsWithFlat64StackAddress)];
1086
1087 # Add EFLAGS usage annotations to relevant MCs.
1088 elif oNewStmt.sName in ('IEM_MC_COMMIT_EFLAGS', 'IEM_MC_REF_EFLAGS', 'IEM_MC_FETCH_EFLAGS'):
1089 oInstruction = self.oParent.oMcBlock.oInstruction;
1090 oNewStmt.sName += '_EX';
1091 oNewStmt.asParams.append(oInstruction.getTestedFlagsCStyle()); # Shall crash and burn if oInstruction is
1092 oNewStmt.asParams.append(oInstruction.getModifiedFlagsCStyle()); # None. Fix the IEM decoder code.
1093
1094 # For IEM_MC_REF_EFLAGS we need to emit an MC before the ..._FINISH
1095 if oNewStmt.sName == 'IEM_MC_REF_EFLAGS_EX':
1096 dState['IEM_MC_ASSERT_EFLAGS'] = True;
1097
1098 # Process branches of conditionals recursively.
1099 if isinstance(oStmt, iai.McStmtCond):
1100 (oNewStmt.aoIfBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoIfBranch, dState, iParamRef);
1101 if oStmt.aoElseBranch:
1102 (oNewStmt.aoElseBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoElseBranch,
1103 dState, iParamRef);
1104
1105 return (aoThreadedStmts, iParamRef);
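# Example of the morphing (informal): in the plain '_64' variation a bare
# IEM_MC_ADVANCE_RIP_AND_FINISH() is rewritten to
# IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC64(cbInstrP, VINF_SUCCESS), picking up the
# packed instruction-length parameter and the TB exit status.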
1106
1107
1108 def analyzeConsolidateThreadedParamRefs(self):
1109 """
1110 Consolidate threaded function parameter references into a dictionary
1111 with lists of the references to each variable/field.
1112 """
1113 # Gather unique parameters.
1114 self.dParamRefs = {};
1115 for oRef in self.aoParamRefs:
1116 if oRef.sStdRef not in self.dParamRefs:
1117 self.dParamRefs[oRef.sStdRef] = [oRef,];
1118 else:
1119 self.dParamRefs[oRef.sStdRef].append(oRef);
1120
1121 # Generate names for them for use in the threaded function.
1122 dParamNames = {};
1123 for sName, aoRefs in self.dParamRefs.items():
1124 # Morph the reference expression into a name.
1125 if sName.startswith('IEM_GET_MODRM_REG'): sName = 'bModRmRegP';
1126 elif sName.startswith('IEM_GET_MODRM_RM'): sName = 'bModRmRmP';
1127 elif sName.startswith('IEM_GET_MODRM_REG_8'): sName = 'bModRmReg8P';
1128 elif sName.startswith('IEM_GET_MODRM_RM_8'): sName = 'bModRmRm8P';
1129 elif sName.startswith('IEM_GET_EFFECTIVE_VVVV'): sName = 'bEffVvvvP';
1130 elif sName.find('.') >= 0 or sName.find('->') >= 0:
1131 sName = sName[max(sName.rfind('.'), sName.rfind('>')) + 1 : ] + 'P';
1132 else:
1133 sName += 'P';
1134
1135 # Ensure it's unique.
1136 if sName in dParamNames:
1137 for i in range(10):
1138 if sName + str(i) not in dParamNames:
1139 sName += str(i);
1140 break;
1141 dParamNames[sName] = True;
1142
1143 # Update all the references.
1144 for oRef in aoRefs:
1145 oRef.sNewName = sName;
1146
1147 # Organize them by size too for the purpose of optimizing them.
1148 dBySize = {} # type: Dict[int, List[str]]
1149 for sStdRef, aoRefs in self.dParamRefs.items():
1150 if aoRefs[0].sType[0] != 'P':
1151 cBits = g_kdTypeInfo[aoRefs[0].sType][0];
1152 assert(cBits <= 64);
1153 else:
1154 cBits = 64;
1155
1156 if cBits not in dBySize:
1157 dBySize[cBits] = [sStdRef,]
1158 else:
1159 dBySize[cBits].append(sStdRef);
1160
1161 # Pack the parameters as best as we can, starting with the largest ones
1162 # and ASSUMING a 64-bit parameter size.
1163 self.cMinParams = 0;
1164 offNewParam = 0;
1165 for cBits in sorted(dBySize.keys(), reverse = True):
1166 for sStdRef in dBySize[cBits]:
1167 if offNewParam == 0 or offNewParam + cBits > 64:
1168 self.cMinParams += 1;
1169 offNewParam = cBits;
1170 else:
1171 offNewParam += cBits;
1172 assert(offNewParam <= 64);
1173
1174 for oRef in self.dParamRefs[sStdRef]:
1175 oRef.iNewParam = self.cMinParams - 1;
1176 oRef.offNewParam = offNewParam - cBits;
1177
1178 # Currently there are a few that require 4 parameters; list these so we can figure out why:
1179 if self.cMinParams >= 4:
1180 print('debug: cMinParams=%s cRawParams=%s - %s:%d'
1181 % (self.cMinParams, len(self.dParamRefs), self.oParent.oMcBlock.sSrcFile, self.oParent.oMcBlock.iBeginLine,));
1182
1183 return True;
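# Packing example (informal): refs of 64, 32, 16 and 8 bits produce two parameters: the
# 64-bit ref occupies parameter 0 alone, while the 32-, 16- and 8-bit refs are packed into
# parameter 1 at bit offsets 0, 32 and 48.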
1184
1185 ksHexDigits = '0123456789abcdefABCDEF';
1186
1187 def analyzeFindThreadedParamRefs(self, aoStmts): # pylint: disable=too-many-statements
1188 """
1189 Scans the statements for things that have to be passed on to the threaded
1190 function (populates self.aoParamRefs).
1191 """
1192 for oStmt in aoStmts:
1193 # Some statements we can skip altogether.
1194 if isinstance(oStmt, iai.McCppPreProc):
1195 continue;
1196 if oStmt.isCppStmt() and oStmt.fDecode:
1197 continue;
1198 if oStmt.sName in ('IEM_MC_BEGIN',):
1199 continue;
1200
1201 if isinstance(oStmt, iai.McStmtVar):
1202 if oStmt.sValue is None:
1203 continue;
1204 aiSkipParams = { 0: True, 1: True, 3: True };
1205 else:
1206 aiSkipParams = {};
1207
1208 # Several statements have implicit parameters and some have different parameters.
1209 if oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH',
1210 'IEM_MC_REL_JMP_S32_AND_FINISH', 'IEM_MC_CALL_CIMPL_0', 'IEM_MC_CALL_CIMPL_1',
1211 'IEM_MC_CALL_CIMPL_2', 'IEM_MC_CALL_CIMPL_3', 'IEM_MC_CALL_CIMPL_4', 'IEM_MC_CALL_CIMPL_5',
1212 'IEM_MC_DEFER_TO_CIMPL_0_RET', 'IEM_MC_DEFER_TO_CIMPL_1_RET', 'IEM_MC_DEFER_TO_CIMPL_2_RET',
1213 'IEM_MC_DEFER_TO_CIMPL_3_RET', 'IEM_MC_DEFER_TO_CIMPL_4_RET', 'IEM_MC_DEFER_TO_CIMPL_5_RET', ):
1214 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)', 'uint4_t', oStmt, sStdRef = 'cbInstr'));
1215
1216 if ( oStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH',)
1217 and self.sVariation not in self.kdVariationsOnlyPre386):
1218 self.aoParamRefs.append(ThreadedParamRef('pVCpu->iem.s.enmEffOpSize', 'IEMMODE', oStmt));
1219
1220 if oStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
1221 # This is being pretty presumptive about bRm always being the RM byte...
1222 assert len(oStmt.asParams) == 3;
1223 assert oStmt.asParams[1] == 'bRm';
1224
1225 if self.sVariation in self.kdVariationsWithFlatAddr16:
1226 self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
1227 self.aoParamRefs.append(ThreadedParamRef('(uint16_t)uEffAddrInfo' ,
1228 'uint16_t', oStmt, sStdRef = 'u16Disp'));
1229 elif self.sVariation in self.kdVariationsWithFlatAddr32No64:
1230 self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
1231 self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
1232 'uint8_t', oStmt, sStdRef = 'bSib'));
1233 self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
1234 'uint32_t', oStmt, sStdRef = 'u32Disp'));
1235 else:
1236 assert self.sVariation in self.kdVariationsWithAddressOnly64;
1237 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_MODRM_EX(pVCpu, bRm)',
1238 'uint8_t', oStmt, sStdRef = 'bRmEx'));
1239 self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
1240 'uint8_t', oStmt, sStdRef = 'bSib'));
1241 self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
1242 'uint32_t', oStmt, sStdRef = 'u32Disp'));
1243 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)',
1244 'uint4_t', oStmt, sStdRef = 'cbInstr'));
1245 aiSkipParams[1] = True; # Skip the bRm parameter as it is being replaced by bRmEx.
1246
1247 # 8-bit register accesses needs to have their index argument reworked to take REX into account.
1248 if oStmt.sName.startswith('IEM_MC_') and oStmt.sName.find('_GREG_U8') > 0:
1249 (idxReg, sOrgRef, sStdRef) = self.analyze8BitGRegStmt(oStmt);
1250 self.aoParamRefs.append(ThreadedParamRef(sOrgRef, 'uint16_t', oStmt, idxReg, sStdRef = sStdRef));
1251 aiSkipParams[idxReg] = True; # Skip the parameter below.
1252
1253 # If in flat mode variation, ignore the effective segment parameter to memory MCs.
1254 if ( self.sVariation in self.kdVariationsWithFlatAddress
1255 and oStmt.sName in self.kdMemMcToFlatInfo
1256 and self.kdMemMcToFlatInfo[oStmt.sName][0] != -1):
1257 aiSkipParams[self.kdMemMcToFlatInfo[oStmt.sName][0]] = True;
1258
1259 # Inspect the target of calls to see if we need to pass down a
1260 # function pointer or function table pointer for it to work.
1261 if isinstance(oStmt, iai.McStmtCall):
1262 if oStmt.sFn[0] == 'p':
1263 self.aoParamRefs.append(ThreadedParamRef(oStmt.sFn, self.analyzeCallToType(oStmt.sFn), oStmt, oStmt.idxFn));
1264 elif ( oStmt.sFn[0] != 'i'
1265 and not oStmt.sFn.startswith('IEMTARGETCPU_EFL_BEHAVIOR_SELECT')
1266 and not oStmt.sFn.startswith('IEM_SELECT_HOST_OR_FALLBACK') ):
1267 self.raiseProblem('Bogus function name in %s: %s' % (oStmt.sName, oStmt.sFn,));
1268 aiSkipParams[oStmt.idxFn] = True;
1269
1270 # Skip the hint parameter (first) for IEM_MC_CALL_CIMPL_X.
1271 if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
1272 assert oStmt.idxFn == 2;
1273 aiSkipParams[0] = True;
1274
1275
1276 # Check all the parameters for bogus references.
1277 for iParam, sParam in enumerate(oStmt.asParams):
1278 if iParam not in aiSkipParams and sParam not in self.oParent.dVariables:
1279 # The parameter may contain a C expression, so we have to try
1280 # extract the relevant bits, i.e. variables and fields while
1281 # ignoring operators and parentheses.
1282 offParam = 0;
1283 while offParam < len(sParam):
1284 # Is it the start of a C identifier? If so, find the end, but don't stop on field separators (->, .).
1285 ch = sParam[offParam];
1286 if ch.isalpha() or ch == '_':
1287 offStart = offParam;
1288 offParam += 1;
1289 while offParam < len(sParam):
1290 ch = sParam[offParam];
1291 if not ch.isalnum() and ch != '_' and ch != '.':
1292 if ch != '-' or sParam[offParam + 1] != '>':
1293 # Special hack for the 'CTX_SUFF(pVM)' bit in pVCpu->CTX_SUFF(pVM)->xxxx:
1294 if ( ch == '('
1295 and sParam[offStart : offParam + len('(pVM)->')] == 'pVCpu->CTX_SUFF(pVM)->'):
1296 offParam += len('(pVM)->') - 1;
1297 else:
1298 break;
1299 offParam += 1;
1300 offParam += 1;
1301 sRef = sParam[offStart : offParam];
1302
1303 # For register references we pass the full register index instead, as macros
1304 # like IEM_GET_MODRM_REG implicitly reference pVCpu->iem.s.uRexReg, and the
1305 # threaded function will be more efficient if we just pass the register index
1306 # as a 4-bit param.
1307 if ( sRef.startswith('IEM_GET_MODRM')
1308 or sRef.startswith('IEM_GET_EFFECTIVE_VVVV') ):
1309 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1310 if sParam[offParam] != '(':
1311 self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
1312 (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
1313 if asMacroParams is None:
1314 self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
1315 offParam = offCloseParam + 1;
1316 self.aoParamRefs.append(ThreadedParamRef(sParam[offStart : offParam], 'uint8_t',
1317 oStmt, iParam, offStart));
1318
1319 # We can skip known variables.
1320 elif sRef in self.oParent.dVariables:
1321 pass;
1322
1323 # Skip certain macro invocations.
1324 elif sRef in ('IEM_GET_HOST_CPU_FEATURES',
1325 'IEM_GET_GUEST_CPU_FEATURES',
1326 'IEM_IS_GUEST_CPU_AMD',
1327 'IEM_IS_16BIT_CODE',
1328 'IEM_IS_32BIT_CODE',
1329 'IEM_IS_64BIT_CODE',
1330 ):
1331 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1332 if sParam[offParam] != '(':
1333 self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
1334 (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
1335 if asMacroParams is None:
1336 self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
1337 offParam = offCloseParam + 1;
1338
1339 # Skip any dereference following it, unless it's a predicate like IEM_IS_GUEST_CPU_AMD.
1340 if sRef not in ('IEM_IS_GUEST_CPU_AMD',
1341 'IEM_IS_16BIT_CODE',
1342 'IEM_IS_32BIT_CODE',
1343 'IEM_IS_64BIT_CODE',
1344 ):
1345 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1346 if offParam + 2 <= len(sParam) and sParam[offParam : offParam + 2] == '->':
1347 offParam = iai.McBlock.skipSpacesAt(sParam, offParam + 2, len(sParam));
1348 while offParam < len(sParam) and (sParam[offParam].isalnum() or sParam[offParam] in '_.'):
1349 offParam += 1;
1350
1351 # Skip constants, globals, types (casts), sizeof and macros.
1352 elif ( sRef.startswith('IEM_OP_PRF_')
1353 or sRef.startswith('IEM_ACCESS_')
1354 or sRef.startswith('IEMINT_')
1355 or sRef.startswith('X86_GREG_')
1356 or sRef.startswith('X86_SREG_')
1357 or sRef.startswith('X86_EFL_')
1358 or sRef.startswith('X86_FSW_')
1359 or sRef.startswith('X86_FCW_')
1360 or sRef.startswith('X86_XCPT_')
1361 or sRef.startswith('IEMMODE_')
1362 or sRef.startswith('IEM_F_')
1363 or sRef.startswith('IEM_CIMPL_F_')
1364 or sRef.startswith('g_')
1365 or sRef.startswith('iemAImpl_')
1366 or sRef.startswith('kIemNativeGstReg_')
1367 or sRef in ( 'int8_t', 'int16_t', 'int32_t', 'int64_t',
1368 'INT8_C', 'INT16_C', 'INT32_C', 'INT64_C',
1369 'UINT8_C', 'UINT16_C', 'UINT32_C', 'UINT64_C',
1370 'UINT8_MAX', 'UINT16_MAX', 'UINT32_MAX', 'UINT64_MAX',
1371 'INT8_MAX', 'INT16_MAX', 'INT32_MAX', 'INT64_MAX',
1372 'INT8_MIN', 'INT16_MIN', 'INT32_MIN', 'INT64_MIN',
1373 'sizeof', 'NOREF', 'RT_NOREF', 'IEMMODE_64BIT',
1374 'RT_BIT_32', 'RT_BIT_64', 'true', 'false',
1375 'NIL_RTGCPTR',) ):
1376 pass;
1377
1378 # Anything else that looks like a plain variable (no field access, not pVCpu),
1379 # as well as the decoder fields in IEMCPU, will need to be parameterized.
1380 elif ( ( '.' not in sRef
1381 and '-' not in sRef
1382 and sRef not in ('pVCpu', ) )
1383 or iai.McBlock.koReIemDecoderVars.search(sRef) is not None):
1384 self.aoParamRefs.append(ThreadedParamRef(sRef, self.analyzeReferenceToType(sRef),
1385 oStmt, iParam, offStart));
1386 # Number.
1387 elif ch.isdigit():
1388 if ( ch == '0'
1389 and offParam + 2 <= len(sParam)
1390 and sParam[offParam + 1] in 'xX'
1391 and sParam[offParam + 2] in self.ksHexDigits ):
1392 offParam += 2;
1393 while offParam < len(sParam) and sParam[offParam] in self.ksHexDigits:
1394 offParam += 1;
1395 else:
1396 while offParam < len(sParam) and sParam[offParam].isdigit():
1397 offParam += 1;
1398 # Comment?
1399 elif ( ch == '/'
1400 and offParam + 4 <= len(sParam)
1401 and sParam[offParam + 1] == '*'):
1402 offParam += 2;
1403 offNext = sParam.find('*/', offParam);
1404 if offNext < offParam:
1405 self.raiseProblem('Unable to find "*/" in "%s" ("%s")' % (sRef, oStmt.renderCode(),));
1406 offParam = offNext + 2;
1407 # Whatever else.
1408 else:
1409 offParam += 1;
1410
1411 # Traverse the branches of conditionals.
1412 if isinstance(oStmt, iai.McStmtCond):
1413 self.analyzeFindThreadedParamRefs(oStmt.aoIfBranch);
1414 self.analyzeFindThreadedParamRefs(oStmt.aoElseBranch);
1415 return True;
1416
1417 def analyzeVariation(self, aoStmts):
1418 """
1419 2nd part of the analysis, done on each variation.
1420
1421 The variations may differ in parameter requirements and will end up with
1422 slightly different MC sequences. Thus this is done on each individually.
1423
1424 Returns dummy True - raises exception on trouble.
1425 """
1426 # Now scan the code for variables and field references that need to
1427 # be passed to the threaded function because they are related to the
1428 # instruction decoding.
1429 self.analyzeFindThreadedParamRefs(aoStmts);
1430 self.analyzeConsolidateThreadedParamRefs();
1431
1432 # Morph the statement stream for the block into what we'll be using in the threaded function.
1433 (self.aoStmtsForThreadedFunction, iParamRef) = self.analyzeMorphStmtForThreaded(aoStmts, {});
1434 if iParamRef != len(self.aoParamRefs):
1435 raise Exception('iParamRef=%s, expected %s!' % (iParamRef, len(self.aoParamRefs),));
1436
1437 return True;
1438
1439 def emitThreadedCallStmts(self, cchIndent, sCallVarNm = None):
1440 """
1441 Produces generic C++ statements that emit a call to the threaded function
1442 variation and any subsequent checks that may be necessary after that.
1443
1444 The sCallVarNm is the name of the variable with the threaded function
1445 to call. This is for the case where all the variations have the same
1446 parameters and only the threaded function number differs.
1447 """
1448 aoStmts = [
1449 iai.McCppCall('IEM_MC2_BEGIN_EMIT_CALLS',
1450 ['1' if 'IEM_CIMPL_F_CHECK_IRQ_BEFORE' in self.oParent.dsCImplFlags else '0'],
1451 cchIndent = cchIndent), # Scope and a hook for various stuff.
1452 ];
1453
1454 # The call to the threaded function.
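# Each of the (up to) three uint64_t arguments may carry several packed
# sub-values, e.g. roughly '(uint64_t)(bRm) | ((uint64_t)(u32Disp) << 8)'
# (illustrative only - the actual packing depends on dParamRefs).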
1455 asCallArgs = [ self.getIndexName() if not sCallVarNm else sCallVarNm, ];
1456 for iParam in range(self.cMinParams):
1457 asFrags = [];
1458 for aoRefs in self.dParamRefs.values():
1459 oRef = aoRefs[0];
1460 if oRef.iNewParam == iParam:
1461 sCast = '(uint64_t)';
1462 if oRef.sType in ('int8_t', 'int16_t', 'int32_t'): # Make sure these don't get sign-extended.
1463 sCast = '(uint64_t)(u' + oRef.sType + ')';
1464 if oRef.offNewParam == 0:
1465 asFrags.append(sCast + '(' + oRef.sOrgRef + ')');
1466 else:
1467 asFrags.append('(%s(%s) << %s)' % (sCast, oRef.sOrgRef, oRef.offNewParam));
1468 assert asFrags;
1469 asCallArgs.append(' | '.join(asFrags));
1470
1471 aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_%s' % (len(asCallArgs) - 1,), asCallArgs, cchIndent = cchIndent));
1472
1473 # 2023-11-28: This has to be done AFTER the CIMPL call, so we have to
1474 # emit this mode check from the compilation loop. On the
1475 # plus side, this means we eliminate an unnecessary call at
1476 # the end of the TB. :-)
1477 ## For CIMPL stuff, we need to consult the associated IEM_CIMPL_F_XXX
1478 ## mask and maybe emit additional checks.
1479 #if ( 'IEM_CIMPL_F_MODE' in self.oParent.dsCImplFlags
1480 # or 'IEM_CIMPL_F_XCPT' in self.oParent.dsCImplFlags
1481 # or 'IEM_CIMPL_F_VMEXIT' in self.oParent.dsCImplFlags):
1482 # aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_1', ( 'kIemThreadedFunc_BltIn_CheckMode', 'pVCpu->iem.s.fExec', ),
1483 # cchIndent = cchIndent));
1484
1485 sCImplFlags = ' | '.join(self.oParent.dsCImplFlags.keys());
1486 if not sCImplFlags:
1487 sCImplFlags = '0';
1488 aoStmts.append(iai.McCppCall('IEM_MC2_END_EMIT_CALLS', ( sCImplFlags, ), cchIndent = cchIndent)); # For closing the scope.
1489
1490 # Emit fEndTb = true or fTbBranched = true if any of the CIMPL flags
1491 # indicates we should do so.
1492 # Note! iemThreadedRecompilerMcDeferToCImpl0 duplicates work done here.
1493 asEndTbFlags = [];
1494 asTbBranchedFlags = [];
1495 for sFlag in self.oParent.dsCImplFlags:
1496 if self.kdCImplFlags[sFlag] is True:
1497 asEndTbFlags.append(sFlag);
1498 elif sFlag.startswith('IEM_CIMPL_F_BRANCH_'):
1499 asTbBranchedFlags.append(sFlag);
1500 if ( asTbBranchedFlags
1501 and ( 'IEM_CIMPL_F_BRANCH_CONDITIONAL' not in asTbBranchedFlags
1502 or self.sVariation not in self.kdVariationsWithConditionalNoJmp)):
1503 aoStmts.append(iai.McCppGeneric('iemThreadedSetBranched(pVCpu, %s);'
1504 % ((' | '.join(asTbBranchedFlags)).replace('IEM_CIMPL_F_BRANCH', 'IEMBRANCHED_F'),),
1505 cchIndent = cchIndent)); # Inline fn saves ~2 seconds for gcc 13/dbg (1m13s vs 1m15s).
1506 if asEndTbFlags:
1507 aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.fEndTb = true; /* %s */' % (','.join(asEndTbFlags),),
1508 cchIndent = cchIndent));
1509
1510 if 'IEM_CIMPL_F_CHECK_IRQ_AFTER' in self.oParent.dsCImplFlags:
1511 aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.cInstrTillIrqCheck = 0;', cchIndent = cchIndent));
1512
1513 return aoStmts;
1514
1515
1516class ThreadedFunction(object):
1517 """
1518 A threaded function.
1519 """
1520
1521 def __init__(self, oMcBlock: iai.McBlock) -> None:
1522 self.oMcBlock = oMcBlock # type: iai.McBlock
1523 # The remaining fields are only useful after analyze() has been called:
1524 ## Variations for this block. There is at least one.
1525 self.aoVariations = [] # type: List[ThreadedFunctionVariation]
1526 ## Variation dictionary containing the same as aoVariations.
1527 self.dVariations = {} # type: Dict[str, ThreadedFunctionVariation]
1528 ## Dictionary of local variables (IEM_MC_LOCAL[_CONST]) and call arguments (IEM_MC_ARG*).
1529 self.dVariables = {} # type: Dict[str, iai.McStmtVar]
1530 ## Dictionary with any IEM_CIMPL_F_XXX flags explicitly advertised in the code block
1531 ## and those determined by analyzeCodeOperation().
1532 self.dsCImplFlags = {} # type: Dict[str, bool]
1533
1534 @staticmethod
1535 def dummyInstance():
1536 """ Gets a dummy instance. """
1537 return ThreadedFunction(iai.McBlock('null', 999999999, 999999999,
1538 iai.DecoderFunction('null', 999999999, 'nil', ('','')), 999999999));
1539
1540 def hasWithFlagsCheckingAndClearingVariation(self):
1541 """
1542 Check if there are one or more variations with EFLAGS checking and
1543 clearing for this threaded function.
1544 """
1545 for sVarWithFlags in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
1546 if sVarWithFlags in self.dVariations:
1547 return True;
1548 return False;
1549
1550 #
1551 # Analysis and code morphing.
1552 #
1553
1554 def raiseProblem(self, sMessage):
1555 """ Raises a problem. """
1556 raise Exception('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1557
1558 def error(self, sMessage, oGenerator):
1559 """ Emits an error via the generator object, causing it to fail. """
1560 oGenerator.rawError('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1561
1562 def warning(self, sMessage):
1563 """ Emits a warning. """
1564 print('%s:%s: warning: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1565
1566 def analyzeFindVariablesAndCallArgs(self, aoStmts: List[iai.McStmt]) -> bool:
1567 """ Scans the statements for MC variables and call arguments. """
1568 for oStmt in aoStmts:
1569 if isinstance(oStmt, iai.McStmtVar):
1570 if oStmt.sVarName in self.dVariables:
1571 raise Exception('Variable %s is defined more than once!' % (oStmt.sVarName,));
1572 self.dVariables[oStmt.sVarName] = oStmt.sVarName;
1573
1574 # There shouldn't be any variables or arguments declared inside if/
1575 # else blocks, but scan them too to be on the safe side.
1576 if isinstance(oStmt, iai.McStmtCond):
1577 #cBefore = len(self.dVariables);
1578 self.analyzeFindVariablesAndCallArgs(oStmt.aoIfBranch);
1579 self.analyzeFindVariablesAndCallArgs(oStmt.aoElseBranch);
1580 #if len(self.dVariables) != cBefore:
1581 # raise Exception('Variables/arguments defined in conditional branches!');
1582 return True;
1583
1584 def analyzeCodeOperation(self, aoStmts: List[iai.McStmt], dEflStmts, fSeenConditional = False) -> bool:
1585 """
1586 Analyzes the code looking for clues as to additional side-effects.
1587
1588 Currently this is simply looking for branching and adding the relevant
1589 branch flags to dsCImplFlags. ASSUMES the caller pre-populates the
1590 dictionary with a copy of self.oMcBlock.dsCImplFlags.
1591
1592 This also sets McStmtCond.oIfBranchAnnotation & McStmtCond.oElseBranchAnnotation.
1593
1594 Returns annotation on return style.
1595 """
1596 sAnnotation = None;
1597 for oStmt in aoStmts:
1598 # Set the IEM_CIMPL_F_BRANCH_XXX flags if we see any branching MCs.
1599 if oStmt.sName.startswith('IEM_MC_SET_RIP'):
1600 assert not fSeenConditional;
1601 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_INDIRECT'] = True;
1602 elif oStmt.sName.startswith('IEM_MC_REL_JMP'):
1603 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_RELATIVE'] = True;
1604 if fSeenConditional:
1605 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_CONDITIONAL'] = True;
1606
1607 # Check for CIMPL and AIMPL calls.
1608 if oStmt.sName.startswith('IEM_MC_CALL_'):
1609 if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
1610 self.dsCImplFlags['IEM_CIMPL_F_CALLS_CIMPL'] = True;
1611 elif ( oStmt.sName.startswith('IEM_MC_CALL_VOID_AIMPL_')
1612 or oStmt.sName.startswith('IEM_MC_CALL_AIMPL_')
1613 or oStmt.sName.startswith('IEM_MC_CALL_AVX_AIMPL_')):
1614 self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL'] = True;
1615 elif ( oStmt.sName.startswith('IEM_MC_CALL_SSE_AIMPL_')
1616 or oStmt.sName.startswith('IEM_MC_CALL_MMX_AIMPL_')
1617 or oStmt.sName.startswith('IEM_MC_CALL_FPU_AIMPL_')):
1618 self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE'] = True;
1619 else:
1620 raise Exception('Unknown IEM_MC_CALL_* statement: %s' % (oStmt.sName,));
1621
1622 # Check for return statements.
1623 if oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH',):
1624 assert sAnnotation is None;
1625 sAnnotation = g_ksFinishAnnotation_Advance;
1626 elif oStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH',
1627 'IEM_MC_REL_JMP_S32_AND_FINISH',):
1628 assert sAnnotation is None;
1629 sAnnotation = g_ksFinishAnnotation_RelJmp;
1630 elif oStmt.sName in ('IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH',
1631 'IEM_MC_SET_RIP_U64_AND_FINISH',):
1632 assert sAnnotation is None;
1633 sAnnotation = g_ksFinishAnnotation_SetJmp;
1634 elif oStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_'):
1635 assert sAnnotation is None;
1636 sAnnotation = g_ksFinishAnnotation_DeferToCImpl;
1637
1638 # Collect MCs working on EFLAGS. Caller will check this.
1639 if oStmt.sName in ('IEM_MC_FETCH_EFLAGS', 'IEM_MC_FETCH_EFLAGS_U8', 'IEM_MC_COMMIT_EFLAGS', 'IEM_MC_REF_EFLAGS',
1640 'IEM_MC_ARG_LOCAL_EFLAGS', ):
1641 dEflStmts[oStmt.sName] = oStmt;
1642 elif isinstance(oStmt, iai.McStmtCall):
1643 if oStmt.sName in ('IEM_MC_CALL_CIMPL_0', 'IEM_MC_CALL_CIMPL_1', 'IEM_MC_CALL_CIMPL_2',
1644 'IEM_MC_CALL_CIMPL_3', 'IEM_MC_CALL_CIMPL_4', 'IEM_MC_CALL_CIMPL_5',):
1645 if ( oStmt.asParams[0].find('IEM_CIMPL_F_RFLAGS') >= 0
1646 or oStmt.asParams[0].find('IEM_CIMPL_F_STATUS_FLAGS') >= 0):
1647 dEflStmts[oStmt.sName] = oStmt;
1648
1649 # Process branches of conditionals recursively.
1650 if isinstance(oStmt, iai.McStmtCond):
1651 oStmt.oIfBranchAnnotation = self.analyzeCodeOperation(oStmt.aoIfBranch, dEflStmts, True);
1652 if oStmt.aoElseBranch:
1653 oStmt.oElseBranchAnnotation = self.analyzeCodeOperation(oStmt.aoElseBranch, dEflStmts, True);
1654
1655 return sAnnotation;
1656
1657 def analyze(self, oGenerator):
1658 """
1659 Analyzes the code, identifying the number of parameters it requires and such.
1660
1661 Returns dummy True - raises exception on trouble.
1662 """
1663
1664 # Check the block for errors before we proceed (will decode it).
1665 asErrors = self.oMcBlock.check();
1666 if asErrors:
1667 raise Exception('\n'.join(['%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sError, )
1668 for sError in asErrors]));
1669
1670 # Decode the block into a list/tree of McStmt objects.
1671 aoStmts = self.oMcBlock.decode();
1672
1673 # Scan the statements for local variables and call arguments (self.dVariables).
1674 self.analyzeFindVariablesAndCallArgs(aoStmts);
1675
1676 # Scan the code for IEM_CIMPL_F_ and other clues.
1677 self.dsCImplFlags = self.oMcBlock.dsCImplFlags.copy();
1678 dEflStmts = {};
1679 self.analyzeCodeOperation(aoStmts, dEflStmts);
1680 if ( ('IEM_CIMPL_F_CALLS_CIMPL' in self.dsCImplFlags)
1681 + ('IEM_CIMPL_F_CALLS_AIMPL' in self.dsCImplFlags)
1682 + ('IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE' in self.dsCImplFlags) > 1):
1683 self.error('Mixing CIMPL/AIMPL/AIMPL_WITH_FXSTATE calls', oGenerator);
1684
1685 # Analyse EFLAGS related MCs and @opflmodify and friends.
1686 if dEflStmts:
1687 oInstruction = self.oMcBlock.oInstruction; # iai.Instruction
1688 if ( oInstruction is None
1689 or (oInstruction.asFlTest is None and oInstruction.asFlModify is None)):
1690 sMcNames = '+'.join(dEflStmts.keys());
1691 if len(dEflStmts) != 1 or not sMcNames.startswith('IEM_MC_CALL_CIMPL_'): # Hack for far calls
1692 self.error('Uses %s but has no @opflmodify, @opfltest or @opflclass with details!' % (sMcNames,), oGenerator);
1693 elif 'IEM_MC_COMMIT_EFLAGS' in dEflStmts:
1694 if not oInstruction.asFlModify:
1695 if oInstruction.sMnemonic not in [ 'not', ]:
1696 self.error('Uses IEM_MC_COMMIT_EFLAGS but has no flags in @opflmodify!', oGenerator);
1697 elif ( 'IEM_MC_CALL_CIMPL_0' in dEflStmts
1698 or 'IEM_MC_CALL_CIMPL_1' in dEflStmts
1699 or 'IEM_MC_CALL_CIMPL_2' in dEflStmts
1700 or 'IEM_MC_CALL_CIMPL_3' in dEflStmts
1701 or 'IEM_MC_CALL_CIMPL_4' in dEflStmts
1702 or 'IEM_MC_CALL_CIMPL_5' in dEflStmts ):
1703 if not oInstruction.asFlModify:
1704 self.error('Uses IEM_MC_CALL_CIMPL_x or IEM_MC_DEFER_TO_CIMPL_5_RET with IEM_CIMPL_F_STATUS_FLAGS '
1705 'or IEM_CIMPL_F_RFLAGS but has no flags in @opflmodify!', oGenerator);
1706 elif 'IEM_MC_REF_EFLAGS' not in dEflStmts:
1707 if not oInstruction.asFlTest:
1708 if oInstruction.sMnemonic not in [ 'not', ]:
1709 self.error('Expected @opfltest!', oGenerator);
1710 if oInstruction and oInstruction.asFlSet:
1711 for sFlag in oInstruction.asFlSet:
1712 if sFlag not in oInstruction.asFlModify:
1713 self.error('"%s" in @opflset but missing from @opflmodify (%s)!'
1714 % (sFlag, ', '.join(oInstruction.asFlModify)), oGenerator);
1715 if oInstruction and oInstruction.asFlClear:
1716 for sFlag in oInstruction.asFlClear:
1717 if sFlag not in oInstruction.asFlModify:
1718 self.error('"%s" in @opflclear but missing from @opflmodify (%s)!'
1719 % (sFlag, ', '.join(oInstruction.asFlModify)), oGenerator);
1720
1721 # Create variations as needed.
1722 if iai.McStmt.findStmtByNames(aoStmts,
1723 { 'IEM_MC_DEFER_TO_CIMPL_0_RET': True,
1724 'IEM_MC_DEFER_TO_CIMPL_1_RET': True,
1725 'IEM_MC_DEFER_TO_CIMPL_2_RET': True,
1726 'IEM_MC_DEFER_TO_CIMPL_3_RET': True, }):
1727 asVariations = (ThreadedFunctionVariation.ksVariation_Default,);
1728
1729 elif iai.McStmt.findStmtByNames(aoStmts, { 'IEM_MC_CALC_RM_EFF_ADDR' : True,
1730 'IEM_MC_FETCH_MEM_U8' : True, # mov_AL_Ob ++
1731 'IEM_MC_FETCH_MEM_U16' : True, # mov_rAX_Ov ++
1732 'IEM_MC_FETCH_MEM_U32' : True,
1733 'IEM_MC_FETCH_MEM_U64' : True,
1734 'IEM_MC_STORE_MEM_U8' : True, # mov_Ob_AL ++
1735 'IEM_MC_STORE_MEM_U16' : True, # mov_Ov_rAX ++
1736 'IEM_MC_STORE_MEM_U32' : True,
1737 'IEM_MC_STORE_MEM_U64' : True, }):
1738 if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
1739 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressOnly64;
1740 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
1741 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286Not64;
1742 elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
1743 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286;
1744 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
1745 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot64;
1746 elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
1747 asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
1748 else:
1749 asVariations = ThreadedFunctionVariation.kasVariationsWithAddress;
1750 else:
1751 if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
1752 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressOnly64;
1753 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
1754 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286Not64;
1755 elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
1756 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286;
1757 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
1758 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot64;
1759 elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
1760 asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
1761 else:
1762 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddress;
1763
1764 if ( 'IEM_CIMPL_F_BRANCH_CONDITIONAL' in self.dsCImplFlags
1765 and 'IEM_CIMPL_F_BRANCH_RELATIVE' in self.dsCImplFlags): # (latter to avoid iemOp_into)
1766 assert set(asVariations).issubset(ThreadedFunctionVariation.kasVariationsWithoutAddress), \
1767 '%s: vars=%s McFlags=%s' % (self.oMcBlock.oFunction.sName, asVariations, self.oMcBlock.dsMcFlags);
1768 asVariationsBase = asVariations;
1769 asVariations = [];
1770 for sVariation in asVariationsBase:
1771 asVariations.extend([sVariation + '_Jmp', sVariation + '_NoJmp']);
1772 assert set(asVariations).issubset(ThreadedFunctionVariation.kdVariationsWithConditional);
1773
1774 if not iai.McStmt.findStmtByNames(aoStmts,
1775 { 'IEM_MC_ADVANCE_RIP_AND_FINISH': True,
1776 'IEM_MC_REL_JMP_S8_AND_FINISH': True,
1777 'IEM_MC_REL_JMP_S16_AND_FINISH': True,
1778 'IEM_MC_REL_JMP_S32_AND_FINISH': True,
1779 'IEM_MC_SET_RIP_U16_AND_FINISH': True,
1780 'IEM_MC_SET_RIP_U32_AND_FINISH': True,
1781 'IEM_MC_SET_RIP_U64_AND_FINISH': True,
1782 }):
1783 asVariations = [sVariation for sVariation in asVariations
1784 if sVariation not in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing];
1785
1786 self.aoVariations = [ThreadedFunctionVariation(self, sVar) for sVar in asVariations];
1787
1788 # Dictionary variant of the list.
1789 self.dVariations = { oVar.sVariation: oVar for oVar in self.aoVariations };
1790
1791 # Continue the analysis on each variation.
1792 for oVariation in self.aoVariations:
1793 oVariation.analyzeVariation(aoStmts);
1794
1795 return True;
1796
1797 ## Used by emitThreadedCallStmts.
1798 kdVariationsWithNeedForPrefixCheck = {
1799 ThreadedFunctionVariation.ksVariation_64_Addr32: True,
1800 ThreadedFunctionVariation.ksVariation_64f_Addr32: True,
1801 ThreadedFunctionVariation.ksVariation_64_FsGs: True,
1802 ThreadedFunctionVariation.ksVariation_64f_FsGs: True,
1803 ThreadedFunctionVariation.ksVariation_32_Addr16: True,
1804 ThreadedFunctionVariation.ksVariation_32f_Addr16: True,
1805 ThreadedFunctionVariation.ksVariation_32_Flat: True,
1806 ThreadedFunctionVariation.ksVariation_32f_Flat: True,
1807 ThreadedFunctionVariation.ksVariation_16_Addr32: True,
1808 ThreadedFunctionVariation.ksVariation_16f_Addr32: True,
1809 };
1810
1811 def emitThreadedCallStmts(self, sBranch = None): # pylint: disable=too-many-statements
1812 """
1813 Worker for morphInputCode that returns a list of statements that emit
1814 the call to the threaded functions for the block.
1815
1816 The sBranch parameter is used with conditional branches where we'll emit
1817 different threaded calls depending on whether we're in the jump-taken or
1818 no-jump code path.
1819 """
1820 # Special case for only default variation:
1821 if len(self.aoVariations) == 1 and self.aoVariations[0].sVariation == ThreadedFunctionVariation.ksVariation_Default:
1822 assert not sBranch;
1823 return self.aoVariations[0].emitThreadedCallStmts(0);
1824
1825 #
1826 # Case statement sub-class.
1827 #
1828 dByVari = self.dVariations;
1829 #fDbg = self.oMcBlock.sFunction == 'iemOpCommonPushSReg';
1830 class Case:
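    """
    Helper describing one case label of the mode switch, optionally bound to a
    variation whose threaded call statements make up the case body (sVarNm=None
    means a fall-through label).
    """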
1831 def __init__(self, sCond, sVarNm = None):
1832 self.sCond = sCond;
1833 self.sVarNm = sVarNm;
1834 self.oVar = dByVari[sVarNm] if sVarNm else None;
1835 self.aoBody = self.oVar.emitThreadedCallStmts(8) if sVarNm else None;
1836
1837 def toCode(self):
1838 aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
1839 if self.aoBody:
1840 aoStmts.extend(self.aoBody);
1841 aoStmts.append(iai.McCppGeneric('break;', cchIndent = 8));
1842 return aoStmts;
1843
1844 def toFunctionAssignment(self):
1845 aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
1846 if self.aoBody:
1847 aoStmts.extend([
1848 iai.McCppGeneric('enmFunction = %s;' % (self.oVar.getIndexName(),), cchIndent = 8),
1849 iai.McCppGeneric('break;', cchIndent = 8),
1850 ]);
1851 return aoStmts;
1852
1853 def isSame(self, oThat):
1854 if not self.aoBody: # fall thru always matches.
1855 return True;
1856 if len(self.aoBody) != len(oThat.aoBody):
1857 #if fDbg: print('dbg: body len diff: %s vs %s' % (len(self.aoBody), len(oThat.aoBody),));
1858 return False;
1859 for iStmt, oStmt in enumerate(self.aoBody):
1860 oThatStmt = oThat.aoBody[iStmt] # type: iai.McStmt
1861 assert isinstance(oStmt, iai.McCppGeneric);
1862 assert not isinstance(oStmt, iai.McStmtCond);
1863 if isinstance(oStmt, iai.McStmtCond):
1864 return False;
1865 if oStmt.sName != oThatStmt.sName:
1866 #if fDbg: print('dbg: stmt #%s name: %s vs %s' % (iStmt, oStmt.sName, oThatStmt.sName,));
1867 return False;
1868 if len(oStmt.asParams) != len(oThatStmt.asParams):
1869 #if fDbg: print('dbg: stmt #%s param count: %s vs %s'
1870 # % (iStmt, len(oStmt.asParams), len(oThatStmt.asParams),));
1871 return False;
1872 for iParam, sParam in enumerate(oStmt.asParams):
1873 if ( sParam != oThatStmt.asParams[iParam]
1874 and ( iParam != 1
1875 or not isinstance(oStmt, iai.McCppCall)
1876 or not oStmt.asParams[0].startswith('IEM_MC2_EMIT_CALL_')
1877 or sParam != self.oVar.getIndexName()
1878 or oThatStmt.asParams[iParam] != oThat.oVar.getIndexName() )):
1879 #if fDbg: print('dbg: stmt #%s, param #%s: %s vs %s'
1880 # % (iStmt, iParam, sParam, oThatStmt.asParams[iParam],));
1881 return False;
1882 return True;
1883
1884 #
1885 # Determine what we're switching on.
1886 # This ASSUMES that (IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7!
1887 #
1888 fSimple = True;
1889 sSwitchValue = '(pVCpu->iem.s.fExec & (IEM_F_MODE_CPUMODE_MASK | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK))';
1890 if dByVari.keys() & self.kdVariationsWithNeedForPrefixCheck.keys():
1891 sSwitchValue += ' | (pVCpu->iem.s.enmEffAddrMode == (pVCpu->iem.s.fExec & IEM_F_MODE_CPUMODE_MASK) ? 0 : 8)';
1892 # Accesses via FS, GS and CS go through the non-FLAT functions. (CS
1893 # is not writable in 32-bit mode (at least), thus the penalty mode
1894 # for any accesses via it (simpler this way).)
1895 sSwitchValue += ' | (pVCpu->iem.s.iEffSeg < X86_SREG_FS && pVCpu->iem.s.iEffSeg != X86_SREG_CS ? 0 : 16)';
1896 fSimple = False; # threaded functions.
1897 if dByVari.keys() & ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
1898 sSwitchValue += ' | ((pVCpu->iem.s.fTbPrevInstr & (IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_INHIBIT_SHADOW)) || ' \
1899 + '(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_MASK) ? 32 : 0)';
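# Rough summary of the switch value constructed above: bits 0-2 hold the CPU
# mode / flat-or-pre-386 state, bit 3 (8) flags an address-size prefix, bit 4
# (16) flags an FS/GS/CS effective segment, and bit 5 (32) flags that an
# EFLAGS / pending-breakpoint check is needed (each only when the relevant
# variations exist).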
1900
1901 #
1902 # Generate the case statements.
1903 #
1904 # pylintx: disable=x
1905 aoCases = [];
1906 if ThreadedFunctionVariation.ksVariation_64_Addr32 in dByVari:
1907 assert not fSimple and not sBranch;
1908 aoCases.extend([
1909 Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64),
1910 Case('IEMMODE_64BIT | 16', ThrdFnVar.ksVariation_64_FsGs),
1911 Case('IEMMODE_64BIT | 8 | 16', None), # fall thru
1912 Case('IEMMODE_64BIT | 8', ThrdFnVar.ksVariation_64_Addr32),
1913 ]);
1914 if ThreadedFunctionVariation.ksVariation_64f_Addr32 in dByVari:
1915 aoCases.extend([
1916 Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f),
1917 Case('IEMMODE_64BIT | 32 | 16', ThrdFnVar.ksVariation_64f_FsGs),
1918 Case('IEMMODE_64BIT | 32 | 8 | 16', None), # fall thru
1919 Case('IEMMODE_64BIT | 32 | 8', ThrdFnVar.ksVariation_64f_Addr32),
1920 ]);
1921 elif ThrdFnVar.ksVariation_64 in dByVari:
1922 assert fSimple and not sBranch;
1923 aoCases.append(Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64));
1924 if ThreadedFunctionVariation.ksVariation_64f in dByVari:
1925 aoCases.append(Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f));
1926 elif ThrdFnVar.ksVariation_64_Jmp in dByVari:
1927 assert fSimple and sBranch;
1928 aoCases.append(Case('IEMMODE_64BIT',
1929 ThrdFnVar.ksVariation_64_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64_NoJmp));
1930 if ThreadedFunctionVariation.ksVariation_64f_Jmp in dByVari:
1931 aoCases.append(Case('IEMMODE_64BIT | 32',
1932 ThrdFnVar.ksVariation_64f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64f_NoJmp));
1933
1934 if ThrdFnVar.ksVariation_32_Addr16 in dByVari:
1935 assert not fSimple and not sBranch;
1936 aoCases.extend([
1937 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_32_Flat),
1938 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None), # fall thru
1939 Case('IEMMODE_32BIT | 16', None), # fall thru
1940 Case('IEMMODE_32BIT', ThrdFnVar.ksVariation_32),
1941 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8', None), # fall thru
1942 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8 | 16',None), # fall thru
1943 Case('IEMMODE_32BIT | 8 | 16',None), # fall thru
1944 Case('IEMMODE_32BIT | 8', ThrdFnVar.ksVariation_32_Addr16),
1945 ]);
1946 if ThrdFnVar.ksVariation_32f_Addr16 in dByVari:
1947 aoCases.extend([
1948 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_32f_Flat),
1949 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None), # fall thru
1950 Case('IEMMODE_32BIT | 32 | 16', None), # fall thru
1951 Case('IEMMODE_32BIT | 32', ThrdFnVar.ksVariation_32f),
1952 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8', None), # fall thru
1953 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8 | 16',None), # fall thru
1954 Case('IEMMODE_32BIT | 32 | 8 | 16',None), # fall thru
1955 Case('IEMMODE_32BIT | 32 | 8', ThrdFnVar.ksVariation_32f_Addr16),
1956 ]);
1957 elif ThrdFnVar.ksVariation_32 in dByVari:
1958 assert fSimple and not sBranch;
1959 aoCases.extend([
1960 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', None), # fall thru
1961 Case('IEMMODE_32BIT', ThrdFnVar.ksVariation_32),
1962 ]);
1963 if ThrdFnVar.ksVariation_32f in dByVari:
1964 aoCases.extend([
1965 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', None), # fall thru
1966 Case('IEMMODE_32BIT | 32', ThrdFnVar.ksVariation_32f),
1967 ]);
1968 elif ThrdFnVar.ksVariation_32_Jmp in dByVari:
1969 assert fSimple and sBranch;
1970 aoCases.extend([
1971 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', None), # fall thru
1972 Case('IEMMODE_32BIT',
1973 ThrdFnVar.ksVariation_32_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32_NoJmp),
1974 ]);
1975 if ThrdFnVar.ksVariation_32f_Jmp in dByVari:
1976 aoCases.extend([
1977 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', None), # fall thru
1978 Case('IEMMODE_32BIT | 32',
1979 ThrdFnVar.ksVariation_32f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32f_NoJmp),
1980 ]);
1981
1982 if ThrdFnVar.ksVariation_16_Addr32 in dByVari:
1983 assert not fSimple and not sBranch;
1984 aoCases.extend([
1985 Case('IEMMODE_16BIT | 16', None), # fall thru
1986 Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16),
1987 Case('IEMMODE_16BIT | 8 | 16', None), # fall thru
1988 Case('IEMMODE_16BIT | 8', ThrdFnVar.ksVariation_16_Addr32),
1989 ]);
1990 if ThrdFnVar.ksVariation_16f_Addr32 in dByVari:
1991 aoCases.extend([
1992 Case('IEMMODE_16BIT | 32 | 16', None), # fall thru
1993 Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f),
1994 Case('IEMMODE_16BIT | 32 | 8 | 16', None), # fall thru
1995 Case('IEMMODE_16BIT | 32 | 8', ThrdFnVar.ksVariation_16f_Addr32),
1996 ]);
1997 elif ThrdFnVar.ksVariation_16 in dByVari:
1998 assert fSimple and not sBranch;
1999 aoCases.append(Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16));
2000 if ThrdFnVar.ksVariation_16f in dByVari:
2001 aoCases.append(Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f));
2002 elif ThrdFnVar.ksVariation_16_Jmp in dByVari:
2003 assert fSimple and sBranch;
2004 aoCases.append(Case('IEMMODE_16BIT',
2005 ThrdFnVar.ksVariation_16_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_16_NoJmp));
2006 if ThrdFnVar.ksVariation_16f_Jmp in dByVari:
2007 aoCases.append(Case('IEMMODE_16BIT | 32',
2008 ThrdFnVar.ksVariation_16f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_16f_NoJmp));
2009
2010
2011 if ThrdFnVar.ksVariation_16_Pre386 in dByVari:
2012 if not fSimple:
2013 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None)); # fall thru
2014 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_16_Pre386));
2015 if ThrdFnVar.ksVariation_16f_Pre386 in dByVari: # should be nested under previous if, but line too long.
2016 if not fSimple:
2017 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None)); # fall thru
2018 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_16f_Pre386));
2019
2020 if ThrdFnVar.ksVariation_16_Pre386_Jmp in dByVari:
2021 assert fSimple and sBranch;
2022 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK',
2023 ThrdFnVar.ksVariation_16_Pre386_Jmp if sBranch == 'Jmp'
2024 else ThrdFnVar.ksVariation_16_Pre386_NoJmp));
2025 if ThrdFnVar.ksVariation_16f_Pre386_Jmp in dByVari:
2026 assert fSimple and sBranch;
2027 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32',
2028 ThrdFnVar.ksVariation_16f_Pre386_Jmp if sBranch == 'Jmp'
2029 else ThrdFnVar.ksVariation_16f_Pre386_NoJmp));
2030
2031 #
2032 # If the case bodies are all the same, except for the function called,
2033 # we can reduce the code size and hopefully compile time.
2034 #
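# (When collapsed, each case body reduces to 'enmFunction = <index>; break;' and a
# single shared call sequence using 'enmFunction' is emitted after the switch - see
# toFunctionAssignment() and the emitThreadedCallStmts() call below.)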
2035 iFirstCaseWithBody = 0;
2036 while not aoCases[iFirstCaseWithBody].aoBody:
2037 iFirstCaseWithBody += 1;
2038 fAllSameCases = True;
2039 for iCase in range(iFirstCaseWithBody + 1, len(aoCases)):
2040 fAllSameCases = fAllSameCases and aoCases[iCase].isSame(aoCases[iFirstCaseWithBody]);
2041 #if fDbg: print('fAllSameCases=%s %s' % (fAllSameCases, self.oMcBlock.sFunction,));
2042 if fAllSameCases:
2043 aoStmts = [
2044 iai.McCppGeneric('IEMTHREADEDFUNCS enmFunction;'),
2045 iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
2046 iai.McCppGeneric('{'),
2047 ];
2048 for oCase in aoCases:
2049 aoStmts.extend(oCase.toFunctionAssignment());
2050 aoStmts.extend([
2051 iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
2052 iai.McCppGeneric('}'),
2053 ]);
2054 aoStmts.extend(dByVari[aoCases[iFirstCaseWithBody].sVarNm].emitThreadedCallStmts(0, 'enmFunction'));
2055
2056 else:
2057 #
2058 # Generate the generic switch statement.
2059 #
2060 aoStmts = [
2061 iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
2062 iai.McCppGeneric('{'),
2063 ];
2064 for oCase in aoCases:
2065 aoStmts.extend(oCase.toCode());
2066 aoStmts.extend([
2067 iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
2068 iai.McCppGeneric('}'),
2069 ]);
2070
2071 return aoStmts;
2072
2073 def morphInputCode(self, aoStmts, fIsConditional = False, fCallEmitted = False, cDepth = 0, sBranchAnnotation = None):
2074 """
2075 Adjusts (& copies) the statements for the input/decoder so it will emit
2076 calls to the right threaded functions for each block.
2077
2078 Returns list/tree of statements (aoStmts is not modified) and updated
2079 fCallEmitted status.
2080 """
2081 #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
2082 aoDecoderStmts = [];
2083
2084 for iStmt, oStmt in enumerate(aoStmts):
2085 # Copy the statement. Make a deep copy to make sure we've got our own
2086 # copies of all instance variables, even if a bit overkill at the moment.
2087 oNewStmt = copy.deepcopy(oStmt);
2088 aoDecoderStmts.append(oNewStmt);
2089 #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));
2090 if oNewStmt.sName == 'IEM_MC_BEGIN' and self.dsCImplFlags:
2091 oNewStmt.asParams[3] = ' | '.join(sorted(self.dsCImplFlags.keys()));
2092
2093 # If we haven't emitted the threaded function call yet, look for
2094 # statements which it would naturally follow or precede.
2095 if not fCallEmitted:
2096 if not oStmt.isCppStmt():
2097 if ( oStmt.sName.startswith('IEM_MC_MAYBE_RAISE_') \
2098 or (oStmt.sName.endswith('_AND_FINISH') and oStmt.sName.startswith('IEM_MC_'))
2099 or oStmt.sName.startswith('IEM_MC_CALL_CIMPL_')
2100 or oStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_')
2101 or oStmt.sName in ('IEM_MC_RAISE_DIVIDE_ERROR',)):
2102 aoDecoderStmts.pop();
2103 if not fIsConditional:
2104 aoDecoderStmts.extend(self.emitThreadedCallStmts());
2105 elif oStmt.sName == 'IEM_MC_ADVANCE_RIP_AND_FINISH':
2106 aoDecoderStmts.extend(self.emitThreadedCallStmts('NoJmp'));
2107 else:
2108 assert oStmt.sName in { 'IEM_MC_REL_JMP_S8_AND_FINISH': True,
2109 'IEM_MC_REL_JMP_S16_AND_FINISH': True,
2110 'IEM_MC_REL_JMP_S32_AND_FINISH': True, };
2111 aoDecoderStmts.extend(self.emitThreadedCallStmts('Jmp'));
2112 aoDecoderStmts.append(oNewStmt);
2113 fCallEmitted = True;
2114
2115 elif iai.g_dMcStmtParsers[oStmt.sName][2]:
2116 # This is for Jmp/NoJmp with loopne and friends, which modify state other than RIP.
2117 if not sBranchAnnotation:
2118 self.raiseProblem('Modifying state before emitting calls! %s' % (oStmt.sName,));
2119 assert fIsConditional;
2120 aoDecoderStmts.pop();
2121 if sBranchAnnotation == g_ksFinishAnnotation_Advance:
2122 assert iai.McStmt.findStmtByNames(aoStmts[iStmt:], {'IEM_MC_ADVANCE_RIP_AND_FINISH':1,})
2123 aoDecoderStmts.extend(self.emitThreadedCallStmts('NoJmp'));
2124 elif sBranchAnnotation == g_ksFinishAnnotation_RelJmp:
2125 assert iai.McStmt.findStmtByNames(aoStmts[iStmt:],
2126 { 'IEM_MC_REL_JMP_S8_AND_FINISH': 1,
2127 'IEM_MC_REL_JMP_S16_AND_FINISH': 1,
2128 'IEM_MC_REL_JMP_S32_AND_FINISH': 1, });
2129 aoDecoderStmts.extend(self.emitThreadedCallStmts('Jmp'));
2130 else:
2131 self.raiseProblem('Modifying state before emitting calls! %s' % (oStmt.sName,));
2132 aoDecoderStmts.append(oNewStmt);
2133 fCallEmitted = True;
2134
2135 elif ( not fIsConditional
2136 and oStmt.fDecode
2137 and ( oStmt.asParams[0].find('IEMOP_HLP_DONE_') >= 0
2138 or oStmt.asParams[0].find('IEMOP_HLP_DECODED_') >= 0)):
2139 aoDecoderStmts.extend(self.emitThreadedCallStmts());
2140 fCallEmitted = True;
2141
2142 # Process branches of conditionals recursively.
2143 if isinstance(oStmt, iai.McStmtCond):
2144 (oNewStmt.aoIfBranch, fCallEmitted1) = self.morphInputCode(oStmt.aoIfBranch, fIsConditional,
2145 fCallEmitted, cDepth + 1, oStmt.oIfBranchAnnotation);
2146 if oStmt.aoElseBranch:
2147 (oNewStmt.aoElseBranch, fCallEmitted2) = self.morphInputCode(oStmt.aoElseBranch, fIsConditional,
2148 fCallEmitted, cDepth + 1,
2149 oStmt.oElseBranchAnnotation);
2150 else:
2151 fCallEmitted2 = False;
2152 fCallEmitted = fCallEmitted or (fCallEmitted1 and fCallEmitted2);
2153
2154 if not fCallEmitted and cDepth == 0:
2155 self.raiseProblem('Unable to insert call to threaded function.');
2156
2157 return (aoDecoderStmts, fCallEmitted);
2158
2159
2160 def generateInputCode(self):
2161 """
2162 Morphs the decoder code for this MC block and returns it as a C++ source string with the threaded function call(s) inserted.
2163 """
2164 cchIndent = (self.oMcBlock.cchIndent + 3) // 4 * 4;
2165
2166 if len(self.oMcBlock.aoStmts) == 1:
2167 # IEM_MC_DEFER_TO_CIMPL_X_RET - need to wrap in {} to make it safe to insert into random code.
2168 sCode = ' ' * cchIndent + 'pVCpu->iem.s.fTbCurInstr = ';
2169 if self.dsCImplFlags:
2170 sCode += ' | '.join(sorted(self.dsCImplFlags.keys())) + ';\n';
2171 else:
2172 sCode += '0;\n';
2173 sCode += iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts)[0],
2174 cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
2175 sIndent = ' ' * (min(cchIndent, 2) - 2);
2176 sCode = sIndent + '{\n' + sCode + sIndent + '}\n';
2177 return sCode;
2178
2179 # IEM_MC_BEGIN/END block
2180 assert len(self.oMcBlock.asLines) > 2, "asLines=%s" % (self.oMcBlock.asLines,);
2181 fIsConditional = ( 'IEM_CIMPL_F_BRANCH_CONDITIONAL' in self.dsCImplFlags
2182 and 'IEM_CIMPL_F_BRANCH_RELATIVE' in self.dsCImplFlags); # (latter to avoid iemOp_into)
2183 return iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts, fIsConditional)[0],
2184 cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
2185
2186# Short alias for ThreadedFunctionVariation.
2187ThrdFnVar = ThreadedFunctionVariation;
2188
2189
2190class IEMThreadedGenerator(object):
2191 """
2192 The threaded code generator & annotator.
2193 """
2194
2195 def __init__(self):
2196 self.aoThreadedFuncs = [] # type: List[ThreadedFunction]
2197 self.oOptions = None # type: argparse.Namespace
2198 self.aoParsers = [] # type: List[IEMAllInstPython.SimpleParser]
2199 self.aidxFirstFunctions = [] # type: List[int] ##< Runs parallel to aoParsers, giving the index of the first function.
2200 self.cErrors = 0;
2201
2202 #
2203 # Error reporting.
2204 #
2205
2206 def rawError(self, sCompleteMessage):
2207 """ Output a raw error and increment the error counter. """
2208 print(sCompleteMessage, file = sys.stderr);
2209 self.cErrors += 1;
2210 return False;
2211
2212 #
2213 # Processing.
2214 #
2215
2216 def processInputFiles(self, sHostArch, fNativeRecompilerEnabled):
2217 """
2218 Process the input files.
2219 """
2220
2221 # Parse the files.
2222 self.aoParsers = iai.parseFiles(self.oOptions.asInFiles, sHostArch);
2223
2224 # Create threaded functions for the MC blocks.
2225 self.aoThreadedFuncs = [ThreadedFunction(oMcBlock) for oMcBlock in iai.g_aoMcBlocks];
2226
2227 # Analyze the threaded functions.
2228 dRawParamCounts = {};
2229 dMinParamCounts = {};
2230 for oThreadedFunction in self.aoThreadedFuncs:
2231 oThreadedFunction.analyze(self);
2232 for oVariation in oThreadedFunction.aoVariations:
2233 dRawParamCounts[len(oVariation.dParamRefs)] = dRawParamCounts.get(len(oVariation.dParamRefs), 0) + 1;
2234 dMinParamCounts[oVariation.cMinParams] = dMinParamCounts.get(oVariation.cMinParams, 0) + 1;
2235 print('debug: param count distribution, raw and optimized:', file = sys.stderr);
2236 for cCount in sorted({cBits: True for cBits in list(dRawParamCounts.keys()) + list(dMinParamCounts.keys())}.keys()):
2237 print('debug: %s params: %4s raw, %4s min'
2238 % (cCount, dRawParamCounts.get(cCount, 0), dMinParamCounts.get(cCount, 0)),
2239 file = sys.stderr);
2240
2241 # Populate aidxFirstFunctions. This is ASSUMING that
2242 # g_aoMcBlocks/self.aoThreadedFuncs are in self.aoParsers order.
2243 iThreadedFunction = 0;
2244 oThreadedFunction = self.getThreadedFunctionByIndex(0);
2245 self.aidxFirstFunctions = [];
2246 for oParser in self.aoParsers:
2247 self.aidxFirstFunctions.append(iThreadedFunction);
2248
2249 while oThreadedFunction.oMcBlock.sSrcFile == oParser.sSrcFile:
2250 iThreadedFunction += 1;
2251 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
2252
2253 # Analyze the threaded functions and their variations for native recompilation.
2254 if fNativeRecompilerEnabled:
2255 ian.displayStatistics(self.aoThreadedFuncs, sHostArch);
2256
2257 # Gather arguments + variable statistics for the MC blocks.
2258 cMaxArgs = 0;
2259 cMaxVars = 0;
2260 cMaxVarsAndArgs = 0;
2261 cbMaxArgs = 0;
2262 cbMaxVars = 0;
2263 cbMaxVarsAndArgs = 0;
2264 for oThreadedFunction in self.aoThreadedFuncs:
2265 if oThreadedFunction.oMcBlock.cLocals >= 0:
2266 # Counts.
2267 assert oThreadedFunction.oMcBlock.cArgs >= 0;
2268 cMaxVars = max(cMaxVars, oThreadedFunction.oMcBlock.cLocals);
2269 cMaxArgs = max(cMaxArgs, oThreadedFunction.oMcBlock.cArgs);
2270 cMaxVarsAndArgs = max(cMaxVarsAndArgs, oThreadedFunction.oMcBlock.cLocals + oThreadedFunction.oMcBlock.cArgs);
2271 if cMaxVarsAndArgs > 9:
2272 raise Exception('%s potentially uses too many variables / args: %u, max 10 - %u vars and %u args'
2273 % (oThreadedFunction.oMcBlock.oFunction.sName, cMaxVarsAndArgs,
2274 oThreadedFunction.oMcBlock.cLocals, oThreadedFunction.oMcBlock.cArgs));
2275 # Calc stack allocation size:
2276 cbArgs = 0;
2277 for oArg in oThreadedFunction.oMcBlock.aoArgs:
2278 cbArgs += (getTypeBitCount(oArg.sType) + 63) // 64 * 8;
2279 cbVars = 0;
2280 for oVar in oThreadedFunction.oMcBlock.aoLocals:
2281 cbVars += (getTypeBitCount(oVar.sType) + 63) // 64 * 8;
2282 cbMaxVars = max(cbMaxVars, cbVars);
2283 cbMaxArgs = max(cbMaxArgs, cbArgs);
2284 cbMaxVarsAndArgs = max(cbMaxVarsAndArgs, cbVars + cbArgs);
2285 if cbMaxVarsAndArgs >= 0xc0:
2286 raise Exception('%s potentially uses too much stack: cbMaxVars=%#x cbMaxArgs=%#x'
2287 % (oThreadedFunction.oMcBlock.oFunction.sName, cbMaxVars, cbMaxArgs,));
2288
2289 print('debug: max vars+args: %u bytes / %u; max vars: %u bytes / %u; max args: %u bytes / %u'
2290 % (cbMaxVarsAndArgs, cMaxVarsAndArgs, cbMaxVars, cMaxVars, cbMaxArgs, cMaxArgs,), file = sys.stderr);
2291
2292 if self.cErrors > 0:
2293 print('fatal error: %u error%s during processing. Details above.'
2294 % (self.cErrors, 's' if self.cErrors > 1 else '',), file = sys.stderr);
2295 return False;
2296 return True;
2297
2298 #
2299 # Output
2300 #
2301
2302 def generateLicenseHeader(self):
2303 """
2304 Returns the lines for a license header.
2305 """
2306 return [
2307 '/*',
2308 ' * Autogenerated by $Id: IEMAllThrdPython.py 103233 2024-02-07 00:09:53Z vboxsync $ ',
2309 ' * Do not edit!',
2310 ' */',
2311 '',
2312 '/*',
2313 ' * Copyright (C) 2023-' + str(datetime.date.today().year) + ' Oracle and/or its affiliates.',
2314 ' *',
2315 ' * This file is part of VirtualBox base platform packages, as',
2316 ' * available from https://www.alldomusa.eu.org.',
2317 ' *',
2318 ' * This program is free software; you can redistribute it and/or',
2319 ' * modify it under the terms of the GNU General Public License',
2320 ' * as published by the Free Software Foundation, in version 3 of the',
2321 ' * License.',
2322 ' *',
2323 ' * This program is distributed in the hope that it will be useful, but',
2324 ' * WITHOUT ANY WARRANTY; without even the implied warranty of',
2325 ' * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU',
2326 ' * General Public License for more details.',
2327 ' *',
2328 ' * You should have received a copy of the GNU General Public License',
2329 ' * along with this program; if not, see <https://www.gnu.org/licenses>.',
2330 ' *',
2331 ' * The contents of this file may alternatively be used under the terms',
2332 ' * of the Common Development and Distribution License Version 1.0',
2333 ' * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included',
2334 ' * in the VirtualBox distribution, in which case the provisions of the',
2335 ' * CDDL are applicable instead of those of the GPL.',
2336 ' *',
2337 ' * You may elect to license modified versions of this file under the',
2338 ' * terms and conditions of either the GPL or the CDDL or both.',
2339 ' *',
2340 ' * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0',
2341 ' */',
2342 '',
2343 '',
2344 '',
2345 ];
2346
2347 ## List of built-in threaded functions with user argument counts and
2348 ## whether they have a native recompiler implementation.
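## Each entry is a (sFuncNm, cUsedArgs, fHaveNativeRecompilerFunc) tuple (member
## names here are descriptive only).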
2349 katBltIns = (
2350 ( 'Nop', 0, True ),
2351 ( 'LogCpuState', 0, True ),
2352
2353 ( 'DeferToCImpl0', 2, True ),
2354 ( 'CheckIrq', 0, True ),
2355 ( 'CheckMode', 1, True ),
2356 ( 'CheckHwInstrBps', 0, False ),
2357 ( 'CheckCsLim', 1, True ),
2358
2359 ( 'CheckCsLimAndOpcodes', 3, True ),
2360 ( 'CheckOpcodes', 3, True ),
2361 ( 'CheckOpcodesConsiderCsLim', 3, True ),
2362
2363 ( 'CheckCsLimAndPcAndOpcodes', 3, True ),
2364 ( 'CheckPcAndOpcodes', 3, True ),
2365 ( 'CheckPcAndOpcodesConsiderCsLim', 3, True ),
2366
2367 ( 'CheckCsLimAndOpcodesAcrossPageLoadingTlb', 3, True ),
2368 ( 'CheckOpcodesAcrossPageLoadingTlb', 3, True ),
2369 ( 'CheckOpcodesAcrossPageLoadingTlbConsiderCsLim', 2, True ),
2370
2371 ( 'CheckCsLimAndOpcodesLoadingTlb', 3, True ),
2372 ( 'CheckOpcodesLoadingTlb', 3, True ),
2373 ( 'CheckOpcodesLoadingTlbConsiderCsLim', 3, True ),
2374
2375 ( 'CheckCsLimAndOpcodesOnNextPageLoadingTlb', 2, True ),
2376 ( 'CheckOpcodesOnNextPageLoadingTlb', 2, True ),
2377 ( 'CheckOpcodesOnNextPageLoadingTlbConsiderCsLim', 2, True ),
2378
2379 ( 'CheckCsLimAndOpcodesOnNewPageLoadingTlb', 2, True ),
2380 ( 'CheckOpcodesOnNewPageLoadingTlb', 2, True ),
2381 ( 'CheckOpcodesOnNewPageLoadingTlbConsiderCsLim', 2, True ),
2382 );
2383
2384 def generateThreadedFunctionsHeader(self, oOut):
2385 """
2386 Generates the threaded functions header file.
2387 Returns success indicator.
2388 """
2389
2390 asLines = self.generateLicenseHeader();
2391
2392 # Generate the threaded function table indexes.
2393 asLines += [
2394 'typedef enum IEMTHREADEDFUNCS',
2395 '{',
2396 ' kIemThreadedFunc_Invalid = 0,',
2397 '',
2398 ' /*',
2399 ' * Predefined',
2400 ' */',
2401 ];
2402 asLines += [' kIemThreadedFunc_BltIn_%s,' % (sFuncNm,) for sFuncNm, _, _ in self.katBltIns];
2403
2404 iThreadedFunction = 1 + len(self.katBltIns);
2405 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2406 asLines += [
2407 '',
2408 ' /*',
2409 ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '',
2410 ' */',
2411 ];
2412 for oThreadedFunction in self.aoThreadedFuncs:
2413 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2414 if oVariation:
2415 iThreadedFunction += 1;
2416 oVariation.iEnumValue = iThreadedFunction;
2417 asLines.append(' ' + oVariation.getIndexName() + ',');
2418 asLines += [
2419 ' kIemThreadedFunc_End',
2420 '} IEMTHREADEDFUNCS;',
2421 '',
2422 ];
2423
2424 # Prototype the function table.
2425 asLines += [
2426 'extern const PFNIEMTHREADEDFUNC g_apfnIemThreadedFunctions[kIemThreadedFunc_End];',
2427 '#if defined(IN_RING3) || defined(LOG_ENABLED)',
2428 'extern const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End];',
2429 '#endif',
2430 'extern uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End];',
2431 ];
2432
2433 oOut.write('\n'.join(asLines));
2434 return True;
2435
2436 ksBitsToIntMask = {
2437 1: "UINT64_C(0x1)",
2438 2: "UINT64_C(0x3)",
2439 4: "UINT64_C(0xf)",
2440 8: "UINT64_C(0xff)",
2441 16: "UINT64_C(0xffff)",
2442 32: "UINT64_C(0xffffffff)",
2443 };
2444
2445 def generateFunctionParameterUnpacking(self, oVariation, oOut, asParams):
2446 """
2447 Outputs code for unpacking parameters.
2448 This is shared by the threaded and native code generators.
2449 """
2450 aasVars = [];
2451 for aoRefs in oVariation.dParamRefs.values():
2452 oRef = aoRefs[0];
2453 if oRef.sType[0] != 'P':
2454 cBits = g_kdTypeInfo[oRef.sType][0];
2455 sType = g_kdTypeInfo[oRef.sType][2];
2456 else:
2457 cBits = 64;
2458 sType = oRef.sType;
2459
2460 sTypeDecl = sType + ' const';
2461
2462 if cBits == 64:
2463 assert oRef.offNewParam == 0;
2464 if sType == 'uint64_t':
2465 sUnpack = '%s;' % (asParams[oRef.iNewParam],);
2466 else:
2467 sUnpack = '(%s)%s;' % (sType, asParams[oRef.iNewParam],);
2468 elif oRef.offNewParam == 0:
2469 sUnpack = '(%s)(%s & %s);' % (sType, asParams[oRef.iNewParam], self.ksBitsToIntMask[cBits]);
2470 else:
2471 sUnpack = '(%s)((%s >> %s) & %s);' \
2472 % (sType, asParams[oRef.iNewParam], oRef.offNewParam, self.ksBitsToIntMask[cBits]);
2473
2474 sComment = '/* %s - %s ref%s */' % (oRef.sOrgRef, len(aoRefs), 's' if len(aoRefs) != 1 else '',);
2475
2476 aasVars.append([ '%s:%02u' % (oRef.iNewParam, oRef.offNewParam),
2477 sTypeDecl, oRef.sNewName, sUnpack, sComment ]);
2478 acchVars = [0, 0, 0, 0, 0];
2479 for asVar in aasVars:
2480 for iCol, sStr in enumerate(asVar):
2481 acchVars[iCol] = max(acchVars[iCol], len(sStr));
2482 sFmt = ' %%-%ss %%-%ss = %%-%ss %%s\n' % (acchVars[1], acchVars[2], acchVars[3]);
2483 for asVar in sorted(aasVars):
2484 oOut.write(sFmt % (asVar[1], asVar[2], asVar[3], asVar[4],));
2485 return True;
2486
2487 kasThreadedParamNames = ('uParam0', 'uParam1', 'uParam2');
2488 def generateThreadedFunctionsSource(self, oOut):
2489 """
2490 Generates the threaded functions source file.
2491 Returns success indicator.
2492 """
2493
2494 asLines = self.generateLicenseHeader();
2495 oOut.write('\n'.join(asLines));
2496
2497 #
2498 # Emit the function definitions.
2499 #
2500 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2501 sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
2502 oOut.write( '\n'
2503 + '\n'
2504 + '\n'
2505 + '\n'
2506 + '/*' + '*' * 128 + '\n'
2507 + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
2508 + '*' * 128 + '*/\n');
2509
2510 for oThreadedFunction in self.aoThreadedFuncs:
2511 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2512 if oVariation:
2513 oMcBlock = oThreadedFunction.oMcBlock;
2514
2515 # Function header
2516 oOut.write( '\n'
2517 + '\n'
2518 + '/**\n'
2519 + ' * #%u: %s at line %s offset %s in %s%s\n'
2520 % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
2521 os.path.split(oMcBlock.sSrcFile)[1],
2522 ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
2523 + ' */\n'
2524 + 'static IEM_DECL_IEMTHREADEDFUNC_DEF(' + oVariation.getThreadedFunctionName() + ')\n'
2525 + '{\n');
2526
2527 # Unpack parameters.
2528 self.generateFunctionParameterUnpacking(oVariation, oOut, self.kasThreadedParamNames);
2529
2530 # RT_NOREF for unused parameters.
2531 if oVariation.cMinParams < g_kcThreadedParams:
2532 oOut.write(' RT_NOREF(' + ', '.join(self.kasThreadedParamNames[oVariation.cMinParams:]) + ');\n');
2533
2534 # Now for the actual statements.
2535 oOut.write(iai.McStmt.renderCodeForList(oVariation.aoStmtsForThreadedFunction, cchIndent = 4));
2536
2537 oOut.write('}\n');
2538
2539
2540 #
2541 # Generate the output tables in parallel.
2542 #
2543 asFuncTable = [
2544 '/**',
2545 ' * Function pointer table.',
2546 ' */',
2547 'PFNIEMTHREADEDFUNC const g_apfnIemThreadedFunctions[kIemThreadedFunc_End] =',
2548 '{',
2549 ' /*Invalid*/ NULL,',
2550 ];
2551 asNameTable = [
2552 '/**',
2553 ' * Function name table.',
2554 ' */',
2555 'const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End] =',
2556 '{',
2557 ' "Invalid",',
2558 ];
2559 asArgCntTab = [
2560 '/**',
2561 ' * Argument count table.',
2562 ' */',
2563 'uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End] =',
2564 '{',
2565 ' 0, /*Invalid*/',
2566 ];
2567 aasTables = (asFuncTable, asNameTable, asArgCntTab,);
2568
2569 for asTable in aasTables:
2570 asTable.extend((
2571 '',
2572 ' /*',
2573 ' * Predefined.',
2574 ' */',
2575 ));
2576 for sFuncNm, cArgs, _ in self.katBltIns:
2577 asFuncTable.append(' iemThreadedFunc_BltIn_%s,' % (sFuncNm,));
2578 asNameTable.append(' "BltIn_%s",' % (sFuncNm,));
2579 asArgCntTab.append(' %d, /*BltIn_%s*/' % (cArgs, sFuncNm,));
2580
2581 iThreadedFunction = 1 + len(self.katBltIns);
2582 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2583 for asTable in aasTables:
2584 asTable.extend((
2585 '',
2586 ' /*',
2587 ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation],
2588 ' */',
2589 ));
2590 for oThreadedFunction in self.aoThreadedFuncs:
2591 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2592 if oVariation:
2593 iThreadedFunction += 1;
2594 assert oVariation.iEnumValue == iThreadedFunction;
2595 sName = oVariation.getThreadedFunctionName();
2596 asFuncTable.append(' /*%4u*/ %s,' % (iThreadedFunction, sName,));
2597 asNameTable.append(' /*%4u*/ "%s",' % (iThreadedFunction, sName,));
2598 asArgCntTab.append(' /*%4u*/ %d, /*%s*/' % (iThreadedFunction, oVariation.cMinParams, sName,));
2599
2600 for asTable in aasTables:
2601 asTable.append('};');
2602
2603 #
2604 # Output the tables.
2605 #
2606 oOut.write( '\n'
2607 + '\n');
2608 oOut.write('\n'.join(asFuncTable));
2609 oOut.write( '\n'
2610 + '\n'
2611 + '\n'
2612 + '#if defined(IN_RING3) || defined(LOG_ENABLED)\n');
2613 oOut.write('\n'.join(asNameTable));
2614 oOut.write( '\n'
2615 + '#endif /* IN_RING3 || LOG_ENABLED */\n'
2616 + '\n'
2617 + '\n');
2618 oOut.write('\n'.join(asArgCntTab));
2619 oOut.write('\n');
2620
2621 return True;
2622
2623 def generateNativeFunctionsHeader(self, oOut):
2624 """
2625 Generates the native recompiler functions header file.
2626 Returns success indicator.
2627 """
2628 if not self.oOptions.fNativeRecompilerEnabled:
2629 return True;
2630
2631 asLines = self.generateLicenseHeader();
2632
2633 # Prototype the function table.
2634 asLines += [
2635 'extern const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End];',
2636 'extern const PFNIEMNATIVELIVENESSFUNC g_apfnIemNativeLivenessFunctions[kIemThreadedFunc_End];',
2637 '',
2638 ];
2639
2640 # Emit indicators as to which of the builtin functions have a native
2641        # recompiler function and which do not.  (We only really need this for
2642 # kIemThreadedFunc_BltIn_CheckMode, but do all just for simplicity.)
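        # The consuming C++ code can thus #ifdef on IEMNATIVE_WITH_BLTIN_<NAME> /
        # IEMNATIVE_WITHOUT_BLTIN_<NAME> to pick the appropriate code path.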
2643 for atBltIn in self.katBltIns:
2644 if atBltIn[1]:
2645 asLines.append('#define IEMNATIVE_WITH_BLTIN_' + atBltIn[0].upper())
2646 else:
2647 asLines.append('#define IEMNATIVE_WITHOUT_BLTIN_' + atBltIn[0].upper())
2648
2649 # Emit prototypes for the builtin functions we use in tables.
2650 asLines += [
2651 '',
2652 '/* Prototypes for built-in functions used in the above tables. */',
2653 ];
2654 for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
2655 if fHaveRecompFunc:
2656 asLines += [
2657 'IEM_DECL_IEMNATIVERECOMPFUNC_PROTO( iemNativeRecompFunc_BltIn_%s);' % (sFuncNm,),
2658 'IEM_DECL_IEMNATIVELIVENESSFUNC_PROTO(iemNativeLivenessFunc_BltIn_%s);' % (sFuncNm,),
2659 ];
2660
2661 oOut.write('\n'.join(asLines));
2662 return True;
2663
2664 def generateNativeFunctionsSource(self, oOut):
2665 """
2666 Generates the native recompiler functions source file.
2667 Returns success indicator.
2668 """
2669 if not self.oOptions.fNativeRecompilerEnabled:
2670 return True;
2671
2672 #
2673 # The file header.
2674 #
2675 oOut.write('\n'.join(self.generateLicenseHeader()));
2676
2677 #
2678 # Emit the functions.
2679 #
2680 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2681 sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
2682 oOut.write( '\n'
2683 + '\n'
2684 + '\n'
2685 + '\n'
2686 + '/*' + '*' * 128 + '\n'
2687 + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
2688 + '*' * 128 + '*/\n');
2689
2690 for oThreadedFunction in self.aoThreadedFuncs:
2691 oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
2692 if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
2693 oMcBlock = oThreadedFunction.oMcBlock;
2694
2695 # Function header
2696 oOut.write( '\n'
2697 + '\n'
2698 + '/**\n'
2699 + ' * #%u: %s at line %s offset %s in %s%s\n'
2700 % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
2701 os.path.split(oMcBlock.sSrcFile)[1],
2702 ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
2703 + ' */\n'
2704 + 'static IEM_DECL_IEMNATIVERECOMPFUNC_DEF(' + oVariation.getNativeFunctionName() + ')\n'
2705 + '{\n');
2706
2707 # Unpack parameters.
2708 self.generateFunctionParameterUnpacking(oVariation, oOut,
2709 ('pCallEntry->auParams[0]',
2710 'pCallEntry->auParams[1]',
2711 'pCallEntry->auParams[2]',));
2712
2713 # Now for the actual statements.
2714 oOut.write(oVariation.oNativeRecomp.renderCode(cchIndent = 4));
2715
2716 oOut.write('}\n');
2717
2718 #
2719 # Output the function table.
2720 #
2721 oOut.write( '\n'
2722 + '\n'
2723 + '/*\n'
2724 + ' * Function table running parallel to g_apfnIemThreadedFunctions and friends.\n'
2725 + ' */\n'
2726 + 'const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End] =\n'
2727 + '{\n'
2728 + ' /*Invalid*/ NULL,'
2729 + '\n'
2730 + ' /*\n'
2731 + ' * Predefined.\n'
2732 + ' */\n'
2733 );
2734 for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
2735 if fHaveRecompFunc:
2736 oOut.write(' iemNativeRecompFunc_BltIn_%s,\n' % (sFuncNm,))
2737 else:
2738 oOut.write(' NULL, /*BltIn_%s*/\n' % (sFuncNm,))
2739
2740 iThreadedFunction = 1 + len(self.katBltIns);
2741 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2742 oOut.write( ' /*\n'
2743 + ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '\n'
2744 + ' */\n');
2745 for oThreadedFunction in self.aoThreadedFuncs:
2746 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2747 if oVariation:
2748 iThreadedFunction += 1;
2749 assert oVariation.iEnumValue == iThreadedFunction;
2750 sName = oVariation.getNativeFunctionName();
2751 if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
2752 oOut.write(' /*%4u*/ %s,\n' % (iThreadedFunction, sName,));
2753 else:
2754 oOut.write(' /*%4u*/ NULL /*%s*/,\n' % (iThreadedFunction, sName,));
2755
2756 oOut.write( '};\n'
2757 + '\n');
2758 return True;
2759
2760 def generateNativeLivenessSource(self, oOut):
2761 """
2762 Generates the native recompiler liveness analysis functions source file.
2763 Returns success indicator.
2764 """
2765 if not self.oOptions.fNativeRecompilerEnabled:
2766 return True;
2767
2768 #
2769 # The file header.
2770 #
2771 oOut.write('\n'.join(self.generateLicenseHeader()));
2772
2773 #
2774 # Emit the functions.
2775 #
2776 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2777 sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
2778 oOut.write( '\n'
2779 + '\n'
2780 + '\n'
2781 + '\n'
2782 + '/*' + '*' * 128 + '\n'
2783 + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
2784 + '*' * 128 + '*/\n');
2785
2786 for oThreadedFunction in self.aoThreadedFuncs:
2787 oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
2788 if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
2789 oMcBlock = oThreadedFunction.oMcBlock;
2790
2791 # Function header
2792 oOut.write( '\n'
2793 + '\n'
2794 + '/**\n'
2795 + ' * #%u: %s at line %s offset %s in %s%s\n'
2796 % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
2797 os.path.split(oMcBlock.sSrcFile)[1],
2798 ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
2799 + ' */\n'
2800 + 'static IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(' + oVariation.getLivenessFunctionName() + ')\n'
2801 + '{\n');
2802
2803 # Unpack parameters.
2804 self.generateFunctionParameterUnpacking(oVariation, oOut,
2805 ('pCallEntry->auParams[0]',
2806 'pCallEntry->auParams[1]',
2807 'pCallEntry->auParams[2]',));
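                    # The generated liveness function typically does not use the unpacked parameters,
                    # so reference each of them with RT_NOREF_PV to avoid unused-variable warnings.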
2808 asNoRefs = []; #[ 'RT_NOREF_PV(pReNative);', ];
2809 for aoRefs in oVariation.dParamRefs.values():
2810 asNoRefs.append('RT_NOREF_PV(%s);' % (aoRefs[0].sNewName,));
2811 oOut.write(' %s\n' % (' '.join(asNoRefs),));
2812
2813 # Now for the actual statements.
2814 oOut.write(oVariation.oNativeRecomp.renderCode(cchIndent = 4));
2815
2816 oOut.write('}\n');
2817
2818 #
2819 # Output the function table.
2820 #
2821 oOut.write( '\n'
2822 + '\n'
2823 + '/*\n'
2824 + ' * Liveness analysis function table running parallel to g_apfnIemThreadedFunctions and friends.\n'
2825 + ' */\n'
2826 + 'const PFNIEMNATIVELIVENESSFUNC g_apfnIemNativeLivenessFunctions[kIemThreadedFunc_End] =\n'
2827 + '{\n'
2828 + ' /*Invalid*/ NULL,'
2829 + '\n'
2830 + ' /*\n'
2831 + ' * Predefined.\n'
2832 + ' */\n'
2833 );
2834 for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
2835 if fHaveRecompFunc:
2836 oOut.write(' iemNativeLivenessFunc_BltIn_%s,\n' % (sFuncNm,))
2837 else:
2838 oOut.write(' NULL, /*BltIn_%s*/\n' % (sFuncNm,))
2839
2840 iThreadedFunction = 1 + len(self.katBltIns);
2841 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2842 oOut.write( ' /*\n'
2843 + ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '\n'
2844 + ' */\n');
2845 for oThreadedFunction in self.aoThreadedFuncs:
2846 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2847 if oVariation:
2848 iThreadedFunction += 1;
2849 assert oVariation.iEnumValue == iThreadedFunction;
2850 sName = oVariation.getLivenessFunctionName();
2851 if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
2852 oOut.write(' /*%4u*/ %s,\n' % (iThreadedFunction, sName,));
2853 else:
2854 oOut.write(' /*%4u*/ NULL /*%s*/,\n' % (iThreadedFunction, sName,));
2855
2856 oOut.write( '};\n'
2857 + '\n');
2858 return True;
2859
2860
2861 def getThreadedFunctionByIndex(self, idx):
2862 """
2863 Returns a ThreadedFunction object for the given index. If the index is
2864 out of bounds, a dummy is returned.
2865 """
2866 if idx < len(self.aoThreadedFuncs):
2867 return self.aoThreadedFuncs[idx];
2868 return ThreadedFunction.dummyInstance();
2869
2870 def generateModifiedInput(self, oOut, idxFile):
2871 """
2872 Generates the combined modified input source/header file.
2873 Returns success indicator.
2874 """
2875 #
2876 # File header and assert assumptions.
2877 #
2878 oOut.write('\n'.join(self.generateLicenseHeader()));
2879 oOut.write('AssertCompile((IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7);\n');
2880
2881 #
2882 # Iterate all parsers (input files) and output the ones related to the
2883 # file set given by idxFile.
2884 #
2885 for idxParser, oParser in enumerate(self.aoParsers): # type: int, IEMAllInstPython.SimpleParser
2886 # Is this included in the file set?
2887 sSrcBaseFile = os.path.basename(oParser.sSrcFile).lower();
2888 fInclude = -1;
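            # fInclude: -1 = no match found yet; a set index of -1 in the info table presumably
            # means the file belongs to every output part.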
2889 for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet:
2890 if sSrcBaseFile == aoInfo[0].lower():
2891 fInclude = aoInfo[2] in (-1, idxFile);
2892 break;
2893 if fInclude is not True:
2894 assert fInclude is False;
2895 continue;
2896
2897 # Output it.
2898 oOut.write("\n\n/* ****** BEGIN %s ******* */\n" % (oParser.sSrcFile,));
2899
2900 iThreadedFunction = self.aidxFirstFunctions[idxParser];
2901 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
2902 iLine = 0;
2903 while iLine < len(oParser.asLines):
2904 sLine = oParser.asLines[iLine];
2905 iLine += 1; # iBeginLine and iEndLine are 1-based.
2906
2907 # Can we pass it thru?
2908 if ( iLine not in [oThreadedFunction.oMcBlock.iBeginLine, oThreadedFunction.oMcBlock.iEndLine]
2909 or oThreadedFunction.oMcBlock.sSrcFile != oParser.sSrcFile):
2910 oOut.write(sLine);
2911 #
2912 # Single MC block. Just extract it and insert the replacement.
2913 #
2914 elif oThreadedFunction.oMcBlock.iBeginLine != oThreadedFunction.oMcBlock.iEndLine:
2915 assert ( (sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1)
2916 or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial), 'sLine="%s"' % (sLine,);
2917 oOut.write(sLine[:oThreadedFunction.oMcBlock.offBeginLine]);
2918 sModified = oThreadedFunction.generateInputCode().strip();
2919 oOut.write(sModified);
2920
2921 iLine = oThreadedFunction.oMcBlock.iEndLine;
2922 sLine = oParser.asLines[iLine - 1];
2923 assert ( sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1
2924 or len(oThreadedFunction.oMcBlock.aoStmts) == 1
2925 or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial);
2926 oOut.write(sLine[oThreadedFunction.oMcBlock.offAfterEnd : ]);
2927
2928 # Advance
2929 iThreadedFunction += 1;
2930 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
2931 #
2932            # Macro expansion line that has sublines and may contain multiple MC blocks.
2933 #
2934 else:
2935 offLine = 0;
2936 while iLine == oThreadedFunction.oMcBlock.iBeginLine:
2937 oOut.write(sLine[offLine : oThreadedFunction.oMcBlock.offBeginLine]);
2938
2939 sModified = oThreadedFunction.generateInputCode().strip();
2940 assert ( sModified.startswith('IEM_MC_BEGIN')
2941 or (sModified.find('IEM_MC_DEFER_TO_CIMPL_') > 0 and sModified.strip().startswith('{\n'))
2942 or sModified.startswith('pVCpu->iem.s.fEndTb = true')
2943 or sModified.startswith('pVCpu->iem.s.fTbCurInstr = ')
2944 ), 'sModified="%s"' % (sModified,);
2945 oOut.write(sModified);
2946
2947 offLine = oThreadedFunction.oMcBlock.offAfterEnd;
2948
2949 # Advance
2950 iThreadedFunction += 1;
2951 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
2952
2953 # Last line segment.
2954 if offLine < len(sLine):
2955 oOut.write(sLine[offLine : ]);
2956
2957 oOut.write("/* ****** END %s ******* */\n" % (oParser.sSrcFile,));
2958
2959 return True;
2960
2961 def generateModifiedInput1(self, oOut):
2962 """
2963 Generates the combined modified input source/header file, part 1.
2964 Returns success indicator.
2965 """
2966 return self.generateModifiedInput(oOut, 1);
2967
2968 def generateModifiedInput2(self, oOut):
2969 """
2970 Generates the combined modified input source/header file, part 2.
2971 Returns success indicator.
2972 """
2973 return self.generateModifiedInput(oOut, 2);
2974
2975 def generateModifiedInput3(self, oOut):
2976 """
2977 Generates the combined modified input source/header file, part 3.
2978 Returns success indicator.
2979 """
2980 return self.generateModifiedInput(oOut, 3);
2981
2982 def generateModifiedInput4(self, oOut):
2983 """
2984 Generates the combined modified input source/header file, part 4.
2985 Returns success indicator.
2986 """
2987 return self.generateModifiedInput(oOut, 4);
2988
2989
2990 #
2991 # Main
2992 #
2993
2994 def main(self, asArgs):
2995 """
2996 C-like main function.
2997 Returns exit code.
2998 """
2999
3000 #
3001 # Parse arguments
3002 #
3003 sScriptDir = os.path.dirname(__file__);
3004 oParser = argparse.ArgumentParser(add_help = False);
3005 oParser.add_argument('asInFiles',
3006 metavar = 'input.cpp.h',
3007 nargs = '*',
3008 default = [os.path.join(sScriptDir, aoInfo[0])
3009 for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet],
3010 help = "Selection of VMMAll/IEMAllInst*.cpp.h files to use as input.");
3011 oParser.add_argument('--host-arch',
3012 metavar = 'arch',
3013 dest = 'sHostArch',
3014 action = 'store',
3015 default = None,
3016 help = 'The host architecture.');
3017
3018 oParser.add_argument('--out-thrd-funcs-hdr',
3019 metavar = 'file-thrd-funcs.h',
3020 dest = 'sOutFileThrdFuncsHdr',
3021 action = 'store',
3022 default = '-',
3023 help = 'The output header file for the threaded functions.');
3024 oParser.add_argument('--out-thrd-funcs-cpp',
3025 metavar = 'file-thrd-funcs.cpp',
3026 dest = 'sOutFileThrdFuncsCpp',
3027 action = 'store',
3028 default = '-',
3029 help = 'The output C++ file for the threaded functions.');
3030 oParser.add_argument('--out-n8ve-funcs-hdr',
3031 metavar = 'file-n8tv-funcs.h',
3032 dest = 'sOutFileN8veFuncsHdr',
3033 action = 'store',
3034 default = '-',
3035 help = 'The output header file for the native recompiler functions.');
3036 oParser.add_argument('--out-n8ve-funcs-cpp',
3037 metavar = 'file-n8tv-funcs.cpp',
3038 dest = 'sOutFileN8veFuncsCpp',
3039 action = 'store',
3040 default = '-',
3041 help = 'The output C++ file for the native recompiler functions.');
3042 oParser.add_argument('--out-n8ve-liveness-cpp',
3043 metavar = 'file-n8tv-liveness.cpp',
3044 dest = 'sOutFileN8veLivenessCpp',
3045 action = 'store',
3046 default = '-',
3047 help = 'The output C++ file for the native recompiler liveness analysis functions.');
3048 oParser.add_argument('--native',
3049 dest = 'fNativeRecompilerEnabled',
3050 action = 'store_true',
3051 default = False,
3052 help = 'Enables generating the files related to native recompilation.');
3053 oParser.add_argument('--out-mod-input1',
3054 metavar = 'file-instr.cpp.h',
3055 dest = 'sOutFileModInput1',
3056 action = 'store',
3057 default = '-',
3058 help = 'The output C++/header file for modified input instruction files part 1.');
3059 oParser.add_argument('--out-mod-input2',
3060 metavar = 'file-instr.cpp.h',
3061 dest = 'sOutFileModInput2',
3062 action = 'store',
3063 default = '-',
3064 help = 'The output C++/header file for modified input instruction files part 2.');
3065 oParser.add_argument('--out-mod-input3',
3066 metavar = 'file-instr.cpp.h',
3067 dest = 'sOutFileModInput3',
3068 action = 'store',
3069 default = '-',
3070 help = 'The output C++/header file for modified input instruction files part 3.');
3071 oParser.add_argument('--out-mod-input4',
3072 metavar = 'file-instr.cpp.h',
3073 dest = 'sOutFileModInput4',
3074 action = 'store',
3075 default = '-',
3076 help = 'The output C++/header file for modified input instruction files part 4.');
3077 oParser.add_argument('--help', '-h', '-?',
3078 action = 'help',
3079 help = 'Display help and exit.');
3080 oParser.add_argument('--version', '-V',
3081 action = 'version',
3082 version = 'r%s (IEMAllThreadedPython.py), r%s (IEMAllInstPython.py)'
3083 % (__version__.split()[1], iai.__version__.split()[1],),
3084                             help = 'Display the version/revision of the script and exit.');
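        # Example invocation (hypothetical output file names; input files default to all IEMAllInst*.cpp.h):
        #   IEMAllThrdPython.py --host-arch amd64 --native \
        #       --out-thrd-funcs-hdr IEMThreadedFunctions.h --out-thrd-funcs-cpp IEMThreadedFunctions.cpp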
3085 self.oOptions = oParser.parse_args(asArgs[1:]);
3086 print("oOptions=%s" % (self.oOptions,), file = sys.stderr);
3087
3088 #
3089 # Process the instructions specified in the IEM sources.
3090 #
3091 if self.processInputFiles(self.oOptions.sHostArch, self.oOptions.fNativeRecompilerEnabled):
3092 #
3093 # Generate the output files.
3094 #
3095 aaoOutputFiles = (
3096 ( self.oOptions.sOutFileThrdFuncsHdr, self.generateThreadedFunctionsHeader ),
3097 ( self.oOptions.sOutFileThrdFuncsCpp, self.generateThreadedFunctionsSource ),
3098 ( self.oOptions.sOutFileN8veFuncsHdr, self.generateNativeFunctionsHeader ),
3099 ( self.oOptions.sOutFileN8veFuncsCpp, self.generateNativeFunctionsSource ),
3100 ( self.oOptions.sOutFileN8veLivenessCpp, self.generateNativeLivenessSource ),
3101 ( self.oOptions.sOutFileModInput1, self.generateModifiedInput1 ),
3102 ( self.oOptions.sOutFileModInput2, self.generateModifiedInput2 ),
3103 ( self.oOptions.sOutFileModInput3, self.generateModifiedInput3 ),
3104 ( self.oOptions.sOutFileModInput4, self.generateModifiedInput4 ),
3105 );
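            # Each entry pairs an output file option with its generator method; '-' (the default)
            # makes the generator write to stdout.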
3106 fRc = True;
3107 for sOutFile, fnGenMethod in aaoOutputFiles:
3108 if sOutFile == '-':
3109 fRc = fnGenMethod(sys.stdout) and fRc;
3110 else:
3111 try:
3112 oOut = open(sOutFile, 'w'); # pylint: disable=consider-using-with,unspecified-encoding
3113 except Exception as oXcpt:
3114                        print('error! Failed to open "%s" for writing: %s' % (sOutFile, oXcpt,), file = sys.stderr);
3115 return 1;
3116 fRc = fnGenMethod(oOut) and fRc;
3117 oOut.close();
3118 if fRc:
3119 return 0;
3120
3121 return 1;
3122
3123
3124if __name__ == '__main__':
3125 sys.exit(IEMThreadedGenerator().main(sys.argv));
3126