VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllThrdPython.py@ 103376

Last change on this file since 103376 was 103351, checked in by vboxsync, 14 months ago

VMM/IEM: Only need an uint8_t type for the extended 8-bit grp references (max value is 19). Parfait bugref:3409 bugref:10370

  • Property svn:eol-style set to LF
  • Property svn:executable set to *
  • Property svn:keywords set to Author Date Id Revision
File size: 160.4 KB
 
1#!/usr/bin/env python
2# -*- coding: utf-8 -*-
3# $Id: IEMAllThrdPython.py 103351 2024-02-14 12:51:58Z vboxsync $
4# pylint: disable=invalid-name
5
6"""
7Annotates and generates threaded functions from IEMAllInst*.cpp.h.
8"""
9
10from __future__ import print_function;
11
12__copyright__ = \
13"""
14Copyright (C) 2023 Oracle and/or its affiliates.
15
16This file is part of VirtualBox base platform packages, as
17 available from https://www.virtualbox.org.
18
19This program is free software; you can redistribute it and/or
20modify it under the terms of the GNU General Public License
21as published by the Free Software Foundation, in version 3 of the
22License.
23
24This program is distributed in the hope that it will be useful, but
25WITHOUT ANY WARRANTY; without even the implied warranty of
26MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
27General Public License for more details.
28
29You should have received a copy of the GNU General Public License
30along with this program; if not, see <https://www.gnu.org/licenses>.
31
32SPDX-License-Identifier: GPL-3.0-only
33"""
34__version__ = "$Revision: 103351 $"
35
36# Standard python imports.
37import copy;
38import datetime;
39import os;
40import re;
41import sys;
42import argparse;
43from typing import Dict, List;
44
45import IEMAllInstPython as iai;
46import IEMAllN8vePython as ian;
47
48
49# Python 3 hacks:
50if sys.version_info[0] >= 3:
51 long = int; # pylint: disable=redefined-builtin,invalid-name
52
53## Number of generic parameters for the thread functions.
54g_kcThreadedParams = 3;
55
56g_kdTypeInfo = {
57 # type name: (cBits, fSigned, C-type )
58 'int8_t': ( 8, True, 'int8_t', ),
59 'int16_t': ( 16, True, 'int16_t', ),
60 'int32_t': ( 32, True, 'int32_t', ),
61 'int64_t': ( 64, True, 'int64_t', ),
62 'uint4_t': ( 4, False, 'uint8_t', ),
63 'uint8_t': ( 8, False, 'uint8_t', ),
64 'uint16_t': ( 16, False, 'uint16_t', ),
65 'uint32_t': ( 32, False, 'uint32_t', ),
66 'uint64_t': ( 64, False, 'uint64_t', ),
67 'uintptr_t': ( 64, False, 'uintptr_t',), # ASSUMES 64-bit host pointer size.
68 'bool': ( 1, False, 'bool', ),
69 'IEMMODE': ( 2, False, 'IEMMODE', ),
70};
71
72# Only for getTypeBitCount/variables.
73g_kdTypeInfo2 = {
74 'RTFLOAT32U': ( 32, False, 'RTFLOAT32U', ),
75 'RTFLOAT64U': ( 64, False, 'RTFLOAT64U', ),
76 'RTUINT64U': ( 64, False, 'RTUINT64U', ),
77 'RTGCPTR': ( 64, False, 'RTGCPTR', ),
78 'RTPBCD80U': ( 80, False, 'RTPBCD80U', ),
79 'RTFLOAT80U': ( 80, False, 'RTFLOAT80U', ),
80 'IEMFPURESULT': (80+16, False, 'IEMFPURESULT', ),
81 'IEMFPURESULTTWO': (80+16+80,False, 'IEMFPURESULTTWO', ),
82 'RTUINT128U': ( 128, False, 'RTUINT128U', ),
83 'X86XMMREG': ( 128, False, 'X86XMMREG', ),
84 'IEMSSERESULT': ( 128+32, False, 'IEMSSERESULT', ),
85 'IEMMEDIAF2XMMSRC': ( 256, False, 'IEMMEDIAF2XMMSRC',),
86 'RTUINT256U': ( 256, False, 'RTUINT256U', ),
87 'IEMPCMPISTRXSRC': ( 256, False, 'IEMPCMPISTRXSRC', ),
88 'IEMPCMPESTRXSRC': ( 384, False, 'IEMPCMPESTRXSRC', ),
89}; #| g_kdTypeInfo; - requires 3.9
90g_kdTypeInfo2.update(g_kdTypeInfo);
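# A minimal sketch of the dict-union spelling hinted at in the comment above; it
# requires Python 3.9+, which is why the explicit update() call is used instead:
#   g_kdTypeInfo2 = g_kdTypeInfo2 | g_kdTypeInfo;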
91
92def getTypeBitCount(sType):
93 """
94 Translate a type to size in bits
95 """
96 if sType in g_kdTypeInfo2:
97 return g_kdTypeInfo2[sType][0];
98 if '*' in sType or sType[0] == 'P':
99 return 64;
100 #raise Exception('Unknown type: %s' % (sType,));
101 print('error: Unknown type: %s' % (sType,));
102 return 64;
103
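# A few illustrative checks of the lookup above (sketch only; 'PCPUMCTX' is just an
# example of a pointer-typed name that falls back to the 64-bit host pointer size):
assert getTypeBitCount('uint16_t') == 16;
assert getTypeBitCount('IEMMODE')  == 2;
assert getTypeBitCount('PCPUMCTX') == 64;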
104g_kdIemFieldToType = {
105 # Illegal ones:
106 'offInstrNextByte': ( None, ),
107 'cbInstrBuf': ( None, ),
108 'pbInstrBuf': ( None, ),
109 'uInstrBufPc': ( None, ),
110 'cbInstrBufTotal': ( None, ),
111 'offCurInstrStart': ( None, ),
112 'cbOpcode': ( None, ),
113 'offOpcode': ( None, ),
114 'offModRm': ( None, ),
115 # Okay ones.
116 'fPrefixes': ( 'uint32_t', ),
117 'uRexReg': ( 'uint8_t', ),
118 'uRexB': ( 'uint8_t', ),
119 'uRexIndex': ( 'uint8_t', ),
120 'iEffSeg': ( 'uint8_t', ),
121 'enmEffOpSize': ( 'IEMMODE', ),
122 'enmDefAddrMode': ( 'IEMMODE', ),
123 'enmEffAddrMode': ( 'IEMMODE', ),
124 'enmDefOpSize': ( 'IEMMODE', ),
125 'idxPrefix': ( 'uint8_t', ),
126 'uVex3rdReg': ( 'uint8_t', ),
127 'uVexLength': ( 'uint8_t', ),
128 'fEvexStuff': ( 'uint8_t', ),
129 'uFpuOpcode': ( 'uint16_t', ),
130};
131
132## @name McStmtCond.oIfBranchAnnotation/McStmtCond.oElseBranchAnnotation values
133## @{
134g_ksFinishAnnotation_Advance = 'Advance';
135g_ksFinishAnnotation_RelJmp = 'RelJmp';
136g_ksFinishAnnotation_SetJmp = 'SetJmp';
137g_ksFinishAnnotation_DeferToCImpl = 'DeferToCImpl';
138## @}
139
140
141class ThreadedParamRef(object):
142 """
143 A parameter reference for a threaded function.
144 """
145
146 def __init__(self, sOrgRef, sType, oStmt, iParam = None, offParam = 0, sStdRef = None):
147 ## The name / reference in the original code.
148 self.sOrgRef = sOrgRef;
149 ## Normalized name to deal with spaces in macro invocations and such.
150 self.sStdRef = sStdRef if sStdRef else ''.join(sOrgRef.split());
151 ## Indicates that sOrgRef may not match the parameter.
152 self.fCustomRef = sStdRef is not None;
153 ## The type (typically derived).
154 self.sType = sType;
155 ## The statement making the reference.
156 self.oStmt = oStmt;
157 ## The parameter containing the references. None if implicit.
158 self.iParam = iParam;
159 ## The offset in the parameter of the reference.
160 self.offParam = offParam;
161
162 ## The variable name in the threaded function.
163 self.sNewName = 'x';
164 ## The parameter (index) this is packed into.
165 self.iNewParam = 99;
166 ## The bit offset in iNewParam.
167 self.offNewParam = 1024
168
169
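# A minimal construction sketch (oStmt stands for whatever MC statement makes the
# reference; see the IEM_MC_CALC_RM_EFF_ADDR handling further down for real uses):
#   oRef = ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)', 'uint4_t', oStmt, sStdRef = 'cbInstr');
# Here fCustomRef is True because an explicit sStdRef was given, and sNewName,
# iNewParam and offNewParam keep their placeholder values until
# analyzeConsolidateThreadedParamRefs() assigns the packed parameter layout.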
170class ThreadedFunctionVariation(object):
171 """ Threaded function variation. """
172
173 ## @name Variations.
174 ## These variations will match translation block selection/distinctions as well.
175 ## @{
176 # pylint: disable=line-too-long
177 ksVariation_Default = ''; ##< No variations - only used by IEM_MC_DEFER_TO_CIMPL_X_RET.
178 ksVariation_16 = '_16'; ##< 16-bit mode code (386+).
179 ksVariation_16f = '_16f'; ##< 16-bit mode code (386+), check+clear eflags.
180 ksVariation_16_Jmp = '_16_Jmp'; ##< 16-bit mode code (386+), conditional jump taken.
181 ksVariation_16f_Jmp = '_16f_Jmp'; ##< 16-bit mode code (386+), check+clear eflags, conditional jump taken.
182 ksVariation_16_NoJmp = '_16_NoJmp'; ##< 16-bit mode code (386+), conditional jump not taken.
183 ksVariation_16f_NoJmp = '_16f_NoJmp'; ##< 16-bit mode code (386+), check+clear eflags, conditional jump not taken.
184 ksVariation_16_Addr32 = '_16_Addr32'; ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing.
185 ksVariation_16f_Addr32 = '_16f_Addr32'; ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing, eflags.
186 ksVariation_16_Pre386 = '_16_Pre386'; ##< 16-bit mode code, pre-386 CPU target.
187 ksVariation_16f_Pre386 = '_16f_Pre386'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags.
188 ksVariation_16_Pre386_Jmp = '_16_Pre386_Jmp'; ##< 16-bit mode code, pre-386 CPU target, conditional jump taken.
189 ksVariation_16f_Pre386_Jmp = '_16f_Pre386_Jmp'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags, conditional jump taken.
190 ksVariation_16_Pre386_NoJmp = '_16_Pre386_NoJmp'; ##< 16-bit mode code, pre-386 CPU target, conditional jump not taken.
191 ksVariation_16f_Pre386_NoJmp = '_16f_Pre386_NoJmp'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags, conditional jump not taken.
192 ksVariation_32 = '_32'; ##< 32-bit mode code (386+).
193 ksVariation_32f = '_32f'; ##< 32-bit mode code (386+), check+clear eflags.
194 ksVariation_32_Jmp = '_32_Jmp'; ##< 32-bit mode code (386+), conditional jump taken.
195 ksVariation_32f_Jmp = '_32f_Jmp'; ##< 32-bit mode code (386+), check+clear eflags, conditional jump taken.
196 ksVariation_32_NoJmp = '_32_NoJmp'; ##< 32-bit mode code (386+), conditional jump not taken.
197 ksVariation_32f_NoJmp = '_32f_NoJmp'; ##< 32-bit mode code (386+), check+clear eflags, conditional jump not taken.
198 ksVariation_32_Flat = '_32_Flat'; ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide.
199 ksVariation_32f_Flat = '_32f_Flat'; ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide, eflags.
200 ksVariation_32_Addr16 = '_32_Addr16'; ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing.
201 ksVariation_32f_Addr16 = '_32f_Addr16'; ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing, eflags.
202 ksVariation_64 = '_64'; ##< 64-bit mode code.
203 ksVariation_64f = '_64f'; ##< 64-bit mode code, check+clear eflags.
204 ksVariation_64_Jmp = '_64_Jmp'; ##< 64-bit mode code, conditional jump taken.
205 ksVariation_64f_Jmp = '_64f_Jmp'; ##< 64-bit mode code, check+clear eflags, conditional jump taken.
206 ksVariation_64_NoJmp = '_64_NoJmp'; ##< 64-bit mode code, conditional jump not taken.
207 ksVariation_64f_NoJmp = '_64f_NoJmp'; ##< 64-bit mode code, check+clear eflags, conditional jump not taken.
208 ksVariation_64_FsGs = '_64_FsGs'; ##< 64-bit mode code, with memory accesses via FS or GS.
209 ksVariation_64f_FsGs = '_64f_FsGs'; ##< 64-bit mode code, with memory accesses via FS or GS, check+clear eflags.
210 ksVariation_64_Addr32 = '_64_Addr32'; ##< 64-bit mode code, address size prefixed to 32-bit addressing.
211 ksVariation_64f_Addr32 = '_64f_Addr32'; ##< 64-bit mode code, address size prefixed to 32-bit addressing, c+c eflags.
212 # pylint: enable=line-too-long
213 kasVariations = (
214 ksVariation_Default,
215 ksVariation_16,
216 ksVariation_16f,
217 ksVariation_16_Jmp,
218 ksVariation_16f_Jmp,
219 ksVariation_16_NoJmp,
220 ksVariation_16f_NoJmp,
221 ksVariation_16_Addr32,
222 ksVariation_16f_Addr32,
223 ksVariation_16_Pre386,
224 ksVariation_16f_Pre386,
225 ksVariation_16_Pre386_Jmp,
226 ksVariation_16f_Pre386_Jmp,
227 ksVariation_16_Pre386_NoJmp,
228 ksVariation_16f_Pre386_NoJmp,
229 ksVariation_32,
230 ksVariation_32f,
231 ksVariation_32_Jmp,
232 ksVariation_32f_Jmp,
233 ksVariation_32_NoJmp,
234 ksVariation_32f_NoJmp,
235 ksVariation_32_Flat,
236 ksVariation_32f_Flat,
237 ksVariation_32_Addr16,
238 ksVariation_32f_Addr16,
239 ksVariation_64,
240 ksVariation_64f,
241 ksVariation_64_Jmp,
242 ksVariation_64f_Jmp,
243 ksVariation_64_NoJmp,
244 ksVariation_64f_NoJmp,
245 ksVariation_64_FsGs,
246 ksVariation_64f_FsGs,
247 ksVariation_64_Addr32,
248 ksVariation_64f_Addr32,
249 );
250 kasVariationsWithoutAddress = (
251 ksVariation_16,
252 ksVariation_16f,
253 ksVariation_16_Pre386,
254 ksVariation_16f_Pre386,
255 ksVariation_32,
256 ksVariation_32f,
257 ksVariation_64,
258 ksVariation_64f,
259 );
260 kasVariationsWithoutAddressNot286 = (
261 ksVariation_16,
262 ksVariation_16f,
263 ksVariation_32,
264 ksVariation_32f,
265 ksVariation_64,
266 ksVariation_64f,
267 );
268 kasVariationsWithoutAddressNot286Not64 = (
269 ksVariation_16,
270 ksVariation_16f,
271 ksVariation_32,
272 ksVariation_32f,
273 );
274 kasVariationsWithoutAddressNot64 = (
275 ksVariation_16,
276 ksVariation_16f,
277 ksVariation_16_Pre386,
278 ksVariation_16f_Pre386,
279 ksVariation_32,
280 ksVariation_32f,
281 );
282 kasVariationsWithoutAddressOnly64 = (
283 ksVariation_64,
284 ksVariation_64f,
285 );
286 kasVariationsWithAddress = (
287 ksVariation_16,
288 ksVariation_16f,
289 ksVariation_16_Addr32,
290 ksVariation_16f_Addr32,
291 ksVariation_16_Pre386,
292 ksVariation_16f_Pre386,
293 ksVariation_32,
294 ksVariation_32f,
295 ksVariation_32_Flat,
296 ksVariation_32f_Flat,
297 ksVariation_32_Addr16,
298 ksVariation_32f_Addr16,
299 ksVariation_64,
300 ksVariation_64f,
301 ksVariation_64_FsGs,
302 ksVariation_64f_FsGs,
303 ksVariation_64_Addr32,
304 ksVariation_64f_Addr32,
305 );
306 kasVariationsWithAddressNot286 = (
307 ksVariation_16,
308 ksVariation_16f,
309 ksVariation_16_Addr32,
310 ksVariation_16f_Addr32,
311 ksVariation_32,
312 ksVariation_32f,
313 ksVariation_32_Flat,
314 ksVariation_32f_Flat,
315 ksVariation_32_Addr16,
316 ksVariation_32f_Addr16,
317 ksVariation_64,
318 ksVariation_64f,
319 ksVariation_64_FsGs,
320 ksVariation_64f_FsGs,
321 ksVariation_64_Addr32,
322 ksVariation_64f_Addr32,
323 );
324 kasVariationsWithAddressNot286Not64 = (
325 ksVariation_16,
326 ksVariation_16f,
327 ksVariation_16_Addr32,
328 ksVariation_16f_Addr32,
329 ksVariation_32,
330 ksVariation_32f,
331 ksVariation_32_Flat,
332 ksVariation_32f_Flat,
333 ksVariation_32_Addr16,
334 ksVariation_32f_Addr16,
335 );
336 kasVariationsWithAddressNot64 = (
337 ksVariation_16,
338 ksVariation_16f,
339 ksVariation_16_Addr32,
340 ksVariation_16f_Addr32,
341 ksVariation_16_Pre386,
342 ksVariation_16f_Pre386,
343 ksVariation_32,
344 ksVariation_32f,
345 ksVariation_32_Flat,
346 ksVariation_32f_Flat,
347 ksVariation_32_Addr16,
348 ksVariation_32f_Addr16,
349 );
350 kasVariationsWithAddressOnly64 = (
351 ksVariation_64,
352 ksVariation_64f,
353 ksVariation_64_FsGs,
354 ksVariation_64f_FsGs,
355 ksVariation_64_Addr32,
356 ksVariation_64f_Addr32,
357 );
358 kasVariationsOnlyPre386 = (
359 ksVariation_16_Pre386,
360 ksVariation_16f_Pre386,
361 );
362 kasVariationsEmitOrder = (
363 ksVariation_Default,
364 ksVariation_64,
365 ksVariation_64f,
366 ksVariation_64_Jmp,
367 ksVariation_64f_Jmp,
368 ksVariation_64_NoJmp,
369 ksVariation_64f_NoJmp,
370 ksVariation_64_FsGs,
371 ksVariation_64f_FsGs,
372 ksVariation_32_Flat,
373 ksVariation_32f_Flat,
374 ksVariation_32,
375 ksVariation_32f,
376 ksVariation_32_Jmp,
377 ksVariation_32f_Jmp,
378 ksVariation_32_NoJmp,
379 ksVariation_32f_NoJmp,
380 ksVariation_16,
381 ksVariation_16f,
382 ksVariation_16_Jmp,
383 ksVariation_16f_Jmp,
384 ksVariation_16_NoJmp,
385 ksVariation_16f_NoJmp,
386 ksVariation_16_Addr32,
387 ksVariation_16f_Addr32,
388 ksVariation_16_Pre386,
389 ksVariation_16f_Pre386,
390 ksVariation_16_Pre386_Jmp,
391 ksVariation_16f_Pre386_Jmp,
392 ksVariation_16_Pre386_NoJmp,
393 ksVariation_16f_Pre386_NoJmp,
394 ksVariation_32_Addr16,
395 ksVariation_32f_Addr16,
396 ksVariation_64_Addr32,
397 ksVariation_64f_Addr32,
398 );
399 kdVariationNames = {
400 ksVariation_Default: 'defer-to-cimpl',
401 ksVariation_16: '16-bit',
402 ksVariation_16f: '16-bit w/ eflag checking and clearing',
403 ksVariation_16_Jmp: '16-bit w/ conditional jump taken',
404 ksVariation_16f_Jmp: '16-bit w/ eflag checking and clearing and conditional jump taken',
405 ksVariation_16_NoJmp: '16-bit w/ conditional jump not taken',
406 ksVariation_16f_NoJmp: '16-bit w/ eflag checking and clearing and conditional jump not taken',
407 ksVariation_16_Addr32: '16-bit w/ address prefix (Addr32)',
408 ksVariation_16f_Addr32: '16-bit w/ address prefix (Addr32) and eflag checking and clearing',
409 ksVariation_16_Pre386: '16-bit on pre-386 CPU',
410 ksVariation_16f_Pre386: '16-bit on pre-386 CPU w/ eflag checking and clearing',
411 ksVariation_16_Pre386_Jmp: '16-bit on pre-386 CPU w/ conditional jump taken',
412 ksVariation_16f_Pre386_Jmp: '16-bit on pre-386 CPU w/ eflag checking and clearing and conditional jump taken',
413 ksVariation_16_Pre386_NoJmp: '16-bit on pre-386 CPU w/ conditional jump not taken',
414 ksVariation_16f_Pre386_NoJmp: '16-bit on pre-386 CPU w/ eflag checking and clearing and conditional jump not taken',
415 ksVariation_32: '32-bit',
416 ksVariation_32f: '32-bit w/ eflag checking and clearing',
417 ksVariation_32_Jmp: '32-bit w/ conditional jump taken',
418 ksVariation_32f_Jmp: '32-bit w/ eflag checking and clearing and conditional jump taken',
419 ksVariation_32_NoJmp: '32-bit w/ conditional jump not taken',
420 ksVariation_32f_NoJmp: '32-bit w/ eflag checking and clearing and conditional jump not taken',
421 ksVariation_32_Flat: '32-bit flat and wide open CS, SS, DS and ES',
422 ksVariation_32f_Flat: '32-bit flat and wide open CS, SS, DS and ES w/ eflag checking and clearing',
423 ksVariation_32_Addr16: '32-bit w/ address prefix (Addr16)',
424 ksVariation_32f_Addr16: '32-bit w/ address prefix (Addr16) and eflag checking and clearing',
425 ksVariation_64: '64-bit',
426 ksVariation_64f: '64-bit w/ eflag checking and clearing',
427 ksVariation_64_Jmp: '64-bit w/ conditional jump taken',
428 ksVariation_64f_Jmp: '64-bit w/ eflag checking and clearing and conditional jump taken',
429 ksVariation_64_NoJmp: '64-bit w/ conditional jump not taken',
430 ksVariation_64f_NoJmp: '64-bit w/ eflag checking and clearing and conditional jump not taken',
431 ksVariation_64_FsGs: '64-bit with memory accessed via FS or GS',
432 ksVariation_64f_FsGs: '64-bit with memory accessed via FS or GS and eflag checking and clearing',
433 ksVariation_64_Addr32: '64-bit w/ address prefix (Addr32)',
434 ksVariation_64f_Addr32: '64-bit w/ address prefix (Addr32) and eflag checking and clearing',
435 };
436 kdVariationsWithEflagsCheckingAndClearing = {
437 ksVariation_16f: True,
438 ksVariation_16f_Jmp: True,
439 ksVariation_16f_NoJmp: True,
440 ksVariation_16f_Addr32: True,
441 ksVariation_16f_Pre386: True,
442 ksVariation_16f_Pre386_Jmp: True,
443 ksVariation_16f_Pre386_NoJmp: True,
444 ksVariation_32f: True,
445 ksVariation_32f_Jmp: True,
446 ksVariation_32f_NoJmp: True,
447 ksVariation_32f_Flat: True,
448 ksVariation_32f_Addr16: True,
449 ksVariation_64f: True,
450 ksVariation_64f_Jmp: True,
451 ksVariation_64f_NoJmp: True,
452 ksVariation_64f_FsGs: True,
453 ksVariation_64f_Addr32: True,
454 };
455 kdVariationsOnly64NoFlags = {
456 ksVariation_64: True,
457 ksVariation_64_Jmp: True,
458 ksVariation_64_NoJmp: True,
459 ksVariation_64_FsGs: True,
460 ksVariation_64_Addr32: True,
461 };
462 kdVariationsOnly64WithFlags = {
463 ksVariation_64f: True,
464 ksVariation_64f_Jmp: True,
465 ksVariation_64f_NoJmp: True,
466 ksVariation_64f_FsGs: True,
467 ksVariation_64f_Addr32: True,
468 };
469 kdVariationsOnlyPre386NoFlags = {
470 ksVariation_16_Pre386: True,
471 ksVariation_16_Pre386_Jmp: True,
472 ksVariation_16_Pre386_NoJmp: True,
473 };
474 kdVariationsOnlyPre386WithFlags = {
475 ksVariation_16f_Pre386: True,
476 ksVariation_16f_Pre386_Jmp: True,
477 ksVariation_16f_Pre386_NoJmp: True,
478 };
479 kdVariationsWithFlatAddress = {
480 ksVariation_32_Flat: True,
481 ksVariation_32f_Flat: True,
482 ksVariation_64: True,
483 ksVariation_64f: True,
484 ksVariation_64_Addr32: True,
485 ksVariation_64f_Addr32: True,
486 };
487 kdVariationsWithFlatStackAddress = {
488 ksVariation_32_Flat: True,
489 ksVariation_32f_Flat: True,
490 ksVariation_64: True,
491 ksVariation_64f: True,
492 ksVariation_64_FsGs: True,
493 ksVariation_64f_FsGs: True,
494 ksVariation_64_Addr32: True,
495 ksVariation_64f_Addr32: True,
496 };
497 kdVariationsWithFlat64StackAddress = {
498 ksVariation_64: True,
499 ksVariation_64f: True,
500 ksVariation_64_FsGs: True,
501 ksVariation_64f_FsGs: True,
502 ksVariation_64_Addr32: True,
503 ksVariation_64f_Addr32: True,
504 };
505 kdVariationsWithFlatAddr16 = {
506 ksVariation_16: True,
507 ksVariation_16f: True,
508 ksVariation_16_Pre386: True,
509 ksVariation_16f_Pre386: True,
510 ksVariation_32_Addr16: True,
511 ksVariation_32f_Addr16: True,
512 };
513 kdVariationsWithFlatAddr32No64 = {
514 ksVariation_16_Addr32: True,
515 ksVariation_16f_Addr32: True,
516 ksVariation_32: True,
517 ksVariation_32f: True,
518 ksVariation_32_Flat: True,
519 ksVariation_32f_Flat: True,
520 };
521 kdVariationsWithAddressOnly64 = {
522 ksVariation_64: True,
523 ksVariation_64f: True,
524 ksVariation_64_FsGs: True,
525 ksVariation_64f_FsGs: True,
526 ksVariation_64_Addr32: True,
527 ksVariation_64f_Addr32: True,
528 };
529 kdVariationsWithConditional = {
530 ksVariation_16_Jmp: True,
531 ksVariation_16_NoJmp: True,
532 ksVariation_16_Pre386_Jmp: True,
533 ksVariation_16_Pre386_NoJmp: True,
534 ksVariation_32_Jmp: True,
535 ksVariation_32_NoJmp: True,
536 ksVariation_64_Jmp: True,
537 ksVariation_64_NoJmp: True,
538 ksVariation_16f_Jmp: True,
539 ksVariation_16f_NoJmp: True,
540 ksVariation_16f_Pre386_Jmp: True,
541 ksVariation_16f_Pre386_NoJmp: True,
542 ksVariation_32f_Jmp: True,
543 ksVariation_32f_NoJmp: True,
544 ksVariation_64f_Jmp: True,
545 ksVariation_64f_NoJmp: True,
546 };
547 kdVariationsWithConditionalNoJmp = {
548 ksVariation_16_NoJmp: True,
549 ksVariation_16_Pre386_NoJmp: True,
550 ksVariation_32_NoJmp: True,
551 ksVariation_64_NoJmp: True,
552 ksVariation_16f_NoJmp: True,
553 ksVariation_16f_Pre386_NoJmp: True,
554 ksVariation_32f_NoJmp: True,
555 ksVariation_64f_NoJmp: True,
556 };
557 kdVariationsOnlyPre386 = {
558 ksVariation_16_Pre386: True,
559 ksVariation_16f_Pre386: True,
560 ksVariation_16_Pre386_Jmp: True,
561 ksVariation_16f_Pre386_Jmp: True,
562 ksVariation_16_Pre386_NoJmp: True,
563 ksVariation_16f_Pre386_NoJmp: True,
564 };
565 ## @}
566
567 ## IEM_CIMPL_F_XXX flags that we know.
568 ## The value indicates whether it terminates the TB or not. The goal is to
569 ## improve the recompiler so all but END_TB will be False.
570 ##
571 ## @note iemThreadedRecompilerMcDeferToCImpl0 duplicates info found here.
572 kdCImplFlags = {
573 'IEM_CIMPL_F_MODE': False,
574 'IEM_CIMPL_F_BRANCH_DIRECT': False,
575 'IEM_CIMPL_F_BRANCH_INDIRECT': False,
576 'IEM_CIMPL_F_BRANCH_RELATIVE': False,
577 'IEM_CIMPL_F_BRANCH_FAR': True,
578 'IEM_CIMPL_F_BRANCH_CONDITIONAL': False,
579 # IEM_CIMPL_F_BRANCH_ANY should only be used for testing, so not included here.
580 'IEM_CIMPL_F_BRANCH_STACK': False,
581 'IEM_CIMPL_F_BRANCH_STACK_FAR': False,
582 'IEM_CIMPL_F_RFLAGS': False,
583 'IEM_CIMPL_F_INHIBIT_SHADOW': False,
584 'IEM_CIMPL_F_CHECK_IRQ_AFTER': False,
585 'IEM_CIMPL_F_CHECK_IRQ_BEFORE': False,
586 'IEM_CIMPL_F_CHECK_IRQ_BEFORE_AND_AFTER': False, # (ignore)
587 'IEM_CIMPL_F_STATUS_FLAGS': False,
588 'IEM_CIMPL_F_VMEXIT': False,
589 'IEM_CIMPL_F_FPU': False,
590 'IEM_CIMPL_F_REP': False,
591 'IEM_CIMPL_F_IO': False,
592 'IEM_CIMPL_F_END_TB': True,
593 'IEM_CIMPL_F_XCPT': True,
594 'IEM_CIMPL_F_CALLS_CIMPL': False,
595 'IEM_CIMPL_F_CALLS_AIMPL': False,
596 'IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE': False,
597 };
598
599 def __init__(self, oThreadedFunction, sVariation = ksVariation_Default):
600 self.oParent = oThreadedFunction # type: ThreadedFunction
601 ##< ksVariation_Xxxx.
602 self.sVariation = sVariation
603
604 ## Threaded function parameter references.
605 self.aoParamRefs = [] # type: List[ThreadedParamRef]
606 ## Unique parameter references.
607 self.dParamRefs = {} # type: Dict[str, List[ThreadedParamRef]]
608 ## Minimum number of parameters to the threaded function.
609 self.cMinParams = 0;
610
611 ## List/tree of statements for the threaded function.
612 self.aoStmtsForThreadedFunction = [] # type: List[McStmt]
613
614 ## Function enum number, for verification. Set by generateThreadedFunctionsHeader.
615 self.iEnumValue = -1;
616
617 ## Native recompilation details for this variation.
618 self.oNativeRecomp = None;
619
620 def getIndexName(self):
621 sName = self.oParent.oMcBlock.sFunction;
622 if sName.startswith('iemOp_'):
623 sName = sName[len('iemOp_'):];
624 if self.oParent.oMcBlock.iInFunction == 0:
625 return 'kIemThreadedFunc_%s%s' % ( sName, self.sVariation, );
626 return 'kIemThreadedFunc_%s_%s%s' % ( sName, self.oParent.oMcBlock.iInFunction, self.sVariation, );
627
628 def getThreadedFunctionName(self):
629 sName = self.oParent.oMcBlock.sFunction;
630 if sName.startswith('iemOp_'):
631 sName = sName[len('iemOp_'):];
632 if self.oParent.oMcBlock.iInFunction == 0:
633 return 'iemThreadedFunc_%s%s' % ( sName, self.sVariation, );
634 return 'iemThreadedFunc_%s_%s%s' % ( sName, self.oParent.oMcBlock.iInFunction, self.sVariation, );
635
636 def getNativeFunctionName(self):
637 return 'iemNativeRecompFunc_' + self.getThreadedFunctionName()[len('iemThreadedFunc_'):];
638
639 def getLivenessFunctionName(self):
640 return 'iemNativeLivenessFunc_' + self.getThreadedFunctionName()[len('iemThreadedFunc_'):];
641
642 def getShortName(self):
643 sName = self.oParent.oMcBlock.sFunction;
644 if sName.startswith('iemOp_'):
645 sName = sName[len('iemOp_'):];
646 if self.oParent.oMcBlock.iInFunction == 0:
647 return '%s%s' % ( sName, self.sVariation, );
648 return '%s_%s%s' % ( sName, self.oParent.oMcBlock.iInFunction, self.sVariation, );
649
650 def isWithFlagsCheckingAndClearingVariation(self):
651 """
652 Checks if this is a variation that checks and clears EFLAGS.
653 """
654 return self.sVariation in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing;
655
656 #
657 # Analysis and code morphing.
658 #
659
660 def raiseProblem(self, sMessage):
661 """ Raises a problem. """
662 self.oParent.raiseProblem(sMessage);
663
664 def warning(self, sMessage):
665 """ Emits a warning. """
666 self.oParent.warning(sMessage);
667
668 def analyzeReferenceToType(self, sRef):
669 """
670 Translates a variable or structure reference to a type.
671 Returns type name.
672 Raises exception if unable to figure it out.
673 """
674 ch0 = sRef[0];
675 if ch0 == 'u':
676 if sRef.startswith('u32'):
677 return 'uint32_t';
678 if sRef.startswith('u8') or sRef == 'uReg':
679 return 'uint8_t';
680 if sRef.startswith('u64'):
681 return 'uint64_t';
682 if sRef.startswith('u16'):
683 return 'uint16_t';
684 elif ch0 == 'b':
685 return 'uint8_t';
686 elif ch0 == 'f':
687 return 'bool';
688 elif ch0 == 'i':
689 if sRef.startswith('i8'):
690 return 'int8_t';
691 if sRef.startswith('i16'):
692 return 'int16_t';
693 if sRef.startswith('i32'):
694 return 'int32_t';
695 if sRef.startswith('i64'):
696 return 'int64_t';
697 if sRef in ('iReg', 'iFixedReg', 'iGReg', 'iSegReg', 'iSrcReg', 'iDstReg', 'iCrReg'):
698 return 'uint8_t';
699 elif ch0 == 'p':
700 if sRef.find('-') < 0:
701 return 'uintptr_t';
702 if sRef.startswith('pVCpu->iem.s.'):
703 sField = sRef[len('pVCpu->iem.s.') : ];
704 if sField in g_kdIemFieldToType:
705 if g_kdIemFieldToType[sField][0]:
706 return g_kdIemFieldToType[sField][0];
707 elif ch0 == 'G' and sRef.startswith('GCPtr'):
708 return 'uint64_t';
709 elif ch0 == 'e':
710 if sRef == 'enmEffOpSize':
711 return 'IEMMODE';
712 elif ch0 == 'o':
713 if sRef.startswith('off32'):
714 return 'uint32_t';
715 elif sRef == 'cbFrame': # enter
716 return 'uint16_t';
717 elif sRef == 'cShift': ## @todo risky
718 return 'uint8_t';
719
720 self.raiseProblem('Unknown reference: %s' % (sRef,));
721 return None; # Shut up pylint 2.16.2.
722
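    # How the prefix heuristics above map typical references (illustrative only):
    #   'u32Disp'   -> 'uint32_t'     'fRepPrefix' -> 'bool'
    #   'i64Addend' -> 'int64_t'      'GCPtrMem'   -> 'uint64_t'
    #   'pfnAImpl'  -> 'uintptr_t'    'pVCpu->iem.s.uFpuOpcode' -> 'uint16_t'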
723 def analyzeCallToType(self, sFnRef):
724 """
725 Determines the type of an indirect function call.
726 """
727 assert sFnRef[0] == 'p';
728
729 #
730 # Simple?
731 #
732 if sFnRef.find('-') < 0:
733 oDecoderFunction = self.oParent.oMcBlock.oFunction;
734
735 # Try the argument list of the function definition macro invocation first.
736 iArg = 2;
737 while iArg < len(oDecoderFunction.asDefArgs):
738 if sFnRef == oDecoderFunction.asDefArgs[iArg]:
739 return oDecoderFunction.asDefArgs[iArg - 1];
740 iArg += 1;
741
742 # Then check for a line that includes the word and looks like a variable declaration.
743 oRe = re.compile(' +(P[A-Z0-9_]+|const +IEMOP[A-Z0-9_]+ *[*]) +(const |) *' + sFnRef + ' *(;|=)');
744 for sLine in oDecoderFunction.asLines:
745 oMatch = oRe.match(sLine);
746 if oMatch:
747 if not oMatch.group(1).startswith('const'):
748 return oMatch.group(1);
749 return 'PC' + oMatch.group(1)[len('const ') : -1].strip();
750
751 #
752 # Deal with the pImpl->pfnXxx:
753 #
754 elif sFnRef.startswith('pImpl->pfn'):
755 sMember = sFnRef[len('pImpl->') : ];
756 sBaseType = self.analyzeCallToType('pImpl');
757 offBits = sMember.rfind('U') + 1;
758 if sBaseType == 'PCIEMOPBINSIZES': return 'PFNIEMAIMPLBINU' + sMember[offBits:];
759 if sBaseType == 'PCIEMOPUNARYSIZES': return 'PFNIEMAIMPLUNARYU' + sMember[offBits:];
760 if sBaseType == 'PCIEMOPSHIFTSIZES': return 'PFNIEMAIMPLSHIFTU' + sMember[offBits:];
761 if sBaseType == 'PCIEMOPSHIFTDBLSIZES': return 'PFNIEMAIMPLSHIFTDBLU' + sMember[offBits:];
762 if sBaseType == 'PCIEMOPMULDIVSIZES': return 'PFNIEMAIMPLMULDIVU' + sMember[offBits:];
763 if sBaseType == 'PCIEMOPMEDIAF3': return 'PFNIEMAIMPLMEDIAF3U' + sMember[offBits:];
764 if sBaseType == 'PCIEMOPMEDIAOPTF3': return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:];
765 if sBaseType == 'PCIEMOPMEDIAOPTF2': return 'PFNIEMAIMPLMEDIAOPTF2U' + sMember[offBits:];
766 if sBaseType == 'PCIEMOPMEDIAOPTF3IMM8': return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:] + 'IMM8';
767 if sBaseType == 'PCIEMOPBLENDOP': return 'PFNIEMAIMPLAVXBLENDU' + sMember[offBits:];
768
769 self.raiseProblem('Unknown call reference: %s::%s (%s)' % (sBaseType, sMember, sFnRef,));
770
771 self.raiseProblem('Unknown call reference: %s' % (sFnRef,));
772 return None; # Shut up pylint 2.16.2.
773
774 def analyze8BitGRegStmt(self, oStmt):
775 """
776 Gets the 8-bit general purpose register access details of the given statement.
777 ASSUMES the statement is one accessing an 8-bit GREG.
778 """
779 idxReg = 0;
780 if ( oStmt.sName.find('_FETCH_') > 0
781 or oStmt.sName.find('_REF_') > 0
782 or oStmt.sName.find('_TO_LOCAL') > 0):
783 idxReg = 1;
784
785 sRegRef = oStmt.asParams[idxReg];
786 if sRegRef.startswith('IEM_GET_MODRM_RM') or sRegRef.startswith('IEM_GET_MODRM_REG'):
787 asBits = [sBit.strip() for sBit in sRegRef.replace('(', ',').replace(')', '').split(',')];
788 if len(asBits) != 3 or asBits[1] != 'pVCpu' or (asBits[0] != 'IEM_GET_MODRM_RM' and asBits[0] != 'IEM_GET_MODRM_REG'):
789 self.raiseProblem('Unexpected reference: %s (asBits=%s)' % (sRegRef, asBits));
790 sOrgExpr = asBits[0] + '_EX8(pVCpu, ' + asBits[2] + ')';
791 else:
792 sOrgExpr = '((%s) < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX) ? (%s) : (%s) + 12)' % (sRegRef, sRegRef, sRegRef);
793
794 if sRegRef.find('IEM_GET_MODRM_RM') >= 0: sStdRef = 'bRmRm8Ex';
795 elif sRegRef.find('IEM_GET_MODRM_REG') >= 0: sStdRef = 'bRmReg8Ex';
796 elif sRegRef == 'X86_GREG_xAX': sStdRef = 'bGregXAx8Ex';
797 elif sRegRef == 'X86_GREG_xCX': sStdRef = 'bGregXCx8Ex';
798 elif sRegRef == 'X86_GREG_xSP': sStdRef = 'bGregXSp8Ex';
799 elif sRegRef == 'iFixedReg': sStdRef = 'bFixedReg8Ex';
800 else:
801 self.warning('analyze8BitGRegStmt: sRegRef=%s -> bOther8Ex; %s %s; sOrgExpr=%s'
802 % (sRegRef, oStmt.sName, oStmt.asParams, sOrgExpr,));
803 sStdRef = 'bOther8Ex';
804
805 #print('analyze8BitGRegStmt: %s %s; sRegRef=%s\n -> idxReg=%s sOrgExpr=%s sStdRef=%s'
806 # % (oStmt.sName, oStmt.asParams, sRegRef, idxReg, sOrgExpr, sStdRef));
807 return (idxReg, sOrgExpr, sStdRef);
808
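    # Illustrative result of the fixed-register branch above: for 'X86_GREG_xCX' the
    # returned sOrgExpr is
    #   ((X86_GREG_xCX) < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX) ? (X86_GREG_xCX) : (X86_GREG_xCX) + 12)
    # which yields an extended 8-bit register index in the 0..19 range, which is why
    # a uint8_t parameter type suffices for these references.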
809
810 ## Maps memory related MCs to info for FLAT conversion.
811 ## This is used in 64-bit and flat 32-bit variants to skip the unnecessary
812 ## segmentation checking for every memory access. Only applied to accesses
813 ## via ES, DS and SS. FS, GS and CS get the full segmentation treatment;
814 ## the latter (CS) is just to keep things simple (we could safely fetch via
815 ## it, but only in 64-bit mode could we safely write via it, IIRC).
816 kdMemMcToFlatInfo = {
817 'IEM_MC_FETCH_MEM_U8': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8' ),
818 'IEM_MC_FETCH_MEM16_U8': ( 1, 'IEM_MC_FETCH_MEM16_FLAT_U8' ),
819 'IEM_MC_FETCH_MEM32_U8': ( 1, 'IEM_MC_FETCH_MEM32_FLAT_U8' ),
820 'IEM_MC_FETCH_MEM_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16' ),
821 'IEM_MC_FETCH_MEM_U16_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_DISP' ),
822 'IEM_MC_FETCH_MEM_I16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I16' ),
823 'IEM_MC_FETCH_MEM_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32' ),
824 'IEM_MC_FETCH_MEM_U32_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_DISP' ),
825 'IEM_MC_FETCH_MEM_I32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I32' ),
826 'IEM_MC_FETCH_MEM_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64' ),
827 'IEM_MC_FETCH_MEM_U64_DISP': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_DISP' ),
828 'IEM_MC_FETCH_MEM_U64_ALIGN_U128': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_ALIGN_U128' ),
829 'IEM_MC_FETCH_MEM_I64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_I64' ),
830 'IEM_MC_FETCH_MEM_R32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R32' ),
831 'IEM_MC_FETCH_MEM_R64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R64' ),
832 'IEM_MC_FETCH_MEM_R80': ( 1, 'IEM_MC_FETCH_MEM_FLAT_R80' ),
833 'IEM_MC_FETCH_MEM_D80': ( 1, 'IEM_MC_FETCH_MEM_FLAT_D80' ),
834 'IEM_MC_FETCH_MEM_U128': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128' ),
835 'IEM_MC_FETCH_MEM_U128_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_NO_AC' ),
836 'IEM_MC_FETCH_MEM_U128_ALIGN_SSE': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_ALIGN_SSE' ),
837 'IEM_MC_FETCH_MEM_XMM': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM' ),
838 'IEM_MC_FETCH_MEM_XMM_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_NO_AC' ),
839 'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE': ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE' ),
840 'IEM_MC_FETCH_MEM_XMM_U32': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32' ),
841 'IEM_MC_FETCH_MEM_XMM_U64': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64' ),
842 'IEM_MC_FETCH_MEM_U256': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256' ),
843 'IEM_MC_FETCH_MEM_U256_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_NO_AC' ),
844 'IEM_MC_FETCH_MEM_U256_ALIGN_AVX': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_ALIGN_AVX' ),
845 'IEM_MC_FETCH_MEM_YMM': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM' ),
846 'IEM_MC_FETCH_MEM_YMM_NO_AC': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_NO_AC' ),
847 'IEM_MC_FETCH_MEM_YMM_ALIGN_AVX': ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX' ),
848 'IEM_MC_FETCH_MEM_U8_ZX_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U16' ),
849 'IEM_MC_FETCH_MEM_U8_ZX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U32' ),
850 'IEM_MC_FETCH_MEM_U8_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U64' ),
851 'IEM_MC_FETCH_MEM_U16_ZX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U32' ),
852 'IEM_MC_FETCH_MEM_U16_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U64' ),
853 'IEM_MC_FETCH_MEM_U32_ZX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_ZX_U64' ),
854 'IEM_MC_FETCH_MEM_U8_SX_U16': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U16' ),
855 'IEM_MC_FETCH_MEM_U8_SX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U32' ),
856 'IEM_MC_FETCH_MEM_U8_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U64' ),
857 'IEM_MC_FETCH_MEM_U16_SX_U32': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U32' ),
858 'IEM_MC_FETCH_MEM_U16_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U64' ),
859 'IEM_MC_FETCH_MEM_U32_SX_U64': ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_SX_U64' ),
860 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128': ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128' ),
861 'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM': ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE_AND_XREG_XMM' ),
862 'IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM': ( 3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32_AND_XREG_XMM' ),
863 'IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM': ( 3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64_AND_XREG_XMM' ),
864 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64':
865 ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_RAX_RDX_U64' ),
866 'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64':
867 ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64' ),
868 'IEM_MC_STORE_MEM_U8': ( 0, 'IEM_MC_STORE_MEM_FLAT_U8' ),
869 'IEM_MC_STORE_MEM_U16': ( 0, 'IEM_MC_STORE_MEM_FLAT_U16' ),
870 'IEM_MC_STORE_MEM_U32': ( 0, 'IEM_MC_STORE_MEM_FLAT_U32' ),
871 'IEM_MC_STORE_MEM_U64': ( 0, 'IEM_MC_STORE_MEM_FLAT_U64' ),
872 'IEM_MC_STORE_MEM_U8_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U8_CONST' ),
873 'IEM_MC_STORE_MEM_U16_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U16_CONST' ),
874 'IEM_MC_STORE_MEM_U32_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U32_CONST' ),
875 'IEM_MC_STORE_MEM_U64_CONST': ( 0, 'IEM_MC_STORE_MEM_FLAT_U64_CONST' ),
876 'IEM_MC_STORE_MEM_U128': ( 0, 'IEM_MC_STORE_MEM_FLAT_U128' ),
877 'IEM_MC_STORE_MEM_U128_NO_AC': ( 0, 'IEM_MC_STORE_MEM_FLAT_U128_NO_AC' ),
878 'IEM_MC_STORE_MEM_U128_ALIGN_SSE': ( 0, 'IEM_MC_STORE_MEM_FLAT_U128_ALIGN_SSE' ),
879 'IEM_MC_STORE_MEM_U256': ( 0, 'IEM_MC_STORE_MEM_FLAT_U256' ),
880 'IEM_MC_STORE_MEM_U256_NO_AC': ( 0, 'IEM_MC_STORE_MEM_FLAT_U256_NO_AC' ),
881 'IEM_MC_STORE_MEM_U256_ALIGN_AVX': ( 0, 'IEM_MC_STORE_MEM_FLAT_U256_ALIGN_AVX' ),
882 'IEM_MC_MEM_MAP_D80_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_D80_WO' ),
883 'IEM_MC_MEM_MAP_I16_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_I16_WO' ),
884 'IEM_MC_MEM_MAP_I32_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_I32_WO' ),
885 'IEM_MC_MEM_MAP_I64_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_I64_WO' ),
886 'IEM_MC_MEM_MAP_R32_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_R32_WO' ),
887 'IEM_MC_MEM_MAP_R64_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_R64_WO' ),
888 'IEM_MC_MEM_MAP_R80_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_R80_WO' ),
889 'IEM_MC_MEM_MAP_U8_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_ATOMIC' ),
890 'IEM_MC_MEM_MAP_U8_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RW' ),
891 'IEM_MC_MEM_MAP_U8_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RO' ),
892 'IEM_MC_MEM_MAP_U8_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_WO' ),
893 'IEM_MC_MEM_MAP_U16_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_ATOMIC' ),
894 'IEM_MC_MEM_MAP_U16_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RW' ),
895 'IEM_MC_MEM_MAP_U16_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RO' ),
896 'IEM_MC_MEM_MAP_U16_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_WO' ),
897 'IEM_MC_MEM_MAP_U32_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_ATOMIC' ),
898 'IEM_MC_MEM_MAP_U32_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RW' ),
899 'IEM_MC_MEM_MAP_U32_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RO' ),
900 'IEM_MC_MEM_MAP_U32_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_WO' ),
901 'IEM_MC_MEM_MAP_U64_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_ATOMIC' ),
902 'IEM_MC_MEM_MAP_U64_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RW' ),
903 'IEM_MC_MEM_MAP_U64_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RO' ),
904 'IEM_MC_MEM_MAP_U64_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_WO' ),
905 'IEM_MC_MEM_MAP_U128_ATOMIC': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_ATOMIC' ),
906 'IEM_MC_MEM_MAP_U128_RW': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_RW' ),
907 'IEM_MC_MEM_MAP_U128_RO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_RO' ),
908 'IEM_MC_MEM_MAP_U128_WO': ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_WO' ),
909 'IEM_MC_MEM_MAP_EX': ( 3, 'IEM_MC_MEM_FLAT_MAP_EX' ),
910 };
911
912 kdMemMcToFlatInfoStack = {
913 'IEM_MC_PUSH_U16': ( 'IEM_MC_FLAT32_PUSH_U16', 'IEM_MC_FLAT64_PUSH_U16', ),
914 'IEM_MC_PUSH_U32': ( 'IEM_MC_FLAT32_PUSH_U32', 'IEM_MC_PUSH_U32', ),
915 'IEM_MC_PUSH_U64': ( 'IEM_MC_PUSH_U64', 'IEM_MC_FLAT64_PUSH_U64', ),
916 'IEM_MC_PUSH_U32_SREG': ( 'IEM_MC_FLAT32_PUSH_U32_SREG', 'IEM_MC_PUSH_U32_SREG' ),
917 'IEM_MC_POP_GREG_U16': ( 'IEM_MC_FLAT32_POP_GREG_U16', 'IEM_MC_FLAT64_POP_GREG_U16', ),
918 'IEM_MC_POP_GREG_U32': ( 'IEM_MC_FLAT32_POP_GREG_U32', 'IEM_MC_POP_GREG_U32', ),
919 'IEM_MC_POP_GREG_U64': ( 'IEM_MC_POP_GREG_U64', 'IEM_MC_FLAT64_POP_GREG_U64', ),
920 };
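    # The pair is indexed by 'flat 64-bit stack?' when morphing, e.g. (sketch):
    #   kdMemMcToFlatInfoStack['IEM_MC_PUSH_U16'][0] -> 'IEM_MC_FLAT32_PUSH_U16'
    #   kdMemMcToFlatInfoStack['IEM_MC_PUSH_U16'][1] -> 'IEM_MC_FLAT64_PUSH_U16'
    # See the int(self.sVariation in self.kdVariationsWithFlat64StackAddress) lookup
    # in analyzeMorphStmtForThreaded() below.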
921
922 kdThreadedCalcRmEffAddrMcByVariation = {
923 ksVariation_16: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
924 ksVariation_16f: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
925 ksVariation_16_Pre386: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
926 ksVariation_16f_Pre386: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
927 ksVariation_32_Addr16: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
928 ksVariation_32f_Addr16: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
929 ksVariation_16_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
930 ksVariation_16f_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
931 ksVariation_32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
932 ksVariation_32f: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
933 ksVariation_32_Flat: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
934 ksVariation_32f_Flat: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
935 ksVariation_64: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
936 ksVariation_64f: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
937 ksVariation_64_FsGs: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
938 ksVariation_64f_FsGs: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
939 ksVariation_64_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32', ## @todo How did this work again...
940 ksVariation_64f_Addr32: 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32',
941 };
942
943 def analyzeMorphStmtForThreaded(self, aoStmts, dState, iParamRef = 0):
944 """
945 Transforms (copy) the statements into those for the threaded function.
946
947 Returns list/tree of statements (aoStmts is not modified) and the new
948 iParamRef value.
949 """
950 #
951 # We'll be traversing aoParamRefs in parallel to the statements, so we
952 # must match the traversal in analyzeFindThreadedParamRefs exactly.
953 #
954 #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
955 aoThreadedStmts = [];
956 for oStmt in aoStmts:
957 # Skip C++ statements that are purely related to decoding.
958 if not oStmt.isCppStmt() or not oStmt.fDecode:
959 # Copy the statement. Make a deep copy to make sure we've got our own
960 # copies of all instance variables, even if a bit overkill at the moment.
961 oNewStmt = copy.deepcopy(oStmt);
962 aoThreadedStmts.append(oNewStmt);
963 #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));
964
965 # If the statement has parameter references, process the relevant parameters.
966 # We grab the references relevant to this statement and apply them in reverse order.
967 if iParamRef < len(self.aoParamRefs) and self.aoParamRefs[iParamRef].oStmt == oStmt:
968 iParamRefFirst = iParamRef;
969 while True:
970 iParamRef += 1;
971 if iParamRef >= len(self.aoParamRefs) or self.aoParamRefs[iParamRef].oStmt != oStmt:
972 break;
973
974 #print('iParamRefFirst=%s iParamRef=%s' % (iParamRefFirst, iParamRef));
975 for iCurRef in range(iParamRef - 1, iParamRefFirst - 1, -1):
976 oCurRef = self.aoParamRefs[iCurRef];
977 if oCurRef.iParam is not None:
978 assert oCurRef.oStmt == oStmt;
979 #print('iCurRef=%s iParam=%s sOrgRef=%s' % (iCurRef, oCurRef.iParam, oCurRef.sOrgRef));
980 sSrcParam = oNewStmt.asParams[oCurRef.iParam];
981 assert ( sSrcParam[oCurRef.offParam : oCurRef.offParam + len(oCurRef.sOrgRef)] == oCurRef.sOrgRef
982 or oCurRef.fCustomRef), \
983 'offParam=%s sOrgRef=%s iParam=%s oStmt.sName=%s sSrcParam=%s<eos>' \
984 % (oCurRef.offParam, oCurRef.sOrgRef, oCurRef.iParam, oStmt.sName, sSrcParam);
985 oNewStmt.asParams[oCurRef.iParam] = sSrcParam[0 : oCurRef.offParam] \
986 + oCurRef.sNewName \
987 + sSrcParam[oCurRef.offParam + len(oCurRef.sOrgRef) : ];
988
989 # Morph IEM_MC_CALC_RM_EFF_ADDR into IEM_MC_CALC_RM_EFF_ADDR_THREADED ...
990 if oNewStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
991 oNewStmt.sName = self.kdThreadedCalcRmEffAddrMcByVariation[self.sVariation];
992 assert len(oNewStmt.asParams) == 3;
993
994 if self.sVariation in self.kdVariationsWithFlatAddr16:
995 oNewStmt.asParams = [
996 oNewStmt.asParams[0], oNewStmt.asParams[1], self.dParamRefs['u16Disp'][0].sNewName,
997 ];
998 else:
999 sSibAndMore = self.dParamRefs['bSib'][0].sNewName; # Merge bSib and 2nd part of cbImmAndRspOffset.
1000 if oStmt.asParams[2] not in ('0', '1', '2', '4'):
1001 sSibAndMore = '(%s) | ((%s) & 0x0f00)' % (self.dParamRefs['bSib'][0].sNewName, oStmt.asParams[2]);
1002
1003 if self.sVariation in self.kdVariationsWithFlatAddr32No64:
1004 oNewStmt.asParams = [
1005 oNewStmt.asParams[0], oNewStmt.asParams[1], sSibAndMore, self.dParamRefs['u32Disp'][0].sNewName,
1006 ];
1007 else:
1008 oNewStmt.asParams = [
1009 oNewStmt.asParams[0], self.dParamRefs['bRmEx'][0].sNewName, sSibAndMore,
1010 self.dParamRefs['u32Disp'][0].sNewName, self.dParamRefs['cbInstr'][0].sNewName,
1011 ];
1012 # ... and IEM_MC_ADVANCE_RIP_AND_FINISH into *_THREADED_PCxx[_WITH_FLAGS] ...
1013 elif ( oNewStmt.sName
1014 in ('IEM_MC_ADVANCE_RIP_AND_FINISH',
1015 'IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH',
1016 'IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH', 'IEM_MC_SET_RIP_U64_AND_FINISH', )):
1017 if oNewStmt.sName not in ('IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH',
1018 'IEM_MC_SET_RIP_U64_AND_FINISH', ):
1019 oNewStmt.asParams.append(self.dParamRefs['cbInstr'][0].sNewName);
1020 if ( oNewStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH', )
1021 and self.sVariation not in self.kdVariationsOnlyPre386):
1022 oNewStmt.asParams.append(self.dParamRefs['pVCpu->iem.s.enmEffOpSize'][0].sNewName);
1023 oNewStmt.sName += '_THREADED';
1024 if self.sVariation in self.kdVariationsOnly64NoFlags:
1025 oNewStmt.sName += '_PC64';
1026 elif self.sVariation in self.kdVariationsOnly64WithFlags:
1027 oNewStmt.sName += '_PC64_WITH_FLAGS';
1028 elif self.sVariation in self.kdVariationsOnlyPre386NoFlags:
1029 oNewStmt.sName += '_PC16';
1030 elif self.sVariation in self.kdVariationsOnlyPre386WithFlags:
1031 oNewStmt.sName += '_PC16_WITH_FLAGS';
1032 elif self.sVariation not in self.kdVariationsWithEflagsCheckingAndClearing:
1033 assert self.sVariation != self.ksVariation_Default;
1034 oNewStmt.sName += '_PC32';
1035 else:
1036 oNewStmt.sName += '_PC32_WITH_FLAGS';
1037
1038 # This is making the wrong branch of conditionals break out of the TB.
1039 if (oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH',
1040 'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH')):
1041 sExitTbStatus = 'VINF_SUCCESS';
1042 if self.sVariation in self.kdVariationsWithConditional:
1043 if self.sVariation in self.kdVariationsWithConditionalNoJmp:
1044 if oStmt.sName != 'IEM_MC_ADVANCE_RIP_AND_FINISH':
1045 sExitTbStatus = 'VINF_IEM_REEXEC_BREAK';
1046 elif oStmt.sName == 'IEM_MC_ADVANCE_RIP_AND_FINISH':
1047 sExitTbStatus = 'VINF_IEM_REEXEC_BREAK';
1048 oNewStmt.asParams.append(sExitTbStatus);
1049
1050 # Insert an MC so we can assert the correctness of modified flags annotations on IEM_MC_REF_EFLAGS.
1051 if 'IEM_MC_ASSERT_EFLAGS' in dState:
1052 aoThreadedStmts.insert(len(aoThreadedStmts) - 1,
1053 iai.McStmtAssertEFlags(self.oParent.oMcBlock.oInstruction));
1054
1055 # ... and IEM_MC_*_GREG_U8 into *_THREADED w/ reworked index taking REX into account
1056 elif oNewStmt.sName.startswith('IEM_MC_') and oNewStmt.sName.find('_GREG_U8') > 0:
1057 (idxReg, _, sStdRef) = self.analyze8BitGRegStmt(oStmt); # Don't use oNewStmt as it has been modified!
1058 oNewStmt.asParams[idxReg] = self.dParamRefs[sStdRef][0].sNewName;
1059 oNewStmt.sName += '_THREADED';
1060
1061 # ... and IEM_MC_CALL_CIMPL_[0-5] and IEM_MC_DEFER_TO_CIMPL_[0-5]_RET into *_THREADED ...
1062 elif oNewStmt.sName.startswith('IEM_MC_CALL_CIMPL_') or oNewStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_'):
1063 oNewStmt.sName += '_THREADED';
1064 oNewStmt.idxFn += 1;
1065 oNewStmt.idxParams += 1;
1066 oNewStmt.asParams.insert(0, self.dParamRefs['cbInstr'][0].sNewName);
1067
1068 # ... and in FLAT modes we must morph memory access into FLAT accesses ...
1069 elif ( self.sVariation in self.kdVariationsWithFlatAddress
1070 and ( oNewStmt.sName.startswith('IEM_MC_FETCH_MEM')
1071 or (oNewStmt.sName.startswith('IEM_MC_STORE_MEM_') and oNewStmt.sName.find('_BY_REF') < 0)
1072 or oNewStmt.sName.startswith('IEM_MC_MEM_MAP') )):
1073 idxEffSeg = self.kdMemMcToFlatInfo[oNewStmt.sName][0];
1074 if idxEffSeg != -1:
1075 if ( oNewStmt.asParams[idxEffSeg].find('iEffSeg') < 0
1076 and oNewStmt.asParams[idxEffSeg] not in ('X86_SREG_ES', ) ):
1077 self.raiseProblem('Expected iEffSeg as param #%d to %s: %s'
1078 % (idxEffSeg + 1, oNewStmt.sName, oNewStmt.asParams[idxEffSeg],));
1079 oNewStmt.asParams.pop(idxEffSeg);
1080 oNewStmt.sName = self.kdMemMcToFlatInfo[oNewStmt.sName][1];
1081
1082 # ... PUSH and POP also need flat variants, but these differ a little.
1083 elif ( self.sVariation in self.kdVariationsWithFlatStackAddress
1084 and ( (oNewStmt.sName.startswith('IEM_MC_PUSH') and oNewStmt.sName.find('_FPU') < 0)
1085 or oNewStmt.sName.startswith('IEM_MC_POP'))):
1086 oNewStmt.sName = self.kdMemMcToFlatInfoStack[oNewStmt.sName][int(self.sVariation in
1087 self.kdVariationsWithFlat64StackAddress)];
1088
1089 # Add EFLAGS usage annotations to relevant MCs.
1090 elif oNewStmt.sName in ('IEM_MC_COMMIT_EFLAGS', 'IEM_MC_REF_EFLAGS', 'IEM_MC_FETCH_EFLAGS'):
1091 oInstruction = self.oParent.oMcBlock.oInstruction;
1092 oNewStmt.sName += '_EX';
1093 oNewStmt.asParams.append(oInstruction.getTestedFlagsCStyle()); # Shall crash and burn if oInstruction is
1094 oNewStmt.asParams.append(oInstruction.getModifiedFlagsCStyle()); # None. Fix the IEM decoder code.
1095
1096 # For IEM_MC_REF_EFLAGS we need to emit an MC before the ..._FINISH
1097 if oNewStmt.sName == 'IEM_MC_REF_EFLAGS_EX':
1098 dState['IEM_MC_ASSERT_EFLAGS'] = True;
1099
1100 # Process branches of conditionals recursively.
1101 if isinstance(oStmt, iai.McStmtCond):
1102 (oNewStmt.aoIfBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoIfBranch, dState, iParamRef);
1103 if oStmt.aoElseBranch:
1104 (oNewStmt.aoElseBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoElseBranch,
1105 dState, iParamRef);
1106
1107 return (aoThreadedStmts, iParamRef);
1108
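    # A morphing sketch (illustrative, 64-bit variation): the decoder statement
    #   IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0)
    # is rewritten to
    #   IEM_MC_CALC_RM_EFF_ADDR_THREADED_64(GCPtrEffSrc, bRmExP, bSibP, u32DispP, cbInstrP)
    # where the *P names are the packed threaded-function parameters produced by
    # analyzeConsolidateThreadedParamRefs() below.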
1109
1110 def analyzeConsolidateThreadedParamRefs(self):
1111 """
1112 Consolidate threaded function parameter references into a dictionary
1113 with lists of the references to each variable/field.
1114 """
1115 # Gather unique parameters.
1116 self.dParamRefs = {};
1117 for oRef in self.aoParamRefs:
1118 if oRef.sStdRef not in self.dParamRefs:
1119 self.dParamRefs[oRef.sStdRef] = [oRef,];
1120 else:
1121 self.dParamRefs[oRef.sStdRef].append(oRef);
1122
1123 # Generate names for them for use in the threaded function.
1124 dParamNames = {};
1125 for sName, aoRefs in self.dParamRefs.items():
1126 # Morph the reference expression into a name.
1127 if sName.startswith('IEM_GET_MODRM_REG'): sName = 'bModRmRegP';
1128 elif sName.startswith('IEM_GET_MODRM_RM'): sName = 'bModRmRmP';
1129 elif sName.startswith('IEM_GET_MODRM_REG_8'): sName = 'bModRmReg8P';
1130 elif sName.startswith('IEM_GET_MODRM_RM_8'): sName = 'bModRmRm8P';
1131 elif sName.startswith('IEM_GET_EFFECTIVE_VVVV'): sName = 'bEffVvvvP';
1132 elif sName.find('.') >= 0 or sName.find('->') >= 0:
1133 sName = sName[max(sName.rfind('.'), sName.rfind('>')) + 1 : ] + 'P';
1134 else:
1135 sName += 'P';
1136
1137 # Ensure it's unique.
1138 if sName in dParamNames:
1139 for i in range(10):
1140 if sName + str(i) not in dParamNames:
1141 sName += str(i);
1142 break;
1143 dParamNames[sName] = True;
1144
1145 # Update all the references.
1146 for oRef in aoRefs:
1147 oRef.sNewName = sName;
1148
1149 # Organize them by size too, for the purpose of optimizing them.
1150 dBySize = {} # type: Dict[str, str]
1151 for sStdRef, aoRefs in self.dParamRefs.items():
1152 if aoRefs[0].sType[0] != 'P':
1153 cBits = g_kdTypeInfo[aoRefs[0].sType][0];
1154 assert(cBits <= 64);
1155 else:
1156 cBits = 64;
1157
1158 if cBits not in dBySize:
1159 dBySize[cBits] = [sStdRef,]
1160 else:
1161 dBySize[cBits].append(sStdRef);
1162
1163 # Pack the parameters as best as we can, starting with the largest ones
1164 # and ASSUMING a 64-bit parameter size.
1165 self.cMinParams = 0;
1166 offNewParam = 0;
1167 for cBits in sorted(dBySize.keys(), reverse = True):
1168 for sStdRef in dBySize[cBits]:
1169 if offNewParam == 0 or offNewParam + cBits > 64:
1170 self.cMinParams += 1;
1171 offNewParam = cBits;
1172 else:
1173 offNewParam += cBits;
1174 assert(offNewParam <= 64);
1175
1176 for oRef in self.dParamRefs[sStdRef]:
1177 oRef.iNewParam = self.cMinParams - 1;
1178 oRef.offNewParam = offNewParam - cBits;
1179
1180 # Currently there are a few that require 4 parameters; list these so we can figure out why:
1181 if self.cMinParams >= 4:
1182 print('debug: cMinParams=%s cRawParams=%s - %s:%d'
1183 % (self.cMinParams, len(self.dParamRefs), self.oParent.oMcBlock.sSrcFile, self.oParent.oMcBlock.iBeginLine,));
1184
1185 return True;
1186
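    # Worked packing example (illustrative): parameter references of 32, 16, 8 and 4
    # bits are packed largest-first into a single 64-bit parameter:
    #   32 bits -> iNewParam=0, offNewParam= 0
    #   16 bits -> iNewParam=0, offNewParam=32
    #    8 bits -> iNewParam=0, offNewParam=48
    #    4 bits -> iNewParam=0, offNewParam=56
    # giving cMinParams == 1; anything that no longer fits starts a new parameter.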
1187 ksHexDigits = '0123456789abcdefABCDEF';
1188
1189 def analyzeFindThreadedParamRefs(self, aoStmts): # pylint: disable=too-many-statements
1190 """
1191 Scans the statements for things that have to be passed on to the threaded
1192 function (populates self.aoParamRefs).
1193 """
1194 for oStmt in aoStmts:
1195 # Some statements we can skip altogether.
1196 if isinstance(oStmt, iai.McCppPreProc):
1197 continue;
1198 if oStmt.isCppStmt() and oStmt.fDecode:
1199 continue;
1200 if oStmt.sName in ('IEM_MC_BEGIN',):
1201 continue;
1202
1203 if isinstance(oStmt, iai.McStmtVar):
1204 if oStmt.sValue is None:
1205 continue;
1206 aiSkipParams = { 0: True, 1: True, 3: True };
1207 else:
1208 aiSkipParams = {};
1209
1210 # Several statements have implicit parameters and some have different parameters.
1211 if oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH',
1212 'IEM_MC_REL_JMP_S32_AND_FINISH', 'IEM_MC_CALL_CIMPL_0', 'IEM_MC_CALL_CIMPL_1',
1213 'IEM_MC_CALL_CIMPL_2', 'IEM_MC_CALL_CIMPL_3', 'IEM_MC_CALL_CIMPL_4', 'IEM_MC_CALL_CIMPL_5',
1214 'IEM_MC_DEFER_TO_CIMPL_0_RET', 'IEM_MC_DEFER_TO_CIMPL_1_RET', 'IEM_MC_DEFER_TO_CIMPL_2_RET',
1215 'IEM_MC_DEFER_TO_CIMPL_3_RET', 'IEM_MC_DEFER_TO_CIMPL_4_RET', 'IEM_MC_DEFER_TO_CIMPL_5_RET', ):
1216 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)', 'uint4_t', oStmt, sStdRef = 'cbInstr'));
1217
1218 if ( oStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH',)
1219 and self.sVariation not in self.kdVariationsOnlyPre386):
1220 self.aoParamRefs.append(ThreadedParamRef('pVCpu->iem.s.enmEffOpSize', 'IEMMODE', oStmt));
1221
1222 if oStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
1223 # This is being pretty presumptive about bRm always being the RM byte...
1224 assert len(oStmt.asParams) == 3;
1225 assert oStmt.asParams[1] == 'bRm';
1226
1227 if self.sVariation in self.kdVariationsWithFlatAddr16:
1228 self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
1229 self.aoParamRefs.append(ThreadedParamRef('(uint16_t)uEffAddrInfo' ,
1230 'uint16_t', oStmt, sStdRef = 'u16Disp'));
1231 elif self.sVariation in self.kdVariationsWithFlatAddr32No64:
1232 self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
1233 self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
1234 'uint8_t', oStmt, sStdRef = 'bSib'));
1235 self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
1236 'uint32_t', oStmt, sStdRef = 'u32Disp'));
1237 else:
1238 assert self.sVariation in self.kdVariationsWithAddressOnly64;
1239 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_MODRM_EX(pVCpu, bRm)',
1240 'uint8_t', oStmt, sStdRef = 'bRmEx'));
1241 self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
1242 'uint8_t', oStmt, sStdRef = 'bSib'));
1243 self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
1244 'uint32_t', oStmt, sStdRef = 'u32Disp'));
1245 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)',
1246 'uint4_t', oStmt, sStdRef = 'cbInstr'));
1247 aiSkipParams[1] = True; # Skip the bRm parameter as it is being replaced by bRmEx.
1248
1249 # 8-bit register accesses need to have their index argument reworked to take REX into account.
1250 if oStmt.sName.startswith('IEM_MC_') and oStmt.sName.find('_GREG_U8') > 0:
1251 (idxReg, sOrgRef, sStdRef) = self.analyze8BitGRegStmt(oStmt);
1252 self.aoParamRefs.append(ThreadedParamRef(sOrgRef, 'uint8_t', oStmt, idxReg, sStdRef = sStdRef));
1253 aiSkipParams[idxReg] = True; # Skip the parameter below.
1254
1255 # If in flat mode variation, ignore the effective segment parameter to memory MCs.
1256 if ( self.sVariation in self.kdVariationsWithFlatAddress
1257 and oStmt.sName in self.kdMemMcToFlatInfo
1258 and self.kdMemMcToFlatInfo[oStmt.sName][0] != -1):
1259 aiSkipParams[self.kdMemMcToFlatInfo[oStmt.sName][0]] = True;
1260
1261 # Inspect the target of calls to see if we need to pass down a
1262 # function pointer or function table pointer for it to work.
1263 if isinstance(oStmt, iai.McStmtCall):
1264 if oStmt.sFn[0] == 'p':
1265 self.aoParamRefs.append(ThreadedParamRef(oStmt.sFn, self.analyzeCallToType(oStmt.sFn), oStmt, oStmt.idxFn));
1266 elif ( oStmt.sFn[0] != 'i'
1267 and not oStmt.sFn.startswith('IEMTARGETCPU_EFL_BEHAVIOR_SELECT')
1268 and not oStmt.sFn.startswith('IEM_SELECT_HOST_OR_FALLBACK') ):
1269 self.raiseProblem('Bogus function name in %s: %s' % (oStmt.sName, oStmt.sFn,));
1270 aiSkipParams[oStmt.idxFn] = True;
1271
1272 # Skip the hint parameter (first) for IEM_MC_CALL_CIMPL_X.
1273 if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
1274 assert oStmt.idxFn == 2;
1275 aiSkipParams[0] = True;
1276
1277
1278 # Check all the parameters for bogus references.
1279 for iParam, sParam in enumerate(oStmt.asParams):
1280 if iParam not in aiSkipParams and sParam not in self.oParent.dVariables:
1281 # The parameter may contain a C expression, so we have to try to
1282 # extract the relevant bits, i.e. variables and fields while
1283 # ignoring operators and parentheses.
1284 offParam = 0;
1285 while offParam < len(sParam):
1286 # Is it the start of a C identifier? If so, find the end, but don't stop on field separators (->, .).
1287 ch = sParam[offParam];
1288 if ch.isalpha() or ch == '_':
1289 offStart = offParam;
1290 offParam += 1;
1291 while offParam < len(sParam):
1292 ch = sParam[offParam];
1293 if not ch.isalnum() and ch != '_' and ch != '.':
1294 if ch != '-' or sParam[offParam + 1] != '>':
1295 # Special hack for the 'CTX_SUFF(pVM)' bit in pVCpu->CTX_SUFF(pVM)->xxxx:
1296 if ( ch == '('
1297 and sParam[offStart : offParam + len('(pVM)->')] == 'pVCpu->CTX_SUFF(pVM)->'):
1298 offParam += len('(pVM)->') - 1;
1299 else:
1300 break;
1301 offParam += 1;
1302 offParam += 1;
1303 sRef = sParam[offStart : offParam];
1304
1305 # For register references we pass the full register index instead, as macros
1306 # like IEM_GET_MODRM_REG implicitly reference pVCpu->iem.s.uRexReg, and the
1307 # threaded function will be more efficient if we just pass the register index
1308 # as a 4-bit param.
1309 if ( sRef.startswith('IEM_GET_MODRM')
1310 or sRef.startswith('IEM_GET_EFFECTIVE_VVVV') ):
1311 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1312 if sParam[offParam] != '(':
1313 self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
1314 (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
1315 if asMacroParams is None:
1316 self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
1317 offParam = offCloseParam + 1;
1318 self.aoParamRefs.append(ThreadedParamRef(sParam[offStart : offParam], 'uint8_t',
1319 oStmt, iParam, offStart));
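# For illustration: a reference such as 'IEM_GET_MODRM_REG(pVCpu, bRm)' is captured
# whole here, so the threaded function receives the final (REX-adjusted) register
# index as a small parameter instead of re-deriving it from bRm.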
1320
1321 # We can skip known variables.
1322 elif sRef in self.oParent.dVariables:
1323 pass;
1324
1325 # Skip certain macro invocations.
1326 elif sRef in ('IEM_GET_HOST_CPU_FEATURES',
1327 'IEM_GET_GUEST_CPU_FEATURES',
1328 'IEM_IS_GUEST_CPU_AMD',
1329 'IEM_IS_16BIT_CODE',
1330 'IEM_IS_32BIT_CODE',
1331 'IEM_IS_64BIT_CODE',
1332 ):
1333 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1334 if sParam[offParam] != '(':
1335 self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
1336 (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
1337 if asMacroParams is None:
1338 self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
1339 offParam = offCloseParam + 1;
1340
1341 # Skip any dereference following it, unless it's a predicate like IEM_IS_GUEST_CPU_AMD.
1342 if sRef not in ('IEM_IS_GUEST_CPU_AMD',
1343 'IEM_IS_16BIT_CODE',
1344 'IEM_IS_32BIT_CODE',
1345 'IEM_IS_64BIT_CODE',
1346 ):
1347 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1348 if offParam + 2 <= len(sParam) and sParam[offParam : offParam + 2] == '->':
1349 offParam = iai.McBlock.skipSpacesAt(sParam, offParam + 2, len(sParam));
1350 while offParam < len(sParam) and (sParam[offParam].isalnum() or sParam[offParam] in '_.'):
1351 offParam += 1;
1352
1353 # Skip constants, globals, types (casts), sizeof and macros.
1354 elif ( sRef.startswith('IEM_OP_PRF_')
1355 or sRef.startswith('IEM_ACCESS_')
1356 or sRef.startswith('IEMINT_')
1357 or sRef.startswith('X86_GREG_')
1358 or sRef.startswith('X86_SREG_')
1359 or sRef.startswith('X86_EFL_')
1360 or sRef.startswith('X86_FSW_')
1361 or sRef.startswith('X86_FCW_')
1362 or sRef.startswith('X86_XCPT_')
1363 or sRef.startswith('IEMMODE_')
1364 or sRef.startswith('IEM_F_')
1365 or sRef.startswith('IEM_CIMPL_F_')
1366 or sRef.startswith('g_')
1367 or sRef.startswith('iemAImpl_')
1368 or sRef.startswith('kIemNativeGstReg_')
1369 or sRef in ( 'int8_t', 'int16_t', 'int32_t', 'int64_t',
1370 'INT8_C', 'INT16_C', 'INT32_C', 'INT64_C',
1371 'uint8_t', 'uint16_t', 'uint32_t', 'uint64_t',
1372 'UINT8_C', 'UINT16_C', 'UINT32_C', 'UINT64_C',
1373 'UINT8_MAX', 'UINT16_MAX', 'UINT32_MAX', 'UINT64_MAX',
1374 'INT8_MAX', 'INT16_MAX', 'INT32_MAX', 'INT64_MAX',
1375 'INT8_MIN', 'INT16_MIN', 'INT32_MIN', 'INT64_MIN',
1376 'sizeof', 'NOREF', 'RT_NOREF', 'IEMMODE_64BIT',
1377 'RT_BIT_32', 'RT_BIT_64', 'true', 'false',
1378 'NIL_RTGCPTR',) ):
1379 pass;
1380
1381 # Anything not handled above: any variable (non-field) and any decoder
1382 # fields in IEMCPU will need to be parameterized.
1383 elif ( ( '.' not in sRef
1384 and '-' not in sRef
1385 and sRef not in ('pVCpu', ) )
1386 or iai.McBlock.koReIemDecoderVars.search(sRef) is not None):
1387 self.aoParamRefs.append(ThreadedParamRef(sRef, self.analyzeReferenceToType(sRef),
1388 oStmt, iParam, offStart));
1389 # Number.
1390 elif ch.isdigit():
1391 if ( ch == '0'
1392 and offParam + 2 <= len(sParam)
1393 and sParam[offParam + 1] in 'xX'
1394 and sParam[offParam + 2] in self.ksHexDigits ):
1395 offParam += 2;
1396 while offParam < len(sParam) and sParam[offParam] in self.ksHexDigits:
1397 offParam += 1;
1398 else:
1399 while offParam < len(sParam) and sParam[offParam].isdigit():
1400 offParam += 1;
1401 # Comment?
1402 elif ( ch == '/'
1403 and offParam + 4 <= len(sParam)
1404 and sParam[offParam + 1] == '*'):
1405 offParam += 2;
1406 offNext = sParam.find('*/', offParam);
1407 if offNext < offParam:
1408 self.raiseProblem('Unable to find "*/" in "%s" ("%s")' % (sRef, oStmt.renderCode(),));
1409 offParam = offNext + 2;
1410 # Whatever else.
1411 else:
1412 offParam += 1;
1413
1414 # Traverse the branches of conditionals.
1415 if isinstance(oStmt, iai.McStmtCond):
1416 self.analyzeFindThreadedParamRefs(oStmt.aoIfBranch);
1417 self.analyzeFindThreadedParamRefs(oStmt.aoElseBranch);
1418 return True;
1419
1420 def analyzeVariation(self, aoStmts):
1421 """
1422 2nd part of the analysis, done on each variation.
1423
1424 The variations may differ in parameter requirements and will end up with
1425 slightly different MC sequences. Thus this is done for each variation individually.
1426
1427 Returns dummy True - raises exception on trouble.
1428 """
1429 # Now scan the code for variables and field references that need to
1430 # be passed to the threaded function because they are related to the
1431 # instruction decoding.
1432 self.analyzeFindThreadedParamRefs(aoStmts);
1433 self.analyzeConsolidateThreadedParamRefs();
1434
1435 # Morph the statement stream for the block into what we'll be using in the threaded function.
1436 (self.aoStmtsForThreadedFunction, iParamRef) = self.analyzeMorphStmtForThreaded(aoStmts, {});
1437 if iParamRef != len(self.aoParamRefs):
1438 raise Exception('iParamRef=%s, expected %s!' % (iParamRef, len(self.aoParamRefs),));
1439
1440 return True;
1441
1442 def emitThreadedCallStmts(self, cchIndent, sCallVarNm = None):
1443 """
1444 Produces generic C++ statements that emit a call to the threaded function
1445 variation and any subsequent checks that may be necessary after that.
1446
1447 The sCallVarNm is the name of the variable with the threaded function
1448 to call. This is for the case where all the variations have the same
1449 parameters and only the threaded function number differs.
1450 """
1451 aoStmts = [
1452 iai.McCppCall('IEM_MC2_BEGIN_EMIT_CALLS',
1453 ['1' if 'IEM_CIMPL_F_CHECK_IRQ_BEFORE' in self.oParent.dsCImplFlags else '0'],
1454 cchIndent = cchIndent), # Scope and a hook for various stuff.
1455 ];
1456
1457 # The call to the threaded function.
1458 asCallArgs = [ self.getIndexName() if not sCallVarNm else sCallVarNm, ];
1459 for iParam in range(self.cMinParams):
1460 asFrags = [];
1461 for aoRefs in self.dParamRefs.values():
1462 oRef = aoRefs[0];
1463 if oRef.iNewParam == iParam:
1464 sCast = '(uint64_t)';
1465 if oRef.sType in ('int8_t', 'int16_t', 'int32_t'): # Make sure these don't get sign-extended.
1466 sCast = '(uint64_t)(u' + oRef.sType + ')';
1467 if oRef.offNewParam == 0:
1468 asFrags.append(sCast + '(' + oRef.sOrgRef + ')');
1469 else:
1470 asFrags.append('(%s(%s) << %s)' % (sCast, oRef.sOrgRef, oRef.offNewParam));
1471 assert asFrags;
1472 asCallArgs.append(' | '.join(asFrags));
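# For illustration, a single packed argument built above might look something like:
#     (uint64_t)(bRm) | ((uint64_t)((uint32_t)uEffAddrInfo) << 8)
# i.e. the consolidated references OR'ed together at their assigned bit offsets.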
1473
1474 aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_%s' % (len(asCallArgs) - 1,), asCallArgs, cchIndent = cchIndent));
1475
1476 # 2023-11-28: This has to be done AFTER the CIMPL call, so we have to
1477 # emit this mode check from the compilation loop. On the
1478 # plus side, this means we eliminate an unnecessary call at
1479 # the end of the TB. :-)
1480 ## For CIMPL stuff, we need to consult the associated IEM_CIMPL_F_XXX
1481 ## mask and maybe emit additional checks.
1482 #if ( 'IEM_CIMPL_F_MODE' in self.oParent.dsCImplFlags
1483 # or 'IEM_CIMPL_F_XCPT' in self.oParent.dsCImplFlags
1484 # or 'IEM_CIMPL_F_VMEXIT' in self.oParent.dsCImplFlags):
1485 # aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_1', ( 'kIemThreadedFunc_BltIn_CheckMode', 'pVCpu->iem.s.fExec', ),
1486 # cchIndent = cchIndent));
1487
1488 sCImplFlags = ' | '.join(self.oParent.dsCImplFlags.keys());
1489 if not sCImplFlags:
1490 sCImplFlags = '0';
1491 aoStmts.append(iai.McCppCall('IEM_MC2_END_EMIT_CALLS', ( sCImplFlags, ), cchIndent = cchIndent)); # For closing the scope.
1492
1493 # Emit fEndTb = true or fTbBranched = true if any of the CIMPL flags
1494 # indicates we should do so.
1495 # Note! iemThreadedRecompilerMcDeferToCImpl0 duplicates work done here.
1496 asEndTbFlags = [];
1497 asTbBranchedFlags = [];
1498 for sFlag in self.oParent.dsCImplFlags:
1499 if self.kdCImplFlags[sFlag] is True:
1500 asEndTbFlags.append(sFlag);
1501 elif sFlag.startswith('IEM_CIMPL_F_BRANCH_'):
1502 asTbBranchedFlags.append(sFlag);
1503 if ( asTbBranchedFlags
1504 and ( 'IEM_CIMPL_F_BRANCH_CONDITIONAL' not in asTbBranchedFlags
1505 or self.sVariation not in self.kdVariationsWithConditionalNoJmp)):
1506 aoStmts.append(iai.McCppGeneric('iemThreadedSetBranched(pVCpu, %s);'
1507 % ((' | '.join(asTbBranchedFlags)).replace('IEM_CIMPL_F_BRANCH', 'IEMBRANCHED_F'),),
1508 cchIndent = cchIndent)); # Inline fn saves ~2 seconds for gcc 13/dbg (1m13s vs 1m15s).
1509 if asEndTbFlags:
1510 aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.fEndTb = true; /* %s */' % (','.join(asEndTbFlags),),
1511 cchIndent = cchIndent));
1512
1513 if 'IEM_CIMPL_F_CHECK_IRQ_AFTER' in self.oParent.dsCImplFlags:
1514 aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.cInstrTillIrqCheck = 0;', cchIndent = cchIndent));
1515
1516 return aoStmts;
1517
1518
1519class ThreadedFunction(object):
1520 """
1521 A threaded function.
1522 """
1523
1524 def __init__(self, oMcBlock: iai.McBlock) -> None:
1525 self.oMcBlock = oMcBlock # type: iai.McBlock
1526 # The remaining fields are only useful after analyze() has been called:
1527 ## Variations for this block. There is at least one.
1528 self.aoVariations = [] # type: List[ThreadedFunctionVariation]
1529 ## Variation dictionary containing the same as aoVariations.
1530 self.dVariations = {} # type: Dict[str, ThreadedFunctionVariation]
1531 ## Dictionary of local variables (IEM_MC_LOCAL[_CONST]) and call arguments (IEM_MC_ARG*).
1532 self.dVariables = {} # type: Dict[str, iai.McStmtVar]
1533 ## Dictionary with any IEM_CIMPL_F_XXX flags explicitly advertised in the code block
1534 ## and those determined by analyzeCodeOperation().
1535 self.dsCImplFlags = {} # type: Dict[str, bool]
1536
1537 @staticmethod
1538 def dummyInstance():
1539 """ Gets a dummy instance. """
1540 return ThreadedFunction(iai.McBlock('null', 999999999, 999999999,
1541 iai.DecoderFunction('null', 999999999, 'nil', ('','')), 999999999));
1542
1543 def hasWithFlagsCheckingAndClearingVariation(self):
1544 """
1545 Check whether there are one or more flags-checking-and-clearing
1546 variations for this threaded function.
1547 """
1548 for sVarWithFlags in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
1549 if sVarWithFlags in self.dVariations:
1550 return True;
1551 return False;
1552
1553 #
1554 # Analysis and code morphing.
1555 #
1556
1557 def raiseProblem(self, sMessage):
1558 """ Raises a problem. """
1559 raise Exception('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1560
1561 def error(self, sMessage, oGenerator):
1562 """ Emits an error via the generator object, causing it to fail. """
1563 oGenerator.rawError('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1564
1565 def warning(self, sMessage):
1566 """ Emits a warning. """
1567 print('%s:%s: warning: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1568
1569 def analyzeFindVariablesAndCallArgs(self, aoStmts: List[iai.McStmt]) -> bool:
1570 """ Scans the statements for MC variables and call arguments. """
1571 for oStmt in aoStmts:
1572 if isinstance(oStmt, iai.McStmtVar):
1573 if oStmt.sVarName in self.dVariables:
1574 raise Exception('Variable %s is defined more than once!' % (oStmt.sVarName,));
1575 self.dVariables[oStmt.sVarName] = oStmt.sVarName;
1576
1577 # There shouldn't be any variables or arguments declared inside if/
1578 # else blocks, but scan them too to be on the safe side.
1579 if isinstance(oStmt, iai.McStmtCond):
1580 #cBefore = len(self.dVariables);
1581 self.analyzeFindVariablesAndCallArgs(oStmt.aoIfBranch);
1582 self.analyzeFindVariablesAndCallArgs(oStmt.aoElseBranch);
1583 #if len(self.dVariables) != cBefore:
1584 # raise Exception('Variables/arguments defined in conditional branches!');
1585 return True;
1586
1587 def analyzeCodeOperation(self, aoStmts: List[iai.McStmt], dEflStmts, fSeenConditional = False) -> bool:
1588 """
1589 Analyzes the code, looking for clues as to additional side-effects.
1590
1591 Currently this is simply looking for branching and adding the relevant
1592 branch flags to dsCImplFlags. ASSUMES the caller pre-populates the
1593 dictionary with a copy of self.oMcBlock.dsCImplFlags.
1594
1595 This also sets McStmtCond.oIfBranchAnnotation & McStmtCond.oElseBranchAnnotation.
1596
1597 Returns an annotation describing the return style.
1598 """
1599 sAnnotation = None;
1600 for oStmt in aoStmts:
1601 # Set IEM_CIMPL_F_BRANCH_XXX if we see any branching MCs.
1602 if oStmt.sName.startswith('IEM_MC_SET_RIP'):
1603 assert not fSeenConditional;
1604 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_INDIRECT'] = True;
1605 elif oStmt.sName.startswith('IEM_MC_REL_JMP'):
1606 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_RELATIVE'] = True;
1607 if fSeenConditional:
1608 self.dsCImplFlags['IEM_CIMPL_F_BRANCH_CONDITIONAL'] = True;
1609
1610 # Check for CIMPL and AIMPL calls.
1611 if oStmt.sName.startswith('IEM_MC_CALL_'):
1612 if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
1613 self.dsCImplFlags['IEM_CIMPL_F_CALLS_CIMPL'] = True;
1614 elif ( oStmt.sName.startswith('IEM_MC_CALL_VOID_AIMPL_')
1615 or oStmt.sName.startswith('IEM_MC_CALL_AIMPL_')
1616 or oStmt.sName.startswith('IEM_MC_CALL_AVX_AIMPL_')):
1617 self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL'] = True;
1618 elif ( oStmt.sName.startswith('IEM_MC_CALL_SSE_AIMPL_')
1619 or oStmt.sName.startswith('IEM_MC_CALL_MMX_AIMPL_')
1620 or oStmt.sName.startswith('IEM_MC_CALL_FPU_AIMPL_')):
1621 self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE'] = True;
1622 else:
1623 raise Exception('Unknown IEM_MC_CALL_* statement: %s' % (oStmt.sName,));
1624
1625 # Check for return statements.
1626 if oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH',):
1627 assert sAnnotation is None;
1628 sAnnotation = g_ksFinishAnnotation_Advance;
1629 elif oStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH',
1630 'IEM_MC_REL_JMP_S32_AND_FINISH',):
1631 assert sAnnotation is None;
1632 sAnnotation = g_ksFinishAnnotation_RelJmp;
1633 elif oStmt.sName in ('IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH',
1634 'IEM_MC_SET_RIP_U64_AND_FINISH',):
1635 assert sAnnotation is None;
1636 sAnnotation = g_ksFinishAnnotation_SetJmp;
1637 elif oStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_'):
1638 assert sAnnotation is None;
1639 sAnnotation = g_ksFinishAnnotation_DeferToCImpl;
1640
1641 # Collect MCs working on EFLAGS. Caller will check this.
1642 if oStmt.sName in ('IEM_MC_FETCH_EFLAGS', 'IEM_MC_FETCH_EFLAGS_U8', 'IEM_MC_COMMIT_EFLAGS', 'IEM_MC_REF_EFLAGS',
1643 'IEM_MC_ARG_LOCAL_EFLAGS', ):
1644 dEflStmts[oStmt.sName] = oStmt;
1645 elif isinstance(oStmt, iai.McStmtCall):
1646 if oStmt.sName in ('IEM_MC_CALL_CIMPL_0', 'IEM_MC_CALL_CIMPL_1', 'IEM_MC_CALL_CIMPL_2',
1647 'IEM_MC_CALL_CIMPL_3', 'IEM_MC_CALL_CIMPL_4', 'IEM_MC_CALL_CIMPL_5',):
1648 if ( oStmt.asParams[0].find('IEM_CIMPL_F_RFLAGS') >= 0
1649 or oStmt.asParams[0].find('IEM_CIMPL_F_STATUS_FLAGS') >= 0):
1650 dEflStmts[oStmt.sName] = oStmt;
1651
1652 # Process branches of conditionals recursively.
1653 if isinstance(oStmt, iai.McStmtCond):
1654 oStmt.oIfBranchAnnotation = self.analyzeCodeOperation(oStmt.aoIfBranch, dEflStmts, True);
1655 if oStmt.aoElseBranch:
1656 oStmt.oElseBranchAnnotation = self.analyzeCodeOperation(oStmt.aoElseBranch, dEflStmts, True);
1657
1658 return sAnnotation;
1659
1660 def analyze(self, oGenerator):
1661 """
1662 Analyzes the code, identifying the number of parameters it requires and such.
1663
1664 Returns dummy True - raises exception on trouble.
1665 """
1666
1667 # Check the block for errors before we proceed (will decode it).
1668 asErrors = self.oMcBlock.check();
1669 if asErrors:
1670 raise Exception('\n'.join(['%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sError, )
1671 for sError in asErrors]));
1672
1673 # Decode the block into a list/tree of McStmt objects.
1674 aoStmts = self.oMcBlock.decode();
1675
1676 # Scan the statements for local variables and call arguments (self.dVariables).
1677 self.analyzeFindVariablesAndCallArgs(aoStmts);
1678
1679 # Scan the code for IEM_CIMPL_F_ and other clues.
1680 self.dsCImplFlags = self.oMcBlock.dsCImplFlags.copy();
1681 dEflStmts = {};
1682 self.analyzeCodeOperation(aoStmts, dEflStmts);
1683 if ( ('IEM_CIMPL_F_CALLS_CIMPL' in self.dsCImplFlags)
1684 + ('IEM_CIMPL_F_CALLS_AIMPL' in self.dsCImplFlags)
1685 + ('IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE' in self.dsCImplFlags) > 1):
1686 self.error('Mixing CIMPL/AIMPL/AIMPL_WITH_FXSTATE calls', oGenerator);
1687
1688 # Analyse EFLAGS related MCs and @opflmodify and friends.
1689 if dEflStmts:
1690 oInstruction = self.oMcBlock.oInstruction; # iai.Instruction
1691 if ( oInstruction is None
1692 or (oInstruction.asFlTest is None and oInstruction.asFlModify is None)):
1693 sMcNames = '+'.join(dEflStmts.keys());
1694 if len(dEflStmts) != 1 or not sMcNames.startswith('IEM_MC_CALL_CIMPL_'): # Hack for far calls
1695 self.error('Uses %s but has no @opflmodify, @opfltest or @opflclass with details!' % (sMcNames,), oGenerator);
1696 elif 'IEM_MC_COMMIT_EFLAGS' in dEflStmts:
1697 if not oInstruction.asFlModify:
1698 if oInstruction.sMnemonic not in [ 'not', ]:
1699 self.error('Uses IEM_MC_COMMIT_EFLAGS but has no flags in @opflmodify!', oGenerator);
1700 elif ( 'IEM_MC_CALL_CIMPL_0' in dEflStmts
1701 or 'IEM_MC_CALL_CIMPL_1' in dEflStmts
1702 or 'IEM_MC_CALL_CIMPL_2' in dEflStmts
1703 or 'IEM_MC_CALL_CIMPL_3' in dEflStmts
1704 or 'IEM_MC_CALL_CIMPL_4' in dEflStmts
1705 or 'IEM_MC_CALL_CIMPL_5' in dEflStmts ):
1706 if not oInstruction.asFlModify:
1707 self.error('Uses IEM_MC_CALL_CIMPL_x or IEM_MC_DEFER_TO_CIMPL_5_RET with IEM_CIMPL_F_STATUS_FLAGS '
1708 'or IEM_CIMPL_F_RFLAGS but has no flags in @opflmodify!', oGenerator);
1709 elif 'IEM_MC_REF_EFLAGS' not in dEflStmts:
1710 if not oInstruction.asFlTest:
1711 if oInstruction.sMnemonic not in [ 'not', ]:
1712 self.error('Expected @opfltest!', oGenerator);
1713 if oInstruction and oInstruction.asFlSet:
1714 for sFlag in oInstruction.asFlSet:
1715 if sFlag not in oInstruction.asFlModify:
1716 self.error('"%s" in @opflset but missing from @opflmodify (%s)!'
1717 % (sFlag, ', '.join(oInstruction.asFlModify)), oGenerator);
1718 if oInstruction and oInstruction.asFlClear:
1719 for sFlag in oInstruction.asFlClear:
1720 if sFlag not in oInstruction.asFlModify:
1721 self.error('"%s" in @opflclear but missing from @opflmodify (%s)!'
1722 % (sFlag, ', '.join(oInstruction.asFlModify)), oGenerator);
1723
1724 # Create variations as needed.
1725 if iai.McStmt.findStmtByNames(aoStmts,
1726 { 'IEM_MC_DEFER_TO_CIMPL_0_RET': True,
1727 'IEM_MC_DEFER_TO_CIMPL_1_RET': True,
1728 'IEM_MC_DEFER_TO_CIMPL_2_RET': True,
1729 'IEM_MC_DEFER_TO_CIMPL_3_RET': True, }):
1730 asVariations = (ThreadedFunctionVariation.ksVariation_Default,);
1731
1732 elif iai.McStmt.findStmtByNames(aoStmts, { 'IEM_MC_CALC_RM_EFF_ADDR' : True,
1733 'IEM_MC_FETCH_MEM_U8' : True, # mov_AL_Ob ++
1734 'IEM_MC_FETCH_MEM_U16' : True, # mov_rAX_Ov ++
1735 'IEM_MC_FETCH_MEM_U32' : True,
1736 'IEM_MC_FETCH_MEM_U64' : True,
1737 'IEM_MC_STORE_MEM_U8' : True, # mov_Ob_AL ++
1738 'IEM_MC_STORE_MEM_U16' : True, # mov_Ov_rAX ++
1739 'IEM_MC_STORE_MEM_U32' : True,
1740 'IEM_MC_STORE_MEM_U64' : True, }):
1741 if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
1742 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressOnly64;
1743 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
1744 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286Not64;
1745 elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
1746 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286;
1747 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
1748 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot64;
1749 elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
1750 asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
1751 else:
1752 asVariations = ThreadedFunctionVariation.kasVariationsWithAddress;
1753 else:
1754 if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
1755 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressOnly64;
1756 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
1757 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286Not64;
1758 elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
1759 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286;
1760 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
1761 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot64;
1762 elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
1763 asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
1764 else:
1765 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddress;
1766
1767 if ( 'IEM_CIMPL_F_BRANCH_CONDITIONAL' in self.dsCImplFlags
1768 and 'IEM_CIMPL_F_BRANCH_RELATIVE' in self.dsCImplFlags): # (latter to avoid iemOp_into)
1769 assert set(asVariations).issubset(ThreadedFunctionVariation.kasVariationsWithoutAddress), \
1770 '%s: vars=%s McFlags=%s' % (self.oMcBlock.oFunction.sName, asVariations, self.oMcBlock.dsMcFlags);
1771 asVariationsBase = asVariations;
1772 asVariations = [];
1773 for sVariation in asVariationsBase:
1774 asVariations.extend([sVariation + '_Jmp', sVariation + '_NoJmp']);
1775 assert set(asVariations).issubset(ThreadedFunctionVariation.kdVariationsWithConditional);
1776
1777 if not iai.McStmt.findStmtByNames(aoStmts,
1778 { 'IEM_MC_ADVANCE_RIP_AND_FINISH': True,
1779 'IEM_MC_REL_JMP_S8_AND_FINISH': True,
1780 'IEM_MC_REL_JMP_S16_AND_FINISH': True,
1781 'IEM_MC_REL_JMP_S32_AND_FINISH': True,
1782 'IEM_MC_SET_RIP_U16_AND_FINISH': True,
1783 'IEM_MC_SET_RIP_U32_AND_FINISH': True,
1784 'IEM_MC_SET_RIP_U64_AND_FINISH': True,
1785 }):
1786 asVariations = [sVariation for sVariation in asVariations
1787 if sVariation not in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing];
1788
1789 self.aoVariations = [ThreadedFunctionVariation(self, sVar) for sVar in asVariations];
1790
1791 # Dictionary variant of the list.
1792 self.dVariations = { oVar.sVariation: oVar for oVar in self.aoVariations };
1793
1794 # Continue the analysis on each variation.
1795 for oVariation in self.aoVariations:
1796 oVariation.analyzeVariation(aoStmts);
1797
1798 return True;
1799
1800 ## Used by emitThreadedCallStmts.
1801 kdVariationsWithNeedForPrefixCheck = {
1802 ThreadedFunctionVariation.ksVariation_64_Addr32: True,
1803 ThreadedFunctionVariation.ksVariation_64f_Addr32: True,
1804 ThreadedFunctionVariation.ksVariation_64_FsGs: True,
1805 ThreadedFunctionVariation.ksVariation_64f_FsGs: True,
1806 ThreadedFunctionVariation.ksVariation_32_Addr16: True,
1807 ThreadedFunctionVariation.ksVariation_32f_Addr16: True,
1808 ThreadedFunctionVariation.ksVariation_32_Flat: True,
1809 ThreadedFunctionVariation.ksVariation_32f_Flat: True,
1810 ThreadedFunctionVariation.ksVariation_16_Addr32: True,
1811 ThreadedFunctionVariation.ksVariation_16f_Addr32: True,
1812 };
1813
1814 def emitThreadedCallStmts(self, sBranch = None): # pylint: disable=too-many-statements
1815 """
1816 Worker for morphInputCode that returns a list of statements that emit
1817 the call to the threaded functions for the block.
1818
1819 The sBranch parameter is used with conditional branches where we'll emit
1820 different threaded calls depending on whether we're in the jump-taken or
1821 no-jump code path.
1822 """
1823 # Special case for only default variation:
1824 if len(self.aoVariations) == 1 and self.aoVariations[0].sVariation == ThreadedFunctionVariation.ksVariation_Default:
1825 assert not sBranch;
1826 return self.aoVariations[0].emitThreadedCallStmts(0);
1827
1828 #
1829 # Case statement sub-class.
1830 #
1831 dByVari = self.dVariations;
1832 #fDbg = self.oMcBlock.sFunction == 'iemOpCommonPushSReg';
1833 class Case:
1834 def __init__(self, sCond, sVarNm = None):
1835 self.sCond = sCond;
1836 self.sVarNm = sVarNm;
1837 self.oVar = dByVari[sVarNm] if sVarNm else None;
1838 self.aoBody = self.oVar.emitThreadedCallStmts(8) if sVarNm else None;
1839
1840 def toCode(self):
1841 aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
1842 if self.aoBody:
1843 aoStmts.extend(self.aoBody);
1844 aoStmts.append(iai.McCppGeneric('break;', cchIndent = 8));
1845 return aoStmts;
1846
1847 def toFunctionAssignment(self):
1848 aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
1849 if self.aoBody:
1850 aoStmts.extend([
1851 iai.McCppGeneric('enmFunction = %s;' % (self.oVar.getIndexName(),), cchIndent = 8),
1852 iai.McCppGeneric('break;', cchIndent = 8),
1853 ]);
1854 return aoStmts;
1855
1856 def isSame(self, oThat):
1857 if not self.aoBody: # fall thru always matches.
1858 return True;
1859 if len(self.aoBody) != len(oThat.aoBody):
1860 #if fDbg: print('dbg: body len diff: %s vs %s' % (len(self.aoBody), len(oThat.aoBody),));
1861 return False;
1862 for iStmt, oStmt in enumerate(self.aoBody):
1863 oThatStmt = oThat.aoBody[iStmt] # type: iai.McStmt
1864 assert isinstance(oStmt, iai.McCppGeneric);
1865 assert not isinstance(oStmt, iai.McStmtCond);
1866 if isinstance(oStmt, iai.McStmtCond):
1867 return False;
1868 if oStmt.sName != oThatStmt.sName:
1869 #if fDbg: print('dbg: stmt #%s name: %s vs %s' % (iStmt, oStmt.sName, oThatStmt.sName,));
1870 return False;
1871 if len(oStmt.asParams) != len(oThatStmt.asParams):
1872 #if fDbg: print('dbg: stmt #%s param count: %s vs %s'
1873 # % (iStmt, len(oStmt.asParams), len(oThatStmt.asParams),));
1874 return False;
1875 for iParam, sParam in enumerate(oStmt.asParams):
1876 if ( sParam != oThatStmt.asParams[iParam]
1877 and ( iParam != 1
1878 or not isinstance(oStmt, iai.McCppCall)
1879 or not oStmt.asParams[0].startswith('IEM_MC2_EMIT_CALL_')
1880 or sParam != self.oVar.getIndexName()
1881 or oThatStmt.asParams[iParam] != oThat.oVar.getIndexName() )):
1882 #if fDbg: print('dbg: stmt #%s, param #%s: %s vs %s'
1883 # % (iStmt, iParam, sParam, oThatStmt.asParams[iParam],));
1884 return False;
1885 return True;
1886
1887 #
1888 # Determine what we're switching on.
1889 # This ASSUMES that (IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7!
1890 #
1891 fSimple = True;
1892 sSwitchValue = '(pVCpu->iem.s.fExec & (IEM_F_MODE_CPUMODE_MASK | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK))';
1893 if dByVari.keys() & self.kdVariationsWithNeedForPrefixCheck.keys():
1894 sSwitchValue += ' | (pVCpu->iem.s.enmEffAddrMode == (pVCpu->iem.s.fExec & IEM_F_MODE_CPUMODE_MASK) ? 0 : 8)';
1895 # Accesses via FS, GS and CS go through the non-FLAT functions. (CS
1896 # is not writable in 32-bit mode (at least), thus the penalty mode
1897 # for any accesses via it (simpler this way).)
1898 sSwitchValue += ' | (pVCpu->iem.s.iEffSeg < X86_SREG_FS && pVCpu->iem.s.iEffSeg != X86_SREG_CS ? 0 : 16)';
1899 fSimple = False; # threaded functions.
1900 if dByVari.keys() & ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
1901 sSwitchValue += ' | ((pVCpu->iem.s.fTbPrevInstr & (IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_INHIBIT_SHADOW)) || ' \
1902 + '(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_MASK) ? 32 : 0)';
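# Thus the switch value is (roughly) laid out as:
#   bits 2:0 - CPU mode and flat/pre-386 indication (ASSUMED to fit in 7, see above),
#   bit 3 (8)  - effective address size differs from the CPU mode (prefix penalty),
#   bit 4 (16) - effective segment is FS, GS or CS (non-flat path),
#   bit 5 (32) - the EFLAGS-checking-and-clearing variant is required.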
1903
1904 #
1905 # Generate the case statements.
1906 #
1907 # pylintx: disable=x
1908 aoCases = [];
1909 if ThreadedFunctionVariation.ksVariation_64_Addr32 in dByVari:
1910 assert not fSimple and not sBranch;
1911 aoCases.extend([
1912 Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64),
1913 Case('IEMMODE_64BIT | 16', ThrdFnVar.ksVariation_64_FsGs),
1914 Case('IEMMODE_64BIT | 8 | 16', None), # fall thru
1915 Case('IEMMODE_64BIT | 8', ThrdFnVar.ksVariation_64_Addr32),
1916 ]);
1917 if ThreadedFunctionVariation.ksVariation_64f_Addr32 in dByVari:
1918 aoCases.extend([
1919 Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f),
1920 Case('IEMMODE_64BIT | 32 | 16', ThrdFnVar.ksVariation_64f_FsGs),
1921 Case('IEMMODE_64BIT | 32 | 8 | 16', None), # fall thru
1922 Case('IEMMODE_64BIT | 32 | 8', ThrdFnVar.ksVariation_64f_Addr32),
1923 ]);
1924 elif ThrdFnVar.ksVariation_64 in dByVari:
1925 assert fSimple and not sBranch;
1926 aoCases.append(Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64));
1927 if ThreadedFunctionVariation.ksVariation_64f in dByVari:
1928 aoCases.append(Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f));
1929 elif ThrdFnVar.ksVariation_64_Jmp in dByVari:
1930 assert fSimple and sBranch;
1931 aoCases.append(Case('IEMMODE_64BIT',
1932 ThrdFnVar.ksVariation_64_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64_NoJmp));
1933 if ThreadedFunctionVariation.ksVariation_64f_Jmp in dByVari:
1934 aoCases.append(Case('IEMMODE_64BIT | 32',
1935 ThrdFnVar.ksVariation_64f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64f_NoJmp));
1936
1937 if ThrdFnVar.ksVariation_32_Addr16 in dByVari:
1938 assert not fSimple and not sBranch;
1939 aoCases.extend([
1940 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_32_Flat),
1941 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None), # fall thru
1942 Case('IEMMODE_32BIT | 16', None), # fall thru
1943 Case('IEMMODE_32BIT', ThrdFnVar.ksVariation_32),
1944 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8', None), # fall thru
1945 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8 | 16',None), # fall thru
1946 Case('IEMMODE_32BIT | 8 | 16',None), # fall thru
1947 Case('IEMMODE_32BIT | 8', ThrdFnVar.ksVariation_32_Addr16),
1948 ]);
1949 if ThrdFnVar.ksVariation_32f_Addr16 in dByVari:
1950 aoCases.extend([
1951 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_32f_Flat),
1952 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None), # fall thru
1953 Case('IEMMODE_32BIT | 32 | 16', None), # fall thru
1954 Case('IEMMODE_32BIT | 32', ThrdFnVar.ksVariation_32f),
1955 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8', None), # fall thru
1956 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8 | 16',None), # fall thru
1957 Case('IEMMODE_32BIT | 32 | 8 | 16',None), # fall thru
1958 Case('IEMMODE_32BIT | 32 | 8', ThrdFnVar.ksVariation_32f_Addr16),
1959 ]);
1960 elif ThrdFnVar.ksVariation_32 in dByVari:
1961 assert fSimple and not sBranch;
1962 aoCases.extend([
1963 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', None), # fall thru
1964 Case('IEMMODE_32BIT', ThrdFnVar.ksVariation_32),
1965 ]);
1966 if ThrdFnVar.ksVariation_32f in dByVari:
1967 aoCases.extend([
1968 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', None), # fall thru
1969 Case('IEMMODE_32BIT | 32', ThrdFnVar.ksVariation_32f),
1970 ]);
1971 elif ThrdFnVar.ksVariation_32_Jmp in dByVari:
1972 assert fSimple and sBranch;
1973 aoCases.extend([
1974 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', None), # fall thru
1975 Case('IEMMODE_32BIT',
1976 ThrdFnVar.ksVariation_32_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32_NoJmp),
1977 ]);
1978 if ThrdFnVar.ksVariation_32f_Jmp in dByVari:
1979 aoCases.extend([
1980 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', None), # fall thru
1981 Case('IEMMODE_32BIT | 32',
1982 ThrdFnVar.ksVariation_32f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32f_NoJmp),
1983 ]);
1984
1985 if ThrdFnVar.ksVariation_16_Addr32 in dByVari:
1986 assert not fSimple and not sBranch;
1987 aoCases.extend([
1988 Case('IEMMODE_16BIT | 16', None), # fall thru
1989 Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16),
1990 Case('IEMMODE_16BIT | 8 | 16', None), # fall thru
1991 Case('IEMMODE_16BIT | 8', ThrdFnVar.ksVariation_16_Addr32),
1992 ]);
1993 if ThrdFnVar.ksVariation_16f_Addr32 in dByVari:
1994 aoCases.extend([
1995 Case('IEMMODE_16BIT | 32 | 16', None), # fall thru
1996 Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f),
1997 Case('IEMMODE_16BIT | 32 | 8 | 16', None), # fall thru
1998 Case('IEMMODE_16BIT | 32 | 8', ThrdFnVar.ksVariation_16f_Addr32),
1999 ]);
2000 elif ThrdFnVar.ksVariation_16 in dByVari:
2001 assert fSimple and not sBranch;
2002 aoCases.append(Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16));
2003 if ThrdFnVar.ksVariation_16f in dByVari:
2004 aoCases.append(Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f));
2005 elif ThrdFnVar.ksVariation_16_Jmp in dByVari:
2006 assert fSimple and sBranch;
2007 aoCases.append(Case('IEMMODE_16BIT',
2008 ThrdFnVar.ksVariation_16_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_16_NoJmp));
2009 if ThrdFnVar.ksVariation_16f_Jmp in dByVari:
2010 aoCases.append(Case('IEMMODE_16BIT | 32',
2011 ThrdFnVar.ksVariation_16f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_16f_NoJmp));
2012
2013
2014 if ThrdFnVar.ksVariation_16_Pre386 in dByVari:
2015 if not fSimple:
2016 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None)); # fall thru
2017 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_16_Pre386));
2018 if ThrdFnVar.ksVariation_16f_Pre386 in dByVari: # should be nested under previous if, but line too long.
2019 if not fSimple:
2020 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None)); # fall thru
2021 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_16f_Pre386));
2022
2023 if ThrdFnVar.ksVariation_16_Pre386_Jmp in dByVari:
2024 assert fSimple and sBranch;
2025 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK',
2026 ThrdFnVar.ksVariation_16_Pre386_Jmp if sBranch == 'Jmp'
2027 else ThrdFnVar.ksVariation_16_Pre386_NoJmp));
2028 if ThrdFnVar.ksVariation_16f_Pre386_Jmp in dByVari:
2029 assert fSimple and sBranch;
2030 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32',
2031 ThrdFnVar.ksVariation_16f_Pre386_Jmp if sBranch == 'Jmp'
2032 else ThrdFnVar.ksVariation_16f_Pre386_NoJmp));
2033
2034 #
2035 # If the case bodies are all the same, except for the function called,
2036 # we can reduce the code size and hopefully compile time.
2037 #
2038 iFirstCaseWithBody = 0;
2039 while not aoCases[iFirstCaseWithBody].aoBody:
2040 iFirstCaseWithBody += 1;
2041 fAllSameCases = True;
2042 for iCase in range(iFirstCaseWithBody + 1, len(aoCases)):
2043 fAllSameCases = fAllSameCases and aoCases[iCase].isSame(aoCases[iFirstCaseWithBody]);
2044 #if fDbg: print('fAllSameCases=%s %s' % (fAllSameCases, self.oMcBlock.sFunction,));
2045 if fAllSameCases:
2046 aoStmts = [
2047 iai.McCppGeneric('IEMTHREADEDFUNCS enmFunction;'),
2048 iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
2049 iai.McCppGeneric('{'),
2050 ];
2051 for oCase in aoCases:
2052 aoStmts.extend(oCase.toFunctionAssignment());
2053 aoStmts.extend([
2054 iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
2055 iai.McCppGeneric('}'),
2056 ]);
2057 aoStmts.extend(dByVari[aoCases[iFirstCaseWithBody].sVarNm].emitThreadedCallStmts(0, 'enmFunction'));
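# For illustration, the reduced form generated above comes out roughly as:
#     IEMTHREADEDFUNCS enmFunction;
#     switch (<mode/prefix bits>) { case ...: enmFunction = kIemThreadedFunc_<name>_<variation>; break; ... }
#     IEM_MC2_BEGIN_EMIT_CALLS(...); IEM_MC2_EMIT_CALL_N(enmFunction, ...); IEM_MC2_END_EMIT_CALLS(...);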
2058
2059 else:
2060 #
2061 # Generate the generic switch statement.
2062 #
2063 aoStmts = [
2064 iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
2065 iai.McCppGeneric('{'),
2066 ];
2067 for oCase in aoCases:
2068 aoStmts.extend(oCase.toCode());
2069 aoStmts.extend([
2070 iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
2071 iai.McCppGeneric('}'),
2072 ]);
2073
2074 return aoStmts;
2075
2076 def morphInputCode(self, aoStmts, fIsConditional = False, fCallEmitted = False, cDepth = 0, sBranchAnnotation = None):
2077 """
2078 Adjusts (& copies) the statements for the input/decoder so it will emit
2079 calls to the right threaded functions for each block.
2080
2081 Returns list/tree of statements (aoStmts is not modified) and updated
2082 fCallEmitted status.
2083 """
2084 #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
2085 aoDecoderStmts = [];
2086
2087 for iStmt, oStmt in enumerate(aoStmts):
2088 # Copy the statement. Make a deep copy to make sure we've got our own
2089 # copies of all instance variables, even if a bit overkill at the moment.
2090 oNewStmt = copy.deepcopy(oStmt);
2091 aoDecoderStmts.append(oNewStmt);
2092 #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));
2093 if oNewStmt.sName == 'IEM_MC_BEGIN' and self.dsCImplFlags:
2094 oNewStmt.asParams[3] = ' | '.join(sorted(self.dsCImplFlags.keys()));
2095
2096 # If we haven't emitted the threaded function call yet, look for
2097 # statements which it would naturally follow or precede.
2098 if not fCallEmitted:
2099 if not oStmt.isCppStmt():
2100 if ( oStmt.sName.startswith('IEM_MC_MAYBE_RAISE_') \
2101 or (oStmt.sName.endswith('_AND_FINISH') and oStmt.sName.startswith('IEM_MC_'))
2102 or oStmt.sName.startswith('IEM_MC_CALL_CIMPL_')
2103 or oStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_')
2104 or oStmt.sName in ('IEM_MC_RAISE_DIVIDE_ERROR',)):
2105 aoDecoderStmts.pop();
2106 if not fIsConditional:
2107 aoDecoderStmts.extend(self.emitThreadedCallStmts());
2108 elif oStmt.sName == 'IEM_MC_ADVANCE_RIP_AND_FINISH':
2109 aoDecoderStmts.extend(self.emitThreadedCallStmts('NoJmp'));
2110 else:
2111 assert oStmt.sName in { 'IEM_MC_REL_JMP_S8_AND_FINISH': True,
2112 'IEM_MC_REL_JMP_S16_AND_FINISH': True,
2113 'IEM_MC_REL_JMP_S32_AND_FINISH': True, };
2114 aoDecoderStmts.extend(self.emitThreadedCallStmts('Jmp'));
2115 aoDecoderStmts.append(oNewStmt);
2116 fCallEmitted = True;
2117
2118 elif iai.g_dMcStmtParsers[oStmt.sName][2]:
2119 # This is for Jmp/NoJmp with loopne and friends, which modify state other than RIP.
2120 if not sBranchAnnotation:
2121 self.raiseProblem('Modifying state before emitting calls! %s' % (oStmt.sName,));
2122 assert fIsConditional;
2123 aoDecoderStmts.pop();
2124 if sBranchAnnotation == g_ksFinishAnnotation_Advance:
2125 assert iai.McStmt.findStmtByNames(aoStmts[iStmt:], {'IEM_MC_ADVANCE_RIP_AND_FINISH':1,})
2126 aoDecoderStmts.extend(self.emitThreadedCallStmts('NoJmp'));
2127 elif sBranchAnnotation == g_ksFinishAnnotation_RelJmp:
2128 assert iai.McStmt.findStmtByNames(aoStmts[iStmt:],
2129 { 'IEM_MC_REL_JMP_S8_AND_FINISH': 1,
2130 'IEM_MC_REL_JMP_S16_AND_FINISH': 1,
2131 'IEM_MC_REL_JMP_S32_AND_FINISH': 1, });
2132 aoDecoderStmts.extend(self.emitThreadedCallStmts('Jmp'));
2133 else:
2134 self.raiseProblem('Modifying state before emitting calls! %s' % (oStmt.sName,));
2135 aoDecoderStmts.append(oNewStmt);
2136 fCallEmitted = True;
2137
2138 elif ( not fIsConditional
2139 and oStmt.fDecode
2140 and ( oStmt.asParams[0].find('IEMOP_HLP_DONE_') >= 0
2141 or oStmt.asParams[0].find('IEMOP_HLP_DECODED_') >= 0)):
2142 aoDecoderStmts.extend(self.emitThreadedCallStmts());
2143 fCallEmitted = True;
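# (I.e. for plain non-conditional blocks the threaded call statements are typically
# spliced in right after the IEMOP_HLP_DONE_* / IEMOP_HLP_DECODED_* statement in the
# decoder function.)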
2144
2145 # Process branches of conditionals recursively.
2146 if isinstance(oStmt, iai.McStmtCond):
2147 (oNewStmt.aoIfBranch, fCallEmitted1) = self.morphInputCode(oStmt.aoIfBranch, fIsConditional,
2148 fCallEmitted, cDepth + 1, oStmt.oIfBranchAnnotation);
2149 if oStmt.aoElseBranch:
2150 (oNewStmt.aoElseBranch, fCallEmitted2) = self.morphInputCode(oStmt.aoElseBranch, fIsConditional,
2151 fCallEmitted, cDepth + 1,
2152 oStmt.oElseBranchAnnotation);
2153 else:
2154 fCallEmitted2 = False;
2155 fCallEmitted = fCallEmitted or (fCallEmitted1 and fCallEmitted2);
2156
2157 if not fCallEmitted and cDepth == 0:
2158 self.raiseProblem('Unable to insert call to threaded function.');
2159
2160 return (aoDecoderStmts, fCallEmitted);
2161
2162
2163 def generateInputCode(self):
2164 """
2165 Modifies the input code.
2166 """
2167 cchIndent = (self.oMcBlock.cchIndent + 3) // 4 * 4;
2168
2169 if len(self.oMcBlock.aoStmts) == 1:
2170 # IEM_MC_DEFER_TO_CIMPL_X_RET - need to wrap in {} to make it safe to insert into random code.
2171 sCode = ' ' * cchIndent + 'pVCpu->iem.s.fTbCurInstr = ';
2172 if self.dsCImplFlags:
2173 sCode += ' | '.join(sorted(self.dsCImplFlags.keys())) + ';\n';
2174 else:
2175 sCode += '0;\n';
2176 sCode += iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts)[0],
2177 cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
2178 sIndent = ' ' * (min(cchIndent, 2) - 2);
2179 sCode = sIndent + '{\n' + sCode + sIndent + '}\n';
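# For illustration, the generated replacement ends up roughly as:
#     { pVCpu->iem.s.fTbCurInstr = <IEM_CIMPL_F_XXX flags or 0>; <threaded call stmts> }
# with a '/* gen */' marker appended to the first generated line.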
2180 return sCode;
2181
2182 # IEM_MC_BEGIN/END block
2183 assert len(self.oMcBlock.asLines) > 2, "asLines=%s" % (self.oMcBlock.asLines,);
2184 fIsConditional = ( 'IEM_CIMPL_F_BRANCH_CONDITIONAL' in self.dsCImplFlags
2185 and 'IEM_CIMPL_F_BRANCH_RELATIVE' in self.dsCImplFlags); # (latter to avoid iemOp_into)
2186 return iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts, fIsConditional)[0],
2187 cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
2188
2189# Short alias for ThreadedFunctionVariation.
2190ThrdFnVar = ThreadedFunctionVariation;
2191
2192
2193class IEMThreadedGenerator(object):
2194 """
2195 The threaded code generator & annotator.
2196 """
2197
2198 def __init__(self):
2199 self.aoThreadedFuncs = [] # type: List[ThreadedFunction]
2200 self.oOptions = None # type: argparse.Namespace
2201 self.aoParsers = [] # type: List[IEMAllInstPython.SimpleParser]
2202 self.aidxFirstFunctions = [] # type: List[int] ##< Runs parallel to aoParsers giving the index of the first function.
2203 self.cErrors = 0;
2204
2205 #
2206 # Error reporting.
2207 #
2208
2209 def rawError(self, sCompleteMessage):
2210 """ Output a raw error and increment the error counter. """
2211 print(sCompleteMessage, file = sys.stderr);
2212 self.cErrors += 1;
2213 return False;
2214
2215 #
2216 # Processing.
2217 #
2218
2219 def processInputFiles(self, sHostArch, fNativeRecompilerEnabled):
2220 """
2221 Process the input files.
2222 """
2223
2224 # Parse the files.
2225 self.aoParsers = iai.parseFiles(self.oOptions.asInFiles, sHostArch);
2226
2227 # Create threaded functions for the MC blocks.
2228 self.aoThreadedFuncs = [ThreadedFunction(oMcBlock) for oMcBlock in iai.g_aoMcBlocks];
2229
2230 # Analyze the threaded functions.
2231 dRawParamCounts = {};
2232 dMinParamCounts = {};
2233 for oThreadedFunction in self.aoThreadedFuncs:
2234 oThreadedFunction.analyze(self);
2235 for oVariation in oThreadedFunction.aoVariations:
2236 dRawParamCounts[len(oVariation.dParamRefs)] = dRawParamCounts.get(len(oVariation.dParamRefs), 0) + 1;
2237 dMinParamCounts[oVariation.cMinParams] = dMinParamCounts.get(oVariation.cMinParams, 0) + 1;
2238 print('debug: param count distribution, raw and optimized:', file = sys.stderr);
2239 for cCount in sorted({cBits: True for cBits in list(dRawParamCounts.keys()) + list(dMinParamCounts.keys())}.keys()):
2240 print('debug: %s params: %4s raw, %4s min'
2241 % (cCount, dRawParamCounts.get(cCount, 0), dMinParamCounts.get(cCount, 0)),
2242 file = sys.stderr);
2243
2244 # Populate aidxFirstFunctions. This is ASSUMING that
2245 # g_aoMcBlocks/self.aoThreadedFuncs are in self.aoParsers order.
2246 iThreadedFunction = 0;
2247 oThreadedFunction = self.getThreadedFunctionByIndex(0);
2248 self.aidxFirstFunctions = [];
2249 for oParser in self.aoParsers:
2250 self.aidxFirstFunctions.append(iThreadedFunction);
2251
2252 while oThreadedFunction.oMcBlock.sSrcFile == oParser.sSrcFile:
2253 iThreadedFunction += 1;
2254 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
2255
2256 # Analyze the threaded functions and their variations for native recompilation.
2257 if fNativeRecompilerEnabled:
2258 ian.displayStatistics(self.aoThreadedFuncs, sHostArch);
2259
2260 # Gather arguments + variable statistics for the MC blocks.
2261 cMaxArgs = 0;
2262 cMaxVars = 0;
2263 cMaxVarsAndArgs = 0;
2264 cbMaxArgs = 0;
2265 cbMaxVars = 0;
2266 cbMaxVarsAndArgs = 0;
2267 for oThreadedFunction in self.aoThreadedFuncs:
2268 if oThreadedFunction.oMcBlock.cLocals >= 0:
2269 # Counts.
2270 assert oThreadedFunction.oMcBlock.cArgs >= 0;
2271 cMaxVars = max(cMaxVars, oThreadedFunction.oMcBlock.cLocals);
2272 cMaxArgs = max(cMaxArgs, oThreadedFunction.oMcBlock.cArgs);
2273 cMaxVarsAndArgs = max(cMaxVarsAndArgs, oThreadedFunction.oMcBlock.cLocals + oThreadedFunction.oMcBlock.cArgs);
2274 if cMaxVarsAndArgs > 9:
2275 raise Exception('%s potentially uses too many variables / args: %u, max 10 - %u vars and %u args'
2276 % (oThreadedFunction.oMcBlock.oFunction.sName, cMaxVarsAndArgs,
2277 oThreadedFunction.oMcBlock.cLocals, oThreadedFunction.oMcBlock.cArgs));
2278 # Calc stack allocation size:
2279 cbArgs = 0;
2280 for oArg in oThreadedFunction.oMcBlock.aoArgs:
2281 cbArgs += (getTypeBitCount(oArg.sType) + 63) // 64 * 8;
2282 cbVars = 0;
2283 for oVar in oThreadedFunction.oMcBlock.aoLocals:
2284 cbVars += (getTypeBitCount(oVar.sType) + 63) // 64 * 8;
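# Note: each slot is rounded up to whole 64-bit units above, so e.g. an 80-bit
# RTFLOAT80U local is counted as 16 bytes here.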
2285 cbMaxVars = max(cbMaxVars, cbVars);
2286 cbMaxArgs = max(cbMaxArgs, cbArgs);
2287 cbMaxVarsAndArgs = max(cbMaxVarsAndArgs, cbVars + cbArgs);
2288 if cbMaxVarsAndArgs >= 0xc0:
2289 raise Exception('%s potentially uses too much stack: cbMaxVars=%#x cbMaxArgs=%#x'
2290 % (oThreadedFunction.oMcBlock.oFunction.sName, cbMaxVars, cbMaxArgs,));
2291
2292 print('debug: max vars+args: %u bytes / %u; max vars: %u bytes / %u; max args: %u bytes / %u'
2293 % (cbMaxVarsAndArgs, cMaxVarsAndArgs, cbMaxVars, cMaxVars, cbMaxArgs, cMaxArgs,), file = sys.stderr);
2294
2295 if self.cErrors > 0:
2296 print('fatal error: %u error%s during processing. Details above.'
2297 % (self.cErrors, 's' if self.cErrors > 1 else '',), file = sys.stderr);
2298 return False;
2299 return True;
2300
2301 #
2302 # Output
2303 #
2304
2305 def generateLicenseHeader(self):
2306 """
2307 Returns the lines for a license header.
2308 """
2309 return [
2310 '/*',
2311 ' * Autogenerated by $Id: IEMAllThrdPython.py 103351 2024-02-14 12:51:58Z vboxsync $ ',
2312 ' * Do not edit!',
2313 ' */',
2314 '',
2315 '/*',
2316 ' * Copyright (C) 2023-' + str(datetime.date.today().year) + ' Oracle and/or its affiliates.',
2317 ' *',
2318 ' * This file is part of VirtualBox base platform packages, as',
2319 ' * available from https://www.alldomusa.eu.org.',
2320 ' *',
2321 ' * This program is free software; you can redistribute it and/or',
2322 ' * modify it under the terms of the GNU General Public License',
2323 ' * as published by the Free Software Foundation, in version 3 of the',
2324 ' * License.',
2325 ' *',
2326 ' * This program is distributed in the hope that it will be useful, but',
2327 ' * WITHOUT ANY WARRANTY; without even the implied warranty of',
2328 ' * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU',
2329 ' * General Public License for more details.',
2330 ' *',
2331 ' * You should have received a copy of the GNU General Public License',
2332 ' * along with this program; if not, see <https://www.gnu.org/licenses>.',
2333 ' *',
2334 ' * The contents of this file may alternatively be used under the terms',
2335 ' * of the Common Development and Distribution License Version 1.0',
2336 ' * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included',
2337 ' * in the VirtualBox distribution, in which case the provisions of the',
2338 ' * CDDL are applicable instead of those of the GPL.',
2339 ' *',
2340 ' * You may elect to license modified versions of this file under the',
2341 ' * terms and conditions of either the GPL or the CDDL or both.',
2342 ' *',
2343 ' * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0',
2344 ' */',
2345 '',
2346 '',
2347 '',
2348 ];
2349
2350 ## List of built-in threaded functions with user argument counts and
2351 ## whether they have a native recompiler implementation.
2352 katBltIns = (
2353 ( 'Nop', 0, True ),
2354 ( 'LogCpuState', 0, True ),
2355
2356 ( 'DeferToCImpl0', 2, True ),
2357 ( 'CheckIrq', 0, True ),
2358 ( 'CheckMode', 1, True ),
2359 ( 'CheckHwInstrBps', 0, False ),
2360 ( 'CheckCsLim', 1, True ),
2361
2362 ( 'CheckCsLimAndOpcodes', 3, True ),
2363 ( 'CheckOpcodes', 3, True ),
2364 ( 'CheckOpcodesConsiderCsLim', 3, True ),
2365
2366 ( 'CheckCsLimAndPcAndOpcodes', 3, True ),
2367 ( 'CheckPcAndOpcodes', 3, True ),
2368 ( 'CheckPcAndOpcodesConsiderCsLim', 3, True ),
2369
2370 ( 'CheckCsLimAndOpcodesAcrossPageLoadingTlb', 3, True ),
2371 ( 'CheckOpcodesAcrossPageLoadingTlb', 3, True ),
2372 ( 'CheckOpcodesAcrossPageLoadingTlbConsiderCsLim', 2, True ),
2373
2374 ( 'CheckCsLimAndOpcodesLoadingTlb', 3, True ),
2375 ( 'CheckOpcodesLoadingTlb', 3, True ),
2376 ( 'CheckOpcodesLoadingTlbConsiderCsLim', 3, True ),
2377
2378 ( 'CheckCsLimAndOpcodesOnNextPageLoadingTlb', 2, True ),
2379 ( 'CheckOpcodesOnNextPageLoadingTlb', 2, True ),
2380 ( 'CheckOpcodesOnNextPageLoadingTlbConsiderCsLim', 2, True ),
2381
2382 ( 'CheckCsLimAndOpcodesOnNewPageLoadingTlb', 2, True ),
2383 ( 'CheckOpcodesOnNewPageLoadingTlb', 2, True ),
2384 ( 'CheckOpcodesOnNewPageLoadingTlbConsiderCsLim', 2, True ),
2385 );
2386
2387 def generateThreadedFunctionsHeader(self, oOut):
2388 """
2389 Generates the threaded functions header file.
2390 Returns success indicator.
2391 """
2392
2393 asLines = self.generateLicenseHeader();
2394
2395 # Generate the threaded function table indexes.
2396 asLines += [
2397 'typedef enum IEMTHREADEDFUNCS',
2398 '{',
2399 ' kIemThreadedFunc_Invalid = 0,',
2400 '',
2401 ' /*',
2402 ' * Predefined',
2403 ' */',
2404 ];
2405 asLines += [' kIemThreadedFunc_BltIn_%s,' % (sFuncNm,) for sFuncNm, _, _ in self.katBltIns];
2406
2407 iThreadedFunction = 1 + len(self.katBltIns);
2408 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2409 asLines += [
2410 '',
2411 ' /*',
2412 ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '',
2413 ' */',
2414 ];
2415 for oThreadedFunction in self.aoThreadedFuncs:
2416 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2417 if oVariation:
2418 iThreadedFunction += 1;
2419 oVariation.iEnumValue = iThreadedFunction;
2420 asLines.append(' ' + oVariation.getIndexName() + ',');
2421 asLines += [
2422 ' kIemThreadedFunc_End',
2423 '} IEMTHREADEDFUNCS;',
2424 '',
2425 ];
2426
2427 # Prototype the function table.
2428 asLines += [
2429 'extern const PFNIEMTHREADEDFUNC g_apfnIemThreadedFunctions[kIemThreadedFunc_End];',
2430 '#if defined(IN_RING3) || defined(LOG_ENABLED)',
2431 'extern const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End];',
2432 '#endif',
2433 'extern uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End];',
2434 ];
2435
2436 oOut.write('\n'.join(asLines));
2437 return True;
2438
2439 ksBitsToIntMask = {
2440 1: "UINT64_C(0x1)",
2441 2: "UINT64_C(0x3)",
2442 4: "UINT64_C(0xf)",
2443 8: "UINT64_C(0xff)",
2444 16: "UINT64_C(0xffff)",
2445 32: "UINT64_C(0xffffffff)",
2446 };
2447
2448 def generateFunctionParameterUnpacking(self, oVariation, oOut, asParams):
2449 """
2450 Outputs code for unpacking parameters.
2451 This is shared by the threaded and native code generators.
2452 """
2453 aasVars = [];
2454 for aoRefs in oVariation.dParamRefs.values():
2455 oRef = aoRefs[0];
2456 if oRef.sType[0] != 'P':
2457 cBits = g_kdTypeInfo[oRef.sType][0];
2458 sType = g_kdTypeInfo[oRef.sType][2];
2459 else:
2460 cBits = 64;
2461 sType = oRef.sType;
2462
2463 sTypeDecl = sType + ' const';
2464
2465 if cBits == 64:
2466 assert oRef.offNewParam == 0;
2467 if sType == 'uint64_t':
2468 sUnpack = '%s;' % (asParams[oRef.iNewParam],);
2469 else:
2470 sUnpack = '(%s)%s;' % (sType, asParams[oRef.iNewParam],);
2471 elif oRef.offNewParam == 0:
2472 sUnpack = '(%s)(%s & %s);' % (sType, asParams[oRef.iNewParam], self.ksBitsToIntMask[cBits]);
2473 else:
2474 sUnpack = '(%s)((%s >> %s) & %s);' \
2475 % (sType, asParams[oRef.iNewParam], oRef.offNewParam, self.ksBitsToIntMask[cBits]);
2476
2477 sComment = '/* %s - %s ref%s */' % (oRef.sOrgRef, len(aoRefs), 's' if len(aoRefs) != 1 else '',);
2478
2479 aasVars.append([ '%s:%02u' % (oRef.iNewParam, oRef.offNewParam),
2480 sTypeDecl, oRef.sNewName, sUnpack, sComment ]);
2481 acchVars = [0, 0, 0, 0, 0];
2482 for asVar in aasVars:
2483 for iCol, sStr in enumerate(asVar):
2484 acchVars[iCol] = max(acchVars[iCol], len(sStr));
2485 sFmt = ' %%-%ss %%-%ss = %%-%ss %%s\n' % (acchVars[1], acchVars[2], acchVars[3]);
2486 for asVar in sorted(aasVars):
2487 oOut.write(sFmt % (asVar[1], asVar[2], asVar[3], asVar[4],));
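# For illustration, a line written here typically looks something like:
#     uint8_t const bRm = (uint8_t)(uParam0 & UINT64_C(0xff)); /* bRm - 1 ref */
# (the native recompiler variant passes 'pCallEntry->auParams[N]' instead of 'uParamN').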
2488 return True;
2489
2490 kasThreadedParamNames = ('uParam0', 'uParam1', 'uParam2');
2491 def generateThreadedFunctionsSource(self, oOut):
2492 """
2493 Generates the threaded functions source file.
2494 Returns success indicator.
2495 """
2496
2497 asLines = self.generateLicenseHeader();
2498 oOut.write('\n'.join(asLines));
2499
2500 #
2501 # Emit the function definitions.
2502 #
2503 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2504 sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
2505 oOut.write( '\n'
2506 + '\n'
2507 + '\n'
2508 + '\n'
2509 + '/*' + '*' * 128 + '\n'
2510 + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
2511 + '*' * 128 + '*/\n');
2512
2513 for oThreadedFunction in self.aoThreadedFuncs:
2514 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2515 if oVariation:
2516 oMcBlock = oThreadedFunction.oMcBlock;
2517
2518 # Function header
2519 oOut.write( '\n'
2520 + '\n'
2521 + '/**\n'
2522 + ' * #%u: %s at line %s offset %s in %s%s\n'
2523 % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
2524 os.path.split(oMcBlock.sSrcFile)[1],
2525 ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
2526 + ' */\n'
2527 + 'static IEM_DECL_IEMTHREADEDFUNC_DEF(' + oVariation.getThreadedFunctionName() + ')\n'
2528 + '{\n');
2529
2530 # Unpack parameters.
2531 self.generateFunctionParameterUnpacking(oVariation, oOut, self.kasThreadedParamNames);
2532
2533 # RT_NOREF for unused parameters.
2534 if oVariation.cMinParams < g_kcThreadedParams:
2535 oOut.write(' RT_NOREF(' + ', '.join(self.kasThreadedParamNames[oVariation.cMinParams:]) + ');\n');
2536
2537 # Now for the actual statements.
2538 oOut.write(iai.McStmt.renderCodeForList(oVariation.aoStmtsForThreadedFunction, cchIndent = 4));
2539
2540 oOut.write('}\n');
2541
2542
2543 #
2544 # Generate the output tables in parallel.
2545 #
2546 asFuncTable = [
2547 '/**',
2548 ' * Function pointer table.',
2549 ' */',
2550 'PFNIEMTHREADEDFUNC const g_apfnIemThreadedFunctions[kIemThreadedFunc_End] =',
2551 '{',
2552 ' /*Invalid*/ NULL,',
2553 ];
2554 asNameTable = [
2555 '/**',
2556 ' * Function name table.',
2557 ' */',
2558 'const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End] =',
2559 '{',
2560 ' "Invalid",',
2561 ];
2562 asArgCntTab = [
2563 '/**',
2564 ' * Argument count table.',
2565 ' */',
2566 'uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End] =',
2567 '{',
2568 ' 0, /*Invalid*/',
2569 ];
2570 aasTables = (asFuncTable, asNameTable, asArgCntTab,);
2571
2572 for asTable in aasTables:
2573 asTable.extend((
2574 '',
2575 ' /*',
2576 ' * Predefined.',
2577 ' */',
2578 ));
2579 for sFuncNm, cArgs, _ in self.katBltIns:
2580 asFuncTable.append(' iemThreadedFunc_BltIn_%s,' % (sFuncNm,));
2581 asNameTable.append(' "BltIn_%s",' % (sFuncNm,));
2582 asArgCntTab.append(' %d, /*BltIn_%s*/' % (cArgs, sFuncNm,));
2583
2584 iThreadedFunction = 1 + len(self.katBltIns);
2585 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2586 for asTable in aasTables:
2587 asTable.extend((
2588 '',
2589 ' /*',
2590 ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation],
2591 ' */',
2592 ));
2593 for oThreadedFunction in self.aoThreadedFuncs:
2594 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2595 if oVariation:
2596 iThreadedFunction += 1;
2597 assert oVariation.iEnumValue == iThreadedFunction;
2598 sName = oVariation.getThreadedFunctionName();
2599 asFuncTable.append(' /*%4u*/ %s,' % (iThreadedFunction, sName,));
2600 asNameTable.append(' /*%4u*/ "%s",' % (iThreadedFunction, sName,));
2601 asArgCntTab.append(' /*%4u*/ %d, /*%s*/' % (iThreadedFunction, oVariation.cMinParams, sName,));
2602
2603 for asTable in aasTables:
2604 asTable.append('};');
2605
2606 #
2607 # Output the tables.
2608 #
2609 oOut.write( '\n'
2610 + '\n');
2611 oOut.write('\n'.join(asFuncTable));
2612 oOut.write( '\n'
2613 + '\n'
2614 + '\n'
2615 + '#if defined(IN_RING3) || defined(LOG_ENABLED)\n');
2616 oOut.write('\n'.join(asNameTable));
2617 oOut.write( '\n'
2618 + '#endif /* IN_RING3 || LOG_ENABLED */\n'
2619 + '\n'
2620 + '\n');
2621 oOut.write('\n'.join(asArgCntTab));
2622 oOut.write('\n');
2623
2624 return True;
2625
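#
# Note on the tables emitted above: asFuncTable, asNameTable and asArgCntTab are
# built in lockstep, so entry N of g_apfnIemThreadedFunctions,
# g_apszIemThreadedFunctions and g_acIemThreadedFunctionUsedArgs all describe
# the same threaded function. Rough sketch of the generated C (entry values
# made up):
#
#   PFNIEMTHREADEDFUNC const g_apfnIemThreadedFunctions[kIemThreadedFunc_End] =
#   {
#       /*Invalid*/ NULL,
#       /* Predefined: */
#       iemThreadedFunc_BltIn_CheckMode,
#       ...
#       /*  42*/ iemThreadedFunc_xxx,   /* the sibling tables hold "iemThreadedFunc_xxx" */
#       ...                             /* and its argument count at the same index.     */
#   };
#
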
2626 def generateNativeFunctionsHeader(self, oOut):
2627 """
2628 Generates the native recompiler functions header file.
2629 Returns success indicator.
2630 """
2631 if not self.oOptions.fNativeRecompilerEnabled:
2632 return True;
2633
2634 asLines = self.generateLicenseHeader();
2635
2636 # Prototype the function table.
2637 asLines += [
2638 'extern const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End];',
2639 'extern const PFNIEMNATIVELIVENESSFUNC g_apfnIemNativeLivenessFunctions[kIemThreadedFunc_End];',
2640 '',
2641 ];
2642
2643 # Emit indicators as to which of the built-in functions have a native
2644 # recompiler function and which do not. (We only really need this for
2645 # kIemThreadedFunc_BltIn_CheckMode, but do all just for simplicity.)
2646 for atBltIn in self.katBltIns:
2647 if atBltIn[1]:
2648 asLines.append('#define IEMNATIVE_WITH_BLTIN_' + atBltIn[0].upper());
2649 else:
2650 asLines.append('#define IEMNATIVE_WITHOUT_BLTIN_' + atBltIn[0].upper());
2651
2652 # Emit prototypes for the builtin functions we use in tables.
2653 asLines += [
2654 '',
2655 '/* Prototypes for built-in functions used in the above tables. */',
2656 ];
2657 for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
2658 if fHaveRecompFunc:
2659 asLines += [
2660 'IEM_DECL_IEMNATIVERECOMPFUNC_PROTO( iemNativeRecompFunc_BltIn_%s);' % (sFuncNm,),
2661 'IEM_DECL_IEMNATIVELIVENESSFUNC_PROTO(iemNativeLivenessFunc_BltIn_%s);' % (sFuncNm,),
2662 ];
2663
2664 oOut.write('\n'.join(asLines));
2665 return True;
2666
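#
# Sketch of what generateNativeFunctionsHeader() writes when the native
# recompiler is enabled (illustrative only; 'Xxx'/'XXX' stand for a built-in
# name from katBltIns, upper-cased in the #define):
#
#   extern const PFNIEMNATIVERECOMPFUNC   g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End];
#   extern const PFNIEMNATIVELIVENESSFUNC g_apfnIemNativeLivenessFunctions[kIemThreadedFunc_End];
#
#   #define IEMNATIVE_WITH_BLTIN_XXX        /* or IEMNATIVE_WITHOUT_BLTIN_XXX */
#
#   /* Prototypes for built-in functions used in the above tables. */
#   IEM_DECL_IEMNATIVERECOMPFUNC_PROTO( iemNativeRecompFunc_BltIn_Xxx);
#   IEM_DECL_IEMNATIVELIVENESSFUNC_PROTO(iemNativeLivenessFunc_BltIn_Xxx);
#
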
2667 def generateNativeFunctionsSource(self, oOut):
2668 """
2669 Generates the native recompiler functions source file.
2670 Returns success indicator.
2671 """
2672 if not self.oOptions.fNativeRecompilerEnabled:
2673 return True;
2674
2675 #
2676 # The file header.
2677 #
2678 oOut.write('\n'.join(self.generateLicenseHeader()));
2679
2680 #
2681 # Emit the functions.
2682 #
2683 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2684 sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
2685 oOut.write( '\n'
2686 + '\n'
2687 + '\n'
2688 + '\n'
2689 + '/*' + '*' * 128 + '\n'
2690 + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
2691 + '*' * 128 + '*/\n');
2692
2693 for oThreadedFunction in self.aoThreadedFuncs:
2694 oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
2695 if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
2696 oMcBlock = oThreadedFunction.oMcBlock;
2697
2698 # Function header
2699 oOut.write( '\n'
2700 + '\n'
2701 + '/**\n'
2702 + ' * #%u: %s at line %s offset %s in %s%s\n'
2703 % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
2704 os.path.split(oMcBlock.sSrcFile)[1],
2705 ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
2706 + ' */\n'
2707 + 'static IEM_DECL_IEMNATIVERECOMPFUNC_DEF(' + oVariation.getNativeFunctionName() + ')\n'
2708 + '{\n');
2709
2710 # Unpack parameters.
2711 self.generateFunctionParameterUnpacking(oVariation, oOut,
2712 ('pCallEntry->auParams[0]',
2713 'pCallEntry->auParams[1]',
2714 'pCallEntry->auParams[2]',));
2715
2716 # Now for the actual statements.
2717 oOut.write(oVariation.oNativeRecomp.renderCode(cchIndent = 4));
2718
2719 oOut.write('}\n');
2720
2721 #
2722 # Output the function table.
2723 #
2724 oOut.write( '\n'
2725 + '\n'
2726 + '/*\n'
2727 + ' * Function table running parallel to g_apfnIemThreadedFunctions and friends.\n'
2728 + ' */\n'
2729 + 'const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End] =\n'
2730 + '{\n'
2731 + ' /*Invalid*/ NULL,'
2732 + '\n'
2733 + ' /*\n'
2734 + ' * Predefined.\n'
2735 + ' */\n'
2736 );
2737 for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
2738 if fHaveRecompFunc:
2739 oOut.write(' iemNativeRecompFunc_BltIn_%s,\n' % (sFuncNm,));
2740 else:
2741 oOut.write(' NULL, /*BltIn_%s*/\n' % (sFuncNm,));
2742
2743 iThreadedFunction = 1 + len(self.katBltIns);
2744 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2745 oOut.write( ' /*\n'
2746 + ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '\n'
2747 + ' */\n');
2748 for oThreadedFunction in self.aoThreadedFuncs:
2749 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2750 if oVariation:
2751 iThreadedFunction += 1;
2752 assert oVariation.iEnumValue == iThreadedFunction;
2753 sName = oVariation.getNativeFunctionName();
2754 if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
2755 oOut.write(' /*%4u*/ %s,\n' % (iThreadedFunction, sName,));
2756 else:
2757 oOut.write(' /*%4u*/ NULL /*%s*/,\n' % (iThreadedFunction, sName,));
2758
2759 oOut.write( '};\n'
2760 + '\n');
2761 return True;
2762
2763 def generateNativeLivenessSource(self, oOut):
2764 """
2765 Generates the native recompiler liveness analysis functions source file.
2766 Returns success indicator.
2767 """
2768 if not self.oOptions.fNativeRecompilerEnabled:
2769 return True;
2770
2771 #
2772 # The file header.
2773 #
2774 oOut.write('\n'.join(self.generateLicenseHeader()));
2775
2776 #
2777 # Emit the functions.
2778 #
2779 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2780 sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
2781 oOut.write( '\n'
2782 + '\n'
2783 + '\n'
2784 + '\n'
2785 + '/*' + '*' * 128 + '\n'
2786 + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
2787 + '*' * 128 + '*/\n');
2788
2789 for oThreadedFunction in self.aoThreadedFuncs:
2790 oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
2791 if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
2792 oMcBlock = oThreadedFunction.oMcBlock;
2793
2794 # Function header
2795 oOut.write( '\n'
2796 + '\n'
2797 + '/**\n'
2798 + ' * #%u: %s at line %s offset %s in %s%s\n'
2799 % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
2800 os.path.split(oMcBlock.sSrcFile)[1],
2801 ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
2802 + ' */\n'
2803 + 'static IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(' + oVariation.getLivenessFunctionName() + ')\n'
2804 + '{\n');
2805
2806 # Unpack parameters.
2807 self.generateFunctionParameterUnpacking(oVariation, oOut,
2808 ('pCallEntry->auParams[0]',
2809 'pCallEntry->auParams[1]',
2810 'pCallEntry->auParams[2]',));
2811 asNoRefs = []; #[ 'RT_NOREF_PV(pReNative);', ];
2812 for aoRefs in oVariation.dParamRefs.values():
2813 asNoRefs.append('RT_NOREF_PV(%s);' % (aoRefs[0].sNewName,));
2814 oOut.write(' %s\n' % (' '.join(asNoRefs),));
2815
2816 # Now for the actual statements.
2817 oOut.write(oVariation.oNativeRecomp.renderCode(cchIndent = 4));
2818
2819 oOut.write('}\n');
2820
2821 #
2822 # Output the function table.
2823 #
2824 oOut.write( '\n'
2825 + '\n'
2826 + '/*\n'
2827 + ' * Liveness analysis function table running parallel to g_apfnIemThreadedFunctions and friends.\n'
2828 + ' */\n'
2829 + 'const PFNIEMNATIVELIVENESSFUNC g_apfnIemNativeLivenessFunctions[kIemThreadedFunc_End] =\n'
2830 + '{\n'
2831 + ' /*Invalid*/ NULL,'
2832 + '\n'
2833 + ' /*\n'
2834 + ' * Predefined.\n'
2835 + ' */\n'
2836 );
2837 for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
2838 if fHaveRecompFunc:
2839 oOut.write(' iemNativeLivenessFunc_BltIn_%s,\n' % (sFuncNm,));
2840 else:
2841 oOut.write(' NULL, /*BltIn_%s*/\n' % (sFuncNm,));
2842
2843 iThreadedFunction = 1 + len(self.katBltIns);
2844 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2845 oOut.write( ' /*\n'
2846 + ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '\n'
2847 + ' */\n');
2848 for oThreadedFunction in self.aoThreadedFuncs:
2849 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2850 if oVariation:
2851 iThreadedFunction += 1;
2852 assert oVariation.iEnumValue == iThreadedFunction;
2853 sName = oVariation.getLivenessFunctionName();
2854 if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
2855 oOut.write(' /*%4u*/ %s,\n' % (iThreadedFunction, sName,));
2856 else:
2857 oOut.write(' /*%4u*/ NULL /*%s*/,\n' % (iThreadedFunction, sName,));
2858
2859 oOut.write( '};\n'
2860 + '\n');
2861 return True;
2862
2863
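#
# The recompile and liveness tables emitted above use the same indexing as
# g_apfnIemThreadedFunctions (the asserts on oVariation.iEnumValue enforce
# this), with NULL entries where no native recompiler is available. A
# hypothetical consumer-side lookup could therefore look like the sketch
# below; the call-entry member name (enmFunction) is assumed here, only
# auParams appears in this script:
#
#   PFNIEMNATIVERECOMPFUNC const pfnRecomp = g_apfnIemNativeRecompileFunctions[pCallEntry->enmFunction];
#   if (pfnRecomp) { /* recompile this call natively */ } else { /* keep the threaded call */ }
#
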
2864 def getThreadedFunctionByIndex(self, idx):
2865 """
2866 Returns a ThreadedFunction object for the given index. If the index is
2867 out of bounds, a dummy is returned.
2868 """
2869 if idx < len(self.aoThreadedFuncs):
2870 return self.aoThreadedFuncs[idx];
2871 return ThreadedFunction.dummyInstance();
2872
2873 def generateModifiedInput(self, oOut, idxFile):
2874 """
2875 Generates the combined modified input source/header file.
2876 Returns success indicator.
2877 """
2878 #
2879 # File header and assert assumptions.
2880 #
2881 oOut.write('\n'.join(self.generateLicenseHeader()));
2882 oOut.write('AssertCompile((IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7);\n');
2883
2884 #
2885 # Iterate all parsers (input files) and output the ones related to the
2886 # file set given by idxFile.
2887 #
2888 for idxParser, oParser in enumerate(self.aoParsers): # type: int, IEMAllInstPython.SimpleParser
2889 # Is this included in the file set?
2890 sSrcBaseFile = os.path.basename(oParser.sSrcFile).lower();
2891 fInclude = -1;
2892 for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet:
2893 if sSrcBaseFile == aoInfo[0].lower():
2894 fInclude = aoInfo[2] in (-1, idxFile);
2895 break;
2896 if fInclude is not True:
2897 assert fInclude is False;
2898 continue;
2899
2900 # Output it.
2901 oOut.write("\n\n/* ****** BEGIN %s ******* */\n" % (oParser.sSrcFile,));
2902
2903 iThreadedFunction = self.aidxFirstFunctions[idxParser];
2904 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
2905 iLine = 0;
2906 while iLine < len(oParser.asLines):
2907 sLine = oParser.asLines[iLine];
2908 iLine += 1; # iBeginLine and iEndLine are 1-based.
2909
2910 # Can we pass it thru?
2911 if ( iLine not in [oThreadedFunction.oMcBlock.iBeginLine, oThreadedFunction.oMcBlock.iEndLine]
2912 or oThreadedFunction.oMcBlock.sSrcFile != oParser.sSrcFile):
2913 oOut.write(sLine);
2914 #
2915 # Single MC block spanning multiple lines. Just extract it and insert the replacement.
2916 #
2917 elif oThreadedFunction.oMcBlock.iBeginLine != oThreadedFunction.oMcBlock.iEndLine:
2918 assert ( (sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1)
2919 or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial), 'sLine="%s"' % (sLine,);
2920 oOut.write(sLine[:oThreadedFunction.oMcBlock.offBeginLine]);
2921 sModified = oThreadedFunction.generateInputCode().strip();
2922 oOut.write(sModified);
2923
2924 iLine = oThreadedFunction.oMcBlock.iEndLine;
2925 sLine = oParser.asLines[iLine - 1];
2926 assert ( sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1
2927 or len(oThreadedFunction.oMcBlock.aoStmts) == 1
2928 or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial);
2929 oOut.write(sLine[oThreadedFunction.oMcBlock.offAfterEnd : ]);
2930
2931 # Advance
2932 iThreadedFunction += 1;
2933 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
2934 #
2935 # Macro expansion line that has sublines and may contain multiple MC blocks.
2936 #
2937 else:
2938 offLine = 0;
2939 while iLine == oThreadedFunction.oMcBlock.iBeginLine:
2940 oOut.write(sLine[offLine : oThreadedFunction.oMcBlock.offBeginLine]);
2941
2942 sModified = oThreadedFunction.generateInputCode().strip();
2943 assert ( sModified.startswith('IEM_MC_BEGIN')
2944 or (sModified.find('IEM_MC_DEFER_TO_CIMPL_') > 0 and sModified.strip().startswith('{\n'))
2945 or sModified.startswith('pVCpu->iem.s.fEndTb = true')
2946 or sModified.startswith('pVCpu->iem.s.fTbCurInstr = ')
2947 ), 'sModified="%s"' % (sModified,);
2948 oOut.write(sModified);
2949
2950 offLine = oThreadedFunction.oMcBlock.offAfterEnd;
2951
2952 # Advance
2953 iThreadedFunction += 1;
2954 oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
2955
2956 # Last line segment.
2957 if offLine < len(sLine):
2958 oOut.write(sLine[offLine : ]);
2959
2960 oOut.write("/* ****** END %s ******* */\n" % (oParser.sSrcFile,));
2961
2962 return True;
2963
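#
# The splicing in generateModifiedInput() works on character offsets within a
# source line: text before oMcBlock.offBeginLine and from oMcBlock.offAfterEnd
# onwards is kept, while the MC block in between is swapped for the output of
# generateInputCode(). Minimal stand-alone sketch of that technique (made-up
# values, not part of the generator):
#
#   sLine     = 'FNIEMOP_DEF(foo) { IEM_MC_BEGIN(0, 0); IEM_MC_END(); }'
#   offBegin  = sLine.find('IEM_MC_BEGIN')
#   offAfter  = sLine.rindex('}')           # just past the MC block in real use
#   sModified = sLine[:offBegin] + '/* replacement */ ' + sLine[offAfter:]
#   # -> 'FNIEMOP_DEF(foo) { /* replacement */ }'
#
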
2964 def generateModifiedInput1(self, oOut):
2965 """
2966 Generates the combined modified input source/header file, part 1.
2967 Returns success indicator.
2968 """
2969 return self.generateModifiedInput(oOut, 1);
2970
2971 def generateModifiedInput2(self, oOut):
2972 """
2973 Generates the combined modified input source/header file, part 2.
2974 Returns success indicator.
2975 """
2976 return self.generateModifiedInput(oOut, 2);
2977
2978 def generateModifiedInput3(self, oOut):
2979 """
2980 Generates the combined modified input source/header file, part 3.
2981 Returns success indicator.
2982 """
2983 return self.generateModifiedInput(oOut, 3);
2984
2985 def generateModifiedInput4(self, oOut):
2986 """
2987 Generates the combined modified input source/header file, part 4.
2988 Returns success indicator.
2989 """
2990 return self.generateModifiedInput(oOut, 4);
2991
2992
2993 #
2994 # Main
2995 #
2996
2997 def main(self, asArgs):
2998 """
2999 C-like main function.
3000 Returns exit code.
3001 """
3002
3003 #
3004 # Parse arguments
3005 #
3006 sScriptDir = os.path.dirname(__file__);
3007 oParser = argparse.ArgumentParser(add_help = False);
3008 oParser.add_argument('asInFiles',
3009 metavar = 'input.cpp.h',
3010 nargs = '*',
3011 default = [os.path.join(sScriptDir, aoInfo[0])
3012 for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet],
3013 help = "Selection of VMMAll/IEMAllInst*.cpp.h files to use as input.");
3014 oParser.add_argument('--host-arch',
3015 metavar = 'arch',
3016 dest = 'sHostArch',
3017 action = 'store',
3018 default = None,
3019 help = 'The host architecture.');
3020
3021 oParser.add_argument('--out-thrd-funcs-hdr',
3022 metavar = 'file-thrd-funcs.h',
3023 dest = 'sOutFileThrdFuncsHdr',
3024 action = 'store',
3025 default = '-',
3026 help = 'The output header file for the threaded functions.');
3027 oParser.add_argument('--out-thrd-funcs-cpp',
3028 metavar = 'file-thrd-funcs.cpp',
3029 dest = 'sOutFileThrdFuncsCpp',
3030 action = 'store',
3031 default = '-',
3032 help = 'The output C++ file for the threaded functions.');
3033 oParser.add_argument('--out-n8ve-funcs-hdr',
3034 metavar = 'file-n8tv-funcs.h',
3035 dest = 'sOutFileN8veFuncsHdr',
3036 action = 'store',
3037 default = '-',
3038 help = 'The output header file for the native recompiler functions.');
3039 oParser.add_argument('--out-n8ve-funcs-cpp',
3040 metavar = 'file-n8tv-funcs.cpp',
3041 dest = 'sOutFileN8veFuncsCpp',
3042 action = 'store',
3043 default = '-',
3044 help = 'The output C++ file for the native recompiler functions.');
3045 oParser.add_argument('--out-n8ve-liveness-cpp',
3046 metavar = 'file-n8tv-liveness.cpp',
3047 dest = 'sOutFileN8veLivenessCpp',
3048 action = 'store',
3049 default = '-',
3050 help = 'The output C++ file for the native recompiler liveness analysis functions.');
3051 oParser.add_argument('--native',
3052 dest = 'fNativeRecompilerEnabled',
3053 action = 'store_true',
3054 default = False,
3055 help = 'Enables generating the files related to native recompilation.');
3056 oParser.add_argument('--out-mod-input1',
3057 metavar = 'file-instr.cpp.h',
3058 dest = 'sOutFileModInput1',
3059 action = 'store',
3060 default = '-',
3061 help = 'The output C++/header file for modified input instruction files part 1.');
3062 oParser.add_argument('--out-mod-input2',
3063 metavar = 'file-instr.cpp.h',
3064 dest = 'sOutFileModInput2',
3065 action = 'store',
3066 default = '-',
3067 help = 'The output C++/header file for modified input instruction files part 2.');
3068 oParser.add_argument('--out-mod-input3',
3069 metavar = 'file-instr.cpp.h',
3070 dest = 'sOutFileModInput3',
3071 action = 'store',
3072 default = '-',
3073 help = 'The output C++/header file for modified input instruction files part 3.');
3074 oParser.add_argument('--out-mod-input4',
3075 metavar = 'file-instr.cpp.h',
3076 dest = 'sOutFileModInput4',
3077 action = 'store',
3078 default = '-',
3079 help = 'The output C++/header file for modified input instruction files part 4.');
3080 oParser.add_argument('--help', '-h', '-?',
3081 action = 'help',
3082 help = 'Display help and exit.');
3083 oParser.add_argument('--version', '-V',
3084 action = 'version',
3085 version = 'r%s (IEMAllThreadedPython.py), r%s (IEMAllInstPython.py)'
3086 % (__version__.split()[1], iai.__version__.split()[1],),
3087 help = 'Displays the version/revision of the script and exits.');
3088 self.oOptions = oParser.parse_args(asArgs[1:]);
3089 print("oOptions=%s" % (self.oOptions,), file = sys.stderr);
3090
3091 #
3092 # Process the instructions specified in the IEM sources.
3093 #
3094 if self.processInputFiles(self.oOptions.sHostArch, self.oOptions.fNativeRecompilerEnabled):
3095 #
3096 # Generate the output files.
3097 #
3098 aaoOutputFiles = (
3099 ( self.oOptions.sOutFileThrdFuncsHdr, self.generateThreadedFunctionsHeader ),
3100 ( self.oOptions.sOutFileThrdFuncsCpp, self.generateThreadedFunctionsSource ),
3101 ( self.oOptions.sOutFileN8veFuncsHdr, self.generateNativeFunctionsHeader ),
3102 ( self.oOptions.sOutFileN8veFuncsCpp, self.generateNativeFunctionsSource ),
3103 ( self.oOptions.sOutFileN8veLivenessCpp, self.generateNativeLivenessSource ),
3104 ( self.oOptions.sOutFileModInput1, self.generateModifiedInput1 ),
3105 ( self.oOptions.sOutFileModInput2, self.generateModifiedInput2 ),
3106 ( self.oOptions.sOutFileModInput3, self.generateModifiedInput3 ),
3107 ( self.oOptions.sOutFileModInput4, self.generateModifiedInput4 ),
3108 );
3109 fRc = True;
3110 for sOutFile, fnGenMethod in aaoOutputFiles:
3111 if sOutFile == '-':
3112 fRc = fnGenMethod(sys.stdout) and fRc;
3113 else:
3114 try:
3115 oOut = open(sOutFile, 'w'); # pylint: disable=consider-using-with,unspecified-encoding
3116 except Exception as oXcpt:
3117 print('error! Failed open "%s" for writing: %s' % (sOutFile, oXcpt,), file = sys.stderr);
3118 return 1;
3119 fRc = fnGenMethod(oOut) and fRc;
3120 oOut.close();
3121 if fRc:
3122 return 0;
3123
3124 return 1;
3125
3126
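#
# Example invocation (illustrative only: the option names match the argparse
# definitions above, but the output file names are made up):
#
#   IEMAllThrdPython.py --host-arch amd64 --native \
#       --out-thrd-funcs-hdr    IEMThreadedFunctions.h \
#       --out-thrd-funcs-cpp    IEMThreadedFunctions.cpp \
#       --out-n8ve-funcs-hdr    IEMNativeFunctions.h \
#       --out-n8ve-funcs-cpp    IEMNativeFunctions.cpp \
#       --out-n8ve-liveness-cpp IEMNativeLiveness.cpp \
#       --out-mod-input1 IEMAllInstMod1.cpp.h --out-mod-input2 IEMAllInstMod2.cpp.h \
#       --out-mod-input3 IEMAllInstMod3.cpp.h --out-mod-input4 IEMAllInstMod4.cpp.h
#
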
3127if __name__ == '__main__':
3128 sys.exit(IEMThreadedGenerator().main(sys.argv));
3129