VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllThrdPython.py@ 103720

最後變更 在這個檔案從103720是 103678,由 vboxsync 提交於 13 月 前

VMM/IEM: Optimizing all 'sub same,same' and 'xor same,same' register zeroing variants. Simplifies the associated macros. bugref:10376

  • 屬性 svn:eol-style 設為 LF
  • 屬性 svn:executable 設為 *
  • 屬性 svn:keywords 設為 Author Date Id Revision
檔案大小: 178.7 KB
 
1#!/usr/bin/env python
2# -*- coding: utf-8 -*-
3# $Id: IEMAllThrdPython.py 103678 2024-03-05 09:56:20Z vboxsync $
4# pylint: disable=invalid-name
5
6"""
7Annotates and generates threaded functions from IEMAllInst*.cpp.h.
8"""
9
10from __future__ import print_function;
11
12__copyright__ = \
13"""
14Copyright (C) 2023 Oracle and/or its affiliates.
15
16This file is part of VirtualBox base platform packages, as
17available from https://www.alldomusa.eu.org.
18
19This program is free software; you can redistribute it and/or
20modify it under the terms of the GNU General Public License
21as published by the Free Software Foundation, in version 3 of the
22License.
23
24This program is distributed in the hope that it will be useful, but
25WITHOUT ANY WARRANTY; without even the implied warranty of
26MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
27General Public License for more details.
28
29You should have received a copy of the GNU General Public License
30along with this program; if not, see <https://www.gnu.org/licenses>.
31
32SPDX-License-Identifier: GPL-3.0-only
33"""
34__version__ = "$Revision: 103678 $"
35
36# Standard python imports.
37import copy;
38import datetime;
39import os;
40import re;
41import sys;
42import argparse;
43from typing import Dict, List;
44
45import IEMAllInstPython as iai;
46import IEMAllN8vePython as ian;
47
48
# Python 3 hacks:
if sys.version_info[0] >= 3:
    long = int;     # pylint: disable=redefined-builtin,invalid-name

## Number of generic parameters for the thread functions.
g_kcThreadedParams = 3;

## Basic C types used by the threaded functions, mapping the type name to
## a (cBits, fSigned, sCType) triplet.
g_kdTypeInfo = {
    # type name:    (cBits, fSigned, C-type      )
    'int8_t':       (     8,  True, 'int8_t',    ),
    'int16_t':      (    16,  True, 'int16_t',   ),
    'int32_t':      (    32,  True, 'int32_t',   ),
    'int64_t':      (    64,  True, 'int64_t',   ),
    'uint4_t':      (     4, False, 'uint8_t',   ),
    'uint8_t':      (     8, False, 'uint8_t',   ),
    'uint16_t':     (    16, False, 'uint16_t',  ),
    'uint32_t':     (    32, False, 'uint32_t',  ),
    'uint64_t':     (    64, False, 'uint64_t',  ),
    'uintptr_t':    (    64, False, 'uintptr_t', ), # ASSUMES 64-bit host pointer size.
    'bool':         (     1, False, 'bool',      ),
    'IEMMODE':      (     2, False, 'IEMMODE',   ),
};

## Extended type table - only for getTypeBitCount/variables.
## Starts out with the larger aggregate types and is then extended with
## everything from g_kdTypeInfo below.
g_kdTypeInfo2 = {
    'RTFLOAT32U':       (       32, False, 'RTFLOAT32U',       ),
    'RTFLOAT64U':       (       64, False, 'RTFLOAT64U',       ),
    'RTUINT64U':        (       64, False, 'RTUINT64U',        ),
    'RTGCPTR':          (       64, False, 'RTGCPTR',          ),
    'RTPBCD80U':        (       80, False, 'RTPBCD80U',        ),
    'RTFLOAT80U':       (       80, False, 'RTFLOAT80U',       ),
    'IEMFPURESULT':     (    80+16, False, 'IEMFPURESULT',     ),
    'IEMFPURESULTTWO':  ( 80+16+80, False, 'IEMFPURESULTTWO',  ),
    'RTUINT128U':       (      128, False, 'RTUINT128U',       ),
    'X86XMMREG':        (      128, False, 'X86XMMREG',        ),
    'IEMSSERESULT':     (   128+32, False, 'IEMSSERESULT',     ),
    'IEMMEDIAF2XMMSRC': (      256, False, 'IEMMEDIAF2XMMSRC', ),
    'RTUINT256U':       (      256, False, 'RTUINT256U',       ),
    'IEMPCMPISTRXSRC':  (      256, False, 'IEMPCMPISTRXSRC',  ),
    'IEMPCMPESTRXSRC':  (      384, False, 'IEMPCMPESTRXSRC',  ),
}; #| g_kdTypeInfo; - requires 3.9
g_kdTypeInfo2.update(g_kdTypeInfo);

def getTypeBitCount(sType):
    """
    Translates a type name to its size in bits.

    Unknown pointer types (contains '*' or starts with 'P') are assumed to be
    64 bits wide; anything else unknown gets an error printed and 64 returned.
    """
    try:
        return g_kdTypeInfo2[sType][0];
    except KeyError:
        pass;
    if '*' in sType or sType[0] == 'P':
        return 64;
    #raise Exception('Unknown type: %s' % (sType,));
    print('error: Unknown type: %s' % (sType,));
    return 64;
103
## Maps IEMCPU (pVCpu->iem.s.) field names to the type used when copying the
## value into a threaded function parameter.  A None entry marks a field that
## must not be referenced by threaded code.
g_kdIemFieldToType = {
    # Illegal ones:
    'offInstrNextByte':     ( None, ),
    'cbInstrBuf':           ( None, ),
    'pbInstrBuf':           ( None, ),
    'uInstrBufPc':          ( None, ),
    'cbInstrBufTotal':      ( None, ),
    'offCurInstrStart':     ( None, ),
    'cbOpcode':             ( None, ),
    'offOpcode':            ( None, ),
    'offModRm':             ( None, ),
    # Okay ones.
    'fPrefixes':            ( 'uint32_t', ),
    'uRexReg':              ( 'uint8_t', ),
    'uRexB':                ( 'uint8_t', ),
    'uRexIndex':            ( 'uint8_t', ),
    'iEffSeg':              ( 'uint8_t', ),
    'enmEffOpSize':         ( 'IEMMODE', ),
    'enmDefAddrMode':       ( 'IEMMODE', ),
    'enmEffAddrMode':       ( 'IEMMODE', ),
    'enmDefOpSize':         ( 'IEMMODE', ),
    'idxPrefix':            ( 'uint8_t', ),
    'uVex3rdReg':           ( 'uint8_t', ),
    'uVexLength':           ( 'uint8_t', ),
    'fEvexStuff':           ( 'uint8_t', ),
    'uFpuOpcode':           ( 'uint16_t', ),
};

## @name McStmtCond.oIfBranchAnnotation/McStmtCond.oElseBranchAnnotation values
## @{
g_ksFinishAnnotation_Advance        = 'Advance';
g_ksFinishAnnotation_RelJmp         = 'RelJmp';
g_ksFinishAnnotation_SetJmp         = 'SetJmp';
g_ksFinishAnnotation_DeferToCImpl   = 'DeferToCImpl';
## @}
139
140
class ThreadedParamRef(object):
    """
    A parameter reference for a threaded function.
    """

    def __init__(self, sOrgRef, sType, oStmt, iParam = None, offParam = 0, sStdRef = None):
        ## The name / reference in the original code.
        self.sOrgRef    = sOrgRef;
        ## Normalized name to deal with spaces in macro invocations and such.
        self.sStdRef    = sStdRef if sStdRef else ''.join(sOrgRef.split());
        ## Indicates that sOrgRef may not match the parameter.
        self.fCustomRef = sStdRef is not None;
        ## The type (typically derived).
        self.sType      = sType;
        ## The statement making the reference.
        self.oStmt      = oStmt;
        ## The parameter containing the references. None if implicit.
        self.iParam     = iParam;
        ## The offset in the parameter of the reference.
        self.offParam   = offParam;

        ## The variable name in the threaded function.
        self.sNewName     = 'x';
        ## The threaded function parameter this reference is packed into.
        self.iNewParam    = 99;
        ## The bit offset in iNewParam.
        self.offNewParam  = 1024;
168
169
class ThreadedFunctionVariation(object):
    """ Threaded function variation. """

    ## @name Variations.
    ## These variations will match translation block selection/distinctions as well.
    ## @{
    # pylint: disable=line-too-long
    ksVariation_Default           = '';                  ##< No variations - only used by IEM_MC_DEFER_TO_CIMPL_X_RET.
    ksVariation_16                = '_16';               ##< 16-bit mode code (386+).
    ksVariation_16f               = '_16f';              ##< 16-bit mode code (386+), check+clear eflags.
    ksVariation_16_Jmp            = '_16_Jmp';           ##< 16-bit mode code (386+), conditional jump taken.
    ksVariation_16f_Jmp           = '_16f_Jmp';          ##< 16-bit mode code (386+), check+clear eflags, conditional jump taken.
    ksVariation_16_NoJmp          = '_16_NoJmp';         ##< 16-bit mode code (386+), conditional jump not taken.
    ksVariation_16f_NoJmp         = '_16f_NoJmp';        ##< 16-bit mode code (386+), check+clear eflags, conditional jump not taken.
    ksVariation_16_Addr32         = '_16_Addr32';        ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing.
    ksVariation_16f_Addr32        = '_16f_Addr32';       ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing, eflags.
    ksVariation_16_Pre386         = '_16_Pre386';        ##< 16-bit mode code, pre-386 CPU target.
    ksVariation_16f_Pre386        = '_16f_Pre386';       ##< 16-bit mode code, pre-386 CPU target, check+clear eflags.
    ksVariation_16_Pre386_Jmp     = '_16_Pre386_Jmp';    ##< 16-bit mode code, pre-386 CPU target, conditional jump taken.
    ksVariation_16f_Pre386_Jmp    = '_16f_Pre386_Jmp';   ##< 16-bit mode code, pre-386 CPU target, check+clear eflags, conditional jump taken.
    ksVariation_16_Pre386_NoJmp   = '_16_Pre386_NoJmp';  ##< 16-bit mode code, pre-386 CPU target, conditional jump not taken.
    ksVariation_16f_Pre386_NoJmp  = '_16f_Pre386_NoJmp'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags, conditional jump not taken.
    ksVariation_32                = '_32';               ##< 32-bit mode code (386+).
    ksVariation_32f               = '_32f';              ##< 32-bit mode code (386+), check+clear eflags.
    ksVariation_32_Jmp            = '_32_Jmp';           ##< 32-bit mode code (386+), conditional jump taken.
    ksVariation_32f_Jmp           = '_32f_Jmp';          ##< 32-bit mode code (386+), check+clear eflags, conditional jump taken.
    ksVariation_32_NoJmp          = '_32_NoJmp';         ##< 32-bit mode code (386+), conditional jump not taken.
    ksVariation_32f_NoJmp         = '_32f_NoJmp';        ##< 32-bit mode code (386+), check+clear eflags, conditional jump not taken.
    ksVariation_32_Flat           = '_32_Flat';          ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide.
    ksVariation_32f_Flat          = '_32f_Flat';         ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide, eflags.
    ksVariation_32_Addr16         = '_32_Addr16';        ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing.
    ksVariation_32f_Addr16        = '_32f_Addr16';       ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing, eflags.
    ksVariation_64                = '_64';               ##< 64-bit mode code.
    ksVariation_64f               = '_64f';              ##< 64-bit mode code, check+clear eflags.
    ksVariation_64_Jmp            = '_64_Jmp';           ##< 64-bit mode code, conditional jump taken.
    ksVariation_64f_Jmp           = '_64f_Jmp';          ##< 64-bit mode code, check+clear eflags, conditional jump taken.
    ksVariation_64_NoJmp          = '_64_NoJmp';         ##< 64-bit mode code, conditional jump not taken.
    ksVariation_64f_NoJmp         = '_64f_NoJmp';        ##< 64-bit mode code, check+clear eflags, conditional jump not taken.
    ksVariation_64_FsGs           = '_64_FsGs';          ##< 64-bit mode code, with memory accesses via FS or GS.
    ksVariation_64f_FsGs          = '_64f_FsGs';         ##< 64-bit mode code, with memory accesses via FS or GS, check+clear eflags.
    ksVariation_64_Addr32         = '_64_Addr32';        ##< 64-bit mode code, address size prefixed to 32-bit addressing.
    ksVariation_64f_Addr32        = '_64f_Addr32';       ##< 64-bit mode code, address size prefixed to 32-bit addressing, c+c eflags.
    # pylint: enable=line-too-long

    ## All variations, in no particular order.
    kasVariations = (
        ksVariation_Default,
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Jmp,
        ksVariation_16f_Jmp,
        ksVariation_16_NoJmp,
        ksVariation_16f_NoJmp,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_16_Pre386_Jmp,
        ksVariation_16f_Pre386_Jmp,
        ksVariation_16_Pre386_NoJmp,
        ksVariation_16f_Pre386_NoJmp,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Jmp,
        ksVariation_32f_Jmp,
        ksVariation_32_NoJmp,
        ksVariation_32f_NoJmp,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
        ksVariation_64,
        ksVariation_64f,
        ksVariation_64_Jmp,
        ksVariation_64f_Jmp,
        ksVariation_64_NoJmp,
        ksVariation_64f_NoJmp,
        ksVariation_64_FsGs,
        ksVariation_64f_FsGs,
        ksVariation_64_Addr32,
        ksVariation_64f_Addr32,
    );
    ## Variations for blocks without any memory (effective address) operands.
    kasVariationsWithoutAddress = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_64,
        ksVariation_64f,
    );
    ## Same as kasVariationsWithoutAddress, but excluding the pre-386 targets.
    kasVariationsWithoutAddressNot286 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_64,
        ksVariation_64f,
    );
    ## Same as kasVariationsWithoutAddressNot286, but excluding 64-bit mode too.
    kasVariationsWithoutAddressNot286Not64 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_32,
        ksVariation_32f,
    );
    ## Address-less variations valid outside 64-bit mode.
    kasVariationsWithoutAddressNot64 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_32,
        ksVariation_32f,
    );
    ## Address-less variations valid only in 64-bit mode.
    kasVariationsWithoutAddressOnly64 = (
        ksVariation_64,
        ksVariation_64f,
    );
    ## Variations for blocks with memory (effective address) operands.
    kasVariationsWithAddress = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
        ksVariation_64,
        ksVariation_64f,
        ksVariation_64_FsGs,
        ksVariation_64f_FsGs,
        ksVariation_64_Addr32,
        ksVariation_64f_Addr32,
    );
    ## Same as kasVariationsWithAddress, but excluding the pre-386 targets.
    kasVariationsWithAddressNot286 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
        ksVariation_64,
        ksVariation_64f,
        ksVariation_64_FsGs,
        ksVariation_64f_FsGs,
        ksVariation_64_Addr32,
        ksVariation_64f_Addr32,
    );
    ## Same as kasVariationsWithAddressNot286, but excluding 64-bit mode too.
    kasVariationsWithAddressNot286Not64 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
    );
    ## Variations with addressing valid outside 64-bit mode.
    kasVariationsWithAddressNot64 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
    );
    ## Variations with addressing valid only in 64-bit mode.
    kasVariationsWithAddressOnly64 = (
        ksVariation_64,
        ksVariation_64f,
        ksVariation_64_FsGs,
        ksVariation_64f_FsGs,
        ksVariation_64_Addr32,
        ksVariation_64f_Addr32,
    );
    ## Variations valid only on pre-386 CPU targets.
    kasVariationsOnlyPre386 = (
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
    );
    ## The order in which the variations are emitted in the generated code.
    kasVariationsEmitOrder = (
        ksVariation_Default,
        ksVariation_64,
        ksVariation_64f,
        ksVariation_64_Jmp,
        ksVariation_64f_Jmp,
        ksVariation_64_NoJmp,
        ksVariation_64f_NoJmp,
        ksVariation_64_FsGs,
        ksVariation_64f_FsGs,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Jmp,
        ksVariation_32f_Jmp,
        ksVariation_32_NoJmp,
        ksVariation_32f_NoJmp,
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Jmp,
        ksVariation_16f_Jmp,
        ksVariation_16_NoJmp,
        ksVariation_16f_NoJmp,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_16_Pre386_Jmp,
        ksVariation_16f_Pre386_Jmp,
        ksVariation_16_Pre386_NoJmp,
        ksVariation_16f_Pre386_NoJmp,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
        ksVariation_64_Addr32,
        ksVariation_64f_Addr32,
    );
399 kdVariationNames = {
400 ksVariation_Default: 'defer-to-cimpl',
401 ksVariation_16: '16-bit',
402 ksVariation_16f: '16-bit w/ eflag checking and clearing',
403 ksVariation_16_Jmp: '16-bit w/ conditional jump taken',
404 ksVariation_16f_Jmp: '16-bit w/ eflag checking and clearing and conditional jump taken',
405 ksVariation_16_NoJmp: '16-bit w/ conditional jump not taken',
406 ksVariation_16f_NoJmp: '16-bit w/ eflag checking and clearing and conditional jump not taken',
407 ksVariation_16_Addr32: '16-bit w/ address prefix (Addr32)',
408 ksVariation_16f_Addr32: '16-bit w/ address prefix (Addr32) and eflag checking and clearing',
409 ksVariation_16_Pre386: '16-bit on pre-386 CPU',
410 ksVariation_16f_Pre386: '16-bit on pre-386 CPU w/ eflag checking and clearing',
411 ksVariation_16_Pre386_Jmp: '16-bit on pre-386 CPU w/ conditional jump taken',
412 ksVariation_16f_Pre386_Jmp: '16-bit on pre-386 CPU w/ eflag checking and clearing and conditional jump taken',
413 ksVariation_16_Pre386_NoJmp: '16-bit on pre-386 CPU w/ conditional jump taken',
414 ksVariation_16f_Pre386_NoJmp: '16-bit on pre-386 CPU w/ eflag checking and clearing and conditional jump taken',
415 ksVariation_32: '32-bit',
416 ksVariation_32f: '32-bit w/ eflag checking and clearing',
417 ksVariation_32_Jmp: '32-bit w/ conditional jump taken',
418 ksVariation_32f_Jmp: '32-bit w/ eflag checking and clearing and conditional jump taken',
419 ksVariation_32_NoJmp: '32-bit w/ conditional jump not taken',
420 ksVariation_32f_NoJmp: '32-bit w/ eflag checking and clearing and conditional jump not taken',
421 ksVariation_32_Flat: '32-bit flat and wide open CS, SS, DS and ES',
422 ksVariation_32f_Flat: '32-bit flat and wide open CS, SS, DS and ES w/ eflag checking and clearing',
423 ksVariation_32_Addr16: '32-bit w/ address prefix (Addr16)',
424 ksVariation_32f_Addr16: '32-bit w/ address prefix (Addr16) and eflag checking and clearing',
425 ksVariation_64: '64-bit',
426 ksVariation_64f: '64-bit w/ eflag checking and clearing',
427 ksVariation_64_Jmp: '64-bit w/ conditional jump taken',
428 ksVariation_64f_Jmp: '64-bit w/ eflag checking and clearing and conditional jump taken',
429 ksVariation_64_NoJmp: '64-bit w/ conditional jump not taken',
430 ksVariation_64f_NoJmp: '64-bit w/ eflag checking and clearing and conditional jump not taken',
431 ksVariation_64_FsGs: '64-bit with memory accessed via FS or GS',
432 ksVariation_64f_FsGs: '64-bit with memory accessed via FS or GS and eflag checking and clearing',
433 ksVariation_64_Addr32: '64-bit w/ address prefix (Addr32)',
434 ksVariation_64f_Addr32: '64-bit w/ address prefix (Addr32) and eflag checking and clearing',
435 };
    ## Set (as dict keys) of all variations that check and clear EFLAGS.
    kdVariationsWithEflagsCheckingAndClearing = {
        ksVariation_16f: True,
        ksVariation_16f_Jmp: True,
        ksVariation_16f_NoJmp: True,
        ksVariation_16f_Addr32: True,
        ksVariation_16f_Pre386: True,
        ksVariation_16f_Pre386_Jmp: True,
        ksVariation_16f_Pre386_NoJmp: True,
        ksVariation_32f: True,
        ksVariation_32f_Jmp: True,
        ksVariation_32f_NoJmp: True,
        ksVariation_32f_Flat: True,
        ksVariation_32f_Addr16: True,
        ksVariation_64f: True,
        ksVariation_64f_Jmp: True,
        ksVariation_64f_NoJmp: True,
        ksVariation_64f_FsGs: True,
        ksVariation_64f_Addr32: True,
    };
    ## 64-bit variations without eflag checking and clearing.
    kdVariationsOnly64NoFlags = {
        ksVariation_64: True,
        ksVariation_64_Jmp: True,
        ksVariation_64_NoJmp: True,
        ksVariation_64_FsGs: True,
        ksVariation_64_Addr32: True,
    };
    ## 64-bit variations with eflag checking and clearing.
    kdVariationsOnly64WithFlags = {
        ksVariation_64f: True,
        ksVariation_64f_Jmp: True,
        ksVariation_64f_NoJmp: True,
        ksVariation_64f_FsGs: True,
        ksVariation_64f_Addr32: True,
    };
    ## Pre-386 variations without eflag checking and clearing.
    kdVariationsOnlyPre386NoFlags = {
        ksVariation_16_Pre386: True,
        ksVariation_16_Pre386_Jmp: True,
        ksVariation_16_Pre386_NoJmp: True,
    };
    ## Pre-386 variations with eflag checking and clearing.
    kdVariationsOnlyPre386WithFlags = {
        ksVariation_16f_Pre386: True,
        ksVariation_16f_Pre386_Jmp: True,
        ksVariation_16f_Pre386_NoJmp: True,
    };
    ## Variations where normal (DS based) memory accesses are flat (no segmentation).
    kdVariationsWithFlatAddress = {
        ksVariation_32_Flat: True,
        ksVariation_32f_Flat: True,
        ksVariation_64: True,
        ksVariation_64f: True,
        ksVariation_64_Addr32: True,
        ksVariation_64f_Addr32: True,
    };
    ## Variations where stack (SS based) accesses are flat.
    kdVariationsWithFlatStackAddress = {
        ksVariation_32_Flat: True,
        ksVariation_32f_Flat: True,
        ksVariation_64: True,
        ksVariation_64f: True,
        ksVariation_64_FsGs: True,
        ksVariation_64f_FsGs: True,
        ksVariation_64_Addr32: True,
        ksVariation_64f_Addr32: True,
    };
    ## Variations where stack accesses are flat and 64-bit wide.
    kdVariationsWithFlat64StackAddress = {
        ksVariation_64: True,
        ksVariation_64f: True,
        ksVariation_64_FsGs: True,
        ksVariation_64f_FsGs: True,
        ksVariation_64_Addr32: True,
        ksVariation_64f_Addr32: True,
    };
    ## Variations using 16-bit effective addressing.
    kdVariationsWithFlatAddr16 = {
        ksVariation_16: True,
        ksVariation_16f: True,
        ksVariation_16_Pre386: True,
        ksVariation_16f_Pre386: True,
        ksVariation_32_Addr16: True,
        ksVariation_32f_Addr16: True,
    };
    ## Variations using 32-bit effective addressing, excluding 64-bit mode.
    kdVariationsWithFlatAddr32No64 = {
        ksVariation_16_Addr32: True,
        ksVariation_16f_Addr32: True,
        ksVariation_32: True,
        ksVariation_32f: True,
        ksVariation_32_Flat: True,
        ksVariation_32f_Flat: True,
    };
    ## 64-bit variations with addressing.
    kdVariationsWithAddressOnly64 = {
        ksVariation_64: True,
        ksVariation_64f: True,
        ksVariation_64_FsGs: True,
        ksVariation_64f_FsGs: True,
        ksVariation_64_Addr32: True,
        ksVariation_64f_Addr32: True,
    };
    ## Variations for conditional instructions (both taken and not-taken forms).
    kdVariationsWithConditional = {
        ksVariation_16_Jmp: True,
        ksVariation_16_NoJmp: True,
        ksVariation_16_Pre386_Jmp: True,
        ksVariation_16_Pre386_NoJmp: True,
        ksVariation_32_Jmp: True,
        ksVariation_32_NoJmp: True,
        ksVariation_64_Jmp: True,
        ksVariation_64_NoJmp: True,
        ksVariation_16f_Jmp: True,
        ksVariation_16f_NoJmp: True,
        ksVariation_16f_Pre386_Jmp: True,
        ksVariation_16f_Pre386_NoJmp: True,
        ksVariation_32f_Jmp: True,
        ksVariation_32f_NoJmp: True,
        ksVariation_64f_Jmp: True,
        ksVariation_64f_NoJmp: True,
    };
    ## Conditional variations for the jump-not-taken case only.
    kdVariationsWithConditionalNoJmp = {
        ksVariation_16_NoJmp: True,
        ksVariation_16_Pre386_NoJmp: True,
        ksVariation_32_NoJmp: True,
        ksVariation_64_NoJmp: True,
        ksVariation_16f_NoJmp: True,
        ksVariation_16f_Pre386_NoJmp: True,
        ksVariation_32f_NoJmp: True,
        ksVariation_64f_NoJmp: True,
    };
    ## All pre-386 variations.
    kdVariationsOnlyPre386 = {
        ksVariation_16_Pre386: True,
        ksVariation_16f_Pre386: True,
        ksVariation_16_Pre386_Jmp: True,
        ksVariation_16f_Pre386_Jmp: True,
        ksVariation_16_Pre386_NoJmp: True,
        ksVariation_16f_Pre386_NoJmp: True,
    };
    ## @}
566
    ## IEM_CIMPL_F_XXX flags that we know.
    ## The value indicates whether it terminates the TB or not. The goal is to
    ## improve the recompiler so all but END_TB will be False.
    ##
    ## @note iemThreadedRecompilerMcDeferToCImpl0 duplicates info found here.
    kdCImplFlags = {
        'IEM_CIMPL_F_MODE':                         False,
        'IEM_CIMPL_F_BRANCH_DIRECT':                False,
        'IEM_CIMPL_F_BRANCH_INDIRECT':              False,
        'IEM_CIMPL_F_BRANCH_RELATIVE':              False,
        'IEM_CIMPL_F_BRANCH_FAR':                   True,
        'IEM_CIMPL_F_BRANCH_CONDITIONAL':           False,
        # IEM_CIMPL_F_BRANCH_ANY should only be used for testing, so not included here.
        'IEM_CIMPL_F_BRANCH_STACK':                 False,
        'IEM_CIMPL_F_BRANCH_STACK_FAR':             False,
        'IEM_CIMPL_F_RFLAGS':                       False,
        'IEM_CIMPL_F_INHIBIT_SHADOW':               False,
        'IEM_CIMPL_F_CHECK_IRQ_AFTER':              False,
        'IEM_CIMPL_F_CHECK_IRQ_BEFORE':             False,
        'IEM_CIMPL_F_CHECK_IRQ_BEFORE_AND_AFTER':   False, # (ignore)
        'IEM_CIMPL_F_STATUS_FLAGS':                 False,
        'IEM_CIMPL_F_VMEXIT':                       False,
        'IEM_CIMPL_F_FPU':                          False,
        'IEM_CIMPL_F_REP':                          False,
        'IEM_CIMPL_F_IO':                           False,
        'IEM_CIMPL_F_END_TB':                       True,
        'IEM_CIMPL_F_XCPT':                         True,
        'IEM_CIMPL_F_CALLS_CIMPL':                  False,
        'IEM_CIMPL_F_CALLS_AIMPL':                  False,
        'IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE':     False,
    };
598
    def __init__(self, oThreadedFunction, sVariation = ksVariation_Default):
        """ Instantiates the given variation of a threaded function. """
        self.oParent        = oThreadedFunction # type: ThreadedFunction
        ##< ksVariation_Xxxx.
        self.sVariation     = sVariation

        ## Threaded function parameter references.
        self.aoParamRefs    = []            # type: List[ThreadedParamRef]
        ## Unique parameter references.
        self.dParamRefs     = {}            # type: Dict[str, List[ThreadedParamRef]]
        ## Minimum number of parameters to the threaded function.
        self.cMinParams     = 0;

        ## List/tree of statements for the threaded function.
        self.aoStmtsForThreadedFunction = [] # type: List[McStmt]

        ## Function enum number, for verification. Set by generateThreadedFunctionsHeader.
        self.iEnumValue     = -1;

        ## Native recompilation details for this variation.
        self.oNativeRecomp  = None;
619
620 def getIndexName(self):
621 sName = self.oParent.oMcBlock.sFunction;
622 if sName.startswith('iemOp_'):
623 sName = sName[len('iemOp_'):];
624 return 'kIemThreadedFunc_%s%s%s' % ( sName, self.oParent.sSubName, self.sVariation, );
625
626 def getThreadedFunctionName(self):
627 sName = self.oParent.oMcBlock.sFunction;
628 if sName.startswith('iemOp_'):
629 sName = sName[len('iemOp_'):];
630 return 'iemThreadedFunc_%s%s%s' % ( sName, self.oParent.sSubName, self.sVariation, );
631
632 def getNativeFunctionName(self):
633 return 'iemNativeRecompFunc_' + self.getThreadedFunctionName()[len('iemThreadedFunc_'):];
634
635 def getLivenessFunctionName(self):
636 return 'iemNativeLivenessFunc_' + self.getThreadedFunctionName()[len('iemThreadedFunc_'):];
637
638 def getShortName(self):
639 sName = self.oParent.oMcBlock.sFunction;
640 if sName.startswith('iemOp_'):
641 sName = sName[len('iemOp_'):];
642 return '%s%s%s' % ( sName, self.oParent.sSubName, self.sVariation, );
643
644 def getThreadedFunctionStatisticsName(self):
645 sName = self.oParent.oMcBlock.sFunction;
646 if sName.startswith('iemOp_'):
647 sName = sName[len('iemOp_'):];
648
649 sVarNm = self.sVariation;
650 if sVarNm:
651 if sVarNm.startswith('_'):
652 sVarNm = sVarNm[1:];
653 if sVarNm.endswith('_Jmp'):
654 sVarNm = sVarNm[:-4];
655 sName += '_Jmp';
656 elif sVarNm.endswith('_NoJmp'):
657 sVarNm = sVarNm[:-6];
658 sName += '_NoJmp';
659 else:
660 sVarNm = 'DeferToCImpl';
661
662 return '%s/%s%s' % ( sVarNm, sName, self.oParent.sSubName );
663
664 def isWithFlagsCheckingAndClearingVariation(self):
665 """
666 Checks if this is a variation that checks and clears EFLAGS.
667 """
668 return self.sVariation in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing;
669
    #
    # Analysis and code morphing.
    #

    def raiseProblem(self, sMessage):
        """ Raises a problem. Delegates to the parent ThreadedFunction. """
        self.oParent.raiseProblem(sMessage);
677
    def warning(self, sMessage):
        """ Emits a warning. Delegates to the parent ThreadedFunction. """
        self.oParent.warning(sMessage);
681
    def analyzeReferenceToType(self, sRef):
        """
        Translates a variable or structure reference to a type.
        Returns type name.
        Raises exception if unable to figure it out.

        The mapping is heuristic, keyed on the hungarian-style prefix of the
        reference (u32 -> uint32_t, f -> bool, i16 -> int16_t, and so on).
        """
        # Dispatch on the first character of the reference.
        ch0 = sRef[0];
        if ch0 == 'u':
            if sRef.startswith('u32'):
                return 'uint32_t';
            if sRef.startswith('u8') or sRef == 'uReg':
                return 'uint8_t';
            if sRef.startswith('u64'):
                return 'uint64_t';
            if sRef.startswith('u16'):
                return 'uint16_t';
        elif ch0 == 'b':
            return 'uint8_t';
        elif ch0 == 'f':
            return 'bool';
        elif ch0 == 'i':
            if sRef.startswith('i8'):
                return 'int8_t';
            if sRef.startswith('i16'):
                return 'int16_t';
            if sRef.startswith('i32'):
                return 'int32_t';
            if sRef.startswith('i64'):
                return 'int64_t';
            if sRef in ('iReg', 'iFixedReg', 'iGReg', 'iSegReg', 'iSrcReg', 'iDstReg', 'iCrReg'):
                return 'uint8_t';
        elif ch0 == 'p':
            # Plain pointer names (no '->' etc) are host pointers; known
            # pVCpu->iem.s. fields are looked up in g_kdIemFieldToType.
            if sRef.find('-') < 0:
                return 'uintptr_t';
            if sRef.startswith('pVCpu->iem.s.'):
                sField = sRef[len('pVCpu->iem.s.') : ];
                if sField in g_kdIemFieldToType:
                    if g_kdIemFieldToType[sField][0]:
                        return g_kdIemFieldToType[sField][0];
        elif ch0 == 'G' and sRef.startswith('GCPtr'):
            return 'uint64_t';
        elif ch0 == 'e':
            if sRef == 'enmEffOpSize':
                return 'IEMMODE';
        elif ch0 == 'o':
            if sRef.startswith('off32'):
                return 'uint32_t';
        elif sRef == 'cbFrame':  # enter
            return 'uint16_t';
        elif sRef == 'cShift': ## @todo risky
            return 'uint8_t';

        self.raiseProblem('Unknown reference: %s' % (sRef,));
        return None; # Shut up pylint 2.16.2.
736
    def analyzeCallToType(self, sFnRef):
        """
        Determines the type of an indirect function call.

        For plain pointer names the type is found by scanning the decoder
        function's definition macro arguments and local declarations; for
        pImpl->pfnXxx references it is derived from the pImpl table type.
        """
        assert sFnRef[0] == 'p';

        #
        # Simple?
        #
        if sFnRef.find('-') < 0:
            oDecoderFunction = self.oParent.oMcBlock.oFunction;

            # Try the argument list of the function definition macro invocation first.
            iArg = 2;
            while iArg < len(oDecoderFunction.asDefArgs):
                if sFnRef == oDecoderFunction.asDefArgs[iArg]:
                    return oDecoderFunction.asDefArgs[iArg - 1];
                iArg += 1;

            # Then check out line that includes the word and looks like a variable declaration.
            oRe = re.compile(' +(P[A-Z0-9_]+|const +IEMOP[A-Z0-9_]+ *[*]) +(const |) *' + sFnRef + ' *(;|=)');
            for sLine in oDecoderFunction.asLines:
                oMatch = oRe.match(sLine);
                if oMatch:
                    if not oMatch.group(1).startswith('const'):
                        return oMatch.group(1);
                    # 'const IEMOPXXX *' becomes the pointer-to-const typedef 'PCIEMOPXXX'.
                    return 'PC' + oMatch.group(1)[len('const ') : -1].strip();

        #
        # Deal with the pImpl->pfnXxx:
        #
        elif sFnRef.startswith('pImpl->pfn'):
            sMember   = sFnRef[len('pImpl->') : ];
            sBaseType = self.analyzeCallToType('pImpl');
            # The bit width suffix follows the last 'U' in the member name.
            offBits   = sMember.rfind('U') + 1;
            if sBaseType == 'PCIEMOPBINSIZES':          return 'PFNIEMAIMPLBINU'        + sMember[offBits:];
            if sBaseType == 'PCIEMOPUNARYSIZES':        return 'PFNIEMAIMPLUNARYU'      + sMember[offBits:];
            if sBaseType == 'PCIEMOPSHIFTSIZES':        return 'PFNIEMAIMPLSHIFTU'      + sMember[offBits:];
            if sBaseType == 'PCIEMOPSHIFTDBLSIZES':     return 'PFNIEMAIMPLSHIFTDBLU'   + sMember[offBits:];
            if sBaseType == 'PCIEMOPMULDIVSIZES':       return 'PFNIEMAIMPLMULDIVU'     + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAF3':           return 'PFNIEMAIMPLMEDIAF3U'    + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAOPTF2':        return 'PFNIEMAIMPLMEDIAOPTF2U' + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAOPTF2IMM8':    return 'PFNIEMAIMPLMEDIAOPTF2U' + sMember[offBits:] + 'IMM8';
            if sBaseType == 'PCIEMOPMEDIAOPTF3':        return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAOPTF3IMM8':    return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:] + 'IMM8';
            if sBaseType == 'PCIEMOPBLENDOP':           return 'PFNIEMAIMPLAVXBLENDU'   + sMember[offBits:];

            self.raiseProblem('Unknown call reference: %s::%s (%s)' % (sBaseType, sMember, sFnRef,));

        self.raiseProblem('Unknown call reference: %s' % (sFnRef,));
        return None; # Shut up pylint 2.16.2.
788
    def analyze8BitGRegStmt(self, oStmt):
        """
        Gets the 8-bit general purpose register access details of the given statement.
        ASSUMES the statement is one accessing an 8-bit GREG.

        Returns a (idxReg, sOrgExpr, sStdRef) tuple: the parameter index of the
        register reference, the replacement C expression yielding the extended
        (REX-aware) register index, and a standardized reference name.
        """
        # Fetch/ref/to-local statements have the register as the second parameter.
        idxReg = 0;
        if (   oStmt.sName.find('_FETCH_') > 0
            or oStmt.sName.find('_REF_') > 0
            or oStmt.sName.find('_TO_LOCAL') > 0):
            idxReg = 1;

        sRegRef = oStmt.asParams[idxReg];
        if sRegRef.startswith('IEM_GET_MODRM_RM') or sRegRef.startswith('IEM_GET_MODRM_REG'):
            # Rewrite the MODRM macro to its _EX8 variant which handles AH-DH vs REX.
            asBits = [sBit.strip() for sBit in sRegRef.replace('(', ',').replace(')', '').split(',')];
            if len(asBits) != 3 or asBits[1] != 'pVCpu' or (asBits[0] != 'IEM_GET_MODRM_RM' and asBits[0] != 'IEM_GET_MODRM_REG'):
                self.raiseProblem('Unexpected reference: %s (asBits=%s)' % (sRegRef, asBits));
            sOrgExpr = asBits[0] + '_EX8(pVCpu, ' + asBits[2] + ')';
        else:
            # Fixed register: without a REX prefix, indices 4-7 are AH-BH, mapped to 16-19 here.
            sOrgExpr = '((%s) < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX) ? (%s) : (%s) + 12)' % (sRegRef, sRegRef, sRegRef);

        if sRegRef.find('IEM_GET_MODRM_RM') >= 0:    sStdRef = 'bRmRm8Ex';
        elif sRegRef.find('IEM_GET_MODRM_REG') >= 0: sStdRef = 'bRmReg8Ex';
        elif sRegRef == 'X86_GREG_xAX':              sStdRef = 'bGregXAx8Ex';
        elif sRegRef == 'X86_GREG_xCX':              sStdRef = 'bGregXCx8Ex';
        elif sRegRef == 'X86_GREG_xSP':              sStdRef = 'bGregXSp8Ex';
        elif sRegRef == 'iFixedReg':                 sStdRef = 'bFixedReg8Ex';
        else:
            self.warning('analyze8BitGRegStmt: sRegRef=%s -> bOther8Ex; %s %s; sOrgExpr=%s'
                         % (sRegRef, oStmt.sName, oStmt.asParams, sOrgExpr,));
            sStdRef = 'bOther8Ex';

        #print('analyze8BitGRegStmt: %s %s; sRegRef=%s\n -> idxReg=%s sOrgExpr=%s sStdRef=%s'
        #      % (oStmt.sName, oStmt.asParams, sRegRef, idxReg, sOrgExpr, sStdRef));
        return (idxReg, sOrgExpr, sStdRef);
823
824
    ## Maps memory related MCs to info for FLAT conversion.
    ## This is used in 64-bit and flat 32-bit variants to skip the unnecessary
    ## segmentation checking for every memory access.  Only applied to access
    ## via ES, DS and SS.  FS, GS and CS get the full segmentation treatment,
    ## the latter (CS) is just to keep things simple (we could safely fetch via
    ## it, but only in 64-bit mode could we safely write via it, IIRC).
    ##
    ## Each entry maps an MC name to a (idxEffSeg, sFlatMcName) pair: the
    ## zero-based index of the effective segment register parameter to drop
    ## (-1 when the MC has none), and the FLAT MC that replaces the keyed one
    ## (see the FLAT morphing in analyzeMorphStmtForThreaded).
    kdMemMcToFlatInfo = {
        'IEM_MC_FETCH_MEM_U8':                            ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8' ),
        'IEM_MC_FETCH_MEM16_U8':                          ( 1, 'IEM_MC_FETCH_MEM16_FLAT_U8' ),
        'IEM_MC_FETCH_MEM32_U8':                          ( 1, 'IEM_MC_FETCH_MEM32_FLAT_U8' ),
        'IEM_MC_FETCH_MEM_U16':                           ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16' ),
        'IEM_MC_FETCH_MEM_U16_DISP':                      ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_DISP' ),
        'IEM_MC_FETCH_MEM_I16':                           ( 1, 'IEM_MC_FETCH_MEM_FLAT_I16' ),
        'IEM_MC_FETCH_MEM_U32':                           ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32' ),
        'IEM_MC_FETCH_MEM_U32_DISP':                      ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_DISP' ),
        'IEM_MC_FETCH_MEM_I32':                           ( 1, 'IEM_MC_FETCH_MEM_FLAT_I32' ),
        'IEM_MC_FETCH_MEM_U64':                           ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64' ),
        'IEM_MC_FETCH_MEM_U64_DISP':                      ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_DISP' ),
        'IEM_MC_FETCH_MEM_U64_ALIGN_U128':                ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_ALIGN_U128' ),
        'IEM_MC_FETCH_MEM_I64':                           ( 1, 'IEM_MC_FETCH_MEM_FLAT_I64' ),
        'IEM_MC_FETCH_MEM_R32':                           ( 1, 'IEM_MC_FETCH_MEM_FLAT_R32' ),
        'IEM_MC_FETCH_MEM_R64':                           ( 1, 'IEM_MC_FETCH_MEM_FLAT_R64' ),
        'IEM_MC_FETCH_MEM_R80':                           ( 1, 'IEM_MC_FETCH_MEM_FLAT_R80' ),
        'IEM_MC_FETCH_MEM_D80':                           ( 1, 'IEM_MC_FETCH_MEM_FLAT_D80' ),
        'IEM_MC_FETCH_MEM_U128':                          ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128' ),
        'IEM_MC_FETCH_MEM_U128_NO_AC':                    ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_NO_AC' ),
        'IEM_MC_FETCH_MEM_U128_ALIGN_SSE':                ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_ALIGN_SSE' ),
        'IEM_MC_FETCH_MEM_XMM':                           ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM' ),
        'IEM_MC_FETCH_MEM_XMM_NO_AC':                     ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_NO_AC' ),
        'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE':                 ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE' ),
        'IEM_MC_FETCH_MEM_XMM_U32':                       ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32' ),
        'IEM_MC_FETCH_MEM_XMM_U64':                       ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64' ),
        'IEM_MC_FETCH_MEM_U256':                          ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256' ),
        'IEM_MC_FETCH_MEM_U256_NO_AC':                    ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_NO_AC' ),
        'IEM_MC_FETCH_MEM_U256_ALIGN_AVX':                ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_ALIGN_AVX' ),
        'IEM_MC_FETCH_MEM_YMM':                           ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM' ),
        'IEM_MC_FETCH_MEM_YMM_NO_AC':                     ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_NO_AC' ),
        'IEM_MC_FETCH_MEM_YMM_ALIGN_AVX':                 ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX' ),
        'IEM_MC_FETCH_MEM_U8_ZX_U16':                     ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U16' ),
        'IEM_MC_FETCH_MEM_U8_ZX_U32':                     ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U32' ),
        'IEM_MC_FETCH_MEM_U8_ZX_U64':                     ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U64' ),
        'IEM_MC_FETCH_MEM_U16_ZX_U32':                    ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U32' ),
        'IEM_MC_FETCH_MEM_U16_ZX_U64':                    ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U64' ),
        'IEM_MC_FETCH_MEM_U32_ZX_U64':                    ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_ZX_U64' ),
        'IEM_MC_FETCH_MEM_U8_SX_U16':                     ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U16' ),
        'IEM_MC_FETCH_MEM_U8_SX_U32':                     ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U32' ),
        'IEM_MC_FETCH_MEM_U8_SX_U64':                     ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U64' ),
        'IEM_MC_FETCH_MEM_U16_SX_U32':                    ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U32' ),
        'IEM_MC_FETCH_MEM_U16_SX_U64':                    ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U64' ),
        'IEM_MC_FETCH_MEM_U32_SX_U64':                    ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_SX_U64' ),
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128':            ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128' ),
        'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM':    ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE_AND_XREG_XMM' ),
        'IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM':          ( 3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32_AND_XREG_XMM' ),
        'IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM':          ( 3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64_AND_XREG_XMM' ),
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64':
                                                          ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_RAX_RDX_U64' ),
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64':
                                                          ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64' ),
        'IEM_MC_STORE_MEM_U8':                            ( 0, 'IEM_MC_STORE_MEM_FLAT_U8' ),
        'IEM_MC_STORE_MEM_U16':                           ( 0, 'IEM_MC_STORE_MEM_FLAT_U16' ),
        'IEM_MC_STORE_MEM_U32':                           ( 0, 'IEM_MC_STORE_MEM_FLAT_U32' ),
        'IEM_MC_STORE_MEM_U64':                           ( 0, 'IEM_MC_STORE_MEM_FLAT_U64' ),
        'IEM_MC_STORE_MEM_U8_CONST':                      ( 0, 'IEM_MC_STORE_MEM_FLAT_U8_CONST' ),
        'IEM_MC_STORE_MEM_U16_CONST':                     ( 0, 'IEM_MC_STORE_MEM_FLAT_U16_CONST' ),
        'IEM_MC_STORE_MEM_U32_CONST':                     ( 0, 'IEM_MC_STORE_MEM_FLAT_U32_CONST' ),
        'IEM_MC_STORE_MEM_U64_CONST':                     ( 0, 'IEM_MC_STORE_MEM_FLAT_U64_CONST' ),
        'IEM_MC_STORE_MEM_U128':                          ( 0, 'IEM_MC_STORE_MEM_FLAT_U128' ),
        'IEM_MC_STORE_MEM_U128_NO_AC':                    ( 0, 'IEM_MC_STORE_MEM_FLAT_U128_NO_AC' ),
        'IEM_MC_STORE_MEM_U128_ALIGN_SSE':                ( 0, 'IEM_MC_STORE_MEM_FLAT_U128_ALIGN_SSE' ),
        'IEM_MC_STORE_MEM_U256':                          ( 0, 'IEM_MC_STORE_MEM_FLAT_U256' ),
        'IEM_MC_STORE_MEM_U256_NO_AC':                    ( 0, 'IEM_MC_STORE_MEM_FLAT_U256_NO_AC' ),
        'IEM_MC_STORE_MEM_U256_ALIGN_AVX':                ( 0, 'IEM_MC_STORE_MEM_FLAT_U256_ALIGN_AVX' ),
        'IEM_MC_MEM_MAP_D80_WO':                          ( 2, 'IEM_MC_MEM_FLAT_MAP_D80_WO' ),
        'IEM_MC_MEM_MAP_I16_WO':                          ( 2, 'IEM_MC_MEM_FLAT_MAP_I16_WO' ),
        'IEM_MC_MEM_MAP_I32_WO':                          ( 2, 'IEM_MC_MEM_FLAT_MAP_I32_WO' ),
        'IEM_MC_MEM_MAP_I64_WO':                          ( 2, 'IEM_MC_MEM_FLAT_MAP_I64_WO' ),
        'IEM_MC_MEM_MAP_R32_WO':                          ( 2, 'IEM_MC_MEM_FLAT_MAP_R32_WO' ),
        'IEM_MC_MEM_MAP_R64_WO':                          ( 2, 'IEM_MC_MEM_FLAT_MAP_R64_WO' ),
        'IEM_MC_MEM_MAP_R80_WO':                          ( 2, 'IEM_MC_MEM_FLAT_MAP_R80_WO' ),
        'IEM_MC_MEM_MAP_U8_ATOMIC':                       ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_ATOMIC' ),
        'IEM_MC_MEM_MAP_U8_RW':                           ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RW' ),
        'IEM_MC_MEM_MAP_U8_RO':                           ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RO' ),
        'IEM_MC_MEM_MAP_U8_WO':                           ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_WO' ),
        'IEM_MC_MEM_MAP_U16_ATOMIC':                      ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_ATOMIC' ),
        'IEM_MC_MEM_MAP_U16_RW':                          ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RW' ),
        'IEM_MC_MEM_MAP_U16_RO':                          ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RO' ),
        'IEM_MC_MEM_MAP_U16_WO':                          ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_WO' ),
        'IEM_MC_MEM_MAP_U32_ATOMIC':                      ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_ATOMIC' ),
        'IEM_MC_MEM_MAP_U32_RW':                          ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RW' ),
        'IEM_MC_MEM_MAP_U32_RO':                          ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RO' ),
        'IEM_MC_MEM_MAP_U32_WO':                          ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_WO' ),
        'IEM_MC_MEM_MAP_U64_ATOMIC':                      ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_ATOMIC' ),
        'IEM_MC_MEM_MAP_U64_RW':                          ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RW' ),
        'IEM_MC_MEM_MAP_U64_RO':                          ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RO' ),
        'IEM_MC_MEM_MAP_U64_WO':                          ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_WO' ),
        'IEM_MC_MEM_MAP_U128_ATOMIC':                     ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_ATOMIC' ),
        'IEM_MC_MEM_MAP_U128_RW':                         ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_RW' ),
        'IEM_MC_MEM_MAP_U128_RO':                         ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_RO' ),
        'IEM_MC_MEM_MAP_U128_WO':                         ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_WO' ),
        'IEM_MC_MEM_MAP_EX':                              ( 3, 'IEM_MC_MEM_FLAT_MAP_EX' ),
    };
926
    ## Maps stack related MCs to their FLAT variants.
    ## Each entry is a pair indexed by the stack flavour: index 0 is used for
    ## flat 32-bit stacks and index 1 for flat 64-bit stacks (see the
    ## kdVariationsWithFlat64StackAddress lookup in analyzeMorphStmtForThreaded).
    kdMemMcToFlatInfoStack = {
        'IEM_MC_PUSH_U16':                        ( 'IEM_MC_FLAT32_PUSH_U16',       'IEM_MC_FLAT64_PUSH_U16', ),
        'IEM_MC_PUSH_U32':                        ( 'IEM_MC_FLAT32_PUSH_U32',       'IEM_MC_PUSH_U32', ),
        'IEM_MC_PUSH_U64':                        ( 'IEM_MC_PUSH_U64',              'IEM_MC_FLAT64_PUSH_U64', ),
        'IEM_MC_PUSH_U32_SREG':                   ( 'IEM_MC_FLAT32_PUSH_U32_SREG',  'IEM_MC_PUSH_U32_SREG' ),
        'IEM_MC_POP_GREG_U16':                    ( 'IEM_MC_FLAT32_POP_GREG_U16',   'IEM_MC_FLAT64_POP_GREG_U16', ),
        'IEM_MC_POP_GREG_U32':                    ( 'IEM_MC_FLAT32_POP_GREG_U32',   'IEM_MC_POP_GREG_U32', ),
        'IEM_MC_POP_GREG_U64':                    ( 'IEM_MC_POP_GREG_U64',          'IEM_MC_FLAT64_POP_GREG_U64', ),
    };
936
    ## Maps each variation to the IEM_MC_CALC_RM_EFF_ADDR_THREADED_XXX MC that
    ## replaces IEM_MC_CALC_RM_EFF_ADDR for it (see analyzeMorphStmtForThreaded).
    kdThreadedCalcRmEffAddrMcByVariation = {
        ksVariation_16:             'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16f:            'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16_Pre386:      'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16f_Pre386:     'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_32_Addr16:      'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_32f_Addr16:     'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16_Addr32:      'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_16f_Addr32:     'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32:             'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32f:            'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32_Flat:        'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32f_Flat:       'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_64:             'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
        ksVariation_64f:            'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
        ksVariation_64_FsGs:        'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
        ksVariation_64f_FsGs:       'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
        ksVariation_64_Addr32:      'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32', ## @todo How did this work again...
        ksVariation_64f_Addr32:     'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32',
    };
957
    def analyzeMorphStmtForThreaded(self, aoStmts, dState, iParamRef = 0, iLevel = 0):
        """
        Transforms (copy) the statements into those for the threaded function.

        Returns list/tree of statements (aoStmts is not modified) and the new
        iParamRef value.

        The dState dictionary carries state across recursion levels; currently
        only the 'IEM_MC_ASSERT_EFLAGS' entry, which records the iLevel where an
        IEM_MC_REF_EFLAGS was seen so an assertion MC can be emitted before the
        next ..._AND_FINISH statement or when that level goes out of scope.
        """
        #
        # We'll be traversing aoParamRefs in parallel to the statements, so we
        # must match the traversal in analyzeFindThreadedParamRefs exactly.
        #
        #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
        aoThreadedStmts = [];
        for oStmt in aoStmts:
            # Skip C++ statements that are purely related to decoding.
            if not oStmt.isCppStmt() or not oStmt.fDecode:
                # Copy the statement. Make a deep copy to make sure we've got our own
                # copies of all instance variables, even if a bit overkill at the moment.
                oNewStmt = copy.deepcopy(oStmt);
                aoThreadedStmts.append(oNewStmt);
                #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));

                # If the statement has parameter references, process the relevant parameters.
                # We grab the references relevant to this statement and apply them in reverse
                # order so earlier offsets stay valid while the text is being replaced.
                if iParamRef < len(self.aoParamRefs) and self.aoParamRefs[iParamRef].oStmt == oStmt:
                    iParamRefFirst = iParamRef;
                    while True:
                        iParamRef += 1;
                        if iParamRef >= len(self.aoParamRefs) or self.aoParamRefs[iParamRef].oStmt != oStmt:
                            break;

                    #print('iParamRefFirst=%s iParamRef=%s' % (iParamRefFirst, iParamRef));
                    for iCurRef in range(iParamRef - 1, iParamRefFirst - 1, -1):
                        oCurRef = self.aoParamRefs[iCurRef];
                        if oCurRef.iParam is not None:
                            assert oCurRef.oStmt == oStmt;
                            #print('iCurRef=%s iParam=%s sOrgRef=%s' % (iCurRef, oCurRef.iParam, oCurRef.sOrgRef));
                            sSrcParam = oNewStmt.asParams[oCurRef.iParam];
                            assert (   sSrcParam[oCurRef.offParam : oCurRef.offParam + len(oCurRef.sOrgRef)] == oCurRef.sOrgRef
                                    or oCurRef.fCustomRef), \
                                   'offParam=%s sOrgRef=%s iParam=%s oStmt.sName=%s sSrcParam=%s<eos>' \
                                   % (oCurRef.offParam, oCurRef.sOrgRef, oCurRef.iParam, oStmt.sName, sSrcParam);
                            oNewStmt.asParams[oCurRef.iParam] = sSrcParam[0 : oCurRef.offParam] \
                                                              + oCurRef.sNewName \
                                                              + sSrcParam[oCurRef.offParam + len(oCurRef.sOrgRef) : ];

                # Morph IEM_MC_CALC_RM_EFF_ADDR into IEM_MC_CALC_RM_EFF_ADDR_THREADED ...
                if oNewStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
                    oNewStmt.sName = self.kdThreadedCalcRmEffAddrMcByVariation[self.sVariation];
                    assert len(oNewStmt.asParams) == 3;

                    if self.sVariation in self.kdVariationsWithFlatAddr16:
                        oNewStmt.asParams = [
                            oNewStmt.asParams[0], oNewStmt.asParams[1], self.dParamRefs['u16Disp'][0].sNewName,
                        ];
                    else:
                        sSibAndMore = self.dParamRefs['bSib'][0].sNewName; # Merge bSib and 2nd part of cbImmAndRspOffset.
                        if oStmt.asParams[2] not in ('0', '1', '2', '4'):
                            sSibAndMore = '(%s) | ((%s) & 0x0f00)' % (self.dParamRefs['bSib'][0].sNewName, oStmt.asParams[2]);

                        if self.sVariation in self.kdVariationsWithFlatAddr32No64:
                            oNewStmt.asParams = [
                                oNewStmt.asParams[0], oNewStmt.asParams[1], sSibAndMore, self.dParamRefs['u32Disp'][0].sNewName,
                            ];
                        else:
                            oNewStmt.asParams = [
                                oNewStmt.asParams[0], self.dParamRefs['bRmEx'][0].sNewName, sSibAndMore,
                                self.dParamRefs['u32Disp'][0].sNewName, self.dParamRefs['cbInstr'][0].sNewName,
                            ];
                # ... and IEM_MC_ADVANCE_RIP_AND_FINISH into *_THREADED_PCxx[_WITH_FLAGS] ...
                elif (    oNewStmt.sName
                       in ('IEM_MC_ADVANCE_RIP_AND_FINISH',
                           'IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH',
                           'IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH', 'IEM_MC_SET_RIP_U64_AND_FINISH', )):
                    if oNewStmt.sName not in ('IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH',
                                              'IEM_MC_SET_RIP_U64_AND_FINISH', ):
                        oNewStmt.asParams.append(self.dParamRefs['cbInstr'][0].sNewName);
                    if (    oNewStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH', )
                        and self.sVariation not in self.kdVariationsOnlyPre386):
                        oNewStmt.asParams.append(self.dParamRefs['pVCpu->iem.s.enmEffOpSize'][0].sNewName);
                    oNewStmt.sName += '_THREADED';
                    if self.sVariation in self.kdVariationsOnly64NoFlags:
                        oNewStmt.sName += '_PC64';
                    elif self.sVariation in self.kdVariationsOnly64WithFlags:
                        oNewStmt.sName += '_PC64_WITH_FLAGS';
                    elif self.sVariation in self.kdVariationsOnlyPre386NoFlags:
                        oNewStmt.sName += '_PC16';
                    elif self.sVariation in self.kdVariationsOnlyPre386WithFlags:
                        oNewStmt.sName += '_PC16_WITH_FLAGS';
                    elif self.sVariation not in self.kdVariationsWithEflagsCheckingAndClearing:
                        assert self.sVariation != self.ksVariation_Default;
                        oNewStmt.sName += '_PC32';
                    else:
                        oNewStmt.sName += '_PC32_WITH_FLAGS';

                    # This is making the wrong branch of conditionals break out of the TB.
                    if (oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH',
                                        'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH')):
                        sExitTbStatus = 'VINF_SUCCESS';
                        if self.sVariation in self.kdVariationsWithConditional:
                            if self.sVariation in self.kdVariationsWithConditionalNoJmp:
                                if oStmt.sName != 'IEM_MC_ADVANCE_RIP_AND_FINISH':
                                    sExitTbStatus = 'VINF_IEM_REEXEC_BREAK';
                            elif oStmt.sName == 'IEM_MC_ADVANCE_RIP_AND_FINISH':
                                sExitTbStatus = 'VINF_IEM_REEXEC_BREAK';
                        oNewStmt.asParams.append(sExitTbStatus);

                    # Insert an MC so we can assert the correctness of modified flags annotations on IEM_MC_REF_EFLAGS.
                    if 'IEM_MC_ASSERT_EFLAGS' in dState:
                        aoThreadedStmts.insert(len(aoThreadedStmts) - 1,
                                               iai.McStmtAssertEFlags(self.oParent.oMcBlock.oInstruction));
                        del dState['IEM_MC_ASSERT_EFLAGS'];

                # ... and IEM_MC_*_GREG_U8 into *_THREADED w/ reworked index taking REX into account
                elif oNewStmt.sName.startswith('IEM_MC_') and oNewStmt.sName.find('_GREG_U8') > 0:
                    (idxReg, _, sStdRef) = self.analyze8BitGRegStmt(oStmt); # Don't use oNewStmt as it has been modified!
                    oNewStmt.asParams[idxReg] = self.dParamRefs[sStdRef][0].sNewName;
                    oNewStmt.sName += '_THREADED';

                # ... and IEM_MC_CALL_CIMPL_[0-5] and IEM_MC_DEFER_TO_CIMPL_[0-5]_RET into *_THREADED ...
                elif oNewStmt.sName.startswith('IEM_MC_CALL_CIMPL_') or oNewStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_'):
                    oNewStmt.sName += '_THREADED';
                    oNewStmt.idxFn += 1;        # cbInstr is inserted first, shifting the function and parameter indexes.
                    oNewStmt.idxParams += 1;
                    oNewStmt.asParams.insert(0, self.dParamRefs['cbInstr'][0].sNewName);

                # ... and in FLAT modes we must morph memory access into FLAT accesses ...
                elif (    self.sVariation in self.kdVariationsWithFlatAddress
                      and (   oNewStmt.sName.startswith('IEM_MC_FETCH_MEM')
                           or (oNewStmt.sName.startswith('IEM_MC_STORE_MEM_') and oNewStmt.sName.find('_BY_REF') < 0)
                           or oNewStmt.sName.startswith('IEM_MC_MEM_MAP') )):
                    idxEffSeg = self.kdMemMcToFlatInfo[oNewStmt.sName][0];
                    if idxEffSeg != -1:
                        if (    oNewStmt.asParams[idxEffSeg].find('iEffSeg') < 0
                            and oNewStmt.asParams[idxEffSeg] not in ('X86_SREG_ES', ) ):
                            self.raiseProblem('Expected iEffSeg as param #%d to %s: %s'
                                              % (idxEffSeg + 1, oNewStmt.sName, oNewStmt.asParams[idxEffSeg],));
                        oNewStmt.asParams.pop(idxEffSeg);
                    oNewStmt.sName = self.kdMemMcToFlatInfo[oNewStmt.sName][1];

                # ... PUSH and POP also needs flat variants, but these differ a little.
                elif (    self.sVariation in self.kdVariationsWithFlatStackAddress
                      and (   (oNewStmt.sName.startswith('IEM_MC_PUSH') and oNewStmt.sName.find('_FPU') < 0)
                           or oNewStmt.sName.startswith('IEM_MC_POP'))):
                    oNewStmt.sName = self.kdMemMcToFlatInfoStack[oNewStmt.sName][int(self.sVariation in
                                                                                     self.kdVariationsWithFlat64StackAddress)];

                # Add EFLAGS usage annotations to relevant MCs.
                elif oNewStmt.sName in ('IEM_MC_COMMIT_EFLAGS', 'IEM_MC_REF_EFLAGS', 'IEM_MC_FETCH_EFLAGS'):
                    oInstruction = self.oParent.oMcBlock.oInstruction;
                    oNewStmt.sName += '_EX';
                    oNewStmt.asParams.append(oInstruction.getTestedFlagsCStyle());   # Shall crash and burn if oInstruction is
                    oNewStmt.asParams.append(oInstruction.getModifiedFlagsCStyle()); # None. Fix the IEM decoder code.

                    # For IEM_MC_REF_EFLAGS we need to emit an MC before the ..._FINISH
                    if oNewStmt.sName == 'IEM_MC_REF_EFLAGS_EX':
                        dState['IEM_MC_ASSERT_EFLAGS'] = iLevel;

                # Process branches of conditionals recursively.
                if isinstance(oStmt, iai.McStmtCond):
                    (oNewStmt.aoIfBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoIfBranch, dState,
                                                                                        iParamRef, iLevel + 1);
                    if oStmt.aoElseBranch:
                        (oNewStmt.aoElseBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoElseBranch,
                                                                                              dState, iParamRef, iLevel + 1);

        # Insert an MC so we can assert the correctness of modified flags annotations
        # on IEM_MC_REF_EFLAGS if it goes out of scope.
        if dState.get('IEM_MC_ASSERT_EFLAGS', -1) == iLevel:
            aoThreadedStmts.append(iai.McStmtAssertEFlags(self.oParent.oMcBlock.oInstruction));
            del dState['IEM_MC_ASSERT_EFLAGS'];

        return (aoThreadedStmts, iParamRef);
1131
1132
1133 def analyzeConsolidateThreadedParamRefs(self):
1134 """
1135 Consolidate threaded function parameter references into a dictionary
1136 with lists of the references to each variable/field.
1137 """
1138 # Gather unique parameters.
1139 self.dParamRefs = {};
1140 for oRef in self.aoParamRefs:
1141 if oRef.sStdRef not in self.dParamRefs:
1142 self.dParamRefs[oRef.sStdRef] = [oRef,];
1143 else:
1144 self.dParamRefs[oRef.sStdRef].append(oRef);
1145
1146 # Generate names for them for use in the threaded function.
1147 dParamNames = {};
1148 for sName, aoRefs in self.dParamRefs.items():
1149 # Morph the reference expression into a name.
1150 if sName.startswith('IEM_GET_MODRM_REG'): sName = 'bModRmRegP';
1151 elif sName.startswith('IEM_GET_MODRM_RM'): sName = 'bModRmRmP';
1152 elif sName.startswith('IEM_GET_MODRM_REG_8'): sName = 'bModRmReg8P';
1153 elif sName.startswith('IEM_GET_MODRM_RM_8'): sName = 'bModRmRm8P';
1154 elif sName.startswith('IEM_GET_EFFECTIVE_VVVV'): sName = 'bEffVvvvP';
1155 elif sName.find('.') >= 0 or sName.find('->') >= 0:
1156 sName = sName[max(sName.rfind('.'), sName.rfind('>')) + 1 : ] + 'P';
1157 else:
1158 sName += 'P';
1159
1160 # Ensure it's unique.
1161 if sName in dParamNames:
1162 for i in range(10):
1163 if sName + str(i) not in dParamNames:
1164 sName += str(i);
1165 break;
1166 dParamNames[sName] = True;
1167
1168 # Update all the references.
1169 for oRef in aoRefs:
1170 oRef.sNewName = sName;
1171
1172 # Organize them by size too for the purpose of optimize them.
1173 dBySize = {} # type: Dict[str, str]
1174 for sStdRef, aoRefs in self.dParamRefs.items():
1175 if aoRefs[0].sType[0] != 'P':
1176 cBits = g_kdTypeInfo[aoRefs[0].sType][0];
1177 assert(cBits <= 64);
1178 else:
1179 cBits = 64;
1180
1181 if cBits not in dBySize:
1182 dBySize[cBits] = [sStdRef,]
1183 else:
1184 dBySize[cBits].append(sStdRef);
1185
1186 # Pack the parameters as best as we can, starting with the largest ones
1187 # and ASSUMING a 64-bit parameter size.
1188 self.cMinParams = 0;
1189 offNewParam = 0;
1190 for cBits in sorted(dBySize.keys(), reverse = True):
1191 for sStdRef in dBySize[cBits]:
1192 if offNewParam == 0 or offNewParam + cBits > 64:
1193 self.cMinParams += 1;
1194 offNewParam = cBits;
1195 else:
1196 offNewParam += cBits;
1197 assert(offNewParam <= 64);
1198
1199 for oRef in self.dParamRefs[sStdRef]:
1200 oRef.iNewParam = self.cMinParams - 1;
1201 oRef.offNewParam = offNewParam - cBits;
1202
1203 # Currently there are a few that requires 4 parameters, list these so we can figure out why:
1204 if self.cMinParams >= 4:
1205 print('debug: cMinParams=%s cRawParams=%s - %s:%d'
1206 % (self.cMinParams, len(self.dParamRefs), self.oParent.oMcBlock.sSrcFile, self.oParent.oMcBlock.iBeginLine,));
1207
1208 return True;
1209
    ## Characters valid in a C hexadecimal literal; used when skipping 0x
    ## constants in analyzeFindThreadedParamRefs.
    ksHexDigits = '0123456789abcdefABCDEF';
1211
    def analyzeFindThreadedParamRefs(self, aoStmts): # pylint: disable=too-many-statements
        """
        Scans the statements for things that have to be passed on to the threaded
        function (populates self.aoParamRefs).

        Recurses into conditional branches; the traversal order here must match
        analyzeMorphStmtForThreaded exactly.
        """
        for oStmt in aoStmts:
            # Some statements we can skip altogether.
            if isinstance(oStmt, iai.McCppPreProc):
                continue;
            if oStmt.isCppStmt() and oStmt.fDecode:
                continue;
            if oStmt.sName in ('IEM_MC_BEGIN',):
                continue;

            if isinstance(oStmt, iai.McStmtVar):
                if oStmt.sValue is None:
                    continue;
                # For variable declarations only the initializer (param #2) is of interest.
                aiSkipParams = { 0: True, 1: True, 3: True };
            else:
                aiSkipParams = {};

            # Several statements have implicit parameters and some have different parameters.
            if oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH',
                               'IEM_MC_REL_JMP_S32_AND_FINISH', 'IEM_MC_CALL_CIMPL_0', 'IEM_MC_CALL_CIMPL_1',
                               'IEM_MC_CALL_CIMPL_2', 'IEM_MC_CALL_CIMPL_3', 'IEM_MC_CALL_CIMPL_4', 'IEM_MC_CALL_CIMPL_5',
                               'IEM_MC_DEFER_TO_CIMPL_0_RET', 'IEM_MC_DEFER_TO_CIMPL_1_RET', 'IEM_MC_DEFER_TO_CIMPL_2_RET',
                               'IEM_MC_DEFER_TO_CIMPL_3_RET', 'IEM_MC_DEFER_TO_CIMPL_4_RET', 'IEM_MC_DEFER_TO_CIMPL_5_RET', ):
                self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)', 'uint4_t', oStmt, sStdRef = 'cbInstr'));

            if (    oStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH',)
                and self.sVariation not in self.kdVariationsOnlyPre386):
                self.aoParamRefs.append(ThreadedParamRef('pVCpu->iem.s.enmEffOpSize', 'IEMMODE', oStmt));

            if oStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
                # This is being pretty presumptive about bRm always being the RM byte...
                assert len(oStmt.asParams) == 3;
                assert oStmt.asParams[1] == 'bRm';

                if self.sVariation in self.kdVariationsWithFlatAddr16:
                    self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
                    self.aoParamRefs.append(ThreadedParamRef('(uint16_t)uEffAddrInfo' ,
                                                             'uint16_t', oStmt, sStdRef = 'u16Disp'));
                elif self.sVariation in self.kdVariationsWithFlatAddr32No64:
                    self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
                    self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
                                                             'uint8_t', oStmt, sStdRef = 'bSib'));
                    self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
                                                             'uint32_t', oStmt, sStdRef = 'u32Disp'));
                else:
                    assert self.sVariation in self.kdVariationsWithAddressOnly64;
                    self.aoParamRefs.append(ThreadedParamRef('IEM_GET_MODRM_EX(pVCpu, bRm)',
                                                             'uint8_t', oStmt, sStdRef = 'bRmEx'));
                    self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
                                                             'uint8_t', oStmt, sStdRef = 'bSib'));
                    self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
                                                             'uint32_t', oStmt, sStdRef = 'u32Disp'));
                    self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)',
                                                             'uint4_t', oStmt, sStdRef = 'cbInstr'));
                aiSkipParams[1] = True; # Skip the bRm parameter as it is being replaced by bRmEx.

            # 8-bit register accesses needs to have their index argument reworked to take REX into account.
            if oStmt.sName.startswith('IEM_MC_') and oStmt.sName.find('_GREG_U8') > 0:
                (idxReg, sOrgRef, sStdRef) = self.analyze8BitGRegStmt(oStmt);
                self.aoParamRefs.append(ThreadedParamRef(sOrgRef, 'uint8_t', oStmt, idxReg, sStdRef = sStdRef));
                aiSkipParams[idxReg] = True; # Skip the parameter below.

            # If in flat mode variation, ignore the effective segment parameter to memory MCs.
            if (    self.sVariation in self.kdVariationsWithFlatAddress
                and oStmt.sName in self.kdMemMcToFlatInfo
                and self.kdMemMcToFlatInfo[oStmt.sName][0] != -1):
                aiSkipParams[self.kdMemMcToFlatInfo[oStmt.sName][0]] = True;

            # Inspect the target of calls to see if we need to pass down a
            # function pointer or function table pointer for it to work.
            if isinstance(oStmt, iai.McStmtCall):
                if oStmt.sFn[0] == 'p':
                    self.aoParamRefs.append(ThreadedParamRef(oStmt.sFn, self.analyzeCallToType(oStmt.sFn), oStmt, oStmt.idxFn));
                elif (    oStmt.sFn[0] != 'i'
                      and not oStmt.sFn.startswith('RT_CONCAT3')
                      and not oStmt.sFn.startswith('IEMTARGETCPU_EFL_BEHAVIOR_SELECT')
                      and not oStmt.sFn.startswith('IEM_SELECT_HOST_OR_FALLBACK') ):
                    self.raiseProblem('Bogus function name in %s: %s' % (oStmt.sName, oStmt.sFn,));
                aiSkipParams[oStmt.idxFn] = True;

            # Skip the hint parameter (first) for IEM_MC_CALL_CIMPL_X.
            if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
                assert oStmt.idxFn == 2;
                aiSkipParams[0] = True;

            # Skip the function parameter (first) for IEM_MC_NATIVE_EMIT_X.
            if oStmt.sName.startswith('IEM_MC_NATIVE_EMIT_'):
                aiSkipParams[0] = True;


            # Check all the parameters for bogus references.
            for iParam, sParam in enumerate(oStmt.asParams):
                if iParam not in aiSkipParams and sParam not in self.oParent.dVariables:
                    # The parameter may contain a C expression, so we have to try
                    # extract the relevant bits, i.e. variables and fields while
                    # ignoring operators and parentheses.
                    offParam = 0;
                    while offParam < len(sParam):
                        # Is it the start of a C identifier? If so, find the end, but don't stop on field separators (->, .).
                        ch = sParam[offParam];
                        if ch.isalpha() or ch == '_':
                            offStart = offParam;
                            offParam += 1;
                            while offParam < len(sParam):
                                ch = sParam[offParam];
                                if not ch.isalnum() and ch != '_' and ch != '.':
                                    if ch != '-' or sParam[offParam + 1] != '>':
                                        # Special hack for the 'CTX_SUFF(pVM)' bit in pVCpu->CTX_SUFF(pVM)->xxxx:
                                        if (    ch == '('
                                            and sParam[offStart : offParam + len('(pVM)->')] == 'pVCpu->CTX_SUFF(pVM)->'):
                                            offParam += len('(pVM)->') - 1;
                                        else:
                                            break;
                                    offParam += 1;
                                offParam += 1;
                            sRef = sParam[offStart : offParam];

                            # For register references, we pass the full register indexes instead as macros
                            # like IEM_GET_MODRM_REG implicitly references pVCpu->iem.s.uRexReg and the
                            # threaded function will be more efficient if we just pass the register index
                            # as a 4-bit param.
                            if (   sRef.startswith('IEM_GET_MODRM')
                                or sRef.startswith('IEM_GET_EFFECTIVE_VVVV') ):
                                offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
                                if sParam[offParam] != '(':
                                    self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
                                (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
                                if asMacroParams is None:
                                    self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
                                offParam = offCloseParam + 1;
                                self.aoParamRefs.append(ThreadedParamRef(sParam[offStart : offParam], 'uint8_t',
                                                                         oStmt, iParam, offStart));

                            # We can skip known variables.
                            elif sRef in self.oParent.dVariables:
                                pass;

                            # Skip certain macro invocations.
                            elif sRef in ('IEM_GET_HOST_CPU_FEATURES',
                                          'IEM_GET_GUEST_CPU_FEATURES',
                                          'IEM_IS_GUEST_CPU_AMD',
                                          'IEM_IS_16BIT_CODE',
                                          'IEM_IS_32BIT_CODE',
                                          'IEM_IS_64BIT_CODE',
                                          ):
                                offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
                                if sParam[offParam] != '(':
                                    self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
                                (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
                                if asMacroParams is None:
                                    self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
                                offParam = offCloseParam + 1;

                                # Skip any dereference following it, unless it's a predicate like IEM_IS_GUEST_CPU_AMD.
                                if sRef not in ('IEM_IS_GUEST_CPU_AMD',
                                                'IEM_IS_16BIT_CODE',
                                                'IEM_IS_32BIT_CODE',
                                                'IEM_IS_64BIT_CODE',
                                                ):
                                    offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
                                    if offParam + 2 <= len(sParam) and sParam[offParam : offParam + 2] == '->':
                                        offParam = iai.McBlock.skipSpacesAt(sParam, offParam + 2, len(sParam));
                                        while offParam < len(sParam) and (sParam[offParam].isalnum() or sParam[offParam] in '_.'):
                                            offParam += 1;

                            # Skip constants, globals, types (casts), sizeof and macros.
                            elif (   sRef.startswith('IEM_OP_PRF_')
                                  or sRef.startswith('IEM_ACCESS_')
                                  or sRef.startswith('IEMINT_')
                                  or sRef.startswith('X86_GREG_')
                                  or sRef.startswith('X86_SREG_')
                                  or sRef.startswith('X86_EFL_')
                                  or sRef.startswith('X86_FSW_')
                                  or sRef.startswith('X86_FCW_')
                                  or sRef.startswith('X86_XCPT_')
                                  or sRef.startswith('IEMMODE_')
                                  or sRef.startswith('IEM_F_')
                                  or sRef.startswith('IEM_CIMPL_F_')
                                  or sRef.startswith('g_')
                                  or sRef.startswith('iemAImpl_')
                                  or sRef.startswith('kIemNativeGstReg_')
                                  or sRef.startswith('RT_ARCH_VAL_')
                                  or sRef in ( 'int8_t',    'int16_t',    'int32_t',    'int64_t',
                                               'INT8_C',    'INT16_C',    'INT32_C',    'INT64_C',
                                               'uint8_t',   'uint16_t',   'uint32_t',   'uint64_t',
                                               'UINT8_C',   'UINT16_C',   'UINT32_C',   'UINT64_C',
                                               'UINT8_MAX', 'UINT16_MAX', 'UINT32_MAX', 'UINT64_MAX',
                                               'INT8_MAX',  'INT16_MAX',  'INT32_MAX',  'INT64_MAX',
                                               'INT8_MIN',  'INT16_MIN',  'INT32_MIN',  'INT64_MIN',
                                               'sizeof',    'NOREF',      'RT_NOREF',   'IEMMODE_64BIT',
                                               'RT_BIT_32', 'RT_BIT_64',  'true',       'false',
                                               'NIL_RTGCPTR',) ):
                                pass;

                            # Skip certain macro invocations.
                            # Any variable (non-field) and decoder fields in IEMCPU will need to be parameterized.
                            elif (   (    '.' not in sRef
                                      and '-' not in sRef
                                      and sRef not in ('pVCpu', ) )
                                  or iai.McBlock.koReIemDecoderVars.search(sRef) is not None):
                                self.aoParamRefs.append(ThreadedParamRef(sRef, self.analyzeReferenceToType(sRef),
                                                                         oStmt, iParam, offStart));
                        # Number.
                        elif ch.isdigit():
                            if (    ch == '0'
                                and offParam + 2 <= len(sParam)
                                and sParam[offParam + 1] in 'xX'
                                and sParam[offParam + 2] in self.ksHexDigits ):
                                offParam += 2;
                                while offParam < len(sParam) and sParam[offParam] in self.ksHexDigits:
                                    offParam += 1;
                            else:
                                while offParam < len(sParam) and sParam[offParam].isdigit():
                                    offParam += 1;
                        # Comment?
                        elif (    ch == '/'
                              and offParam + 4 <= len(sParam)
                              and sParam[offParam + 1] == '*'):
                            offParam += 2;
                            offNext = sParam.find('*/', offParam);
                            if offNext < offParam:
                                self.raiseProblem('Unable to find "*/" in "%s" ("%s")' % (sRef, oStmt.renderCode(),));
                            offParam = offNext + 2;
                        # Whatever else.
                        else:
                            offParam += 1;

            # Traverse the branches of conditionals.
            if isinstance(oStmt, iai.McStmtCond):
                self.analyzeFindThreadedParamRefs(oStmt.aoIfBranch);
                self.analyzeFindThreadedParamRefs(oStmt.aoElseBranch);
        return True;
1448
1449 def analyzeVariation(self, aoStmts):
1450 """
1451 2nd part of the analysis, done on each variation.
1452
1453 The variations may differ in parameter requirements and will end up with
1454 slightly different MC sequences. Thus this is done on each individually.
1455
1456 Returns dummy True - raises exception on trouble.
1457 """
1458 # Now scan the code for variables and field references that needs to
1459 # be passed to the threaded function because they are related to the
1460 # instruction decoding.
1461 self.analyzeFindThreadedParamRefs(aoStmts);
1462 self.analyzeConsolidateThreadedParamRefs();
1463
1464 # Morph the statement stream for the block into what we'll be using in the threaded function.
1465 (self.aoStmtsForThreadedFunction, iParamRef) = self.analyzeMorphStmtForThreaded(aoStmts, {});
1466 if iParamRef != len(self.aoParamRefs):
1467 raise Exception('iParamRef=%s, expected %s!' % (iParamRef, len(self.aoParamRefs),));
1468
1469 return True;
1470
    def emitThreadedCallStmts(self, cchIndent, sCallVarNm = None):
        """
        Produces generic C++ statments that emits a call to the thread function
        variation and any subsequent checks that may be necessary after that.

        The sCallVarNm is the name of the variable with the threaded function
        to call. This is for the case where all the variations have the same
        parameters and only the threaded function number differs.

        Returns a list of iai.McCpp* statement objects making up the emitted code.
        """
        aoStmts = [
            iai.McCppCall('IEM_MC2_BEGIN_EMIT_CALLS',
                          ['1' if 'IEM_CIMPL_F_CHECK_IRQ_BEFORE' in self.oParent.dsCImplFlags else '0'],
                          cchIndent = cchIndent), # Scope and a hook for various stuff.
        ];

        # The call to the threaded function.
        asCallArgs = [ self.getIndexName() if not sCallVarNm else sCallVarNm, ];
        for iParam in range(self.cMinParams):
            # Each 64-bit threaded parameter may pack several source references;
            # OR together the (shifted) fragments assigned to this slot.
            asFrags = [];
            for aoRefs in self.dParamRefs.values():
                oRef = aoRefs[0];
                if oRef.iNewParam == iParam:
                    sCast = '(uint64_t)'
                    if oRef.sType in ('int8_t', 'int16_t', 'int32_t'): # Make sure these doesn't get sign-extended.
                        sCast = '(uint64_t)(u' + oRef.sType + ')';
                    if oRef.offNewParam == 0:
                        asFrags.append(sCast + '(' + oRef.sOrgRef + ')');
                    else:
                        asFrags.append('(%s(%s) << %s)' % (sCast, oRef.sOrgRef, oRef.offNewParam));
            assert asFrags;
            asCallArgs.append(' | '.join(asFrags));

        aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_%s' % (len(asCallArgs) - 1,), asCallArgs, cchIndent = cchIndent));

        # 2023-11-28: This has to be done AFTER the CIMPL call, so we have to
        #             emit this mode check from the compilation loop.  On the
        #             plus side, this means we eliminate unnecessary call at
        #             end of the TB. :-)
        ## For CIMPL stuff, we need to consult the associated IEM_CIMPL_F_XXX
        ## mask and maybe emit additional checks.
        #if (   'IEM_CIMPL_F_MODE'   in self.oParent.dsCImplFlags
        #    or 'IEM_CIMPL_F_XCPT'   in self.oParent.dsCImplFlags
        #    or 'IEM_CIMPL_F_VMEXIT' in self.oParent.dsCImplFlags):
        #    aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_1', ( 'kIemThreadedFunc_BltIn_CheckMode', 'pVCpu->iem.s.fExec', ),
        #                                 cchIndent = cchIndent));

        # Close the scope opened by IEM_MC2_BEGIN_EMIT_CALLS, passing the CIMPL flags.
        sCImplFlags = ' | '.join(self.oParent.dsCImplFlags.keys());
        if not sCImplFlags:
            sCImplFlags = '0'
        aoStmts.append(iai.McCppCall('IEM_MC2_END_EMIT_CALLS', ( sCImplFlags, ), cchIndent = cchIndent)); # For closing the scope.

        # Emit fEndTb = true or fTbBranched = true if any of the CIMPL flags
        # indicates we should do so.
        # Note! iemThreadedRecompilerMcDeferToCImpl0 duplicates work done here.
        asEndTbFlags      = [];
        asTbBranchedFlags = [];
        for sFlag in self.oParent.dsCImplFlags:
            if self.kdCImplFlags[sFlag] is True:
                asEndTbFlags.append(sFlag);
            elif sFlag.startswith('IEM_CIMPL_F_BRANCH_'):
                asTbBranchedFlags.append(sFlag);
        if (    asTbBranchedFlags
            and (   'IEM_CIMPL_F_BRANCH_CONDITIONAL' not in asTbBranchedFlags
                 or self.sVariation not in self.kdVariationsWithConditionalNoJmp)):
            aoStmts.append(iai.McCppGeneric('iemThreadedSetBranched(pVCpu, %s);'
                                            % ((' | '.join(asTbBranchedFlags)).replace('IEM_CIMPL_F_BRANCH', 'IEMBRANCHED_F'),),
                                            cchIndent = cchIndent)); # Inline fn saves ~2 seconds for gcc 13/dbg (1m13s vs 1m15s).
        if asEndTbFlags:
            aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.fEndTb = true; /* %s */' % (','.join(asEndTbFlags),),
                                            cchIndent = cchIndent));

        if 'IEM_CIMPL_F_CHECK_IRQ_AFTER' in self.oParent.dsCImplFlags:
            aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.cInstrTillIrqCheck = 0;', cchIndent = cchIndent));

        return aoStmts;
1546
1547
1548class ThreadedFunction(object):
1549 """
1550 A threaded function.
1551 """
1552
    def __init__(self, oMcBlock: iai.McBlock) -> None:
        ## The MC block this threaded function is generated from.
        self.oMcBlock       = oMcBlock      # type: iai.McBlock
        # The remaining fields are only useful after analyze() has been called:
        ## Variations for this block. There is at least one.
        self.aoVariations   = []            # type: List[ThreadedFunctionVariation]
        ## Variation dictionary containing the same as aoVariations.
        self.dVariations    = {}            # type: Dict[str, ThreadedFunctionVariation]
        ## Dictionary of local variables (IEM_MC_LOCAL[_CONST]) and call arguments (IEM_MC_ARG*).
        self.dVariables     = {}            # type: Dict[str, iai.McStmtVar]
        ## Dictionary with any IEM_CIMPL_F_XXX flags explicitly advertised in the code block
        ## and those determined by analyzeCodeOperation().
        self.dsCImplFlags   = {}            # type: Dict[str, bool]
        ## The unique sub-name for this threaded function.
        self.sSubName       = '';
        #if oMcBlock.iInFunction > 0 or (oMcBlock.oInstruction and len(oMcBlock.oInstruction.aoMcBlocks) > 1):
        #    self.sSubName = '_%s' % (oMcBlock.iInFunction);
1569
1570 @staticmethod
1571 def dummyInstance():
1572 """ Gets a dummy instance. """
1573 return ThreadedFunction(iai.McBlock('null', 999999999, 999999999,
1574 iai.DecoderFunction('null', 999999999, 'nil', ('','')), 999999999));
1575
1576 def hasWithFlagsCheckingAndClearingVariation(self):
1577 """
1578 Check if there is one or more with flags checking and clearing
1579 variations for this threaded function.
1580 """
1581 for sVarWithFlags in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
1582 if sVarWithFlags in self.dVariations:
1583 return True;
1584 return False;
1585
1586 #
1587 # Analysis and code morphing.
1588 #
1589
1590 def raiseProblem(self, sMessage):
1591 """ Raises a problem. """
1592 raise Exception('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1593
1594 def error(self, sMessage, oGenerator):
1595 """ Emits an error via the generator object, causing it to fail. """
1596 oGenerator.rawError('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1597
1598 def warning(self, sMessage):
1599 """ Emits a warning. """
1600 print('%s:%s: warning: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1601
    ## Used by analyzeAndAnnotateName for memory MC blocks.
    ## Maps an IEM_MC_* statement name to the suffix appended to the threaded
    ## function sub-name (sSubName); a trailing 'c' denotes a constant store,
    ## 'a' an atomic mapping, 'sx'/'zx' sign-/zero-extension.
    kdAnnotateNameMemStmts = {
        'IEM_MC_FETCH_MEM16_U8':                                    '__mem8',
        'IEM_MC_FETCH_MEM32_U8':                                    '__mem8',
        'IEM_MC_FETCH_MEM_D80':                                     '__mem80',
        'IEM_MC_FETCH_MEM_I16':                                     '__mem16',
        'IEM_MC_FETCH_MEM_I32':                                     '__mem32',
        'IEM_MC_FETCH_MEM_I64':                                     '__mem64',
        'IEM_MC_FETCH_MEM_R32':                                     '__mem32',
        'IEM_MC_FETCH_MEM_R64':                                     '__mem64',
        'IEM_MC_FETCH_MEM_R80':                                     '__mem80',
        'IEM_MC_FETCH_MEM_U128':                                    '__mem128',
        'IEM_MC_FETCH_MEM_U128_ALIGN_SSE':                          '__mem128',
        'IEM_MC_FETCH_MEM_U128_NO_AC':                              '__mem128',
        'IEM_MC_FETCH_MEM_U16':                                     '__mem16',
        'IEM_MC_FETCH_MEM_U16_DISP':                                '__mem16',
        'IEM_MC_FETCH_MEM_U16_SX_U32':                              '__mem16sx32',
        'IEM_MC_FETCH_MEM_U16_SX_U64':                              '__mem16sx64',
        'IEM_MC_FETCH_MEM_U16_ZX_U32':                              '__mem16zx32',
        'IEM_MC_FETCH_MEM_U16_ZX_U64':                              '__mem16zx64',
        'IEM_MC_FETCH_MEM_U256':                                    '__mem256',
        'IEM_MC_FETCH_MEM_U256_ALIGN_AVX':                          '__mem256',
        'IEM_MC_FETCH_MEM_U256_NO_AC':                              '__mem256',
        'IEM_MC_FETCH_MEM_U32':                                     '__mem32',
        'IEM_MC_FETCH_MEM_U32_DISP':                                '__mem32',
        'IEM_MC_FETCH_MEM_U32_SX_U64':                              '__mem32sx64',
        'IEM_MC_FETCH_MEM_U32_ZX_U64':                              '__mem32zx64',
        'IEM_MC_FETCH_MEM_U64':                                     '__mem64',
        'IEM_MC_FETCH_MEM_U64_ALIGN_U128':                          '__mem64',
        'IEM_MC_FETCH_MEM_U64_DISP':                                '__mem64',
        'IEM_MC_FETCH_MEM_U8':                                      '__mem8',
        'IEM_MC_FETCH_MEM_U8_DISP':                                 '__mem8',
        'IEM_MC_FETCH_MEM_U8_SX_U16':                               '__mem8sx16',
        'IEM_MC_FETCH_MEM_U8_SX_U32':                               '__mem8sx32',
        'IEM_MC_FETCH_MEM_U8_SX_U64':                               '__mem8sx64',
        'IEM_MC_FETCH_MEM_U8_ZX_U16':                               '__mem8zx16',
        'IEM_MC_FETCH_MEM_U8_ZX_U32':                               '__mem8zx32',
        'IEM_MC_FETCH_MEM_U8_ZX_U64':                               '__mem8zx64',
        'IEM_MC_FETCH_MEM_XMM':                                     '__mem128',
        'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE':                           '__mem128',
        'IEM_MC_FETCH_MEM_XMM_NO_AC':                               '__mem128',
        'IEM_MC_FETCH_MEM_XMM_U32':                                 '__mem32',
        'IEM_MC_FETCH_MEM_XMM_U64':                                 '__mem64',
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128':                      '__mem128',
        'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM':              '__mem128',
        'IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM':                    '__mem32',
        'IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM':                    '__mem64',
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64':      '__mem128',
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64': '__mem128',

        'IEM_MC_STORE_MEM_I16_CONST_BY_REF':                        '__mem16',
        'IEM_MC_STORE_MEM_I32_CONST_BY_REF':                        '__mem32',
        'IEM_MC_STORE_MEM_I64_CONST_BY_REF':                        '__mem64',
        'IEM_MC_STORE_MEM_I8_CONST_BY_REF':                         '__mem8',
        'IEM_MC_STORE_MEM_INDEF_D80_BY_REF':                        '__mem80',
        'IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF':                     '__mem32',
        'IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF':                     '__mem64',
        'IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF':                     '__mem80',
        'IEM_MC_STORE_MEM_U128':                                    '__mem128',
        'IEM_MC_STORE_MEM_U128_ALIGN_SSE':                          '__mem128',
        'IEM_MC_STORE_MEM_U128_NO_AC':                              '__mem128',
        'IEM_MC_STORE_MEM_U16':                                     '__mem16',
        'IEM_MC_STORE_MEM_U16_CONST':                               '__mem16c',
        'IEM_MC_STORE_MEM_U256':                                    '__mem256',
        'IEM_MC_STORE_MEM_U256_ALIGN_AVX':                          '__mem256',
        'IEM_MC_STORE_MEM_U256_NO_AC':                              '__mem256',
        'IEM_MC_STORE_MEM_U32':                                     '__mem32',
        'IEM_MC_STORE_MEM_U32_CONST':                               '__mem32c',
        'IEM_MC_STORE_MEM_U64':                                     '__mem64',
        'IEM_MC_STORE_MEM_U64_CONST':                               '__mem64c',
        'IEM_MC_STORE_MEM_U8':                                      '__mem8',
        'IEM_MC_STORE_MEM_U8_CONST':                                '__mem8c',

        'IEM_MC_MEM_MAP_D80_WO':                                    '__mem80',
        'IEM_MC_MEM_MAP_I16_WO':                                    '__mem16',
        'IEM_MC_MEM_MAP_I32_WO':                                    '__mem32',
        'IEM_MC_MEM_MAP_I64_WO':                                    '__mem64',
        'IEM_MC_MEM_MAP_R32_WO':                                    '__mem32',
        'IEM_MC_MEM_MAP_R64_WO':                                    '__mem64',
        'IEM_MC_MEM_MAP_R80_WO':                                    '__mem80',
        'IEM_MC_MEM_MAP_U128_ATOMIC':                               '__mem128a',
        'IEM_MC_MEM_MAP_U128_RO':                                   '__mem128',
        'IEM_MC_MEM_MAP_U128_RW':                                   '__mem128',
        'IEM_MC_MEM_MAP_U128_WO':                                   '__mem128',
        'IEM_MC_MEM_MAP_U16_ATOMIC':                                '__mem16a',
        'IEM_MC_MEM_MAP_U16_RO':                                    '__mem16',
        'IEM_MC_MEM_MAP_U16_RW':                                    '__mem16',
        'IEM_MC_MEM_MAP_U16_WO':                                    '__mem16',
        'IEM_MC_MEM_MAP_U32_ATOMIC':                                '__mem32a',
        'IEM_MC_MEM_MAP_U32_RO':                                    '__mem32',
        'IEM_MC_MEM_MAP_U32_RW':                                    '__mem32',
        'IEM_MC_MEM_MAP_U32_WO':                                    '__mem32',
        'IEM_MC_MEM_MAP_U64_ATOMIC':                                '__mem64a',
        'IEM_MC_MEM_MAP_U64_RO':                                    '__mem64',
        'IEM_MC_MEM_MAP_U64_RW':                                    '__mem64',
        'IEM_MC_MEM_MAP_U64_WO':                                    '__mem64',
        'IEM_MC_MEM_MAP_U8_ATOMIC':                                 '__mem8a',
        'IEM_MC_MEM_MAP_U8_RO':                                     '__mem8',
        'IEM_MC_MEM_MAP_U8_RW':                                     '__mem8',
        'IEM_MC_MEM_MAP_U8_WO':                                     '__mem8',
    };
    ## Used by analyzeAndAnnotateName for non-memory MC blocks.
    ## Same format as kdAnnotateNameMemStmts: maps IEM_MC_* statement name to
    ## the sub-name suffix for the threaded function.
    kdAnnotateNameRegStmts = {
        'IEM_MC_FETCH_GREG_U8':                     '__greg8',
        'IEM_MC_FETCH_GREG_U8_ZX_U16':              '__greg8zx16',
        'IEM_MC_FETCH_GREG_U8_ZX_U32':              '__greg8zx32',
        'IEM_MC_FETCH_GREG_U8_ZX_U64':              '__greg8zx64',
        'IEM_MC_FETCH_GREG_U8_SX_U16':              '__greg8sx16',
        'IEM_MC_FETCH_GREG_U8_SX_U32':              '__greg8sx32',
        'IEM_MC_FETCH_GREG_U8_SX_U64':              '__greg8sx64',
        'IEM_MC_FETCH_GREG_U16':                    '__greg16',
        'IEM_MC_FETCH_GREG_U16_ZX_U32':             '__greg16zx32',
        'IEM_MC_FETCH_GREG_U16_ZX_U64':             '__greg16zx64',
        'IEM_MC_FETCH_GREG_U16_SX_U32':             '__greg16sx32',
        'IEM_MC_FETCH_GREG_U16_SX_U64':             '__greg16sx64',
        'IEM_MC_FETCH_GREG_U32':                    '__greg32',
        'IEM_MC_FETCH_GREG_U32_ZX_U64':             '__greg32zx64',
        'IEM_MC_FETCH_GREG_U32_SX_U64':             '__greg32sx64',
        'IEM_MC_FETCH_GREG_U64':                    '__greg64',
        'IEM_MC_FETCH_GREG_U64_ZX_U64':             '__greg64zx64',
        'IEM_MC_FETCH_GREG_PAIR_U32':               '__greg32',
        'IEM_MC_FETCH_GREG_PAIR_U64':               '__greg64',

        'IEM_MC_STORE_GREG_U8':                     '__greg8',
        'IEM_MC_STORE_GREG_U16':                    '__greg16',
        'IEM_MC_STORE_GREG_U32':                    '__greg32',
        'IEM_MC_STORE_GREG_U64':                    '__greg64',
        'IEM_MC_STORE_GREG_I64':                    '__greg64',
        'IEM_MC_STORE_GREG_U8_CONST':               '__greg8c',
        'IEM_MC_STORE_GREG_U16_CONST':              '__greg16c',
        'IEM_MC_STORE_GREG_U32_CONST':              '__greg32c',
        'IEM_MC_STORE_GREG_U64_CONST':              '__greg64c',
        'IEM_MC_STORE_GREG_PAIR_U32':               '__greg32',
        'IEM_MC_STORE_GREG_PAIR_U64':               '__greg64',

        'IEM_MC_FETCH_SREG_U16':                    '__sreg16',
        'IEM_MC_FETCH_SREG_ZX_U32':                 '__sreg32',
        'IEM_MC_FETCH_SREG_ZX_U64':                 '__sreg64',
        'IEM_MC_FETCH_SREG_BASE_U64':               '__sbase64',
        'IEM_MC_FETCH_SREG_BASE_U32':               '__sbase32',
        'IEM_MC_STORE_SREG_BASE_U64':               '__sbase64',
        'IEM_MC_STORE_SREG_BASE_U32':               '__sbase32',

        'IEM_MC_REF_GREG_U8':                       '__greg8',
        'IEM_MC_REF_GREG_U16':                      '__greg16',
        'IEM_MC_REF_GREG_U32':                      '__greg32',
        'IEM_MC_REF_GREG_U64':                      '__greg64',
        'IEM_MC_REF_GREG_U8_CONST':                 '__greg8',
        'IEM_MC_REF_GREG_U16_CONST':                '__greg16',
        'IEM_MC_REF_GREG_U32_CONST':                '__greg32',
        'IEM_MC_REF_GREG_U64_CONST':                '__greg64',
        'IEM_MC_REF_GREG_I32':                      '__greg32',
        'IEM_MC_REF_GREG_I64':                      '__greg64',
        'IEM_MC_REF_GREG_I32_CONST':                '__greg32',
        'IEM_MC_REF_GREG_I64_CONST':                '__greg64',

        'IEM_MC_STORE_FPUREG_R80_SRC_REF':          '__fpu',
        'IEM_MC_REF_FPUREG':                        '__fpu',

        'IEM_MC_FETCH_MREG_U64':                    '__mreg64',
        'IEM_MC_FETCH_MREG_U32':                    '__mreg32',
        'IEM_MC_STORE_MREG_U64':                    '__mreg64',
        'IEM_MC_STORE_MREG_U32_ZX_U64':             '__mreg32zx64',
        'IEM_MC_REF_MREG_U64':                      '__mreg64',
        'IEM_MC_REF_MREG_U64_CONST':                '__mreg64',
        'IEM_MC_REF_MREG_U32_CONST':                '__mreg32',

        'IEM_MC_CLEAR_XREG_U32_MASK':               '__xreg32x4',
        'IEM_MC_FETCH_XREG_U128':                   '__xreg128',
        'IEM_MC_FETCH_XREG_XMM':                    '__xreg128',
        'IEM_MC_FETCH_XREG_U64':                    '__xreg64',
        'IEM_MC_FETCH_XREG_U32':                    '__xreg32',
        'IEM_MC_FETCH_XREG_U16':                    '__xreg16',
        'IEM_MC_FETCH_XREG_U8':                     '__xreg8',
        'IEM_MC_FETCH_XREG_PAIR_U128':              '__xreg128p',
        'IEM_MC_FETCH_XREG_PAIR_XMM':               '__xreg128p',
        'IEM_MC_FETCH_XREG_PAIR_U128_AND_RAX_RDX_U64': '__xreg128p',
        'IEM_MC_FETCH_XREG_PAIR_U128_AND_EAX_EDX_U32_SX_U64': '__xreg128p',

        'IEM_MC_STORE_XREG_U32_U128':               '__xreg32',
        'IEM_MC_STORE_XREG_U128':                   '__xreg128',
        'IEM_MC_STORE_XREG_XMM':                    '__xreg128',
        'IEM_MC_STORE_XREG_XMM_U32':                '__xreg32',
        'IEM_MC_STORE_XREG_XMM_U64':                '__xreg64',
        'IEM_MC_STORE_XREG_U64':                    '__xreg64',
        'IEM_MC_STORE_XREG_U64_ZX_U128':            '__xreg64zx128',
        'IEM_MC_STORE_XREG_U32':                    '__xreg32',
        'IEM_MC_STORE_XREG_U16':                    '__xreg16',
        'IEM_MC_STORE_XREG_U8':                     '__xreg8',
        'IEM_MC_STORE_XREG_U32_ZX_U128':            '__xreg32zx128',
        'IEM_MC_STORE_XREG_HI_U64':                 '__xreg64hi',
        'IEM_MC_STORE_XREG_R32':                    '__xreg32',
        'IEM_MC_STORE_XREG_R64':                    '__xreg64',
        'IEM_MC_BROADCAST_XREG_U8_ZX_VLMAX':        '__xreg8zx',
        'IEM_MC_BROADCAST_XREG_U16_ZX_VLMAX':       '__xreg16zx',
        'IEM_MC_BROADCAST_XREG_U32_ZX_VLMAX':       '__xreg32zx',
        'IEM_MC_BROADCAST_XREG_U64_ZX_VLMAX':       '__xreg64zx',
        'IEM_MC_BROADCAST_XREG_U128_ZX_VLMAX':      '__xreg128zx',
        'IEM_MC_REF_XREG_U128':                     '__xreg128',
        'IEM_MC_REF_XREG_U128_CONST':               '__xreg128',
        'IEM_MC_REF_XREG_U32_CONST':                '__xreg32',
        'IEM_MC_REF_XREG_U64_CONST':                '__xreg64',
        'IEM_MC_REF_XREG_R32_CONST':                '__xreg32',
        'IEM_MC_REF_XREG_R64_CONST':                '__xreg64',
        'IEM_MC_REF_XREG_XMM_CONST':                '__xreg128',
        'IEM_MC_COPY_XREG_U128':                    '__xreg128',

        'IEM_MC_FETCH_YREG_U256':                   '__yreg256',
        'IEM_MC_FETCH_YREG_U128':                   '__yreg128',
        'IEM_MC_FETCH_YREG_U64':                    '__yreg64',
        'IEM_MC_FETCH_YREG_2ND_U64':                '__yreg64',
        'IEM_MC_FETCH_YREG_U32':                    '__yreg32',
        'IEM_MC_STORE_YREG_U128':                   '__yreg128',
        'IEM_MC_STORE_YREG_U32_ZX_VLMAX':           '__yreg32zx',
        'IEM_MC_STORE_YREG_U64_ZX_VLMAX':           '__yreg64zx',
        'IEM_MC_STORE_YREG_U128_ZX_VLMAX':          '__yreg128zx',
        'IEM_MC_STORE_YREG_U256_ZX_VLMAX':          '__yreg256zx',
        'IEM_MC_BROADCAST_YREG_U8_ZX_VLMAX':        '__yreg8',
        'IEM_MC_BROADCAST_YREG_U16_ZX_VLMAX':       '__yreg16',
        'IEM_MC_BROADCAST_YREG_U32_ZX_VLMAX':       '__yreg32',
        'IEM_MC_BROADCAST_YREG_U64_ZX_VLMAX':       '__yreg64',
        'IEM_MC_BROADCAST_YREG_U128_ZX_VLMAX':      '__yreg128',
        'IEM_MC_REF_YREG_U128':                     '__yreg128',
        'IEM_MC_REF_YREG_U128_CONST':               '__yreg128',
        'IEM_MC_REF_YREG_U64_CONST':                '__yreg64',
        'IEM_MC_COPY_YREG_U256_ZX_VLMAX':           '__yreg256zx',
        'IEM_MC_COPY_YREG_U128_ZX_VLMAX':           '__yreg128zx',
        'IEM_MC_COPY_YREG_U64_ZX_VLMAX':            '__yreg64zx',
        'IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX':       '__yreg3296',
        'IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX':       '__yreg6464',
        'IEM_MC_MERGE_YREG_U64HI_U64HI_ZX_VLMAX':   '__yreg64hi64hi',
        'IEM_MC_MERGE_YREG_U64LO_U64LO_ZX_VLMAX':   '__yreg64lo64lo',
        'IEM_MC_MERGE_YREG_U64LO_U64LOCAL_ZX_VLMAX':'__yreg64',
        'IEM_MC_MERGE_YREG_U64LOCAL_U64HI_ZX_VLMAX':'__yreg64',
    };
1837 def analyzeAndAnnotateName(self, aoStmts: List[iai.McStmt]):
1838 """
1839 Scans the statements and variation lists for clues about the threaded function,
1840 and sets self.sSubName if successfull.
1841 """
1842 dHits = {};
1843 cHits = iai.McStmt.countStmtsByName(aoStmts, self.kdAnnotateNameMemStmts, dHits);
1844 if cHits > 0:
1845 sStmtNm = sorted(dHits.keys())[-1]; # priority: STORE, MEM_MAP, FETCH.
1846 sName = self.kdAnnotateNameMemStmts[sStmtNm];
1847 else:
1848 cHits = iai.McStmt.countStmtsByName(aoStmts, self.kdAnnotateNameRegStmts, dHits);
1849 if not cHits:
1850 return;
1851 sStmtNm = sorted(dHits.keys())[-1]; # priority: STORE, MEM_MAP, FETCH.
1852 sName = self.kdAnnotateNameRegStmts[sStmtNm];
1853 self.sSubName = sName;
1854 return;
1855
1856 def analyzeFindVariablesAndCallArgs(self, aoStmts: List[iai.McStmt]) -> bool:
1857 """ Scans the statements for MC variables and call arguments. """
1858 for oStmt in aoStmts:
1859 if isinstance(oStmt, iai.McStmtVar):
1860 if oStmt.sVarName in self.dVariables:
1861 raise Exception('Variable %s is defined more than once!' % (oStmt.sVarName,));
1862 self.dVariables[oStmt.sVarName] = oStmt.sVarName;
1863
1864 # There shouldn't be any variables or arguments declared inside if/
1865 # else blocks, but scan them too to be on the safe side.
1866 if isinstance(oStmt, iai.McStmtCond):
1867 #cBefore = len(self.dVariables);
1868 self.analyzeFindVariablesAndCallArgs(oStmt.aoIfBranch);
1869 self.analyzeFindVariablesAndCallArgs(oStmt.aoElseBranch);
1870 #if len(self.dVariables) != cBefore:
1871 # raise Exception('Variables/arguments defined in conditional branches!');
1872 return True;
1873
    def analyzeCodeOperation(self, aoStmts: List[iai.McStmt], dEflStmts, fSeenConditional = False) -> bool:
        """
        Analyzes the code looking clues as to additional side-effects.

        Currently this is simply looking for branching and adding the relevant
        branch flags to dsCImplFlags.  ASSUMES the caller pre-populates the
        dictionary with a copy of self.oMcBlock.dsCImplFlags.

        This also sets McStmtCond.oIfBranchAnnotation & McStmtCond.oElseBranchAnnotation.

        The dEflStmts dictionary collects EFLAGS-related statements (keyed by
        statement name) for the caller to validate against @opflmodify & co.

        Returns annotation on return style (one of the g_ksFinishAnnotation_XXX
        values or None).
        """
        sAnnotation = None;
        for oStmt in aoStmts:
            # Set IEM_CIMPL_F_BRANCH_XXX flags if we see any branching MCs.
            if oStmt.sName.startswith('IEM_MC_SET_RIP'):
                assert not fSeenConditional;
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_INDIRECT'] = True;
            elif oStmt.sName.startswith('IEM_MC_REL_JMP'):
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_RELATIVE'] = True;
                if fSeenConditional:
                    self.dsCImplFlags['IEM_CIMPL_F_BRANCH_CONDITIONAL'] = True;

            # Check for CIMPL and AIMPL calls.
            if oStmt.sName.startswith('IEM_MC_CALL_'):
                if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
                    self.dsCImplFlags['IEM_CIMPL_F_CALLS_CIMPL'] = True;
                elif (   oStmt.sName.startswith('IEM_MC_CALL_VOID_AIMPL_')
                      or oStmt.sName.startswith('IEM_MC_CALL_AIMPL_')
                      or oStmt.sName.startswith('IEM_MC_CALL_AVX_AIMPL_')):
                    self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL'] = True;
                elif (   oStmt.sName.startswith('IEM_MC_CALL_SSE_AIMPL_')
                      or oStmt.sName.startswith('IEM_MC_CALL_MMX_AIMPL_')
                      or oStmt.sName.startswith('IEM_MC_CALL_FPU_AIMPL_')):
                    self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE'] = True;
                else:
                    raise Exception('Unknown IEM_MC_CALL_* statement: %s' % (oStmt.sName,));

            # Check for return statements.  At most one finish style per path.
            if oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH',):
                assert sAnnotation is None;
                sAnnotation = g_ksFinishAnnotation_Advance;
            elif oStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH',  'IEM_MC_REL_JMP_S16_AND_FINISH',
                                 'IEM_MC_REL_JMP_S32_AND_FINISH',):
                assert sAnnotation is None;
                sAnnotation = g_ksFinishAnnotation_RelJmp;
            elif oStmt.sName in ('IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH',
                                 'IEM_MC_SET_RIP_U64_AND_FINISH',):
                assert sAnnotation is None;
                sAnnotation = g_ksFinishAnnotation_SetJmp;
            elif oStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_'):
                assert sAnnotation is None;
                sAnnotation = g_ksFinishAnnotation_DeferToCImpl;

            # Collect MCs working on EFLAGS.  Caller will check this.
            if oStmt.sName in ('IEM_MC_FETCH_EFLAGS', 'IEM_MC_FETCH_EFLAGS_U8', 'IEM_MC_COMMIT_EFLAGS', 'IEM_MC_REF_EFLAGS',
                               'IEM_MC_ARG_LOCAL_EFLAGS', ):
                dEflStmts[oStmt.sName] = oStmt;
            elif isinstance(oStmt, iai.McStmtCall):
                # CIMPL calls only count as EFLAGS users when advertising flag changes.
                if oStmt.sName in ('IEM_MC_CALL_CIMPL_0', 'IEM_MC_CALL_CIMPL_1', 'IEM_MC_CALL_CIMPL_2',
                                   'IEM_MC_CALL_CIMPL_3', 'IEM_MC_CALL_CIMPL_4', 'IEM_MC_CALL_CIMPL_5',):
                    if (   oStmt.asParams[0].find('IEM_CIMPL_F_RFLAGS') >= 0
                        or oStmt.asParams[0].find('IEM_CIMPL_F_STATUS_FLAGS') >= 0):
                        dEflStmts[oStmt.sName] = oStmt;

            # Process branches of conditionals recursively.
            if isinstance(oStmt, iai.McStmtCond):
                oStmt.oIfBranchAnnotation = self.analyzeCodeOperation(oStmt.aoIfBranch, dEflStmts, True);
                if oStmt.aoElseBranch:
                    oStmt.oElseBranchAnnotation = self.analyzeCodeOperation(oStmt.aoElseBranch, dEflStmts, True);

        return sAnnotation;
1946
    def analyze(self, oGenerator):
        """
        Analyzes the code, identifying the number of parameters it requires and such.

        oGenerator is used for non-fatal error reporting (see self.error).

        Returns dummy True - raises exception on trouble.
        """

        #
        # Decode the block into a list/tree of McStmt objects.
        #
        aoStmts = self.oMcBlock.decode();

        #
        # Check the block for errors before we proceed (will decode it).
        #
        asErrors = self.oMcBlock.check();
        if asErrors:
            raise Exception('\n'.join(['%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sError, )
                                       for sError in asErrors]));

        #
        # Scan the statements for local variables and call arguments (self.dVariables).
        #
        self.analyzeFindVariablesAndCallArgs(aoStmts);

        #
        # Scan the code for IEM_CIMPL_F_ and other clues.
        #
        self.dsCImplFlags = self.oMcBlock.dsCImplFlags.copy();
        dEflStmts         = {};
        self.analyzeCodeOperation(aoStmts, dEflStmts);
        # A block may use at most one of the three call mechanisms.
        if (   ('IEM_CIMPL_F_CALLS_CIMPL' in self.dsCImplFlags)
             + ('IEM_CIMPL_F_CALLS_AIMPL' in self.dsCImplFlags)
             + ('IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE' in self.dsCImplFlags) > 1):
            self.error('Mixing CIMPL/AIMPL/AIMPL_WITH_FXSTATE calls', oGenerator);

        #
        # Analyse EFLAGS related MCs and @opflmodify and friends.
        #
        if dEflStmts:
            oInstruction = self.oMcBlock.oInstruction; # iai.Instruction
            if (   oInstruction is None
                or (oInstruction.asFlTest is None and oInstruction.asFlModify is None)):
                sMcNames = '+'.join(dEflStmts.keys());
                if len(dEflStmts) != 1 or not sMcNames.startswith('IEM_MC_CALL_CIMPL_'): # Hack for far calls
                    self.error('Uses %s but has no @opflmodify, @opfltest or @opflclass with details!' % (sMcNames,), oGenerator);
            elif 'IEM_MC_COMMIT_EFLAGS' in dEflStmts:
                if not oInstruction.asFlModify:
                    if oInstruction.sMnemonic not in [ 'not', ]:
                        self.error('Uses IEM_MC_COMMIT_EFLAGS but has no flags in @opflmodify!', oGenerator);
            elif (   'IEM_MC_CALL_CIMPL_0' in dEflStmts
                  or 'IEM_MC_CALL_CIMPL_1' in dEflStmts
                  or 'IEM_MC_CALL_CIMPL_2' in dEflStmts
                  or 'IEM_MC_CALL_CIMPL_3' in dEflStmts
                  or 'IEM_MC_CALL_CIMPL_4' in dEflStmts
                  or 'IEM_MC_CALL_CIMPL_5' in dEflStmts ):
                if not oInstruction.asFlModify:
                    self.error('Uses IEM_MC_CALL_CIMPL_x or IEM_MC_DEFER_TO_CIMPL_5_RET with IEM_CIMPL_F_STATUS_FLAGS '
                               'or IEM_CIMPL_F_RFLAGS but has no flags in @opflmodify!', oGenerator);
            elif 'IEM_MC_REF_EFLAGS' not in dEflStmts:
                if not oInstruction.asFlTest:
                    if oInstruction.sMnemonic not in [ 'not', ]:
                        self.error('Expected @opfltest!', oGenerator);
            # @opflset and @opflclear entries must be subsets of @opflmodify.
            if oInstruction and oInstruction.asFlSet:
                for sFlag in oInstruction.asFlSet:
                    if sFlag not in oInstruction.asFlModify:
                        self.error('"%s" in @opflset but missing from @opflmodify (%s)!'
                                   % (sFlag, ', '.join(oInstruction.asFlModify)), oGenerator);
            if oInstruction and oInstruction.asFlClear:
                for sFlag in oInstruction.asFlClear:
                    if sFlag not in oInstruction.asFlModify:
                        self.error('"%s" in @opflclear but missing from @opflmodify (%s)!'
                                   % (sFlag, ', '.join(oInstruction.asFlModify)), oGenerator);

        #
        # Create variations as needed.
        #
        # Pure defer-to-CIMPL blocks only need the default variation.
        if iai.McStmt.findStmtByNames(aoStmts,
                                      { 'IEM_MC_DEFER_TO_CIMPL_0_RET': True,
                                        'IEM_MC_DEFER_TO_CIMPL_1_RET': True,
                                        'IEM_MC_DEFER_TO_CIMPL_2_RET': True,
                                        'IEM_MC_DEFER_TO_CIMPL_3_RET': True, }):
            asVariations = (ThreadedFunctionVariation.ksVariation_Default,);

        # Blocks with effective-address calculation or direct memory access get
        # the per-addressing-mode variation sets.
        elif iai.McStmt.findStmtByNames(aoStmts, { 'IEM_MC_CALC_RM_EFF_ADDR' : True,
                                                   'IEM_MC_FETCH_MEM_U8'     : True, # mov_AL_Ob ++
                                                   'IEM_MC_FETCH_MEM_U16'    : True, # mov_rAX_Ov ++
                                                   'IEM_MC_FETCH_MEM_U32'    : True,
                                                   'IEM_MC_FETCH_MEM_U64'    : True,
                                                   'IEM_MC_STORE_MEM_U8'     : True, # mov_Ob_AL ++
                                                   'IEM_MC_STORE_MEM_U16'    : True, # mov_Ov_rAX ++
                                                   'IEM_MC_STORE_MEM_U32'    : True,
                                                   'IEM_MC_STORE_MEM_U64'    : True, }):
            if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddressOnly64;
            elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286Not64;
            elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286;
            elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot64;
            elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
            else:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddress;
        else:
            # No memory access; pick the address-less variation sets by CPU mode flags.
            if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressOnly64;
            elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286Not64;
            elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286;
            elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot64;
            elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
            else:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddress;

        # Conditional relative branches get _Jmp/_NoJmp variation pairs.
        if (    'IEM_CIMPL_F_BRANCH_CONDITIONAL' in self.dsCImplFlags
            and 'IEM_CIMPL_F_BRANCH_RELATIVE'    in self.dsCImplFlags): # (latter to avoid iemOp_into)
            assert set(asVariations).issubset(ThreadedFunctionVariation.kasVariationsWithoutAddress), \
                   '%s: vars=%s McFlags=%s' % (self.oMcBlock.oFunction.sName, asVariations, self.oMcBlock.dsMcFlags);
            asVariationsBase = asVariations;
            asVariations     = [];
            for sVariation in asVariationsBase:
                asVariations.extend([sVariation + '_Jmp', sVariation + '_NoJmp']);
            assert set(asVariations).issubset(ThreadedFunctionVariation.kdVariationsWithConditional);

        # Drop the eflags-checking-and-clearing variations when the block
        # contains no finish/branch statement that would need them.
        if not iai.McStmt.findStmtByNames(aoStmts,
                                          { 'IEM_MC_ADVANCE_RIP_AND_FINISH': True,
                                            'IEM_MC_REL_JMP_S8_AND_FINISH': True,
                                            'IEM_MC_REL_JMP_S16_AND_FINISH': True,
                                            'IEM_MC_REL_JMP_S32_AND_FINISH': True,
                                            'IEM_MC_SET_RIP_U16_AND_FINISH': True,
                                            'IEM_MC_SET_RIP_U32_AND_FINISH': True,
                                            'IEM_MC_SET_RIP_U64_AND_FINISH': True,
                                          }):
            asVariations = [sVariation for sVariation in asVariations
                            if sVariation not in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing];

        self.aoVariations = [ThreadedFunctionVariation(self, sVar) for sVar in asVariations];

        # Dictionary variant of the list.
        self.dVariations = { oVar.sVariation: oVar for oVar in self.aoVariations };

        #
        # Try annotate the threaded function name.
        #
        self.analyzeAndAnnotateName(aoStmts);

        #
        # Continue the analysis on each variation.
        #
        for oVariation in self.aoVariations:
            oVariation.analyzeVariation(aoStmts);

        return True;
2105
    ## Used by emitThreadedCallStmts.
    ## Variations whose selection depends on address-size or segment prefixes,
    ## i.e. those that need extra bits in the mode switch value.
    kdVariationsWithNeedForPrefixCheck = {
        ThreadedFunctionVariation.ksVariation_64_Addr32:  True,
        ThreadedFunctionVariation.ksVariation_64f_Addr32: True,
        ThreadedFunctionVariation.ksVariation_64_FsGs:    True,
        ThreadedFunctionVariation.ksVariation_64f_FsGs:   True,
        ThreadedFunctionVariation.ksVariation_32_Addr16:  True,
        ThreadedFunctionVariation.ksVariation_32f_Addr16: True,
        ThreadedFunctionVariation.ksVariation_32_Flat:    True,
        ThreadedFunctionVariation.ksVariation_32f_Flat:   True,
        ThreadedFunctionVariation.ksVariation_16_Addr32:  True,
        ThreadedFunctionVariation.ksVariation_16f_Addr32: True,
    };
2119
2120 def emitThreadedCallStmts(self, sBranch = None): # pylint: disable=too-many-statements
2121 """
2122 Worker for morphInputCode that returns a list of statements that emits
2123 the call to the threaded functions for the block.
2124
2125 The sBranch parameter is used with conditional branches where we'll emit
2126 different threaded calls depending on whether we're in the jump-taken or
2127 no-jump code path.
2128 """
2129 # Special case for only default variation:
2130 if len(self.aoVariations) == 1 and self.aoVariations[0].sVariation == ThreadedFunctionVariation.ksVariation_Default:
2131 assert not sBranch;
2132 return self.aoVariations[0].emitThreadedCallStmts(0);
2133
2134 #
2135 # Case statement sub-class.
2136 #
2137 dByVari = self.dVariations;
2138 #fDbg = self.oMcBlock.sFunction == 'iemOpCommonPushSReg';
2139 class Case:
2140 def __init__(self, sCond, sVarNm = None):
2141 self.sCond = sCond;
2142 self.sVarNm = sVarNm;
2143 self.oVar = dByVari[sVarNm] if sVarNm else None;
2144 self.aoBody = self.oVar.emitThreadedCallStmts(8) if sVarNm else None;
2145
2146 def toCode(self):
2147 aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
2148 if self.aoBody:
2149 aoStmts.extend(self.aoBody);
2150 aoStmts.append(iai.McCppGeneric('break;', cchIndent = 8));
2151 return aoStmts;
2152
2153 def toFunctionAssignment(self):
2154 aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
2155 if self.aoBody:
2156 aoStmts.extend([
2157 iai.McCppGeneric('enmFunction = %s;' % (self.oVar.getIndexName(),), cchIndent = 8),
2158 iai.McCppGeneric('break;', cchIndent = 8),
2159 ]);
2160 return aoStmts;
2161
2162 def isSame(self, oThat):
2163 if not self.aoBody: # fall thru always matches.
2164 return True;
2165 if len(self.aoBody) != len(oThat.aoBody):
2166 #if fDbg: print('dbg: body len diff: %s vs %s' % (len(self.aoBody), len(oThat.aoBody),));
2167 return False;
2168 for iStmt, oStmt in enumerate(self.aoBody):
2169 oThatStmt = oThat.aoBody[iStmt] # type: iai.McStmt
2170 assert isinstance(oStmt, iai.McCppGeneric);
2171 assert not isinstance(oStmt, iai.McStmtCond);
2172 if isinstance(oStmt, iai.McStmtCond):
2173 return False;
2174 if oStmt.sName != oThatStmt.sName:
2175 #if fDbg: print('dbg: stmt #%s name: %s vs %s' % (iStmt, oStmt.sName, oThatStmt.sName,));
2176 return False;
2177 if len(oStmt.asParams) != len(oThatStmt.asParams):
2178 #if fDbg: print('dbg: stmt #%s param count: %s vs %s'
2179 # % (iStmt, len(oStmt.asParams), len(oThatStmt.asParams),));
2180 return False;
2181 for iParam, sParam in enumerate(oStmt.asParams):
2182 if ( sParam != oThatStmt.asParams[iParam]
2183 and ( iParam != 1
2184 or not isinstance(oStmt, iai.McCppCall)
2185 or not oStmt.asParams[0].startswith('IEM_MC2_EMIT_CALL_')
2186 or sParam != self.oVar.getIndexName()
2187 or oThatStmt.asParams[iParam] != oThat.oVar.getIndexName() )):
2188 #if fDbg: print('dbg: stmt #%s, param #%s: %s vs %s'
2189 # % (iStmt, iParam, sParam, oThatStmt.asParams[iParam],));
2190 return False;
2191 return True;
2192
2193 #
2194 # Determine what we're switch on.
2195 # This ASSUMES that (IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7!
2196 #
2197 fSimple = True;
2198 sSwitchValue = '(pVCpu->iem.s.fExec & (IEM_F_MODE_CPUMODE_MASK | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK))';
2199 if dByVari.keys() & self.kdVariationsWithNeedForPrefixCheck.keys():
2200 sSwitchValue += ' | (pVCpu->iem.s.enmEffAddrMode == (pVCpu->iem.s.fExec & IEM_F_MODE_CPUMODE_MASK) ? 0 : 8)';
2201 # Accesses via FS and GS and CS goes thru non-FLAT functions. (CS
2202 # is not writable in 32-bit mode (at least), thus the penalty mode
2203 # for any accesses via it (simpler this way).)
2204 sSwitchValue += ' | (pVCpu->iem.s.iEffSeg < X86_SREG_FS && pVCpu->iem.s.iEffSeg != X86_SREG_CS ? 0 : 16)';
2205 fSimple = False; # threaded functions.
2206 if dByVari.keys() & ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
2207 sSwitchValue += ' | ((pVCpu->iem.s.fTbPrevInstr & (IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_INHIBIT_SHADOW)) || ' \
2208 + '(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_MASK) ? 32 : 0)';
2209
2210 #
2211 # Generate the case statements.
2212 #
2213 # pylintx: disable=x
2214 aoCases = [];
2215 if ThreadedFunctionVariation.ksVariation_64_Addr32 in dByVari:
2216 assert not fSimple and not sBranch;
2217 aoCases.extend([
2218 Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64),
2219 Case('IEMMODE_64BIT | 16', ThrdFnVar.ksVariation_64_FsGs),
2220 Case('IEMMODE_64BIT | 8 | 16', None), # fall thru
2221 Case('IEMMODE_64BIT | 8', ThrdFnVar.ksVariation_64_Addr32),
2222 ]);
2223 if ThreadedFunctionVariation.ksVariation_64f_Addr32 in dByVari:
2224 aoCases.extend([
2225 Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f),
2226 Case('IEMMODE_64BIT | 32 | 16', ThrdFnVar.ksVariation_64f_FsGs),
2227 Case('IEMMODE_64BIT | 32 | 8 | 16', None), # fall thru
2228 Case('IEMMODE_64BIT | 32 | 8', ThrdFnVar.ksVariation_64f_Addr32),
2229 ]);
2230 elif ThrdFnVar.ksVariation_64 in dByVari:
2231 assert fSimple and not sBranch;
2232 aoCases.append(Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64));
2233 if ThreadedFunctionVariation.ksVariation_64f in dByVari:
2234 aoCases.append(Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f));
2235 elif ThrdFnVar.ksVariation_64_Jmp in dByVari:
2236 assert fSimple and sBranch;
2237 aoCases.append(Case('IEMMODE_64BIT',
2238 ThrdFnVar.ksVariation_64_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64_NoJmp));
2239 if ThreadedFunctionVariation.ksVariation_64f_Jmp in dByVari:
2240 aoCases.append(Case('IEMMODE_64BIT | 32',
2241 ThrdFnVar.ksVariation_64f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64f_NoJmp));
2242
2243 if ThrdFnVar.ksVariation_32_Addr16 in dByVari:
2244 assert not fSimple and not sBranch;
2245 aoCases.extend([
2246 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_32_Flat),
2247 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None), # fall thru
2248 Case('IEMMODE_32BIT | 16', None), # fall thru
2249 Case('IEMMODE_32BIT', ThrdFnVar.ksVariation_32),
2250 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8', None), # fall thru
2251 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8 | 16',None), # fall thru
2252 Case('IEMMODE_32BIT | 8 | 16',None), # fall thru
2253 Case('IEMMODE_32BIT | 8', ThrdFnVar.ksVariation_32_Addr16),
2254 ]);
2255 if ThrdFnVar.ksVariation_32f_Addr16 in dByVari:
2256 aoCases.extend([
2257 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_32f_Flat),
2258 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None), # fall thru
2259 Case('IEMMODE_32BIT | 32 | 16', None), # fall thru
2260 Case('IEMMODE_32BIT | 32', ThrdFnVar.ksVariation_32f),
2261 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8', None), # fall thru
2262 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8 | 16',None), # fall thru
2263 Case('IEMMODE_32BIT | 32 | 8 | 16',None), # fall thru
2264 Case('IEMMODE_32BIT | 32 | 8', ThrdFnVar.ksVariation_32f_Addr16),
2265 ]);
2266 elif ThrdFnVar.ksVariation_32 in dByVari:
2267 assert fSimple and not sBranch;
2268 aoCases.extend([
2269 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', None), # fall thru
2270 Case('IEMMODE_32BIT', ThrdFnVar.ksVariation_32),
2271 ]);
2272 if ThrdFnVar.ksVariation_32f in dByVari:
2273 aoCases.extend([
2274 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', None), # fall thru
2275 Case('IEMMODE_32BIT | 32', ThrdFnVar.ksVariation_32f),
2276 ]);
2277 elif ThrdFnVar.ksVariation_32_Jmp in dByVari:
2278 assert fSimple and sBranch;
2279 aoCases.extend([
2280 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', None), # fall thru
2281 Case('IEMMODE_32BIT',
2282 ThrdFnVar.ksVariation_32_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32_NoJmp),
2283 ]);
2284 if ThrdFnVar.ksVariation_32f_Jmp in dByVari:
2285 aoCases.extend([
2286 Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', None), # fall thru
2287 Case('IEMMODE_32BIT | 32',
2288 ThrdFnVar.ksVariation_32f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32f_NoJmp),
2289 ]);
2290
2291 if ThrdFnVar.ksVariation_16_Addr32 in dByVari:
2292 assert not fSimple and not sBranch;
2293 aoCases.extend([
2294 Case('IEMMODE_16BIT | 16', None), # fall thru
2295 Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16),
2296 Case('IEMMODE_16BIT | 8 | 16', None), # fall thru
2297 Case('IEMMODE_16BIT | 8', ThrdFnVar.ksVariation_16_Addr32),
2298 ]);
2299 if ThrdFnVar.ksVariation_16f_Addr32 in dByVari:
2300 aoCases.extend([
2301 Case('IEMMODE_16BIT | 32 | 16', None), # fall thru
2302 Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f),
2303 Case('IEMMODE_16BIT | 32 | 8 | 16', None), # fall thru
2304 Case('IEMMODE_16BIT | 32 | 8', ThrdFnVar.ksVariation_16f_Addr32),
2305 ]);
2306 elif ThrdFnVar.ksVariation_16 in dByVari:
2307 assert fSimple and not sBranch;
2308 aoCases.append(Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16));
2309 if ThrdFnVar.ksVariation_16f in dByVari:
2310 aoCases.append(Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f));
2311 elif ThrdFnVar.ksVariation_16_Jmp in dByVari:
2312 assert fSimple and sBranch;
2313 aoCases.append(Case('IEMMODE_16BIT',
2314 ThrdFnVar.ksVariation_16_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_16_NoJmp));
2315 if ThrdFnVar.ksVariation_16f_Jmp in dByVari:
2316 aoCases.append(Case('IEMMODE_16BIT | 32',
2317 ThrdFnVar.ksVariation_16f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_16f_NoJmp));
2318
2319
2320 if ThrdFnVar.ksVariation_16_Pre386 in dByVari:
2321 if not fSimple:
2322 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None)); # fall thru
2323 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_16_Pre386));
2324 if ThrdFnVar.ksVariation_16f_Pre386 in dByVari: # should be nested under previous if, but line too long.
2325 if not fSimple:
2326 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None)); # fall thru
2327 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_16f_Pre386));
2328
2329 if ThrdFnVar.ksVariation_16_Pre386_Jmp in dByVari:
2330 assert fSimple and sBranch;
2331 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK',
2332 ThrdFnVar.ksVariation_16_Pre386_Jmp if sBranch == 'Jmp'
2333 else ThrdFnVar.ksVariation_16_Pre386_NoJmp));
2334 if ThrdFnVar.ksVariation_16f_Pre386_Jmp in dByVari:
2335 assert fSimple and sBranch;
2336 aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32',
2337 ThrdFnVar.ksVariation_16f_Pre386_Jmp if sBranch == 'Jmp'
2338 else ThrdFnVar.ksVariation_16f_Pre386_NoJmp));
2339
2340 #
2341 # If the case bodies are all the same, except for the function called,
2342 # we can reduce the code size and hopefully compile time.
2343 #
2344 iFirstCaseWithBody = 0;
2345 while not aoCases[iFirstCaseWithBody].aoBody:
2346 iFirstCaseWithBody += 1
2347 fAllSameCases = True
2348 for iCase in range(iFirstCaseWithBody + 1, len(aoCases)):
2349 fAllSameCases = fAllSameCases and aoCases[iCase].isSame(aoCases[iFirstCaseWithBody]);
2350 #if fDbg: print('fAllSameCases=%s %s' % (fAllSameCases, self.oMcBlock.sFunction,));
2351 if fAllSameCases:
2352 aoStmts = [
2353 iai.McCppGeneric('IEMTHREADEDFUNCS enmFunction;'),
2354 iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
2355 iai.McCppGeneric('{'),
2356 ];
2357 for oCase in aoCases:
2358 aoStmts.extend(oCase.toFunctionAssignment());
2359 aoStmts.extend([
2360 iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
2361 iai.McCppGeneric('}'),
2362 ]);
2363 aoStmts.extend(dByVari[aoCases[iFirstCaseWithBody].sVarNm].emitThreadedCallStmts(0, 'enmFunction'));
2364
2365 else:
2366 #
2367 # Generate the generic switch statement.
2368 #
2369 aoStmts = [
2370 iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
2371 iai.McCppGeneric('{'),
2372 ];
2373 for oCase in aoCases:
2374 aoStmts.extend(oCase.toCode());
2375 aoStmts.extend([
2376 iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
2377 iai.McCppGeneric('}'),
2378 ]);
2379
2380 return aoStmts;
2381
    def morphInputCode(self, aoStmts, fIsConditional = False, fCallEmitted = False, cDepth = 0, sBranchAnnotation = None):
        """
        Adjusts (& copies) the statements for the input/decoder so it will emit
        calls to the right threaded functions for each block.

        Called recursively for the branches of conditional statements; cDepth
        gives the nesting level (0 = top level) and sBranchAnnotation carries
        the g_ksFinishAnnotation_* classification of the branch being morphed.
        fIsConditional selects the Jmp/NoJmp threaded call flavours for
        conditional relative branch blocks.

        Returns list/tree of statements (aoStmts is not modified) and updated
        fCallEmitted status.
        """
        #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
        aoDecoderStmts = [];

        for iStmt, oStmt in enumerate(aoStmts):
            # Copy the statement. Make a deep copy to make sure we've got our own
            # copies of all instance variables, even if a bit overkill at the moment.
            oNewStmt = copy.deepcopy(oStmt);
            aoDecoderStmts.append(oNewStmt);
            #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));
            # Patch the accumulated C-impl flags into the IEM_MC_BEGIN statement.
            if oNewStmt.sName == 'IEM_MC_BEGIN' and self.dsCImplFlags:
                oNewStmt.asParams[3] = ' | '.join(sorted(self.dsCImplFlags.keys()));

            # If we haven't emitted the threaded function call yet, look for
            # statements which it would naturally follow or preceed.
            if not fCallEmitted:
                if not oStmt.isCppStmt():
                    if (   oStmt.sName.startswith('IEM_MC_MAYBE_RAISE_') \
                        or (oStmt.sName.endswith('_AND_FINISH') and oStmt.sName.startswith('IEM_MC_'))
                        or oStmt.sName.startswith('IEM_MC_CALL_CIMPL_')
                        or oStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_')
                        or oStmt.sName in ('IEM_MC_RAISE_DIVIDE_ERROR',)):
                        # The threaded call goes in front of raise/finish/cimpl
                        # statements: drop the copy just appended and re-append
                        # it after the call statements.
                        aoDecoderStmts.pop();
                        if not fIsConditional:
                            aoDecoderStmts.extend(self.emitThreadedCallStmts());
                        elif oStmt.sName == 'IEM_MC_ADVANCE_RIP_AND_FINISH':
                            aoDecoderStmts.extend(self.emitThreadedCallStmts('NoJmp'));
                        else:
                            assert oStmt.sName in { 'IEM_MC_REL_JMP_S8_AND_FINISH': True,
                                                    'IEM_MC_REL_JMP_S16_AND_FINISH': True,
                                                    'IEM_MC_REL_JMP_S32_AND_FINISH': True, };
                            aoDecoderStmts.extend(self.emitThreadedCallStmts('Jmp'));
                        aoDecoderStmts.append(oNewStmt);
                        fCallEmitted = True;

                    elif iai.g_dMcStmtParsers[oStmt.sName][2]:
                        # This is for Jmp/NoJmp with loopne and friends which modifies state other than RIP.
                        if not sBranchAnnotation:
                            self.raiseProblem('Modifying state before emitting calls! %s' % (oStmt.sName,));
                        assert fIsConditional;
                        aoDecoderStmts.pop();
                        if sBranchAnnotation == g_ksFinishAnnotation_Advance:
                            assert iai.McStmt.findStmtByNames(aoStmts[iStmt:], {'IEM_MC_ADVANCE_RIP_AND_FINISH':1,})
                            aoDecoderStmts.extend(self.emitThreadedCallStmts('NoJmp'));
                        elif sBranchAnnotation == g_ksFinishAnnotation_RelJmp:
                            assert iai.McStmt.findStmtByNames(aoStmts[iStmt:],
                                                              { 'IEM_MC_REL_JMP_S8_AND_FINISH': 1,
                                                                'IEM_MC_REL_JMP_S16_AND_FINISH': 1,
                                                                'IEM_MC_REL_JMP_S32_AND_FINISH': 1, });
                            aoDecoderStmts.extend(self.emitThreadedCallStmts('Jmp'));
                        else:
                            self.raiseProblem('Modifying state before emitting calls! %s' % (oStmt.sName,));
                        aoDecoderStmts.append(oNewStmt);
                        fCallEmitted = True;

                elif (    not fIsConditional
                      and oStmt.fDecode
                      and (   oStmt.asParams[0].find('IEMOP_HLP_DONE_') >= 0
                           or oStmt.asParams[0].find('IEMOP_HLP_DECODED_') >= 0)):
                    # Unconditional blocks: emit right after the decoding is done.
                    aoDecoderStmts.extend(self.emitThreadedCallStmts());
                    fCallEmitted = True;

            # Process branches of conditionals recursively.
            if isinstance(oStmt, iai.McStmtCond):
                (oNewStmt.aoIfBranch, fCallEmitted1) = self.morphInputCode(oStmt.aoIfBranch, fIsConditional,
                                                                           fCallEmitted, cDepth + 1, oStmt.oIfBranchAnnotation);
                if oStmt.aoElseBranch:
                    (oNewStmt.aoElseBranch, fCallEmitted2) = self.morphInputCode(oStmt.aoElseBranch, fIsConditional,
                                                                                 fCallEmitted, cDepth + 1,
                                                                                 oStmt.oElseBranchAnnotation);
                else:
                    fCallEmitted2 = False;
                # Only consider the call emitted if it was emitted on *both* branches.
                fCallEmitted = fCallEmitted or (fCallEmitted1 and fCallEmitted2);

        if not fCallEmitted and cDepth == 0:
            self.raiseProblem('Unable to insert call to threaded function.');

        return (aoDecoderStmts, fCallEmitted);
2467
2468
2469 def generateInputCode(self):
2470 """
2471 Modifies the input code.
2472 """
2473 cchIndent = (self.oMcBlock.cchIndent + 3) // 4 * 4;
2474
2475 if len(self.oMcBlock.aoStmts) == 1:
2476 # IEM_MC_DEFER_TO_CIMPL_X_RET - need to wrap in {} to make it safe to insert into random code.
2477 sCode = ' ' * cchIndent + 'pVCpu->iem.s.fTbCurInstr = ';
2478 if self.dsCImplFlags:
2479 sCode += ' | '.join(sorted(self.dsCImplFlags.keys())) + ';\n';
2480 else:
2481 sCode += '0;\n';
2482 sCode += iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts)[0],
2483 cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
2484 sIndent = ' ' * (min(cchIndent, 2) - 2);
2485 sCode = sIndent + '{\n' + sCode + sIndent + '}\n';
2486 return sCode;
2487
2488 # IEM_MC_BEGIN/END block
2489 assert len(self.oMcBlock.asLines) > 2, "asLines=%s" % (self.oMcBlock.asLines,);
2490 fIsConditional = ( 'IEM_CIMPL_F_BRANCH_CONDITIONAL' in self.dsCImplFlags
2491 and 'IEM_CIMPL_F_BRANCH_RELATIVE' in self.dsCImplFlags); # (latter to avoid iemOp_into)
2492 return iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts, fIsConditional)[0],
2493 cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
2494
# Short alias for ThreadedFunctionVariation.  (The method bodies above refer
# to it by name only, so defining it here after the class body is fine.)
ThrdFnVar = ThreadedFunctionVariation;
2497
2498
2499class IEMThreadedGenerator(object):
2500 """
2501 The threaded code generator & annotator.
2502 """
2503
    def __init__(self):
        """ Initializes an empty generator state; populated by processInputFiles. """
        self.aoThreadedFuncs = [] # type: List[ThreadedFunction]
        self.oOptions = None # type: argparse.Namespace
        self.aoParsers = [] # type: List[IEMAllInstPython.SimpleParser]
        self.aidxFirstFunctions = [] # type: List[int] ##< Runs parallel to aoParser giving the index of the first function.
        # Number of errors reported via rawError() and friends.
        self.cErrors = 0;
2510
2511 #
2512 # Error reporting.
2513 #
2514
2515 def rawError(self, sCompleteMessage):
2516 """ Output a raw error and increment the error counter. """
2517 print(sCompleteMessage, file = sys.stderr);
2518 self.cErrors += 1;
2519 return False;
2520
2521 #
2522 # Processing.
2523 #
2524
    def processInputFiles(self, sHostArch, fNativeRecompilerEnabled):
        """
        Process the input files.

        Parses the instruction source files, wraps each MC block in a
        ThreadedFunction, analyzes the functions and their variations, settles
        threaded function name suffixes, builds the parser -> first-function
        index map, and gathers/validates argument + variable statistics.

        Returns True on success, False if errors were reported (self.cErrors).
        """

        # Parse the files.
        self.aoParsers = iai.parseFiles(self.oOptions.asInFiles, sHostArch);

        # Create threaded functions for the MC blocks.
        self.aoThreadedFuncs = [ThreadedFunction(oMcBlock) for oMcBlock in iai.g_aoMcBlocks];

        # Analyze the threaded functions.
        dRawParamCounts = {};
        dMinParamCounts = {};
        for oThreadedFunction in self.aoThreadedFuncs:
            oThreadedFunction.analyze(self);
            for oVariation in oThreadedFunction.aoVariations:
                dRawParamCounts[len(oVariation.dParamRefs)] = dRawParamCounts.get(len(oVariation.dParamRefs), 0) + 1;
                dMinParamCounts[oVariation.cMinParams] = dMinParamCounts.get(oVariation.cMinParams, 0) + 1;
        print('debug: param count distribution, raw and optimized:', file = sys.stderr);
        for cCount in sorted({cBits: True for cBits in list(dRawParamCounts.keys()) + list(dMinParamCounts.keys())}.keys()):
            print('debug:     %s params: %4s raw, %4s min'
                  % (cCount, dRawParamCounts.get(cCount, 0), dMinParamCounts.get(cCount, 0)),
                  file = sys.stderr);

        # Do another pass over the threaded functions to settle the name suffix.
        iThreadedFn = 0;
        while iThreadedFn < len(self.aoThreadedFuncs):
            oFunction = self.aoThreadedFuncs[iThreadedFn].oMcBlock.oFunction;
            assert oFunction;
            iThreadedFnNext = iThreadedFn + 1;
            dSubNames = { self.aoThreadedFuncs[iThreadedFn].sSubName: 1 };
            while (   iThreadedFnNext < len(self.aoThreadedFuncs)
                   and self.aoThreadedFuncs[iThreadedFnNext].oMcBlock.oFunction == oFunction):
                dSubNames[self.aoThreadedFuncs[iThreadedFnNext].sSubName] = 1;
                iThreadedFnNext += 1;
            # If several blocks within the same function share a sub-name,
            # number them (_0, _1, ...) so the threaded names stay unique.
            if iThreadedFnNext - iThreadedFn > len(dSubNames):
                iSubName = 0;
                while iThreadedFn + iSubName < iThreadedFnNext:
                    self.aoThreadedFuncs[iThreadedFn + iSubName].sSubName += '_%s' % (iSubName,);
                    iSubName += 1;
            iThreadedFn = iThreadedFnNext;

        # Populate aidxFirstFunctions.  This is ASSUMING that
        # g_aoMcBlocks/self.aoThreadedFuncs are in self.aoParsers order.
        iThreadedFunction = 0;
        oThreadedFunction = self.getThreadedFunctionByIndex(0);
        self.aidxFirstFunctions = [];
        for oParser in self.aoParsers:
            self.aidxFirstFunctions.append(iThreadedFunction);

            # NOTE(review): the loop relies on getThreadedFunctionByIndex()
            # returning a sentinel with a non-matching sSrcFile once the index
            # goes past the end - confirm against its definition.
            while oThreadedFunction.oMcBlock.sSrcFile == oParser.sSrcFile:
                iThreadedFunction += 1;
                oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);

        # Analyze the threaded functions and their variations for native recompilation.
        if fNativeRecompilerEnabled:
            ian.displayStatistics(self.aoThreadedFuncs, sHostArch);

        # Gather arguments + variable statistics for the MC blocks.
        cMaxArgs = 0;
        cMaxVars = 0;
        cMaxVarsAndArgs = 0;
        cbMaxArgs = 0;
        cbMaxVars = 0;
        cbMaxVarsAndArgs = 0;
        for oThreadedFunction in self.aoThreadedFuncs:
            if oThreadedFunction.oMcBlock.cLocals >= 0:
                # Counts.
                assert oThreadedFunction.oMcBlock.cArgs >= 0;
                cMaxVars = max(cMaxVars, oThreadedFunction.oMcBlock.cLocals);
                cMaxArgs = max(cMaxArgs, oThreadedFunction.oMcBlock.cArgs);
                cMaxVarsAndArgs = max(cMaxVarsAndArgs, oThreadedFunction.oMcBlock.cLocals + oThreadedFunction.oMcBlock.cArgs);
                if cMaxVarsAndArgs > 9:
                    raise Exception('%s potentially uses too many variables / args: %u, max 10 - %u vars and %u args'
                                    % (oThreadedFunction.oMcBlock.oFunction.sName, cMaxVarsAndArgs,
                                       oThreadedFunction.oMcBlock.cLocals, oThreadedFunction.oMcBlock.cArgs));
                # Calc stack allocation size:
                cbArgs = 0;
                for oArg in oThreadedFunction.oMcBlock.aoArgs:
                    cbArgs += (getTypeBitCount(oArg.sType) + 63) // 64 * 8;
                cbVars = 0;
                for oVar in oThreadedFunction.oMcBlock.aoLocals:
                    cbVars += (getTypeBitCount(oVar.sType) + 63) // 64 * 8;
                cbMaxVars = max(cbMaxVars, cbVars);
                cbMaxArgs = max(cbMaxArgs, cbArgs);
                cbMaxVarsAndArgs = max(cbMaxVarsAndArgs, cbVars + cbArgs);
                if cbMaxVarsAndArgs >= 0xc0:
                    raise Exception('%s potentially uses too much stack: cbMaxVars=%#x cbMaxArgs=%#x'
                                    % (oThreadedFunction.oMcBlock.oFunction.sName, cbMaxVars, cbMaxArgs,));

        print('debug: max vars+args: %u bytes / %u; max vars: %u bytes / %u; max args: %u bytes / %u'
              % (cbMaxVarsAndArgs, cMaxVarsAndArgs, cbMaxVars, cMaxVars, cbMaxArgs, cMaxArgs,), file = sys.stderr);

        if self.cErrors > 0:
            print('fatal error: %u error%s during processing. Details above.'
                  % (self.cErrors, 's' if self.cErrors > 1 else '',), file = sys.stderr);
            return False;
        return True;
2624
2625 #
2626 # Output
2627 #
2628
2629 def generateLicenseHeader(self):
2630 """
2631 Returns the lines for a license header.
2632 """
2633 return [
2634 '/*',
2635 ' * Autogenerated by $Id: IEMAllThrdPython.py 103678 2024-03-05 09:56:20Z vboxsync $ ',
2636 ' * Do not edit!',
2637 ' */',
2638 '',
2639 '/*',
2640 ' * Copyright (C) 2023-' + str(datetime.date.today().year) + ' Oracle and/or its affiliates.',
2641 ' *',
2642 ' * This file is part of VirtualBox base platform packages, as',
2643 ' * available from https://www.alldomusa.eu.org.',
2644 ' *',
2645 ' * This program is free software; you can redistribute it and/or',
2646 ' * modify it under the terms of the GNU General Public License',
2647 ' * as published by the Free Software Foundation, in version 3 of the',
2648 ' * License.',
2649 ' *',
2650 ' * This program is distributed in the hope that it will be useful, but',
2651 ' * WITHOUT ANY WARRANTY; without even the implied warranty of',
2652 ' * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU',
2653 ' * General Public License for more details.',
2654 ' *',
2655 ' * You should have received a copy of the GNU General Public License',
2656 ' * along with this program; if not, see <https://www.gnu.org/licenses>.',
2657 ' *',
2658 ' * The contents of this file may alternatively be used under the terms',
2659 ' * of the Common Development and Distribution License Version 1.0',
2660 ' * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included',
2661 ' * in the VirtualBox distribution, in which case the provisions of the',
2662 ' * CDDL are applicable instead of those of the GPL.',
2663 ' *',
2664 ' * You may elect to license modified versions of this file under the',
2665 ' * terms and conditions of either the GPL or the CDDL or both.',
2666 ' *',
2667 ' * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0',
2668 ' */',
2669 '',
2670 '',
2671 '',
2672 ];
2673
    ## List of built-in threaded functions with user argument counts and
    ## whether it has a native recompiler implementation.
    ## Each entry: (name suffix, cArgs, fHaveNativeRecompilerFunc).
    katBltIns = (
        ( 'Nop',                                                0, True  ),
        ( 'LogCpuState',                                        0, True  ),

        ( 'DeferToCImpl0',                                      2, True  ),
        ( 'CheckIrq',                                           0, True  ),
        ( 'CheckMode',                                          1, True  ),
        ( 'CheckHwInstrBps',                                    0, False ),
        ( 'CheckCsLim',                                         1, True  ),

        ( 'CheckCsLimAndOpcodes',                               3, True  ),
        ( 'CheckOpcodes',                                       3, True  ),
        ( 'CheckOpcodesConsiderCsLim',                          3, True  ),

        ( 'CheckCsLimAndPcAndOpcodes',                          3, True  ),
        ( 'CheckPcAndOpcodes',                                  3, True  ),
        ( 'CheckPcAndOpcodesConsiderCsLim',                     3, True  ),

        ( 'CheckCsLimAndOpcodesAcrossPageLoadingTlb',           3, True  ),
        ( 'CheckOpcodesAcrossPageLoadingTlb',                   3, True  ),
        ( 'CheckOpcodesAcrossPageLoadingTlbConsiderCsLim',      2, True  ),

        ( 'CheckCsLimAndOpcodesLoadingTlb',                     3, True  ),
        ( 'CheckOpcodesLoadingTlb',                             3, True  ),
        ( 'CheckOpcodesLoadingTlbConsiderCsLim',                3, True  ),

        ( 'CheckCsLimAndOpcodesOnNextPageLoadingTlb',           2, True  ),
        ( 'CheckOpcodesOnNextPageLoadingTlb',                   2, True  ),
        ( 'CheckOpcodesOnNextPageLoadingTlbConsiderCsLim',      2, True  ),

        ( 'CheckCsLimAndOpcodesOnNewPageLoadingTlb',            2, True  ),
        ( 'CheckOpcodesOnNewPageLoadingTlb',                    2, True  ),
        ( 'CheckOpcodesOnNewPageLoadingTlbConsiderCsLim',       2, True  ),
    );
2710
    def generateThreadedFunctionsHeader(self, oOut):
        """
        Generates the threaded functions header file.

        Emits the IEMTHREADEDFUNCS enum (built-ins first, then every variation
        of every threaded function in emit order) followed by the extern
        declarations of the lookup tables.

        Side effect: assigns oVariation.iEnumValue for every emitted variation;
        generateThreadedFunctionsSource asserts against those values.

        Returns success indicator.
        """

        asLines = self.generateLicenseHeader();

        # Generate the threaded function table indexes.
        asLines += [
            'typedef enum IEMTHREADEDFUNCS',
            '{',
            '    kIemThreadedFunc_Invalid = 0,',
            '',
            '    /*',
            '     * Predefined',
            '     */',
        ];
        asLines += ['    kIemThreadedFunc_BltIn_%s,' % (sFuncNm,) for sFuncNm, _, _ in self.katBltIns];

        # NOTE(review): with the pre-increment below the first variation gets
        # iEnumValue == len(katBltIns) + 2 while its actual enum value is
        # len(katBltIns) + 1; the value is only used in comments and asserts,
        # but verify whether the off-by-one is intended.
        iThreadedFunction = 1 + len(self.katBltIns);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            asLines += [
                '',
                '    /*',
                '     * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '',
                '     */',
            ];
            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    iThreadedFunction += 1;
                    oVariation.iEnumValue = iThreadedFunction;
                    asLines.append('    ' + oVariation.getIndexName() + ',');
        asLines += [
            '    kIemThreadedFunc_End',
            '} IEMTHREADEDFUNCS;',
            '',
        ];

        # Prototype the function table.
        asLines += [
            'extern const PFNIEMTHREADEDFUNC g_apfnIemThreadedFunctions[kIemThreadedFunc_End];',
            'extern uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End];',
            '#if defined(IN_RING3) || defined(LOG_ENABLED)',
            'extern const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End];',
            '#endif',
            '#if defined(IN_RING3)',
            'extern const char * const g_apszIemThreadedFunctionStats[kIemThreadedFunc_End];',
            '#endif',
        ];

        oOut.write('\n'.join(asLines));
        return True;
2765
    ## Maps a parameter bit width to the UINT64_C mask constant emitted when
    ## unpacking packed threaded-call parameters (see
    ## generateFunctionParameterUnpacking).
    ksBitsToIntMask = {
        1:  "UINT64_C(0x1)",
        2:  "UINT64_C(0x3)",
        4:  "UINT64_C(0xf)",
        8:  "UINT64_C(0xff)",
        16: "UINT64_C(0xffff)",
        32: "UINT64_C(0xffffffff)",
    };
2774
    def generateFunctionParameterUnpacking(self, oVariation, oOut, asParams):
        """
        Outputs code for unpacking parameters.
        This is shared by the threaded and native code generators.

        oVariation's dParamRefs describes where each original reference was
        packed (parameter index iNewParam + bit offset offNewParam); asParams
        gives the C names of the packed 64-bit parameters.

        Returns True (no failure mode).
        """
        aasVars = [];
        for aoRefs in oVariation.dParamRefs.values():
            oRef = aoRefs[0];
            # Pointer types occupy a full 64-bit parameter; other types are
            # packed and need masking/shifting according to their bit count.
            if oRef.sType[0] != 'P':
                cBits = g_kdTypeInfo[oRef.sType][0];
                sType = g_kdTypeInfo[oRef.sType][2];
            else:
                cBits = 64;
                sType = oRef.sType;

            sTypeDecl = sType + ' const';

            if cBits == 64:
                assert oRef.offNewParam == 0;
                if sType == 'uint64_t':
                    sUnpack = '%s;' % (asParams[oRef.iNewParam],);
                else:
                    sUnpack = '(%s)%s;' % (sType, asParams[oRef.iNewParam],);
            elif oRef.offNewParam == 0:
                sUnpack = '(%s)(%s & %s);' % (sType, asParams[oRef.iNewParam], self.ksBitsToIntMask[cBits]);
            else:
                sUnpack = '(%s)((%s >> %s) & %s);' \
                        % (sType, asParams[oRef.iNewParam], oRef.offNewParam, self.ksBitsToIntMask[cBits]);

            sComment = '/* %s - %s ref%s */' % (oRef.sOrgRef, len(aoRefs), 's' if len(aoRefs) != 1 else '',);

            # Column 0 is only a sort key (parameter index : bit offset).
            aasVars.append([ '%s:%02u' % (oRef.iNewParam, oRef.offNewParam),
                             sTypeDecl, oRef.sNewName, sUnpack, sComment ]);
        # Compute per-column widths so the emitted declarations line up.
        acchVars = [0, 0, 0, 0, 0];
        for asVar in aasVars:
            for iCol, sStr in enumerate(asVar):
                acchVars[iCol] = max(acchVars[iCol], len(sStr));
        sFmt = '    %%-%ss %%-%ss = %%-%ss %%s\n' % (acchVars[1], acchVars[2], acchVars[3]);
        for asVar in sorted(aasVars):
            oOut.write(sFmt % (asVar[1], asVar[2], asVar[3], asVar[4],));
        return True;
2816
    ## C names of the generic threaded function parameters (matches g_kcThreadedParams).
    kasThreadedParamNames = ('uParam0', 'uParam1', 'uParam2');
    def generateThreadedFunctionsSource(self, oOut):
        """
        Generates the threaded functions source file.

        Emits one static IEM_DECL_IEMTHREADEDFUNC_DEF function per variation
        (grouped per variation kind), followed by the function pointer,
        argument count, name and statistics name tables.

        Must run after generateThreadedFunctionsHeader, which assigns the
        oVariation.iEnumValue values asserted below.

        Returns success indicator.
        """

        asLines = self.generateLicenseHeader();
        oOut.write('\n'.join(asLines));

        #
        # Emit the function definitions.
        #
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
            # 130 column wide section banner for this variation.
            oOut.write(  '\n'
                       + '\n'
                       + '\n'
                       + '\n'
                       + '/*' + '*' * 128 + '\n'
                       + '*   Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
                       + '*' * 128 + '*/\n');

            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    oMcBlock = oThreadedFunction.oMcBlock;

                    # Function header
                    oOut.write(  '\n'
                               + '\n'
                               + '/**\n'
                               + ' * #%u: %s at line %s offset %s in %s%s\n'
                                  % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
                                     os.path.split(oMcBlock.sSrcFile)[1],
                                     ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
                               + ' */\n'
                               + 'static IEM_DECL_IEMTHREADEDFUNC_DEF(' + oVariation.getThreadedFunctionName() + ')\n'
                               + '{\n');

                    # Unpack parameters.
                    self.generateFunctionParameterUnpacking(oVariation, oOut, self.kasThreadedParamNames);

                    # RT_NOREF for unused parameters.
                    if oVariation.cMinParams < g_kcThreadedParams:
                        oOut.write('    RT_NOREF(' + ', '.join(self.kasThreadedParamNames[oVariation.cMinParams:]) + ');\n');

                    # Now for the actual statements.
                    oOut.write(iai.McStmt.renderCodeForList(oVariation.aoStmtsForThreadedFunction, cchIndent = 4));

                    oOut.write('}\n');


        #
        # Generate the output tables in parallel.
        #
        asFuncTable = [
            '/**',
            ' * Function pointer table.',
            ' */',
            'PFNIEMTHREADEDFUNC const g_apfnIemThreadedFunctions[kIemThreadedFunc_End] =',
            '{',
            '    /*Invalid*/ NULL,',
        ];
        asArgCntTab = [
            '/**',
            ' * Argument count table.',
            ' */',
            'uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End] =',
            '{',
            '    0, /*Invalid*/',
        ];
        asNameTable = [
            '/**',
            ' * Function name table.',
            ' */',
            'const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End] =',
            '{',
            '    "Invalid",',
        ];
        asStatTable = [
            '/**',
            ' * Function statistics name table.',
            ' */',
            'const char * const g_apszIemThreadedFunctionStats[kIemThreadedFunc_End] =',
            '{',
            '    NULL,',
        ];
        aasTables = (asFuncTable, asArgCntTab, asNameTable, asStatTable,);

        for asTable in aasTables:
            asTable.extend((
                '',
                '    /*',
                '     * Predefined.',
                '     */',
            ));
        for sFuncNm, cArgs, _ in self.katBltIns:
            asFuncTable.append('    iemThreadedFunc_BltIn_%s,' % (sFuncNm,));
            asArgCntTab.append('    %d, /*BltIn_%s*/' % (cArgs, sFuncNm,));
            asNameTable.append('    "BltIn_%s",' % (sFuncNm,));
            asStatTable.append('    "BltIn/%s",' % (sFuncNm,));

        # Same numbering as generateThreadedFunctionsHeader; iEnumValue was
        # assigned there and is asserted against here.
        iThreadedFunction = 1 + len(self.katBltIns);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            for asTable in aasTables:
                asTable.extend((
                    '',
                    '    /*',
                    '     * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation],
                    '     */',
                ));
            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    iThreadedFunction += 1;
                    assert oVariation.iEnumValue == iThreadedFunction;
                    sName = oVariation.getThreadedFunctionName();
                    asFuncTable.append('    /*%4u*/ %s,' % (iThreadedFunction, sName,));
                    asNameTable.append('    /*%4u*/ "%s",' % (iThreadedFunction, sName,));
                    asArgCntTab.append('    /*%4u*/ %d, /*%s*/' % (iThreadedFunction, oVariation.cMinParams, sName,));
                    asStatTable.append('    "%s",' % (oVariation.getThreadedFunctionStatisticsName(),));

        for asTable in aasTables:
            asTable.append('};');

        #
        # Output the tables.
        #
        oOut.write(  '\n'
                   + '\n');
        oOut.write('\n'.join(asFuncTable));
        oOut.write(  '\n'
                   + '\n'
                   + '\n');
        oOut.write('\n'.join(asArgCntTab));
        oOut.write(  '\n'
                   + '\n'
                   + '#if defined(IN_RING3) || defined(LOG_ENABLED)\n');
        oOut.write('\n'.join(asNameTable));
        oOut.write(  '\n'
                   + '#endif /* IN_RING3 || LOG_ENABLED */\n'
                   + '\n'
                   + '\n'
                   + '#if defined(IN_RING3)\n');
        oOut.write('\n'.join(asStatTable));
        oOut.write(  '\n'
                   + '#endif /* IN_RING3 */\n');

        return True;
2967
2968 def generateNativeFunctionsHeader(self, oOut):
2969 """
2970 Generates the native recompiler functions header file.
2971 Returns success indicator.
2972 """
2973 if not self.oOptions.fNativeRecompilerEnabled:
2974 return True;
2975
2976 asLines = self.generateLicenseHeader();
2977
2978 # Prototype the function table.
2979 asLines += [
2980 'extern const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End];',
2981 'extern const PFNIEMNATIVELIVENESSFUNC g_apfnIemNativeLivenessFunctions[kIemThreadedFunc_End];',
2982 '',
2983 ];
2984
2985 # Emit indicators as to which of the builtin functions have a native
2986 # recompiler function and which not. (We only really need this for
2987 # kIemThreadedFunc_BltIn_CheckMode, but do all just for simplicity.)
2988 for atBltIn in self.katBltIns:
2989 if atBltIn[1]:
2990 asLines.append('#define IEMNATIVE_WITH_BLTIN_' + atBltIn[0].upper())
2991 else:
2992 asLines.append('#define IEMNATIVE_WITHOUT_BLTIN_' + atBltIn[0].upper())
2993
2994 # Emit prototypes for the builtin functions we use in tables.
2995 asLines += [
2996 '',
2997 '/* Prototypes for built-in functions used in the above tables. */',
2998 ];
2999 for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
3000 if fHaveRecompFunc:
3001 asLines += [
3002 'IEM_DECL_IEMNATIVERECOMPFUNC_PROTO( iemNativeRecompFunc_BltIn_%s);' % (sFuncNm,),
3003 'IEM_DECL_IEMNATIVELIVENESSFUNC_PROTO(iemNativeLivenessFunc_BltIn_%s);' % (sFuncNm,),
3004 ];
3005
3006 oOut.write('\n'.join(asLines));
3007 return True;
3008
    def generateNativeFunctionsSource(self, oOut):
        """
        Generates the native recompiler functions source file.

        Emits one static IEM_DECL_IEMNATIVERECOMPFUNC_DEF function for every
        threaded-function variation that has a recompilable native recompiler,
        followed by the g_apfnIemNativeRecompileFunctions table (NULL entries
        for non-recompilable variations).  No-op unless --native was given.

        Returns success indicator.
        """
        if not self.oOptions.fNativeRecompilerEnabled:
            return True;

        #
        # The file header.
        #
        oOut.write('\n'.join(self.generateLicenseHeader()));

        #
        # Emit the functions.
        #
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
            # Banner comment for this variation; the padding keeps the closing
            # '*' of the name line flush with the 128-star border.
            oOut.write(  '\n'
                       + '\n'
                       + '\n'
                       + '\n'
                       + '/*' + '*' * 128 + '\n'
                       + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
                       + '*' * 128 + '*/\n');

            for oThreadedFunction in self.aoThreadedFuncs:
                # Only variations with a working native recompiler get a function body.
                oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
                if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                    oMcBlock = oThreadedFunction.oMcBlock;

                    # Function header
                    oOut.write(  '\n'
                               + '\n'
                               + '/**\n'
                               + ' * #%u: %s at line %s offset %s in %s%s\n'
                                  % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
                                     os.path.split(oMcBlock.sSrcFile)[1],
                                     ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
                               + ' */\n'
                               + 'static IEM_DECL_IEMNATIVERECOMPFUNC_DEF(' + oVariation.getNativeFunctionName() + ')\n'
                               + '{\n');

                    # Unpack parameters.
                    self.generateFunctionParameterUnpacking(oVariation, oOut,
                                                            ('pCallEntry->auParams[0]',
                                                             'pCallEntry->auParams[1]',
                                                             'pCallEntry->auParams[2]',));

                    # Now for the actual statements.
                    oOut.write(oVariation.oNativeRecomp.renderCode(cchIndent = 4));

                    oOut.write('}\n');

        #
        # Output the function table.
        #
        oOut.write(  '\n'
                   + '\n'
                   + '/*\n'
                   + ' * Function table running parallel to g_apfnIemThreadedFunctions and friends.\n'
                   + ' */\n'
                   + 'const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End] =\n'
                   + '{\n'
                   + '    /*Invalid*/ NULL,'
                   + '\n'
                   + '    /*\n'
                   + '     * Predefined.\n'
                   + '     */\n'
                   );
        for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
            if fHaveRecompFunc:
                oOut.write('    iemNativeRecompFunc_BltIn_%s,\n' % (sFuncNm,))
            else:
                oOut.write('    NULL, /*BltIn_%s*/\n' % (sFuncNm,))

        # iThreadedFunction tracks the enum values assigned earlier; the
        # assert below verifies the table stays in sync with the enum.
        iThreadedFunction = 1 + len(self.katBltIns);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            oOut.write(  '    /*\n'
                       + '     * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '\n'
                       + '     */\n');
            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    iThreadedFunction += 1;
                    assert oVariation.iEnumValue == iThreadedFunction;
                    sName = oVariation.getNativeFunctionName();
                    if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                        oOut.write('    /*%4u*/ %s,\n' % (iThreadedFunction, sName,));
                    else:
                        oOut.write('    /*%4u*/ NULL /*%s*/,\n' % (iThreadedFunction, sName,));

        oOut.write(  '};\n'
                   + '\n');
        return True;
3104
    def generateNativeLivenessSource(self, oOut):
        """
        Generates the native recompiler liveness analysis functions source file.

        Mirrors generateNativeFunctionsSource: one static
        IEM_DECL_IEMNATIVELIVENESSFUNC_DEF per recompilable variation plus the
        parallel g_apfnIemNativeLivenessFunctions table.  No-op unless
        --native was given.

        Returns success indicator.
        """
        if not self.oOptions.fNativeRecompilerEnabled:
            return True;

        #
        # The file header.
        #
        oOut.write('\n'.join(self.generateLicenseHeader()));

        #
        # Emit the functions.
        #
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
            # Banner comment for this variation; the padding keeps the closing
            # '*' of the name line flush with the 128-star border.
            oOut.write(  '\n'
                       + '\n'
                       + '\n'
                       + '\n'
                       + '/*' + '*' * 128 + '\n'
                       + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
                       + '*' * 128 + '*/\n');

            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
                if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                    oMcBlock = oThreadedFunction.oMcBlock;

                    # Function header
                    oOut.write(  '\n'
                               + '\n'
                               + '/**\n'
                               + ' * #%u: %s at line %s offset %s in %s%s\n'
                                  % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
                                     os.path.split(oMcBlock.sSrcFile)[1],
                                     ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
                               + ' */\n'
                               + 'static IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(' + oVariation.getLivenessFunctionName() + ')\n'
                               + '{\n');

                    # Unpack parameters.
                    self.generateFunctionParameterUnpacking(oVariation, oOut,
                                                            ('pCallEntry->auParams[0]',
                                                             'pCallEntry->auParams[1]',
                                                             'pCallEntry->auParams[2]',));
                    # The liveness function may not use all of the unpacked
                    # parameters, so emit RT_NOREF_PV for each of them.
                    asNoRefs = []; #[ 'RT_NOREF_PV(pReNative);', ];
                    for aoRefs in oVariation.dParamRefs.values():
                        asNoRefs.append('RT_NOREF_PV(%s);' % (aoRefs[0].sNewName,));
                    oOut.write('    %s\n' % (' '.join(asNoRefs),));

                    # Now for the actual statements.
                    oOut.write(oVariation.oNativeRecomp.renderCode(cchIndent = 4));

                    oOut.write('}\n');

        #
        # Output the function table.
        #
        oOut.write(  '\n'
                   + '\n'
                   + '/*\n'
                   + ' * Liveness analysis function table running parallel to g_apfnIemThreadedFunctions and friends.\n'
                   + ' */\n'
                   + 'const PFNIEMNATIVELIVENESSFUNC g_apfnIemNativeLivenessFunctions[kIemThreadedFunc_End] =\n'
                   + '{\n'
                   + '    /*Invalid*/ NULL,'
                   + '\n'
                   + '    /*\n'
                   + '     * Predefined.\n'
                   + '     */\n'
                   );
        for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
            if fHaveRecompFunc:
                oOut.write('    iemNativeLivenessFunc_BltIn_%s,\n' % (sFuncNm,))
            else:
                oOut.write('    NULL, /*BltIn_%s*/\n' % (sFuncNm,))

        # iThreadedFunction tracks the enum values assigned earlier; the
        # assert below verifies the table stays in sync with the enum.
        iThreadedFunction = 1 + len(self.katBltIns);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            oOut.write(  '    /*\n'
                       + '     * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '\n'
                       + '     */\n');
            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    iThreadedFunction += 1;
                    assert oVariation.iEnumValue == iThreadedFunction;
                    sName = oVariation.getLivenessFunctionName();
                    if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                        oOut.write('    /*%4u*/ %s,\n' % (iThreadedFunction, sName,));
                    else:
                        oOut.write('    /*%4u*/ NULL /*%s*/,\n' % (iThreadedFunction, sName,));

        oOut.write(  '};\n'
                   + '\n');
        return True;
3204
3205
3206 def getThreadedFunctionByIndex(self, idx):
3207 """
3208 Returns a ThreadedFunction object for the given index. If the index is
3209 out of bounds, a dummy is returned.
3210 """
3211 if idx < len(self.aoThreadedFuncs):
3212 return self.aoThreadedFuncs[idx];
3213 return ThreadedFunction.dummyInstance();
3214
    def generateModifiedInput(self, oOut, idxFile):
        """
        Generates the combined modified input source/header file.

        Copies the parsed input files belonging to file set @a idxFile to
        @a oOut verbatim, except that each IEM_MC block is replaced by the
        annotated code produced by ThreadedFunction.generateInputCode().

        Returns success indicator.
        """
        #
        # File header and assert assumptions.
        #
        oOut.write('\n'.join(self.generateLicenseHeader()));
        oOut.write('AssertCompile((IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7);\n');

        #
        # Iterate all parsers (input files) and output the ones related to the
        # file set given by idxFile.
        #
        for idxParser, oParser in enumerate(self.aoParsers): # type: int, IEMAllInstPython.SimpleParser
            # Is this included in the file set?  (A set index of -1 in the
            # info table appears to mean 'member of every file set'.)
            sSrcBaseFile = os.path.basename(oParser.sSrcFile).lower();
            fInclude = -1;
            for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet:
                if sSrcBaseFile == aoInfo[0].lower():
                    fInclude = aoInfo[2] in (-1, idxFile);
                    break;
            if fInclude is not True:
                # -1 here would mean the file was not found in the info table.
                assert fInclude is False;
                continue;

            # Output it.
            oOut.write("\n\n/* ****** BEGIN %s ******* */\n" % (oParser.sSrcFile,));

            # Walk the input lines, splicing in the modified MC blocks as we
            # reach their begin lines.
            iThreadedFunction = self.aidxFirstFunctions[idxParser];
            oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
            iLine = 0;
            while iLine < len(oParser.asLines):
                sLine = oParser.asLines[iLine];
                iLine += 1; # iBeginLine and iEndLine are 1-based.

                # Can we pass it thru?
                if ( iLine not in [oThreadedFunction.oMcBlock.iBeginLine, oThreadedFunction.oMcBlock.iEndLine]
                    or oThreadedFunction.oMcBlock.sSrcFile != oParser.sSrcFile):
                    oOut.write(sLine);
                #
                # Single MC block.  Just extract it and insert the replacement.
                #
                elif oThreadedFunction.oMcBlock.iBeginLine != oThreadedFunction.oMcBlock.iEndLine:
                    # Sanity: exactly one IEM_MC invocation on the line unless
                    # this came from a partial macro expansion.
                    assert ( (sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1)
                            or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial), 'sLine="%s"' % (sLine,);
                    oOut.write(sLine[:oThreadedFunction.oMcBlock.offBeginLine]);
                    sModified = oThreadedFunction.generateInputCode().strip();
                    oOut.write(sModified);

                    # Skip ahead to the block's end line and emit its tail.
                    iLine = oThreadedFunction.oMcBlock.iEndLine;
                    sLine = oParser.asLines[iLine - 1];
                    assert ( sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1
                            or len(oThreadedFunction.oMcBlock.aoStmts) == 1
                            or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial);
                    oOut.write(sLine[oThreadedFunction.oMcBlock.offAfterEnd : ]);

                    # Advance
                    iThreadedFunction += 1;
                    oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
                #
                # Macro expansion line that have sublines and may contain multiple MC blocks.
                #
                else:
                    offLine = 0;
                    while iLine == oThreadedFunction.oMcBlock.iBeginLine:
                        oOut.write(sLine[offLine : oThreadedFunction.oMcBlock.offBeginLine]);

                        sModified = oThreadedFunction.generateInputCode().strip();
                        assert ( sModified.startswith('IEM_MC_BEGIN')
                                or (sModified.find('IEM_MC_DEFER_TO_CIMPL_') > 0 and sModified.strip().startswith('{\n'))
                                or sModified.startswith('pVCpu->iem.s.fEndTb = true')
                                or sModified.startswith('pVCpu->iem.s.fTbCurInstr = ')
                                ), 'sModified="%s"' % (sModified,);
                        oOut.write(sModified);

                        offLine = oThreadedFunction.oMcBlock.offAfterEnd;

                        # Advance
                        iThreadedFunction += 1;
                        oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);

                    # Last line segment.
                    if offLine < len(sLine):
                        oOut.write(sLine[offLine : ]);

            oOut.write("/* ****** END %s ******* */\n" % (oParser.sSrcFile,));

        return True;
3305
3306 def generateModifiedInput1(self, oOut):
3307 """
3308 Generates the combined modified input source/header file, part 1.
3309 Returns success indicator.
3310 """
3311 return self.generateModifiedInput(oOut, 1);
3312
3313 def generateModifiedInput2(self, oOut):
3314 """
3315 Generates the combined modified input source/header file, part 2.
3316 Returns success indicator.
3317 """
3318 return self.generateModifiedInput(oOut, 2);
3319
3320 def generateModifiedInput3(self, oOut):
3321 """
3322 Generates the combined modified input source/header file, part 3.
3323 Returns success indicator.
3324 """
3325 return self.generateModifiedInput(oOut, 3);
3326
3327 def generateModifiedInput4(self, oOut):
3328 """
3329 Generates the combined modified input source/header file, part 4.
3330 Returns success indicator.
3331 """
3332 return self.generateModifiedInput(oOut, 4);
3333
3334
3335 #
3336 # Main
3337 #
3338
3339 def main(self, asArgs):
3340 """
3341 C-like main function.
3342 Returns exit code.
3343 """
3344
3345 #
3346 # Parse arguments
3347 #
3348 sScriptDir = os.path.dirname(__file__);
3349 oParser = argparse.ArgumentParser(add_help = False);
3350 oParser.add_argument('asInFiles',
3351 metavar = 'input.cpp.h',
3352 nargs = '*',
3353 default = [os.path.join(sScriptDir, aoInfo[0])
3354 for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet],
3355 help = "Selection of VMMAll/IEMAllInst*.cpp.h files to use as input.");
3356 oParser.add_argument('--host-arch',
3357 metavar = 'arch',
3358 dest = 'sHostArch',
3359 action = 'store',
3360 default = None,
3361 help = 'The host architecture.');
3362
3363 oParser.add_argument('--out-thrd-funcs-hdr',
3364 metavar = 'file-thrd-funcs.h',
3365 dest = 'sOutFileThrdFuncsHdr',
3366 action = 'store',
3367 default = '-',
3368 help = 'The output header file for the threaded functions.');
3369 oParser.add_argument('--out-thrd-funcs-cpp',
3370 metavar = 'file-thrd-funcs.cpp',
3371 dest = 'sOutFileThrdFuncsCpp',
3372 action = 'store',
3373 default = '-',
3374 help = 'The output C++ file for the threaded functions.');
3375 oParser.add_argument('--out-n8ve-funcs-hdr',
3376 metavar = 'file-n8tv-funcs.h',
3377 dest = 'sOutFileN8veFuncsHdr',
3378 action = 'store',
3379 default = '-',
3380 help = 'The output header file for the native recompiler functions.');
3381 oParser.add_argument('--out-n8ve-funcs-cpp',
3382 metavar = 'file-n8tv-funcs.cpp',
3383 dest = 'sOutFileN8veFuncsCpp',
3384 action = 'store',
3385 default = '-',
3386 help = 'The output C++ file for the native recompiler functions.');
3387 oParser.add_argument('--out-n8ve-liveness-cpp',
3388 metavar = 'file-n8tv-liveness.cpp',
3389 dest = 'sOutFileN8veLivenessCpp',
3390 action = 'store',
3391 default = '-',
3392 help = 'The output C++ file for the native recompiler liveness analysis functions.');
3393 oParser.add_argument('--native',
3394 dest = 'fNativeRecompilerEnabled',
3395 action = 'store_true',
3396 default = False,
3397 help = 'Enables generating the files related to native recompilation.');
3398 oParser.add_argument('--out-mod-input1',
3399 metavar = 'file-instr.cpp.h',
3400 dest = 'sOutFileModInput1',
3401 action = 'store',
3402 default = '-',
3403 help = 'The output C++/header file for modified input instruction files part 1.');
3404 oParser.add_argument('--out-mod-input2',
3405 metavar = 'file-instr.cpp.h',
3406 dest = 'sOutFileModInput2',
3407 action = 'store',
3408 default = '-',
3409 help = 'The output C++/header file for modified input instruction files part 2.');
3410 oParser.add_argument('--out-mod-input3',
3411 metavar = 'file-instr.cpp.h',
3412 dest = 'sOutFileModInput3',
3413 action = 'store',
3414 default = '-',
3415 help = 'The output C++/header file for modified input instruction files part 3.');
3416 oParser.add_argument('--out-mod-input4',
3417 metavar = 'file-instr.cpp.h',
3418 dest = 'sOutFileModInput4',
3419 action = 'store',
3420 default = '-',
3421 help = 'The output C++/header file for modified input instruction files part 4.');
3422 oParser.add_argument('--help', '-h', '-?',
3423 action = 'help',
3424 help = 'Display help and exit.');
3425 oParser.add_argument('--version', '-V',
3426 action = 'version',
3427 version = 'r%s (IEMAllThreadedPython.py), r%s (IEMAllInstPython.py)'
3428 % (__version__.split()[1], iai.__version__.split()[1],),
3429 help = 'Displays the version/revision of the script and exit.');
3430 self.oOptions = oParser.parse_args(asArgs[1:]);
3431 print("oOptions=%s" % (self.oOptions,), file = sys.stderr);
3432
3433 #
3434 # Process the instructions specified in the IEM sources.
3435 #
3436 if self.processInputFiles(self.oOptions.sHostArch, self.oOptions.fNativeRecompilerEnabled):
3437 #
3438 # Generate the output files.
3439 #
3440 aaoOutputFiles = (
3441 ( self.oOptions.sOutFileThrdFuncsHdr, self.generateThreadedFunctionsHeader ),
3442 ( self.oOptions.sOutFileThrdFuncsCpp, self.generateThreadedFunctionsSource ),
3443 ( self.oOptions.sOutFileN8veFuncsHdr, self.generateNativeFunctionsHeader ),
3444 ( self.oOptions.sOutFileN8veFuncsCpp, self.generateNativeFunctionsSource ),
3445 ( self.oOptions.sOutFileN8veLivenessCpp, self.generateNativeLivenessSource ),
3446 ( self.oOptions.sOutFileModInput1, self.generateModifiedInput1 ),
3447 ( self.oOptions.sOutFileModInput2, self.generateModifiedInput2 ),
3448 ( self.oOptions.sOutFileModInput3, self.generateModifiedInput3 ),
3449 ( self.oOptions.sOutFileModInput4, self.generateModifiedInput4 ),
3450 );
3451 fRc = True;
3452 for sOutFile, fnGenMethod in aaoOutputFiles:
3453 if sOutFile == '-':
3454 fRc = fnGenMethod(sys.stdout) and fRc;
3455 else:
3456 try:
3457 oOut = open(sOutFile, 'w'); # pylint: disable=consider-using-with,unspecified-encoding
3458 except Exception as oXcpt:
3459 print('error! Failed open "%s" for writing: %s' % (sOutFile, oXcpt,), file = sys.stderr);
3460 return 1;
3461 fRc = fnGenMethod(oOut) and fRc;
3462 oOut.close();
3463 if fRc:
3464 return 0;
3465
3466 return 1;
3467
3468
# Script entry point: run the generator on the command line arguments and
# propagate its exit status to the shell.
if __name__ == '__main__':
    sys.exit(IEMThreadedGenerator().main(sys.argv));
3471
# Note: See TracBrowser for help on using the repository browser.
# (The remaining lines were Oracle/Trac web-page footer residue, not part of
# this source file.)