VirtualBox

source: vbox/trunk/src/recompiler/softmmu_template.h @ 36125

Last change on this file since 36125 was 36125, checked in by vboxsync, 14 years ago

recompiler: Removing traces of attempts at making the recompiler compile with the microsoft compiler. (untested)

  • Property svn:eol-style set to native
File size: 13.1 KB
 
/*
 * Software MMU support
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the language indicating
 * that LGPLv2 or any later version may be used, or where a choice of which version
 * of the LGPL is applied is otherwise unspecified.
 */

#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#define DATA_TYPE_PROMOTED uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#ifdef VBOX
#define DATA_TYPE_PROMOTED RTCCUINTREG
#endif
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#ifdef VBOX
#define DATA_TYPE_PROMOTED RTCCUINTREG
#endif
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#ifdef VBOX
#define DATA_TYPE_PROMOTED RTCCUINTREG
#endif
#else
#error unsupported data size
#endif

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif

static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr);
static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
                                              target_ulong addr,
                                              void *retaddr)
{
    DATA_TYPE res;
    int index;
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    env->mem_io_pc = (unsigned long)retaddr;
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
            && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

#if SHIFT <= 2
    res = io_mem_read[index][SHIFT](io_mem_opaque[index], physaddr);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    res = (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr) << 32;
    res |= io_mem_read[index][2](io_mem_opaque[index], physaddr + 4);
#else
    res = io_mem_read[index][2](io_mem_opaque[index], physaddr);
    res |= (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr + 4) << 32;
#endif
#endif /* SHIFT > 2 */
#ifdef USE_KQEMU
    env->last_io_time = cpu_get_time_fast();
#endif
    return res;
}

/* handle all cases except unaligned accesses which span two pages */
#ifndef VBOX
DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                      int mmu_idx)
#else
/* Load helpers invoked from generated code. TCG assumes that a valid value
   occupies the whole register, whereas gcc 4.3 and later may use only the
   lower part of the register for smaller types, so force promotion of the
   return type. */
DATA_TYPE_PROMOTED REGPARM
glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                    int mmu_idx)
#endif
{
    DATA_TYPE res;
    int index;
    target_ulong tlb_addr;
    target_phys_addr_t addend;
    void *retaddr;

    /* test if there is a match for unaligned or IO access */
    /* XXX: more could be done in the memory macro in a non-portable way */
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            addend = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(addend, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* slow unaligned access (it spans two pages or IO) */
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr,
                                                         mmu_idx, retaddr);
        } else {
            /* unaligned/aligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        tlb_fill(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

/* handle all unaligned cases */
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr)
{
    DATA_TYPE res, res1, res2;
    int index, shift;
    target_phys_addr_t addend;
    target_ulong tlb_addr, addr1, addr2;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            addend = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(addend, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* slow unaligned access (it spans two pages) */
            addr1 = addr & ~(DATA_SIZE - 1);
            addr2 = addr1 + DATA_SIZE;
            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr1,
                                                          mmu_idx, retaddr);
            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr2,
                                                          mmu_idx, retaddr);
            shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
            res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
            res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
            res = (DATA_TYPE)res;
        } else {
            /* unaligned/aligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB : fill it */
        tlb_fill(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

#ifndef SOFTMMU_CODE_ACCESS

static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr);

static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          void *retaddr)
{
    int index;
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
            && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    env->mem_io_pc = (unsigned long)retaddr;
#if SHIFT <= 2
    io_mem_write[index][SHIFT](io_mem_opaque[index], physaddr, val);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val >> 32);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val);
#else
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val >> 32);
#endif
#endif /* SHIFT > 2 */
#ifdef USE_KQEMU
    env->last_io_time = cpu_get_time_fast();
#endif
}

void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                 DATA_TYPE val,
                                                 int mmu_idx)
{
    target_phys_addr_t addend;
    target_ulong tlb_addr;
    void *retaddr;
    int index;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            addend = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(addend, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
            glue(glue(slow_st, SUFFIX), MMUSUFFIX)(addr, val,
                                                   mmu_idx, retaddr);
        } else {
            /* aligned/unaligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, 1, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
        tlb_fill(addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

/* handles all unaligned cases */
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr)
{
    target_phys_addr_t addend;
    target_ulong tlb_addr;
    int index, i;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            addend = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(addend, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* XXX: not efficient, but simple */
            /* Note: relies on the fact that tlb_fill() does not remove the
             * previous page from the TLB cache. */
            for(i = DATA_SIZE - 1; i >= 0; i--) {
#ifdef TARGET_WORDS_BIGENDIAN
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
                                          mmu_idx, retaddr);
#else
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (i * 8),
                                          mmu_idx, retaddr);
#endif
            }
        } else {
            /* aligned/unaligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB : fill it */
        tlb_fill(addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#ifdef VBOX
#undef DATA_TYPE_PROMOTED
#endif
#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef ADDR_READ
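
For context, this header is a multiple-inclusion template: the file that includes it defines SHIFT (0 to 3, selecting 1-, 2-, 4- or 8-byte accesses) and MMUSUFFIX beforehand, and each inclusion expands the macros above into one load/store helper pair for that size; SHIFT is #undef'd at the end so the next inclusion can redefine it. A minimal sketch of that pattern follows; the _mmu suffix and the surrounding defines are illustrative assumptions, not taken from this file.

/* Sketch only: typical instantiation of a softmmu template of this shape.
 * MMUSUFFIX, GETPC() and env must be provided by the including file; each
 * pass emits __ld<size><suffix> and __st<size><suffix> helpers for
 * DATA_SIZE == (1 << SHIFT). */
#define MMUSUFFIX _mmu      /* assumed suffix for data accesses */

#define SHIFT 0             /* uint8_t:  __ldb_mmu / __stb_mmu */
#include "softmmu_template.h"

#define SHIFT 1             /* uint16_t: __ldw_mmu / __stw_mmu */
#include "softmmu_template.h"

#define SHIFT 2             /* uint32_t: __ldl_mmu / __stl_mmu */
#include "softmmu_template.h"

#define SHIFT 3             /* uint64_t: __ldq_mmu / __stq_mmu */
#include "softmmu_template.h"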