VirtualBox

source: vbox/trunk/src/recompiler/softmmu_template.h @ 36170

Last change on this file since 36170 was 36170, checked in by vboxsync, 14 years ago

rem: synced up to svn://svn.savannah.nongnu.org/qemu/trunk@6686 (repo UUID c046a42c-6fe2-441c-8c8c-71466251a162).

  • Property svn:eol-style set to native
File size: 13.2 KB
 
/*
 * Software MMU support
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */

/*
 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the language indicating
 * that LGPLv2 or any later version may be used, or where a choice of which version
 * of the LGPL is applied is otherwise unspecified.
 */

#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#define DATA_TYPE_PROMOTED uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#ifdef VBOX
#define DATA_TYPE_PROMOTED RTCCUINTREG
#endif
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#ifdef VBOX
#define DATA_TYPE_PROMOTED RTCCUINTREG
#endif
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#ifdef VBOX
#define DATA_TYPE_PROMOTED RTCCUINTREG
#endif
#else
#error unsupported data size
#endif

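/* Editorial note (not part of the original file): this header is a template
 * that the including translation unit preprocesses once per access size.  As
 * a sketch, with SHIFT defined as 2 and MMUSUFFIX as _mmu (the usual
 * data-access instantiation), the macros above select DATA_SIZE 4,
 * SUFFIX/USUFFIX l and DATA_TYPE uint32_t, so the glue() expansions below
 * produce (ignoring REGPARM and the VBOX return-type promotion):
 *
 *     uint32_t __ldl_mmu(target_ulong addr, int mmu_idx);
 *     void     __stl_mmu(target_ulong addr, uint32_t val, int mmu_idx);
 *
 * together with the static slow_ldl_mmu/slow_stl_mmu fallbacks for unaligned
 * and cross-page accesses. */
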
#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif

static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr);
static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
                                              target_ulong addr,
                                              void *retaddr)
{
    DATA_TYPE res;
    int index;
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    env->mem_io_pc = (unsigned long)retaddr;
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
        && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
#if SHIFT <= 2
    res = io_mem_read[index][SHIFT](io_mem_opaque[index], physaddr);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    res = (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr) << 32;
    res |= io_mem_read[index][2](io_mem_opaque[index], physaddr + 4);
#else
    res = io_mem_read[index][2](io_mem_opaque[index], physaddr);
    res |= (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr + 4) << 32;
#endif
#endif /* SHIFT > 2 */
#ifdef USE_KQEMU
    env->last_io_time = cpu_get_time_fast();
#endif
    return res;
}

/* handle all cases except unaligned access which span two pages */
#ifndef VBOX
DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                      int mmu_idx)
#else
/* Load helpers invoked from generated code: TCG assumes that a valid value
   occupies the whole register, while gcc after 4.3 may use only the lower
   part of the register for smaller types, so force promotion (see the
   editorial note after this function). */
DATA_TYPE_PROMOTED REGPARM
glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                    int mmu_idx)
#endif
{
    DATA_TYPE res;
    int index;
    target_ulong tlb_addr;
    target_phys_addr_t addend;
    void *retaddr;

    /* test if there is match for unaligned or IO access */
    /* XXX: could done more in memory macro in a non portable way */
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            addend = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(addend, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* slow unaligned access (it spans two pages or IO) */
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr,
                                                         mmu_idx, retaddr);
        } else {
            /* unaligned/aligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        tlb_fill(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

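/* Editorial note (not part of the original file): the effect of the VBOX-only
 * promotion above can be seen in the byte instantiation.  With MMUSUFFIX
 * defined as _mmu, the helper is declared roughly as
 *
 *     RTCCUINTREG REGPARM __ldb_mmu(target_ulong addr, int mmu_idx);
 *
 * rather than returning uint8_t directly.  The uint8_t result is implicitly
 * converted (zero-extended) to the register-sized RTCCUINTREG on return, so
 * the TCG-generated call sites may treat the whole host register as valid. */
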
/* handle all unaligned cases */
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr)
{
    DATA_TYPE res, res1, res2;
    int index, shift;
    target_phys_addr_t addend;
    target_ulong tlb_addr, addr1, addr2;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            addend = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(addend, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* slow unaligned access (it spans two pages) */
            addr1 = addr & ~(DATA_SIZE - 1);
            addr2 = addr1 + DATA_SIZE;
            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr1,
                                                          mmu_idx, retaddr);
            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr2,
                                                          mmu_idx, retaddr);
            shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
            res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
            res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
            res = (DATA_TYPE)res;
        } else {
            /* unaligned/aligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB : fill it */
        tlb_fill(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

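/* Editorial note (not part of the original file): a worked example of the
 * combining step above, assuming a little-endian target, DATA_SIZE == 4 and
 * an access at addr with (addr & 3) == 1.  Then addr1 = addr - 1,
 * addr2 = addr + 3 and shift = 8, so
 *
 *     res = (res1 >> 8) | (res2 << 24);
 *
 * res1 >> 8 drops the byte at addr - 1 and leaves bytes addr..addr+2 in the
 * low three byte positions, while res2 << 24 moves the byte at addr + 3 into
 * the top position, reassembling the 32-bit value stored at addr. */
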
#ifndef SOFTMMU_CODE_ACCESS

static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr);

static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          void *retaddr)
{
    int index;
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
        && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    env->mem_io_pc = (unsigned long)retaddr;
#if SHIFT <= 2
    io_mem_write[index][SHIFT](io_mem_opaque[index], physaddr, val);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val >> 32);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val);
#else
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val >> 32);
#endif
#endif /* SHIFT > 2 */
#ifdef USE_KQEMU
    env->last_io_time = cpu_get_time_fast();
#endif
}

void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                 DATA_TYPE val,
                                                 int mmu_idx)
{
    target_phys_addr_t addend;
    target_ulong tlb_addr;
    void *retaddr;
    int index;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            addend = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(addend, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
            glue(glue(slow_st, SUFFIX), MMUSUFFIX)(addr, val,
                                                   mmu_idx, retaddr);
        } else {
            /* aligned/unaligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, 1, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
        tlb_fill(addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

/* handles all unaligned cases */
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr)
{
    target_phys_addr_t addend;
    target_ulong tlb_addr;
    int index, i;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            addend = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(addend, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* XXX: not efficient, but simple */
            /* Note: relies on the fact that tlb_fill() does not remove the
             * previous page from the TLB cache. */
            for(i = DATA_SIZE - 1; i >= 0; i--) {
#ifdef TARGET_WORDS_BIGENDIAN
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
                                          mmu_idx, retaddr);
#else
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (i * 8),
                                          mmu_idx, retaddr);
#endif
            }
        } else {
            /* aligned/unaligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB : fill it */
        tlb_fill(addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

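/* Editorial note (not part of the original file): a worked example of the
 * byte-splitting loop above, assuming a little-endian target, DATA_SIZE == 4
 * and MMUSUFFIX == _mmu.  A store of val that crosses a page boundary is
 * broken into four byte stores, issued from the highest address down:
 *
 *     slow_stb_mmu(addr + 3, val >> 24, mmu_idx, retaddr);
 *     slow_stb_mmu(addr + 2, val >> 16, mmu_idx, retaddr);
 *     slow_stb_mmu(addr + 1, val >>  8, mmu_idx, retaddr);
 *     slow_stb_mmu(addr + 0, val,       mmu_idx, retaddr);
 *
 * Each byte store goes through the full slow path and may itself trigger
 * tlb_fill(), hence the note above about tlb_fill() not evicting the
 * previously filled page. */
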
#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#ifdef VBOX
#undef DATA_TYPE_PROMOTED
#endif
#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef ADDR_READ
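
The template defines nothing on its own; the including translation unit instantiates it once per access size, defining SHIFT before each include (the template #undefs SHIFT and the per-size macros at the end, so only SHIFT needs redefining). A minimal sketch of the usual include pattern, with MMUSUFFIX and GETPC chosen by the includer (the exact including file and GETPC definition vary by target and are assumptions here):

    #define MMUSUFFIX _mmu
    #define GETPC() (__builtin_return_address(0))

    #define SHIFT 0
    #include "softmmu_template.h"

    #define SHIFT 1
    #include "softmmu_template.h"

    #define SHIFT 2
    #include "softmmu_template.h"

    #define SHIFT 3
    #include "softmmu_template.h"

Code-fetch helpers are generated the same way with SOFTMMU_CODE_ACCESS defined and a different MMUSUFFIX (conventionally _cmmu).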