VirtualBox

source: vbox/trunk/src/recompiler/exec.c@36125

Last change on this file since 36125 was 36125, checked in by vboxsync, 14 years ago

recompiler: Removing traces of attempts at making the recompiler compile with the microsoft compiler. (untested)

  • Property svn:eol-style set to native
File size: 111.7 KB
 
1/*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29
30#include "config.h"
31#ifndef VBOX
32#ifdef _WIN32
33#include <windows.h>
34#else
35#include <sys/types.h>
36#include <sys/mman.h>
37#endif
38#include <stdlib.h>
39#include <stdio.h>
40#include <stdarg.h>
41#include <string.h>
42#include <errno.h>
43#include <unistd.h>
44#include <inttypes.h>
45#else /* VBOX */
46# include <stdlib.h>
47# include <stdio.h>
48# include <iprt/alloc.h>
49# include <iprt/string.h>
50# include <iprt/param.h>
51# include <VBox/vmm/pgm.h> /* PGM_DYNAMIC_RAM_ALLOC */
52#endif /* VBOX */
53
54#include "cpu.h"
55#include "exec-all.h"
56#if defined(CONFIG_USER_ONLY)
57#include <qemu.h>
58#endif
59
60//#define DEBUG_TB_INVALIDATE
61//#define DEBUG_FLUSH
62//#define DEBUG_TLB
63//#define DEBUG_UNASSIGNED
64
65/* make various TB consistency checks */
66//#define DEBUG_TB_CHECK
67//#define DEBUG_TLB_CHECK
68
69#if !defined(CONFIG_USER_ONLY)
70/* TB consistency checks only implemented for usermode emulation. */
71#undef DEBUG_TB_CHECK
72#endif
73
74#define SMC_BITMAP_USE_THRESHOLD 10
75
76#define MMAP_AREA_START 0x00000000
77#define MMAP_AREA_END 0xa8000000
78
79#if defined(TARGET_SPARC64)
80#define TARGET_PHYS_ADDR_SPACE_BITS 41
81#elif defined(TARGET_SPARC)
82#define TARGET_PHYS_ADDR_SPACE_BITS 36
83#elif defined(TARGET_ALPHA)
84#define TARGET_PHYS_ADDR_SPACE_BITS 42
85#define TARGET_VIRT_ADDR_SPACE_BITS 42
86#elif defined(TARGET_PPC64)
87#define TARGET_PHYS_ADDR_SPACE_BITS 42
88#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
89#define TARGET_PHYS_ADDR_SPACE_BITS 42
90#elif defined(TARGET_I386) && !defined(USE_KQEMU)
91#define TARGET_PHYS_ADDR_SPACE_BITS 36
92#else
93/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
94#define TARGET_PHYS_ADDR_SPACE_BITS 32
95#endif
96
97static TranslationBlock *tbs;
98int code_gen_max_blocks;
99TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
100static int nb_tbs;
101/* any access to the tbs or the page table must use this lock */
102spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
103
104#ifndef VBOX
105#if defined(__arm__) || defined(__sparc_v9__)
106/* The prologue must be reachable with a direct jump. ARM and Sparc64
107 have limited branch ranges (possibly also PPC) so place it in a
108 section close to code segment. */
109#define code_gen_section \
110 __attribute__((__section__(".gen_code"))) \
111 __attribute__((aligned (32)))
112#else
113#define code_gen_section \
114 __attribute__((aligned (32)))
115#endif
116uint8_t code_gen_prologue[1024] code_gen_section;
117
118#else /* VBOX */
119extern uint8_t* code_gen_prologue;
120#endif /* VBOX */
121
122static uint8_t *code_gen_buffer;
123static unsigned long code_gen_buffer_size;
124/* threshold to flush the translated code buffer */
125static unsigned long code_gen_buffer_max_size;
126uint8_t *code_gen_ptr;
127
128#ifndef VBOX
129#if !defined(CONFIG_USER_ONLY)
130ram_addr_t phys_ram_size;
131int phys_ram_fd;
132uint8_t *phys_ram_base;
133uint8_t *phys_ram_dirty;
134static int in_migration;
135static ram_addr_t phys_ram_alloc_offset = 0;
136#endif
137#else /* VBOX */
138RTGCPHYS phys_ram_size;
139/* we have memory ranges (the high PC-BIOS mapping) which
140 cause some pages to fall outside the dirty map here. */
141RTGCPHYS phys_ram_dirty_size;
142#endif /* VBOX */
143#if !defined(VBOX)
144uint8_t *phys_ram_base;
145#endif
146uint8_t *phys_ram_dirty;
147
148CPUState *first_cpu;
149/* current CPU in the current thread. It is only valid inside
150 cpu_exec() */
151CPUState *cpu_single_env;
152/* 0 = Do not count executed instructions.
153 1 = Precise instruction counting.
154 2 = Adaptive rate instruction counting. */
155int use_icount = 0;
156/* Current instruction counter. While executing translated code this may
157 include some instructions that have not yet been executed. */
158int64_t qemu_icount;
159
160typedef struct PageDesc {
161 /* list of TBs intersecting this ram page */
162 TranslationBlock *first_tb;
163 /* in order to optimize self modifying code, we count the number
164 of lookups we do to a given page to use a bitmap */
165 unsigned int code_write_count;
166 uint8_t *code_bitmap;
167#if defined(CONFIG_USER_ONLY)
168 unsigned long flags;
169#endif
170} PageDesc;
171
172typedef struct PhysPageDesc {
173 /* offset in host memory of the page + io_index in the low 12 bits */
174 ram_addr_t phys_offset;
175} PhysPageDesc;
176
177#define L2_BITS 10
178#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
179/* XXX: this is a temporary hack for alpha target.
180 * In the future, this is to be replaced by a multi-level table
181 * to actually be able to handle the complete 64 bits address space.
182 */
183#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
184#else
185#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
186#endif
187#ifdef VBOX
188#define L0_BITS (TARGET_PHYS_ADDR_SPACE_BITS - 32)
189#endif
190
191#ifdef VBOX
192#define L0_SIZE (1 << L0_BITS)
193#endif
194#define L1_SIZE (1 << L1_BITS)
195#define L2_SIZE (1 << L2_BITS)
196
197static void io_mem_init(void);
198
199unsigned long qemu_real_host_page_size;
200unsigned long qemu_host_page_bits;
201unsigned long qemu_host_page_size;
202unsigned long qemu_host_page_mask;
203
204/* XXX: for system emulation, it could just be an array */
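/* Each page index is split into an L1 field (upper bits) and an L2 field
   (lower bits): an L1 slot points to a lazily allocated array of L2_SIZE
   PageDesc entries.  The VBox build adds an extra L0 level on top
   (L0_BITS = TARGET_PHYS_ADDR_SPACE_BITS - 32) so that addresses above
   4GB can be mapped as well. */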
205#ifndef VBOX
206static PageDesc *l1_map[L1_SIZE];
207static PhysPageDesc **l1_phys_map;
208#else
209static unsigned l0_map_max_used = 0;
210static PageDesc **l0_map[L0_SIZE];
211static void **l0_phys_map[L0_SIZE];
212#endif
213
214#if !defined(CONFIG_USER_ONLY)
215static void io_mem_init(void);
216
217/* io memory support */
218CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
219CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
220void *io_mem_opaque[IO_MEM_NB_ENTRIES];
221static int io_mem_nb;
222static int io_mem_watch;
223#endif
224
225#ifndef VBOX
226/* log support */
227static const char *logfilename = "/tmp/qemu.log";
228#endif /* !VBOX */
229FILE *logfile;
230int loglevel;
231#ifndef VBOX
232static int log_append = 0;
233#endif
234
235/* statistics */
236#ifndef VBOX
237static int tlb_flush_count;
238static int tb_flush_count;
239static int tb_phys_invalidate_count;
240#else /* VBOX - Resettable U32 stats, see VBoxRecompiler.c. */
241uint32_t tlb_flush_count;
242uint32_t tb_flush_count;
243uint32_t tb_phys_invalidate_count;
244#endif /* VBOX */
245
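/* Sub-page I/O support: a subpage_t dispatches reads and writes through
   handler tables indexed by the offset within the page and the access size,
   so that several I/O regions can share a single target page. */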
246#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
247typedef struct subpage_t {
248 target_phys_addr_t base;
249 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
250 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
251 void *opaque[TARGET_PAGE_SIZE][2][4];
252} subpage_t;
253
254
255#ifndef VBOX
256#ifdef _WIN32
257static void map_exec(void *addr, long size)
258{
259 DWORD old_protect;
260 VirtualProtect(addr, size,
261 PAGE_EXECUTE_READWRITE, &old_protect);
262
263}
264#else
265static void map_exec(void *addr, long size)
266{
267 unsigned long start, end, page_size;
268
269 page_size = getpagesize();
270 start = (unsigned long)addr;
271 start &= ~(page_size - 1);
272
273 end = (unsigned long)addr + size;
274 end += page_size - 1;
275 end &= ~(page_size - 1);
276
277 mprotect((void *)start, end - start,
278 PROT_READ | PROT_WRITE | PROT_EXEC);
279}
280#endif
281#else // VBOX
282static void map_exec(void *addr, long size)
283{
284 RTMemProtect(addr, size,
285 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
286}
287#endif
288
289static void page_init(void)
290{
291 /* NOTE: we can always suppose that qemu_host_page_size >=
292 TARGET_PAGE_SIZE */
293#ifdef VBOX
294 RTMemProtect(code_gen_buffer, sizeof(code_gen_buffer),
295 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
296 qemu_real_host_page_size = PAGE_SIZE;
297#else /* !VBOX */
298#ifdef _WIN32
299 {
300 SYSTEM_INFO system_info;
301 DWORD old_protect;
302
303 GetSystemInfo(&system_info);
304 qemu_real_host_page_size = system_info.dwPageSize;
305 }
306#else
307 qemu_real_host_page_size = getpagesize();
308#endif
309#endif /* !VBOX */
310
311 if (qemu_host_page_size == 0)
312 qemu_host_page_size = qemu_real_host_page_size;
313 if (qemu_host_page_size < TARGET_PAGE_SIZE)
314 qemu_host_page_size = TARGET_PAGE_SIZE;
315 qemu_host_page_bits = 0;
316#ifndef VBOX
317 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
318#else
319 while ((1 << qemu_host_page_bits) < (int)qemu_host_page_size)
320#endif
321 qemu_host_page_bits++;
322 qemu_host_page_mask = ~(qemu_host_page_size - 1);
323#ifndef VBOX
324 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
325 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
326#endif
327#ifdef VBOX
328 /* We use other means to set reserved bit on our pages */
329#else
330#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
331 {
332 long long startaddr, endaddr;
333 FILE *f;
334 int n;
335
336 mmap_lock();
337 last_brk = (unsigned long)sbrk(0);
338 f = fopen("/proc/self/maps", "r");
339 if (f) {
340 do {
341 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
342 if (n == 2) {
343 startaddr = MIN(startaddr,
344 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
345 endaddr = MIN(endaddr,
346 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
347 page_set_flags(startaddr & TARGET_PAGE_MASK,
348 TARGET_PAGE_ALIGN(endaddr),
349 PAGE_RESERVED);
350 }
351 } while (!feof(f));
352 fclose(f);
353 }
354 mmap_unlock();
355 }
356#endif
357#endif
358}
359
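/* Return the address of the L1 slot holding the PageDesc array for the given
   page index, or NULL if the index is outside the supported address space.
   In the VBox build the L1 table itself hangs off l0_map and is allocated on
   demand. */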
360static inline PageDesc **page_l1_map(target_ulong index)
361{
362#ifndef VBOX
363#if TARGET_LONG_BITS > 32
364 /* Host memory outside guest VM. For 32-bit targets we have already
365 excluded high addresses. */
366 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
367 return NULL;
368#endif
369 return &l1_map[index >> L2_BITS];
370#else /* VBOX */
371 PageDesc **l1_map;
372 AssertMsgReturn(index < (target_ulong)L2_SIZE * L1_SIZE * L0_SIZE,
373 ("index=%RGp >= %RGp; L1_SIZE=%#x L2_SIZE=%#x L0_SIZE=%#x\n",
374 (RTGCPHYS)index, (RTGCPHYS)L2_SIZE * L1_SIZE, L1_SIZE, L2_SIZE, L0_SIZE),
375 NULL);
376 l1_map = l0_map[index >> (L1_BITS + L2_BITS)];
377 if (RT_UNLIKELY(!l1_map))
378 {
379 unsigned i0 = index >> (L1_BITS + L2_BITS);
380 l0_map[i0] = l1_map = qemu_mallocz(sizeof(PageDesc *) * L1_SIZE);
381 if (RT_UNLIKELY(!l1_map))
382 return NULL;
383 if (i0 >= l0_map_max_used)
384 l0_map_max_used = i0 + 1;
385 }
386 return &l1_map[(index >> L2_BITS) & (L1_SIZE - 1)];
387#endif /* VBOX */
388}
389
390static inline PageDesc *page_find_alloc(target_ulong index)
391{
392 PageDesc **lp, *p;
393 lp = page_l1_map(index);
394 if (!lp)
395 return NULL;
396
397 p = *lp;
398 if (!p) {
399 /* allocate if not found */
400#if defined(CONFIG_USER_ONLY)
401 unsigned long addr;
402 size_t len = sizeof(PageDesc) * L2_SIZE;
403 /* Don't use qemu_malloc because it may recurse. */
404 p = mmap(0, len, PROT_READ | PROT_WRITE,
405 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
406 *lp = p;
407 addr = h2g(p);
408 if (addr == (target_ulong)addr) {
409 page_set_flags(addr & TARGET_PAGE_MASK,
410 TARGET_PAGE_ALIGN(addr + len),
411 PAGE_RESERVED);
412 }
413#else
414 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
415 *lp = p;
416#endif
417 }
418 return p + (index & (L2_SIZE - 1));
419}
420
421static inline PageDesc *page_find(target_ulong index)
422{
423 PageDesc **lp, *p;
424 lp = page_l1_map(index);
425 if (!lp)
426 return NULL;
427
428 p = *lp;
429 if (!p)
430 return 0;
431 return p + (index & (L2_SIZE - 1));
432}
433
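/* Look up the PhysPageDesc for a physical page index.  When 'alloc' is
   non-zero, missing intermediate and leaf tables are allocated on the fly and
   fresh leaf entries are initialized to IO_MEM_UNASSIGNED. */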
434static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
435{
436 void **lp, **p;
437 PhysPageDesc *pd;
438
439#ifndef VBOX
440 p = (void **)l1_phys_map;
441#if TARGET_PHYS_ADDR_SPACE_BITS > 32
442
443#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
444#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
445#endif
446 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
447 p = *lp;
448 if (!p) {
449 /* allocate if not found */
450 if (!alloc)
451 return NULL;
452 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
453 memset(p, 0, sizeof(void *) * L1_SIZE);
454 *lp = p;
455 }
456#endif
457#else /* VBOX */
458 /* level 0 lookup and lazy allocation of level 1 map. */
459 if (RT_UNLIKELY(index >= (target_phys_addr_t)L2_SIZE * L1_SIZE * L0_SIZE))
460 return NULL;
461 p = l0_phys_map[index >> (L1_BITS + L2_BITS)];
462 if (RT_UNLIKELY(!p)) {
463 if (!alloc)
464 return NULL;
465 p = qemu_vmalloc(sizeof(void **) * L1_SIZE);
466 memset(p, 0, sizeof(void **) * L1_SIZE);
467 l0_phys_map[index >> (L1_BITS + L2_BITS)] = p;
468 }
469
470 /* level 1 lookup and lazy allocation of level 2 map. */
471#endif /* VBOX */
472 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
473 pd = *lp;
474 if (!pd) {
475 int i;
476 /* allocate if not found */
477 if (!alloc)
478 return NULL;
479 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
480 *lp = pd;
481 for (i = 0; i < L2_SIZE; i++)
482 pd[i].phys_offset = IO_MEM_UNASSIGNED;
483 }
484 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
485}
486
487static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
488{
489 return phys_page_find_alloc(index, 0);
490}
491
492#if !defined(CONFIG_USER_ONLY)
493static void tlb_protect_code(ram_addr_t ram_addr);
494static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
495 target_ulong vaddr);
496#define mmap_lock() do { } while(0)
497#define mmap_unlock() do { } while(0)
498#endif
499
500#ifdef VBOX
501/*
502 * We don't need such a huge codegen buffer size, as we execute most of the
503 * code in raw or hwacc mode
504 */
505#define DEFAULT_CODE_GEN_BUFFER_SIZE (8 * 1024 * 1024)
506#else
507#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
508#endif
509
510#if defined(CONFIG_USER_ONLY)
511/* Currently it is not recommended to allocate big chunks of data in
512 user mode. It will change when a dedicated libc will be used */
513#define USE_STATIC_CODE_GEN_BUFFER
514#endif
515
516/* VBox allocates codegen buffer dynamically */
517#ifndef VBOX
518#ifdef USE_STATIC_CODE_GEN_BUFFER
519static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
520#endif
521#endif
522
523static void code_gen_alloc(unsigned long tb_size)
524{
525#ifdef USE_STATIC_CODE_GEN_BUFFER
526 code_gen_buffer = static_code_gen_buffer;
527 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
528 map_exec(code_gen_buffer, code_gen_buffer_size);
529#else
530#ifdef VBOX
531 /* We cannot use phys_ram_size here, as it's 0 now,
532 * it only gets initialized once the RAM registration callback
533 * (REMR3NotifyPhysRamRegister()) is called.
534 */
535 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
536#else
537 code_gen_buffer_size = tb_size;
538 if (code_gen_buffer_size == 0) {
539#if defined(CONFIG_USER_ONLY)
540 /* in user mode, phys_ram_size is not meaningful */
541 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
542#else
543 /* XXX: needs adjustments */
544 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
545#endif
546
547 }
548 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
549 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
550#endif /* VBOX */
551
552 /* The code gen buffer location may have constraints depending on
553 the host cpu and OS */
554#ifdef VBOX
555 code_gen_buffer = RTMemExecAlloc(code_gen_buffer_size);
556
557 if (!code_gen_buffer) {
558 LogRel(("REM: failed allocate codegen buffer %lld\n",
559 code_gen_buffer_size));
560 return;
561 }
562#else //!VBOX
563#if defined(__linux__)
564 {
565 int flags;
566 void *start = NULL;
567
568 flags = MAP_PRIVATE | MAP_ANONYMOUS;
569#if defined(__x86_64__)
570 flags |= MAP_32BIT;
571 /* Cannot map more than that */
572 if (code_gen_buffer_size > (800 * 1024 * 1024))
573 code_gen_buffer_size = (800 * 1024 * 1024);
574#elif defined(__sparc_v9__)
575 // Map the buffer below 2G, so we can use direct calls and branches
576 flags |= MAP_FIXED;
577 start = (void *) 0x60000000UL;
578 if (code_gen_buffer_size > (512 * 1024 * 1024))
579 code_gen_buffer_size = (512 * 1024 * 1024);
580#endif
581 code_gen_buffer = mmap(start, code_gen_buffer_size,
582 PROT_WRITE | PROT_READ | PROT_EXEC,
583 flags, -1, 0);
584 if (code_gen_buffer == MAP_FAILED) {
585 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
586 exit(1);
587 }
588 }
589#elif defined(__FreeBSD__)
590 {
591 int flags;
592 void *addr = NULL;
593 flags = MAP_PRIVATE | MAP_ANONYMOUS;
594#if defined(__x86_64__)
595 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
596 * 0x40000000 is free */
597 flags |= MAP_FIXED;
598 addr = (void *)0x40000000;
599 /* Cannot map more than that */
600 if (code_gen_buffer_size > (800 * 1024 * 1024))
601 code_gen_buffer_size = (800 * 1024 * 1024);
602#endif
603 code_gen_buffer = mmap(addr, code_gen_buffer_size,
604 PROT_WRITE | PROT_READ | PROT_EXEC,
605 flags, -1, 0);
606 if (code_gen_buffer == MAP_FAILED) {
607 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
608 exit(1);
609 }
610 }
611#else
612 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
613 if (!code_gen_buffer) {
614 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
615 exit(1);
616 }
617 map_exec(code_gen_buffer, code_gen_buffer_size);
618#endif
619 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
620#endif /* !VBOX */
621#endif /* !USE_STATIC_CODE_GEN_BUFFER */
622#ifndef VBOX
623 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
624#else
625 map_exec(code_gen_prologue, _1K);
626#endif
627
628 code_gen_buffer_max_size = code_gen_buffer_size -
629 code_gen_max_block_size();
630 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
631 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
632}
633
634/* Must be called before using the QEMU cpus. 'tb_size' is the size
635 (in bytes) allocated to the translation buffer. Zero means default
636 size. */
637void cpu_exec_init_all(unsigned long tb_size)
638{
639 cpu_gen_init();
640 code_gen_alloc(tb_size);
641 code_gen_ptr = code_gen_buffer;
642 page_init();
643#if !defined(CONFIG_USER_ONLY)
644 io_mem_init();
645#endif
646}
647
648#ifndef VBOX
649#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
650
651#define CPU_COMMON_SAVE_VERSION 1
652
653static void cpu_common_save(QEMUFile *f, void *opaque)
654{
655 CPUState *env = opaque;
656
657 qemu_put_be32s(f, &env->halted);
658 qemu_put_be32s(f, &env->interrupt_request);
659}
660
661static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
662{
663 CPUState *env = opaque;
664
665 if (version_id != CPU_COMMON_SAVE_VERSION)
666 return -EINVAL;
667
668 qemu_get_be32s(f, &env->halted);
669 qemu_get_be32s(f, &env->interrupt_request);
670 tlb_flush(env, 1);
671
672 return 0;
673}
674#endif
675#endif //!VBOX
676
677void cpu_exec_init(CPUState *env)
678{
679 CPUState **penv;
680 int cpu_index;
681
682 env->next_cpu = NULL;
683 penv = &first_cpu;
684 cpu_index = 0;
685 while (*penv != NULL) {
686 penv = (CPUState **)&(*penv)->next_cpu;
687 cpu_index++;
688 }
689 env->cpu_index = cpu_index;
690 env->nb_watchpoints = 0;
691 *penv = env;
692#ifndef VBOX
693#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
694 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
695 cpu_common_save, cpu_common_load, env);
696 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
697 cpu_save, cpu_load, env);
698#endif
699#endif // !VBOX
700}
701
702static inline void invalidate_page_bitmap(PageDesc *p)
703{
704 if (p->code_bitmap) {
705 qemu_free(p->code_bitmap);
706 p->code_bitmap = NULL;
707 }
708 p->code_write_count = 0;
709}
710
711/* set to NULL all the 'first_tb' fields in all PageDescs */
712static void page_flush_tb(void)
713{
714 int i, j;
715 PageDesc *p;
716#ifdef VBOX
717 int k;
718#endif
719
720#ifdef VBOX
721 k = l0_map_max_used;
722 while (k-- > 0) {
723 PageDesc **l1_map = l0_map[k];
724 if (l1_map) {
725#endif
726 for(i = 0; i < L1_SIZE; i++) {
727 p = l1_map[i];
728 if (p) {
729 for(j = 0; j < L2_SIZE; j++) {
730 p->first_tb = NULL;
731 invalidate_page_bitmap(p);
732 p++;
733 }
734 }
735 }
736#ifdef VBOX
737 }
738 }
739#endif
740}
741
742/* flush all the translation blocks */
743/* XXX: tb_flush is currently not thread safe */
744void tb_flush(CPUState *env1)
745{
746 CPUState *env;
747#ifdef VBOX
748 STAM_PROFILE_START(&env1->StatTbFlush, a);
749#endif
750#if defined(DEBUG_FLUSH)
751 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
752 (unsigned long)(code_gen_ptr - code_gen_buffer),
753 nb_tbs, nb_tbs > 0 ?
754 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
755#endif
756 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
757 cpu_abort(env1, "Internal error: code buffer overflow\n");
758
759 nb_tbs = 0;
760
761 for(env = first_cpu; env != NULL; env = env->next_cpu) {
762 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
763 }
764
765 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
766 page_flush_tb();
767
768 code_gen_ptr = code_gen_buffer;
769 /* XXX: flush processor icache at this point if cache flush is
770 expensive */
771 tb_flush_count++;
772#ifdef VBOX
773 STAM_PROFILE_STOP(&env1->StatTbFlush, a);
774#endif
775}
776
777#ifdef DEBUG_TB_CHECK
778static void tb_invalidate_check(target_ulong address)
779{
780 TranslationBlock *tb;
781 int i;
782 address &= TARGET_PAGE_MASK;
783 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
784 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
785 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
786 address >= tb->pc + tb->size)) {
787 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
788 address, (long)tb->pc, tb->size);
789 }
790 }
791 }
792}
793
794/* verify that all the pages have correct rights for code */
795static void tb_page_check(void)
796{
797 TranslationBlock *tb;
798 int i, flags1, flags2;
799
800 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
801 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
802 flags1 = page_get_flags(tb->pc);
803 flags2 = page_get_flags(tb->pc + tb->size - 1);
804 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
805 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
806 (long)tb->pc, tb->size, flags1, flags2);
807 }
808 }
809 }
810}
811
812static void tb_jmp_check(TranslationBlock *tb)
813{
814 TranslationBlock *tb1;
815 unsigned int n1;
816
817 /* suppress any remaining jumps to this TB */
818 tb1 = tb->jmp_first;
819 for(;;) {
820 n1 = (long)tb1 & 3;
821 tb1 = (TranslationBlock *)((long)tb1 & ~3);
822 if (n1 == 2)
823 break;
824 tb1 = tb1->jmp_next[n1];
825 }
826 /* check end of list */
827 if (tb1 != tb) {
828 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
829 }
830}
831#endif // DEBUG_TB_CHECK
832
833/* invalidate one TB */
834static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
835 int next_offset)
836{
837 TranslationBlock *tb1;
838 for(;;) {
839 tb1 = *ptb;
840 if (tb1 == tb) {
841 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
842 break;
843 }
844 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
845 }
846}
847
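/* The TB lists below keep a 2-bit tag in the low bits of each pointer:
   0/1 identify which of the pointing TB's two page slots (or jump slots)
   the link belongs to, while 2 marks the head of the circular jump list
   (see jmp_first). */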
848static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
849{
850 TranslationBlock *tb1;
851 unsigned int n1;
852
853 for(;;) {
854 tb1 = *ptb;
855 n1 = (long)tb1 & 3;
856 tb1 = (TranslationBlock *)((long)tb1 & ~3);
857 if (tb1 == tb) {
858 *ptb = tb1->page_next[n1];
859 break;
860 }
861 ptb = &tb1->page_next[n1];
862 }
863}
864
865static inline void tb_jmp_remove(TranslationBlock *tb, int n)
866{
867 TranslationBlock *tb1, **ptb;
868 unsigned int n1;
869
870 ptb = &tb->jmp_next[n];
871 tb1 = *ptb;
872 if (tb1) {
873 /* find tb(n) in circular list */
874 for(;;) {
875 tb1 = *ptb;
876 n1 = (long)tb1 & 3;
877 tb1 = (TranslationBlock *)((long)tb1 & ~3);
878 if (n1 == n && tb1 == tb)
879 break;
880 if (n1 == 2) {
881 ptb = &tb1->jmp_first;
882 } else {
883 ptb = &tb1->jmp_next[n1];
884 }
885 }
886 /* now we can suppress tb(n) from the list */
887 *ptb = tb->jmp_next[n];
888
889 tb->jmp_next[n] = NULL;
890 }
891}
892
893/* reset the jump entry 'n' of a TB so that it is not chained to
894 another TB */
895static inline void tb_reset_jump(TranslationBlock *tb, int n)
896{
897 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
898}
899
900void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
901{
902 CPUState *env;
903 PageDesc *p;
904 unsigned int h, n1;
905 target_phys_addr_t phys_pc;
906 TranslationBlock *tb1, *tb2;
907
908 /* remove the TB from the hash list */
909 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
910 h = tb_phys_hash_func(phys_pc);
911 tb_remove(&tb_phys_hash[h], tb,
912 offsetof(TranslationBlock, phys_hash_next));
913
914 /* remove the TB from the page list */
915 if (tb->page_addr[0] != page_addr) {
916 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
917 tb_page_remove(&p->first_tb, tb);
918 invalidate_page_bitmap(p);
919 }
920 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
921 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
922 tb_page_remove(&p->first_tb, tb);
923 invalidate_page_bitmap(p);
924 }
925
926 tb_invalidated_flag = 1;
927
928 /* remove the TB from the hash list */
929 h = tb_jmp_cache_hash_func(tb->pc);
930 for(env = first_cpu; env != NULL; env = env->next_cpu) {
931 if (env->tb_jmp_cache[h] == tb)
932 env->tb_jmp_cache[h] = NULL;
933 }
934
935 /* suppress this TB from the two jump lists */
936 tb_jmp_remove(tb, 0);
937 tb_jmp_remove(tb, 1);
938
939 /* suppress any remaining jumps to this TB */
940 tb1 = tb->jmp_first;
941 for(;;) {
942 n1 = (long)tb1 & 3;
943 if (n1 == 2)
944 break;
945 tb1 = (TranslationBlock *)((long)tb1 & ~3);
946 tb2 = tb1->jmp_next[n1];
947 tb_reset_jump(tb1, n1);
948 tb1->jmp_next[n1] = NULL;
949 tb1 = tb2;
950 }
951 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
952
953 tb_phys_invalidate_count++;
954}
955
956
957#ifdef VBOX
958void tb_invalidate_virt(CPUState *env, uint32_t eip)
959{
960# if 1
961 tb_flush(env);
962# else
963 uint8_t *cs_base, *pc;
964 unsigned int flags, h, phys_pc;
965 TranslationBlock *tb, **ptb;
966
967 flags = env->hflags;
968 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
969 cs_base = env->segs[R_CS].base;
970 pc = cs_base + eip;
971
972 tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base,
973 flags);
974
975 if(tb)
976 {
977# ifdef DEBUG
978 printf("invalidating TB (%08X) at %08X\n", tb, eip);
979# endif
980 tb_invalidate(tb);
981 //Note: this will leak TBs, but the whole cache will be flushed
982 // when it happens too often
983 tb->pc = 0;
984 tb->cs_base = 0;
985 tb->flags = 0;
986 }
987# endif
988}
989
990# ifdef VBOX_STRICT
991/**
992 * Gets the page offset.
993 */
994unsigned long get_phys_page_offset(target_ulong addr)
995{
996 PhysPageDesc *p = phys_page_find(addr >> TARGET_PAGE_BITS);
997 return p ? p->phys_offset : 0;
998}
999# endif /* VBOX_STRICT */
1000#endif /* VBOX */
1001
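/* Set the bits [start, start + len) in the bitmap 'tab', taking care of the
   partial bytes at both ends of the range. */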
1002static inline void set_bits(uint8_t *tab, int start, int len)
1003{
1004 int end, mask, end1;
1005
1006 end = start + len;
1007 tab += start >> 3;
1008 mask = 0xff << (start & 7);
1009 if ((start & ~7) == (end & ~7)) {
1010 if (start < end) {
1011 mask &= ~(0xff << (end & 7));
1012 *tab |= mask;
1013 }
1014 } else {
1015 *tab++ |= mask;
1016 start = (start + 8) & ~7;
1017 end1 = end & ~7;
1018 while (start < end1) {
1019 *tab++ = 0xff;
1020 start += 8;
1021 }
1022 if (start < end) {
1023 mask = ~(0xff << (end & 7));
1024 *tab |= mask;
1025 }
1026 }
1027}
1028
1029static void build_page_bitmap(PageDesc *p)
1030{
1031 int n, tb_start, tb_end;
1032 TranslationBlock *tb;
1033
1034 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
1035 if (!p->code_bitmap)
1036 return;
1037 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
1038
1039 tb = p->first_tb;
1040 while (tb != NULL) {
1041 n = (long)tb & 3;
1042 tb = (TranslationBlock *)((long)tb & ~3);
1043 /* NOTE: this is subtle as a TB may span two physical pages */
1044 if (n == 0) {
1045 /* NOTE: tb_end may be after the end of the page, but
1046 it is not a problem */
1047 tb_start = tb->pc & ~TARGET_PAGE_MASK;
1048 tb_end = tb_start + tb->size;
1049 if (tb_end > TARGET_PAGE_SIZE)
1050 tb_end = TARGET_PAGE_SIZE;
1051 } else {
1052 tb_start = 0;
1053 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1054 }
1055 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
1056 tb = tb->page_next[n];
1057 }
1058}
1059
1060TranslationBlock *tb_gen_code(CPUState *env,
1061 target_ulong pc, target_ulong cs_base,
1062 int flags, int cflags)
1063{
1064 TranslationBlock *tb;
1065 uint8_t *tc_ptr;
1066 target_ulong phys_pc, phys_page2, virt_page2;
1067 int code_gen_size;
1068
1069 phys_pc = get_phys_addr_code(env, pc);
1070 tb = tb_alloc(pc);
1071 if (!tb) {
1072 /* flush must be done */
1073 tb_flush(env);
1074 /* cannot fail at this point */
1075 tb = tb_alloc(pc);
1076 /* Don't forget to invalidate previous TB info. */
1077 tb_invalidated_flag = 1;
1078 }
1079 tc_ptr = code_gen_ptr;
1080 tb->tc_ptr = tc_ptr;
1081 tb->cs_base = cs_base;
1082 tb->flags = flags;
1083 tb->cflags = cflags;
1084 cpu_gen_code(env, tb, &code_gen_size);
1085 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
1086
1087 /* check next page if needed */
1088 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1089 phys_page2 = -1;
1090 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1091 phys_page2 = get_phys_addr_code(env, virt_page2);
1092 }
1093 tb_link_phys(tb, phys_pc, phys_page2);
1094 return tb;
1095}
1096
1097/* invalidate all TBs which intersect with the target physical page
1098 starting in range [start;end[. NOTE: start and end must refer to
1099 the same physical page. 'is_cpu_write_access' should be true if called
1100 from a real cpu write access: the virtual CPU will exit the current
1101 TB if code is modified inside this TB. */
1102void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
1103 int is_cpu_write_access)
1104{
1105 int n, current_tb_modified, current_tb_not_found, current_flags;
1106 CPUState *env = cpu_single_env;
1107 PageDesc *p;
1108 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
1109 target_ulong tb_start, tb_end;
1110 target_ulong current_pc, current_cs_base;
1111
1112 p = page_find(start >> TARGET_PAGE_BITS);
1113 if (!p)
1114 return;
1115 if (!p->code_bitmap &&
1116 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1117 is_cpu_write_access) {
1118 /* build code bitmap */
1119 build_page_bitmap(p);
1120 }
1121
1122 /* we remove all the TBs in the range [start, end[ */
1123 /* XXX: see if in some cases it could be faster to invalidate all the code */
1124 current_tb_not_found = is_cpu_write_access;
1125 current_tb_modified = 0;
1126 current_tb = NULL; /* avoid warning */
1127 current_pc = 0; /* avoid warning */
1128 current_cs_base = 0; /* avoid warning */
1129 current_flags = 0; /* avoid warning */
1130 tb = p->first_tb;
1131 while (tb != NULL) {
1132 n = (long)tb & 3;
1133 tb = (TranslationBlock *)((long)tb & ~3);
1134 tb_next = tb->page_next[n];
1135 /* NOTE: this is subtle as a TB may span two physical pages */
1136 if (n == 0) {
1137 /* NOTE: tb_end may be after the end of the page, but
1138 it is not a problem */
1139 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1140 tb_end = tb_start + tb->size;
1141 } else {
1142 tb_start = tb->page_addr[1];
1143 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1144 }
1145 if (!(tb_end <= start || tb_start >= end)) {
1146#ifdef TARGET_HAS_PRECISE_SMC
1147 if (current_tb_not_found) {
1148 current_tb_not_found = 0;
1149 current_tb = NULL;
1150 if (env->mem_io_pc) {
1151 /* now we have a real cpu fault */
1152 current_tb = tb_find_pc(env->mem_io_pc);
1153 }
1154 }
1155 if (current_tb == tb &&
1156 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1157 /* If we are modifying the current TB, we must stop
1158 its execution. We could be more precise by checking
1159 that the modification is after the current PC, but it
1160 would require a specialized function to partially
1161 restore the CPU state */
1162
1163 current_tb_modified = 1;
1164 cpu_restore_state(current_tb, env,
1165 env->mem_io_pc, NULL);
1166#if defined(TARGET_I386)
1167 current_flags = env->hflags;
1168 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1169 current_cs_base = (target_ulong)env->segs[R_CS].base;
1170 current_pc = current_cs_base + env->eip;
1171#else
1172#error unsupported CPU
1173#endif
1174 }
1175#endif /* TARGET_HAS_PRECISE_SMC */
1176 /* we need to do that to handle the case where a signal
1177 occurs while doing tb_phys_invalidate() */
1178 saved_tb = NULL;
1179 if (env) {
1180 saved_tb = env->current_tb;
1181 env->current_tb = NULL;
1182 }
1183 tb_phys_invalidate(tb, -1);
1184 if (env) {
1185 env->current_tb = saved_tb;
1186 if (env->interrupt_request && env->current_tb)
1187 cpu_interrupt(env, env->interrupt_request);
1188 }
1189 }
1190 tb = tb_next;
1191 }
1192#if !defined(CONFIG_USER_ONLY)
1193 /* if no code remaining, no need to continue to use slow writes */
1194 if (!p->first_tb) {
1195 invalidate_page_bitmap(p);
1196 if (is_cpu_write_access) {
1197 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1198 }
1199 }
1200#endif
1201#ifdef TARGET_HAS_PRECISE_SMC
1202 if (current_tb_modified) {
1203 /* we generate a block containing just the instruction
1204 modifying the memory. It will ensure that it cannot modify
1205 itself */
1206 env->current_tb = NULL;
1207 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1208 cpu_resume_from_signal(env, NULL);
1209 }
1210#endif
1211}
1212
1213
1214/* len must be <= 8 and start must be a multiple of len */
1215static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1216{
1217 PageDesc *p;
1218 int offset, b;
1219#if 0
1220 if (1) {
1221 if (loglevel) {
1222 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1223 cpu_single_env->mem_io_vaddr, len,
1224 cpu_single_env->eip,
1225 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1226 }
1227 }
1228#endif
1229 p = page_find(start >> TARGET_PAGE_BITS);
1230 if (!p)
1231 return;
1232 if (p->code_bitmap) {
1233 offset = start & ~TARGET_PAGE_MASK;
1234 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1235 if (b & ((1 << len) - 1))
1236 goto do_invalidate;
1237 } else {
1238 do_invalidate:
1239 tb_invalidate_phys_page_range(start, start + len, 1);
1240 }
1241}
1242
1243
1244#if !defined(CONFIG_SOFTMMU)
1245static void tb_invalidate_phys_page(target_phys_addr_t addr,
1246 unsigned long pc, void *puc)
1247{
1248 int n, current_flags, current_tb_modified;
1249 target_ulong current_pc, current_cs_base;
1250 PageDesc *p;
1251 TranslationBlock *tb, *current_tb;
1252#ifdef TARGET_HAS_PRECISE_SMC
1253 CPUState *env = cpu_single_env;
1254#endif
1255
1256 addr &= TARGET_PAGE_MASK;
1257 p = page_find(addr >> TARGET_PAGE_BITS);
1258 if (!p)
1259 return;
1260 tb = p->first_tb;
1261 current_tb_modified = 0;
1262 current_tb = NULL;
1263 current_pc = 0; /* avoid warning */
1264 current_cs_base = 0; /* avoid warning */
1265 current_flags = 0; /* avoid warning */
1266#ifdef TARGET_HAS_PRECISE_SMC
1267 if (tb && pc != 0) {
1268 current_tb = tb_find_pc(pc);
1269 }
1270#endif
1271 while (tb != NULL) {
1272 n = (long)tb & 3;
1273 tb = (TranslationBlock *)((long)tb & ~3);
1274#ifdef TARGET_HAS_PRECISE_SMC
1275 if (current_tb == tb &&
1276 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1277 /* If we are modifying the current TB, we must stop
1278 its execution. We could be more precise by checking
1279 that the modification is after the current PC, but it
1280 would require a specialized function to partially
1281 restore the CPU state */
1282
1283 current_tb_modified = 1;
1284 cpu_restore_state(current_tb, env, pc, puc);
1285#if defined(TARGET_I386)
1286 current_flags = env->hflags;
1287 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1288 current_cs_base = (target_ulong)env->segs[R_CS].base;
1289 current_pc = current_cs_base + env->eip;
1290#else
1291#error unsupported CPU
1292#endif
1293 }
1294#endif /* TARGET_HAS_PRECISE_SMC */
1295 tb_phys_invalidate(tb, addr);
1296 tb = tb->page_next[n];
1297 }
1298 p->first_tb = NULL;
1299#ifdef TARGET_HAS_PRECISE_SMC
1300 if (current_tb_modified) {
1301 /* we generate a block containing just the instruction
1302 modifying the memory. It will ensure that it cannot modify
1303 itself */
1304 env->current_tb = NULL;
1305 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1306 cpu_resume_from_signal(env, puc);
1307 }
1308#endif
1309}
1310#endif
1311
1312/* add the tb in the target page and protect it if necessary */
1313static inline void tb_alloc_page(TranslationBlock *tb,
1314 unsigned int n, target_ulong page_addr)
1315{
1316 PageDesc *p;
1317 TranslationBlock *last_first_tb;
1318
1319 tb->page_addr[n] = page_addr;
1320 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1321 tb->page_next[n] = p->first_tb;
1322 last_first_tb = p->first_tb;
1323 p->first_tb = (TranslationBlock *)((long)tb | n);
1324 invalidate_page_bitmap(p);
1325
1326#if defined(TARGET_HAS_SMC) || 1
1327
1328#if defined(CONFIG_USER_ONLY)
1329 if (p->flags & PAGE_WRITE) {
1330 target_ulong addr;
1331 PageDesc *p2;
1332 int prot;
1333
1334 /* force the host page as non writable (writes will have a
1335 page fault + mprotect overhead) */
1336 page_addr &= qemu_host_page_mask;
1337 prot = 0;
1338 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1339 addr += TARGET_PAGE_SIZE) {
1340
1341 p2 = page_find (addr >> TARGET_PAGE_BITS);
1342 if (!p2)
1343 continue;
1344 prot |= p2->flags;
1345 p2->flags &= ~PAGE_WRITE;
1346 page_get_flags(addr);
1347 }
1348 mprotect(g2h(page_addr), qemu_host_page_size,
1349 (prot & PAGE_BITS) & ~PAGE_WRITE);
1350#ifdef DEBUG_TB_INVALIDATE
1351 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1352 page_addr);
1353#endif
1354 }
1355#else
1356 /* if some code is already present, then the pages are already
1357 protected. So we handle the case where only the first TB is
1358 allocated in a physical page */
1359 if (!last_first_tb) {
1360 tlb_protect_code(page_addr);
1361 }
1362#endif
1363
1364#endif /* TARGET_HAS_SMC */
1365}
1366
1367/* Allocate a new translation block. Flush the translation buffer if
1368 too many translation blocks or too much generated code. */
1369TranslationBlock *tb_alloc(target_ulong pc)
1370{
1371 TranslationBlock *tb;
1372
1373 if (nb_tbs >= code_gen_max_blocks ||
1374#ifndef VBOX
1375 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1376#else
1377 (code_gen_ptr - code_gen_buffer) >= (int)code_gen_buffer_max_size)
1378#endif
1379 return NULL;
1380 tb = &tbs[nb_tbs++];
1381 tb->pc = pc;
1382 tb->cflags = 0;
1383 return tb;
1384}
1385
1386void tb_free(TranslationBlock *tb)
1387{
1388 /* In practice this is mostly used for single use temporary TB
1389 Ignore the hard cases and just back up if this TB happens to
1390 be the last one generated. */
1391 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1392 code_gen_ptr = tb->tc_ptr;
1393 nb_tbs--;
1394 }
1395}
1396
1397/* add a new TB and link it to the physical page tables. phys_page2 is
1398 (-1) to indicate that only one page contains the TB. */
1399void tb_link_phys(TranslationBlock *tb,
1400 target_ulong phys_pc, target_ulong phys_page2)
1401{
1402 unsigned int h;
1403 TranslationBlock **ptb;
1404
1405 /* Grab the mmap lock to stop another thread invalidating this TB
1406 before we are done. */
1407 mmap_lock();
1408 /* add in the physical hash table */
1409 h = tb_phys_hash_func(phys_pc);
1410 ptb = &tb_phys_hash[h];
1411 tb->phys_hash_next = *ptb;
1412 *ptb = tb;
1413
1414 /* add in the page list */
1415 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1416 if (phys_page2 != -1)
1417 tb_alloc_page(tb, 1, phys_page2);
1418 else
1419 tb->page_addr[1] = -1;
1420
1421 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1422 tb->jmp_next[0] = NULL;
1423 tb->jmp_next[1] = NULL;
1424
1425 /* init original jump addresses */
1426 if (tb->tb_next_offset[0] != 0xffff)
1427 tb_reset_jump(tb, 0);
1428 if (tb->tb_next_offset[1] != 0xffff)
1429 tb_reset_jump(tb, 1);
1430
1431#ifdef DEBUG_TB_CHECK
1432 tb_page_check();
1433#endif
1434 mmap_unlock();
1435}
1436
1437/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1438 tb[1].tc_ptr. Return NULL if not found */
1439TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1440{
1441 int m_min, m_max, m;
1442 unsigned long v;
1443 TranslationBlock *tb;
1444
1445 if (nb_tbs <= 0)
1446 return NULL;
1447 if (tc_ptr < (unsigned long)code_gen_buffer ||
1448 tc_ptr >= (unsigned long)code_gen_ptr)
1449 return NULL;
1450 /* binary search (cf Knuth) */
1451 m_min = 0;
1452 m_max = nb_tbs - 1;
1453 while (m_min <= m_max) {
1454 m = (m_min + m_max) >> 1;
1455 tb = &tbs[m];
1456 v = (unsigned long)tb->tc_ptr;
1457 if (v == tc_ptr)
1458 return tb;
1459 else if (tc_ptr < v) {
1460 m_max = m - 1;
1461 } else {
1462 m_min = m + 1;
1463 }
1464 }
1465 return &tbs[m_max];
1466}
1467
1468static void tb_reset_jump_recursive(TranslationBlock *tb);
1469
1470static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1471{
1472 TranslationBlock *tb1, *tb_next, **ptb;
1473 unsigned int n1;
1474
1475 tb1 = tb->jmp_next[n];
1476 if (tb1 != NULL) {
1477 /* find head of list */
1478 for(;;) {
1479 n1 = (long)tb1 & 3;
1480 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1481 if (n1 == 2)
1482 break;
1483 tb1 = tb1->jmp_next[n1];
1484 }
1485 /* we are now sure that tb jumps to tb1 */
1486 tb_next = tb1;
1487
1488 /* remove tb from the jmp_first list */
1489 ptb = &tb_next->jmp_first;
1490 for(;;) {
1491 tb1 = *ptb;
1492 n1 = (long)tb1 & 3;
1493 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1494 if (n1 == n && tb1 == tb)
1495 break;
1496 ptb = &tb1->jmp_next[n1];
1497 }
1498 *ptb = tb->jmp_next[n];
1499 tb->jmp_next[n] = NULL;
1500
1501 /* suppress the jump to next tb in generated code */
1502 tb_reset_jump(tb, n);
1503
1504 /* suppress jumps in the tb on which we could have jumped */
1505 tb_reset_jump_recursive(tb_next);
1506 }
1507}
1508
1509static void tb_reset_jump_recursive(TranslationBlock *tb)
1510{
1511 tb_reset_jump_recursive2(tb, 0);
1512 tb_reset_jump_recursive2(tb, 1);
1513}
1514
1515#if defined(TARGET_HAS_ICE)
1516static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1517{
1518 target_ulong addr, pd;
1519 ram_addr_t ram_addr;
1520 PhysPageDesc *p;
1521
1522 addr = cpu_get_phys_page_debug(env, pc);
1523 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1524 if (!p) {
1525 pd = IO_MEM_UNASSIGNED;
1526 } else {
1527 pd = p->phys_offset;
1528 }
1529 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1530 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1531}
1532#endif
1533
1534/* Add a watchpoint. */
1535int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
1536{
1537 int i;
1538
1539 for (i = 0; i < env->nb_watchpoints; i++) {
1540 if (addr == env->watchpoint[i].vaddr)
1541 return 0;
1542 }
1543 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1544 return -1;
1545
1546 i = env->nb_watchpoints++;
1547 env->watchpoint[i].vaddr = addr;
1548 env->watchpoint[i].type = type;
1549 tlb_flush_page(env, addr);
1550 /* FIXME: This flush is needed because of the hack to make memory ops
1551 terminate the TB. It can be removed once the proper IO trap and
1552 re-execute bits are in. */
1553 tb_flush(env);
1554 return i;
1555}
1556
1557/* Remove a watchpoint. */
1558int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1559{
1560 int i;
1561
1562 for (i = 0; i < env->nb_watchpoints; i++) {
1563 if (addr == env->watchpoint[i].vaddr) {
1564 env->nb_watchpoints--;
1565 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1566 tlb_flush_page(env, addr);
1567 return 0;
1568 }
1569 }
1570 return -1;
1571}
1572
1573/* Remove all watchpoints. */
1574void cpu_watchpoint_remove_all(CPUState *env) {
1575 int i;
1576
1577 for (i = 0; i < env->nb_watchpoints; i++) {
1578 tlb_flush_page(env, env->watchpoint[i].vaddr);
1579 }
1580 env->nb_watchpoints = 0;
1581}
1582
1583/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1584 breakpoint is reached */
1585int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1586{
1587#if defined(TARGET_HAS_ICE)
1588 int i;
1589
1590 for(i = 0; i < env->nb_breakpoints; i++) {
1591 if (env->breakpoints[i] == pc)
1592 return 0;
1593 }
1594
1595 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1596 return -1;
1597 env->breakpoints[env->nb_breakpoints++] = pc;
1598
1599 breakpoint_invalidate(env, pc);
1600 return 0;
1601#else
1602 return -1;
1603#endif
1604}
1605
1606/* remove all breakpoints */
1607void cpu_breakpoint_remove_all(CPUState *env) {
1608#if defined(TARGET_HAS_ICE)
1609 int i;
1610 for(i = 0; i < env->nb_breakpoints; i++) {
1611 breakpoint_invalidate(env, env->breakpoints[i]);
1612 }
1613 env->nb_breakpoints = 0;
1614#endif
1615}
1616
1617/* remove a breakpoint */
1618int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1619{
1620#if defined(TARGET_HAS_ICE)
1621 int i;
1622 for(i = 0; i < env->nb_breakpoints; i++) {
1623 if (env->breakpoints[i] == pc)
1624 goto found;
1625 }
1626 return -1;
1627 found:
1628 env->nb_breakpoints--;
1629 if (i < env->nb_breakpoints)
1630 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1631
1632 breakpoint_invalidate(env, pc);
1633 return 0;
1634#else
1635 return -1;
1636#endif
1637}
1638
1639/* enable or disable single step mode. EXCP_DEBUG is returned by the
1640 CPU loop after each instruction */
1641void cpu_single_step(CPUState *env, int enabled)
1642{
1643#if defined(TARGET_HAS_ICE)
1644 if (env->singlestep_enabled != enabled) {
1645 env->singlestep_enabled = enabled;
1646 /* must flush all the translated code to avoid inconsistencies */
1647 /* XXX: only flush what is necessary */
1648 tb_flush(env);
1649 }
1650#endif
1651}
1652
1653#ifndef VBOX
1654/* enable or disable low levels log */
1655void cpu_set_log(int log_flags)
1656{
1657 loglevel = log_flags;
1658 if (loglevel && !logfile) {
1659 logfile = fopen(logfilename, "w");
1660 if (!logfile) {
1661 perror(logfilename);
1662 _exit(1);
1663 }
1664#if !defined(CONFIG_SOFTMMU)
1665 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1666 {
1667 static uint8_t logfile_buf[4096];
1668 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1669 }
1670#else
1671 setvbuf(logfile, NULL, _IOLBF, 0);
1672#endif
1673 }
1674}
1675
1676void cpu_set_log_filename(const char *filename)
1677{
1678 logfilename = strdup(filename);
1679}
1680#endif /* !VBOX */
1681
1682/* mask must never be zero, except for A20 change call */
1683void cpu_interrupt(CPUState *env, int mask)
1684{
1685#if !defined(USE_NPTL)
1686 TranslationBlock *tb;
1687 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1688#endif
1689 int old_mask;
1690
1691 old_mask = env->interrupt_request;
1692#ifdef VBOX
1693 VM_ASSERT_EMT(env->pVM);
1694 ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, mask);
1695#else /* !VBOX */
1696 /* FIXME: This is probably not threadsafe. A different thread could
1697 be in the middle of a read-modify-write operation. */
1698 env->interrupt_request |= mask;
1699#endif /* !VBOX */
1700#if defined(USE_NPTL)
1701 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1702 problem and hope the cpu will stop of its own accord. For userspace
1703 emulation this often isn't actually as bad as it sounds. Often
1704 signals are used primarily to interrupt blocking syscalls. */
1705#else
1706 if (use_icount) {
1707 env->icount_decr.u16.high = 0xffff;
1708#ifndef CONFIG_USER_ONLY
1709 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1710 an async event happened and we need to process it. */
1711 if (!can_do_io(env)
1712 && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1713 cpu_abort(env, "Raised interrupt while not in I/O function");
1714 }
1715#endif
1716 } else {
1717 tb = env->current_tb;
1718 /* if the cpu is currently executing code, we must unlink it and
1719 all the potentially executing TB */
1720 if (tb && !testandset(&interrupt_lock)) {
1721 env->current_tb = NULL;
1722 tb_reset_jump_recursive(tb);
1723 resetlock(&interrupt_lock);
1724 }
1725 }
1726#endif
1727}
1728
1729void cpu_reset_interrupt(CPUState *env, int mask)
1730{
1731#ifdef VBOX
1732 /*
1733 * Note: the current implementation can be executed by another thread without problems; make sure this remains true
1734 * for future changes!
1735 */
1736 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~mask);
1737#else /* !VBOX */
1738 env->interrupt_request &= ~mask;
1739#endif /* !VBOX */
1740}
1741
1742#ifndef VBOX
1743CPULogItem cpu_log_items[] = {
1744 { CPU_LOG_TB_OUT_ASM, "out_asm",
1745 "show generated host assembly code for each compiled TB" },
1746 { CPU_LOG_TB_IN_ASM, "in_asm",
1747 "show target assembly code for each compiled TB" },
1748 { CPU_LOG_TB_OP, "op",
1749 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1750#ifdef TARGET_I386
1751 { CPU_LOG_TB_OP_OPT, "op_opt",
1752 "show micro ops after optimization for each compiled TB" },
1753#endif
1754 { CPU_LOG_INT, "int",
1755 "show interrupts/exceptions in short format" },
1756 { CPU_LOG_EXEC, "exec",
1757 "show trace before each executed TB (lots of logs)" },
1758 { CPU_LOG_TB_CPU, "cpu",
1759 "show CPU state before bloc translation" },
1760#ifdef TARGET_I386
1761 { CPU_LOG_PCALL, "pcall",
1762 "show protected mode far calls/returns/exceptions" },
1763#endif
1764#ifdef DEBUG_IOPORT
1765 { CPU_LOG_IOPORT, "ioport",
1766 "show all i/o ports accesses" },
1767#endif
1768 { 0, NULL, NULL },
1769};
1770
1771static int cmp1(const char *s1, int n, const char *s2)
1772{
1773 if (strlen(s2) != n)
1774 return 0;
1775 return memcmp(s1, s2, n) == 0;
1776}
1777
1778/* takes a comma separated list of log masks. Return 0 if error. */
1779int cpu_str_to_log_mask(const char *str)
1780{
1781 CPULogItem *item;
1782 int mask;
1783 const char *p, *p1;
1784
1785 p = str;
1786 mask = 0;
1787 for(;;) {
1788 p1 = strchr(p, ',');
1789 if (!p1)
1790 p1 = p + strlen(p);
1791 if(cmp1(p,p1-p,"all")) {
1792 for(item = cpu_log_items; item->mask != 0; item++) {
1793 mask |= item->mask;
1794 }
1795 } else {
1796 for(item = cpu_log_items; item->mask != 0; item++) {
1797 if (cmp1(p, p1 - p, item->name))
1798 goto found;
1799 }
1800 return 0;
1801 }
1802 found:
1803 mask |= item->mask;
1804 if (*p1 != ',')
1805 break;
1806 p = p1 + 1;
1807 }
1808 return mask;
1809}
1810#endif /* !VBOX */
1811
1812#ifndef VBOX /* VBOX: we have our own routine. */
1813void cpu_abort(CPUState *env, const char *fmt, ...)
1814{
1815 va_list ap;
1816
1817 va_start(ap, fmt);
1818 fprintf(stderr, "qemu: fatal: ");
1819 vfprintf(stderr, fmt, ap);
1820 fprintf(stderr, "\n");
1821#ifdef TARGET_I386
1822 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1823#else
1824 cpu_dump_state(env, stderr, fprintf, 0);
1825#endif
1826 va_end(ap);
1827 abort();
1828}
1829#endif /* !VBOX */
1830
1831#ifndef VBOX
1832CPUState *cpu_copy(CPUState *env)
1833{
1834 CPUState *new_env = cpu_init(env->cpu_model_str);
1835 /* preserve chaining and index */
1836 CPUState *next_cpu = new_env->next_cpu;
1837 int cpu_index = new_env->cpu_index;
1838 memcpy(new_env, env, sizeof(CPUState));
1839 new_env->next_cpu = next_cpu;
1840 new_env->cpu_index = cpu_index;
1841 return new_env;
1842}
1843#endif
1844
1845#if !defined(CONFIG_USER_ONLY)
1846
1847static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1848{
1849 unsigned int i;
1850
1851 /* Discard jump cache entries for any tb which might potentially
1852 overlap the flushed page. */
1853 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1854 memset (&env->tb_jmp_cache[i], 0,
1855 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1856
1857 i = tb_jmp_cache_hash_page(addr);
1858 memset (&env->tb_jmp_cache[i], 0,
1859 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1860
1861#ifdef VBOX
1862 /* inform raw mode about TLB page flush */
1863 remR3FlushPage(env, addr);
1864#endif /* VBOX */
1865}
1866
1867static CPUTLBEntry s_cputlb_empty_entry = {
1868 .addr_read = -1,
1869 .addr_write = -1,
1870 .addr_code = -1,
1871 .addend = -1,
1872};
1873
1874/* NOTE: if flush_global is true, also flush global entries (not
1875 implemented yet) */
1876void tlb_flush(CPUState *env, int flush_global)
1877{
1878 int i;
1879
1880#if defined(DEBUG_TLB)
1881 printf("tlb_flush:\n");
1882#endif
1883 /* must reset current TB so that interrupts cannot modify the
1884 links while we are modifying them */
1885 env->current_tb = NULL;
1886
1887 for(i = 0; i < CPU_TLB_SIZE; i++) {
1888 int mmu_idx;
1889 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1890 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1891 }
1892 }
1893
1894 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1895
1896#ifdef VBOX
1897 /* inform raw mode about TLB flush */
1898 remR3FlushTLB(env, flush_global);
1899#endif
1900#ifdef USE_KQEMU
1901 if (env->kqemu_enabled) {
1902 kqemu_flush(env, flush_global);
1903 }
1904#endif
1905 tlb_flush_count++;
1906}
1907
1908static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1909{
1910 if (addr == (tlb_entry->addr_read &
1911 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1912 addr == (tlb_entry->addr_write &
1913 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1914 addr == (tlb_entry->addr_code &
1915 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1916 tlb_entry->addr_read = -1;
1917 tlb_entry->addr_write = -1;
1918 tlb_entry->addr_code = -1;
1919 }
1920}
1921
1922void tlb_flush_page(CPUState *env, target_ulong addr)
1923{
1924 int i;
1925
1926#if defined(DEBUG_TLB)
1927 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1928#endif
1929 /* must reset current TB so that interrupts cannot modify the
1930 links while we are modifying them */
1931 env->current_tb = NULL;
1932
1933 addr &= TARGET_PAGE_MASK;
1934 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1935 tlb_flush_entry(&env->tlb_table[0][i], addr);
1936 tlb_flush_entry(&env->tlb_table[1][i], addr);
1937#if (NB_MMU_MODES >= 3)
1938 tlb_flush_entry(&env->tlb_table[2][i], addr);
1939#if (NB_MMU_MODES == 4)
1940 tlb_flush_entry(&env->tlb_table[3][i], addr);
1941#endif
1942#endif
1943
1944 tlb_flush_jmp_cache(env, addr);
1945
1946#ifdef USE_KQEMU
1947 if (env->kqemu_enabled) {
1948 kqemu_flush_page(env, addr);
1949 }
1950#endif
1951}
1952
1953/* update the TLBs so that writes to code in the virtual page 'addr'
1954 can be detected */
1955static void tlb_protect_code(ram_addr_t ram_addr)
1956{
1957 cpu_physical_memory_reset_dirty(ram_addr,
1958 ram_addr + TARGET_PAGE_SIZE,
1959 CODE_DIRTY_FLAG);
1960#if defined(VBOX) && defined(REM_MONITOR_CODE_PAGES)
1961 /** @todo Retest this? This function has changed... */
1962 remR3ProtectCode(cpu_single_env, ram_addr);
1963#endif
1964}
1965
1966/* update the TLB so that writes in physical page 'phys_addr' are no longer
1967 tested for self modifying code */
1968static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1969 target_ulong vaddr)
1970{
1971#ifdef VBOX
1972 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
1973#endif
1974 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1975}
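/* Illustrative sketch (not part of the original file): the self-modifying-code
   machinery above boils down to one dirty byte per guest page.
   tlb_protect_code() clears CODE_DIRTY_FLAG so that every store to the page is
   routed through the notdirty handlers, which invalidate the translated blocks
   and then let tlb_unprotect_code_phys() set the flag again.  A minimal model
   of that byte: */
#if 0
static uint8_t dirty_byte_model = 0xff;     /* all flags set: no TBs on this page */

static void model_protect_code(void)        /* ~ tlb_protect_code() */
{
    dirty_byte_model &= ~CODE_DIRTY_FLAG;   /* stores must now be intercepted */
}

static void model_store_detected(void)      /* ~ notdirty_mem_write*() path */
{
    /* tb_invalidate_phys_page_fast() would run here */
    dirty_byte_model |= CODE_DIRTY_FLAG;    /* ~ tlb_unprotect_code_phys() */
}
#endif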
1976
1977static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1978 unsigned long start, unsigned long length)
1979{
1980 unsigned long addr;
1981
1982#ifdef VBOX
1983 if (start & 3)
1984 return;
1985#endif
1986 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1987 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1988 if ((addr - start) < length) {
1989 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1990 }
1991 }
1992}
1993
1994void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1995 int dirty_flags)
1996{
1997 CPUState *env;
1998 unsigned long length, start1;
1999 int i, mask, len;
2000 uint8_t *p;
2001
2002 start &= TARGET_PAGE_MASK;
2003 end = TARGET_PAGE_ALIGN(end);
2004
2005 length = end - start;
2006 if (length == 0)
2007 return;
2008 len = length >> TARGET_PAGE_BITS;
2009#ifdef USE_KQEMU
2010 /* XXX: should not depend on cpu context */
2011 env = first_cpu;
2012 if (env->kqemu_enabled) {
2013 ram_addr_t addr;
2014 addr = start;
2015 for(i = 0; i < len; i++) {
2016 kqemu_set_notdirty(env, addr);
2017 addr += TARGET_PAGE_SIZE;
2018 }
2019 }
2020#endif
2021 mask = ~dirty_flags;
2022 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
2023#ifdef VBOX
2024 if (RT_LIKELY((start >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2025#endif
2026 for(i = 0; i < len; i++)
2027 p[i] &= mask;
2028
2029 /* we modify the TLB cache so that the dirty bit will be set again
2030 when accessing the range */
2031#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2032 start1 = start;
2033#elif !defined(VBOX)
2034 start1 = start + (unsigned long)phys_ram_base;
2035#else
2036 start1 = (unsigned long)remR3TlbGCPhys2Ptr(first_cpu, start, 1 /*fWritable*/); /** @todo page replacing (sharing or read only) may cause trouble, fix interface/whatever. */
2037#endif
2038 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2039 for(i = 0; i < CPU_TLB_SIZE; i++)
2040 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
2041 for(i = 0; i < CPU_TLB_SIZE; i++)
2042 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
2043#if (NB_MMU_MODES >= 3)
2044 for(i = 0; i < CPU_TLB_SIZE; i++)
2045 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
2046#if (NB_MMU_MODES == 4)
2047 for(i = 0; i < CPU_TLB_SIZE; i++)
2048 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
2049#endif
2050#endif
2051 }
2052}
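/* Illustrative sketch (not part of the original file): a typical consumer of
   the dirty bitmap, such as a display device model, scans its pages with
   cpu_physical_memory_get_dirty() and then clears only its own flag through
   cpu_physical_memory_reset_dirty().  VGA_DIRTY_FLAG and the framebuffer range
   follow the usual QEMU pattern and are assumptions here. */
#if 0
static void refresh_framebuffer(ram_addr_t fb_start, ram_addr_t fb_size)
{
    ram_addr_t addr;
    for (addr = fb_start; addr < fb_start + fb_size; addr += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_get_dirty(addr, VGA_DIRTY_FLAG)) {
            /* redraw the scanlines backed by this page ... */
        }
    }
    /* clear only the VGA flag; CODE_DIRTY_FLAG bookkeeping stays intact */
    cpu_physical_memory_reset_dirty(fb_start, fb_start + fb_size, VGA_DIRTY_FLAG);
}
#endif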
2053
2054#ifndef VBOX
2055int cpu_physical_memory_set_dirty_tracking(int enable)
2056{
2057 in_migration = enable;
2058 return 0;
2059}
2060
2061int cpu_physical_memory_get_dirty_tracking(void)
2062{
2063 return in_migration;
2064}
2065#endif
2066
2067#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2068DECLINLINE(void) tlb_update_dirty(CPUTLBEntry *tlb_entry, target_phys_addr_t phys_addend)
2069#else
2070static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2071#endif
2072{
2073 ram_addr_t ram_addr;
2074
2075 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2076 /* RAM case */
2077#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2078 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2079#elif !defined(VBOX)
2080 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
2081 tlb_entry->addend - (unsigned long)phys_ram_base;
2082#else
2083 Assert(phys_addend != -1);
2084 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + phys_addend;
2085#endif
2086 if (!cpu_physical_memory_is_dirty(ram_addr)) {
2087 tlb_entry->addr_write |= TLB_NOTDIRTY;
2088 }
2089 }
2090}
2091
2092/* update the TLB according to the current state of the dirty bits */
2093void cpu_tlb_update_dirty(CPUState *env)
2094{
2095 int i;
2096#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2097 for(i = 0; i < CPU_TLB_SIZE; i++)
2098 tlb_update_dirty(&env->tlb_table[0][i], env->phys_addends[0][i]);
2099 for(i = 0; i < CPU_TLB_SIZE; i++)
2100 tlb_update_dirty(&env->tlb_table[1][i], env->phys_addends[1][i]);
2101#if (NB_MMU_MODES >= 3)
2102 for(i = 0; i < CPU_TLB_SIZE; i++)
2103 tlb_update_dirty(&env->tlb_table[2][i], env->phys_addends[2][i]);
2104#if (NB_MMU_MODES == 4)
2105 for(i = 0; i < CPU_TLB_SIZE; i++)
2106 tlb_update_dirty(&env->tlb_table[3][i], env->phys_addends[3][i]);
2107#endif
2108#endif
2109#else /* VBOX */
2110 for(i = 0; i < CPU_TLB_SIZE; i++)
2111 tlb_update_dirty(&env->tlb_table[0][i]);
2112 for(i = 0; i < CPU_TLB_SIZE; i++)
2113 tlb_update_dirty(&env->tlb_table[1][i]);
2114#if (NB_MMU_MODES >= 3)
2115 for(i = 0; i < CPU_TLB_SIZE; i++)
2116 tlb_update_dirty(&env->tlb_table[2][i]);
2117#if (NB_MMU_MODES == 4)
2118 for(i = 0; i < CPU_TLB_SIZE; i++)
2119 tlb_update_dirty(&env->tlb_table[3][i]);
2120#endif
2121#endif
2122#endif /* VBOX */
2123}
2124
2125static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2126{
2127 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2128 tlb_entry->addr_write = vaddr;
2129}
2130
2131
2132/* update the TLB corresponding to virtual page vaddr and phys addr
2133 addr so that it is no longer dirty */
2134static inline void tlb_set_dirty(CPUState *env,
2135 unsigned long addr, target_ulong vaddr)
2136{
2137 int i;
2138
2139 addr &= TARGET_PAGE_MASK;
2140 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2141 tlb_set_dirty1(&env->tlb_table[0][i], addr);
2142 tlb_set_dirty1(&env->tlb_table[1][i], addr);
2143#if (NB_MMU_MODES >= 3)
2144 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
2145#if (NB_MMU_MODES == 4)
2146 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
2147#endif
2148#endif
2149}
2150
2151/* add a new TLB entry. At most one entry for a given virtual address
2152 is permitted. Return 0 if OK or 2 if the page could not be mapped
2153 (can only happen in non SOFTMMU mode for I/O pages or pages
2154 conflicting with the host address space). */
2155int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2156 target_phys_addr_t paddr, int prot,
2157 int mmu_idx, int is_softmmu)
2158{
2159 PhysPageDesc *p;
2160 unsigned long pd;
2161 unsigned int index;
2162 target_ulong address;
2163 target_ulong code_address;
2164 target_phys_addr_t addend;
2165 int ret;
2166 CPUTLBEntry *te;
2167 int i;
2168 target_phys_addr_t iotlb;
2169#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2170 int read_mods = 0, write_mods = 0, code_mods = 0;
2171#endif
2172
2173 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2174 if (!p) {
2175 pd = IO_MEM_UNASSIGNED;
2176 } else {
2177 pd = p->phys_offset;
2178 }
2179#if defined(DEBUG_TLB)
2180 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2181 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2182#endif
2183
2184 ret = 0;
2185 address = vaddr;
2186 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2187 /* IO memory case (romd handled later) */
2188 address |= TLB_MMIO;
2189 }
2190#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2191 addend = pd & TARGET_PAGE_MASK;
2192#elif !defined(VBOX)
2193 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
2194#else
2195 /** @todo this is racing the phys_page_find call above since it may register
2196 * a new chunk of memory... */
2197 addend = (unsigned long)remR3TlbGCPhys2Ptr(env,
2198 pd & TARGET_PAGE_MASK,
2199 !!(prot & PAGE_WRITE));
2200#endif
2201
2202 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2203 /* Normal RAM. */
2204 iotlb = pd & TARGET_PAGE_MASK;
2205 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2206 iotlb |= IO_MEM_NOTDIRTY;
2207 else
2208 iotlb |= IO_MEM_ROM;
2209 } else {
 2210 /* IO handlers are currently passed a physical address.
2211 It would be nice to pass an offset from the base address
2212 of that region. This would avoid having to special case RAM,
2213 and avoid full address decoding in every device.
2214 We can't use the high bits of pd for this because
2215 IO_MEM_ROMD uses these as a ram address. */
2216 iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
2217 }
2218
2219 code_address = address;
2220
2221#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2222 if (addend & 0x3)
2223 {
2224 if (addend & 0x2)
2225 {
2226 /* catch write */
2227 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
2228 write_mods |= TLB_MMIO;
2229 }
2230 else if (addend & 0x1)
2231 {
2232 /* catch all */
2233 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
2234 {
2235 read_mods |= TLB_MMIO;
2236 write_mods |= TLB_MMIO;
2237 code_mods |= TLB_MMIO;
2238 }
2239 }
2240 if ((iotlb & ~TARGET_PAGE_MASK) == 0)
2241 iotlb = env->pVM->rem.s.iHandlerMemType + paddr;
2242 addend &= ~(target_ulong)0x3;
2243 }
2244#endif
2245
2246 /* Make accesses to pages with watchpoints go via the
2247 watchpoint trap routines. */
2248 for (i = 0; i < env->nb_watchpoints; i++) {
2249 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
2250 iotlb = io_mem_watch + paddr;
2251 /* TODO: The memory case can be optimized by not trapping
2252 reads of pages with a write breakpoint. */
2253 address |= TLB_MMIO;
2254 }
2255 }
2256
2257 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2258 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2259 te = &env->tlb_table[mmu_idx][index];
2260 te->addend = addend - vaddr;
2261 if (prot & PAGE_READ) {
2262 te->addr_read = address;
2263 } else {
2264 te->addr_read = -1;
2265 }
2266
2267 if (prot & PAGE_EXEC) {
2268 te->addr_code = code_address;
2269 } else {
2270 te->addr_code = -1;
2271 }
2272 if (prot & PAGE_WRITE) {
2273 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2274 (pd & IO_MEM_ROMD)) {
2275 /* Write access calls the I/O callback. */
2276 te->addr_write = address | TLB_MMIO;
2277 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2278 !cpu_physical_memory_is_dirty(pd)) {
2279 te->addr_write = address | TLB_NOTDIRTY;
2280 } else {
2281 te->addr_write = address;
2282 }
2283 } else {
2284 te->addr_write = -1;
2285 }
2286
2287#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2288 if (prot & PAGE_READ)
2289 te->addr_read |= read_mods;
2290 if (prot & PAGE_EXEC)
2291 te->addr_code |= code_mods;
2292 if (prot & PAGE_WRITE)
2293 te->addr_write |= write_mods;
2294
2295 env->phys_addends[mmu_idx][index] = (pd & TARGET_PAGE_MASK)- vaddr;
2296#endif
2297
2298#ifdef VBOX
2299 /* inform raw mode about TLB page change */
2300 remR3FlushPage(env, vaddr);
2301#endif
2302 return ret;
2303}
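/* Illustrative sketch (not part of the original file): tlb_set_page_exec() is
   normally called from a target's MMU fault handler once the guest page walk
   has produced a physical address and protection bits.  guest_page_walk() is a
   hypothetical helper standing in for the target-specific walk code. */
#if 0
static int refill_tlb_after_fault(CPUState *env, target_ulong vaddr,
                                  int rw, int mmu_idx)
{
    target_phys_addr_t paddr;
    int prot;

    if (guest_page_walk(env, vaddr, rw, &paddr, &prot) < 0)
        return -1; /* caller raises a guest page fault instead */

    return tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                             paddr & TARGET_PAGE_MASK,
                             prot, mmu_idx, 1 /* is_softmmu */);
}
#endif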
2304#if 0
2305/* called from signal handler: invalidate the code and unprotect the
2306 page. Return TRUE if the fault was successfully handled. */
2307int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
2308{
2309#if !defined(CONFIG_SOFTMMU)
2310 VirtPageDesc *vp;
2311
2312#if defined(DEBUG_TLB)
2313 printf("page_unprotect: addr=0x%08x\n", addr);
2314#endif
2315 addr &= TARGET_PAGE_MASK;
2316
2317 /* if it is not mapped, no need to worry here */
2318 if (addr >= MMAP_AREA_END)
2319 return 0;
2320 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
2321 if (!vp)
2322 return 0;
2323 /* NOTE: in this case, validate_tag is _not_ tested as it
2324 validates only the code TLB */
2325 if (vp->valid_tag != virt_valid_tag)
2326 return 0;
2327 if (!(vp->prot & PAGE_WRITE))
2328 return 0;
2329#if defined(DEBUG_TLB)
2330 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
2331 addr, vp->phys_addr, vp->prot);
2332#endif
2333 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
2334 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
2335 (unsigned long)addr, vp->prot);
2336 /* set the dirty bit */
2337 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
2338 /* flush the code inside */
2339 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
2340 return 1;
2341#elif defined(VBOX)
2342 addr &= TARGET_PAGE_MASK;
2343
2344 /* if it is not mapped, no need to worry here */
2345 if (addr >= MMAP_AREA_END)
2346 return 0;
2347 return 1;
2348#else
2349 return 0;
2350#endif
2351}
2352#endif /* 0 */
2353
2354#else
2355
2356void tlb_flush(CPUState *env, int flush_global)
2357{
2358}
2359
2360void tlb_flush_page(CPUState *env, target_ulong addr)
2361{
2362}
2363
2364int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2365 target_phys_addr_t paddr, int prot,
2366 int mmu_idx, int is_softmmu)
2367{
2368 return 0;
2369}
2370
2371#ifndef VBOX
2372/* dump memory mappings */
2373void page_dump(FILE *f)
2374{
2375 unsigned long start, end;
2376 int i, j, prot, prot1;
2377 PageDesc *p;
2378
2379 fprintf(f, "%-8s %-8s %-8s %s\n",
2380 "start", "end", "size", "prot");
2381 start = -1;
2382 end = -1;
2383 prot = 0;
2384 for(i = 0; i <= L1_SIZE; i++) {
2385 if (i < L1_SIZE)
2386 p = l1_map[i];
2387 else
2388 p = NULL;
2389 for(j = 0;j < L2_SIZE; j++) {
2390 if (!p)
2391 prot1 = 0;
2392 else
2393 prot1 = p[j].flags;
2394 if (prot1 != prot) {
2395 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2396 if (start != -1) {
2397 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2398 start, end, end - start,
2399 prot & PAGE_READ ? 'r' : '-',
2400 prot & PAGE_WRITE ? 'w' : '-',
2401 prot & PAGE_EXEC ? 'x' : '-');
2402 }
2403 if (prot1 != 0)
2404 start = end;
2405 else
2406 start = -1;
2407 prot = prot1;
2408 }
2409 if (!p)
2410 break;
2411 }
2412 }
2413}
2414#endif /* !VBOX */
2415
2416int page_get_flags(target_ulong address)
2417{
2418 PageDesc *p;
2419
2420 p = page_find(address >> TARGET_PAGE_BITS);
2421 if (!p)
2422 return 0;
2423 return p->flags;
2424}
2425
2426/* modify the flags of a page and invalidate the code if
 2427 necessary. The flag PAGE_WRITE_ORG is set automatically
2428 depending on PAGE_WRITE */
2429void page_set_flags(target_ulong start, target_ulong end, int flags)
2430{
2431 PageDesc *p;
2432 target_ulong addr;
2433
2434 start = start & TARGET_PAGE_MASK;
2435 end = TARGET_PAGE_ALIGN(end);
2436 if (flags & PAGE_WRITE)
2437 flags |= PAGE_WRITE_ORG;
2438#ifdef VBOX
2439 AssertMsgFailed(("We shouldn't be here, and if we should, we must have an env to do the proper locking!\n"));
2440#endif
2441 spin_lock(&tb_lock);
2442 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2443 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2444 /* if the write protection is set, then we invalidate the code
2445 inside */
2446 if (!(p->flags & PAGE_WRITE) &&
2447 (flags & PAGE_WRITE) &&
2448 p->first_tb) {
2449 tb_invalidate_phys_page(addr, 0, NULL);
2450 }
2451 p->flags = flags;
2452 }
2453 spin_unlock(&tb_lock);
2454}
2455
2456int page_check_range(target_ulong start, target_ulong len, int flags)
2457{
2458 PageDesc *p;
2459 target_ulong end;
2460 target_ulong addr;
2461
 2462 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2463 start = start & TARGET_PAGE_MASK;
2464
2465 if( end < start )
2466 /* we've wrapped around */
2467 return -1;
2468 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2469 p = page_find(addr >> TARGET_PAGE_BITS);
2470 if( !p )
2471 return -1;
2472 if( !(p->flags & PAGE_VALID) )
2473 return -1;
2474
2475 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2476 return -1;
2477 if (flags & PAGE_WRITE) {
2478 if (!(p->flags & PAGE_WRITE_ORG))
2479 return -1;
2480 /* unprotect the page if it was put read-only because it
2481 contains translated code */
2482 if (!(p->flags & PAGE_WRITE)) {
2483 if (!page_unprotect(addr, 0, NULL))
2484 return -1;
2485 }
2486 return 0;
2487 }
2488 }
2489 return 0;
2490}
2491
2492/* called from signal handler: invalidate the code and unprotect the
2493 page. Return TRUE if the fault was successfully handled. */
2494int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2495{
2496 unsigned int page_index, prot, pindex;
2497 PageDesc *p, *p1;
2498 target_ulong host_start, host_end, addr;
2499
2500 /* Technically this isn't safe inside a signal handler. However we
2501 know this only ever happens in a synchronous SEGV handler, so in
2502 practice it seems to be ok. */
2503 mmap_lock();
2504
2505 host_start = address & qemu_host_page_mask;
2506 page_index = host_start >> TARGET_PAGE_BITS;
2507 p1 = page_find(page_index);
2508 if (!p1) {
2509 mmap_unlock();
2510 return 0;
2511 }
2512 host_end = host_start + qemu_host_page_size;
2513 p = p1;
2514 prot = 0;
2515 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2516 prot |= p->flags;
2517 p++;
2518 }
2519 /* if the page was really writable, then we change its
2520 protection back to writable */
2521 if (prot & PAGE_WRITE_ORG) {
2522 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2523 if (!(p1[pindex].flags & PAGE_WRITE)) {
2524 mprotect((void *)g2h(host_start), qemu_host_page_size,
2525 (prot & PAGE_BITS) | PAGE_WRITE);
2526 p1[pindex].flags |= PAGE_WRITE;
2527 /* and since the content will be modified, we must invalidate
2528 the corresponding translated code. */
2529 tb_invalidate_phys_page(address, pc, puc);
2530#ifdef DEBUG_TB_CHECK
2531 tb_invalidate_check(address);
2532#endif
2533 mmap_unlock();
2534 return 1;
2535 }
2536 }
2537 mmap_unlock();
2538 return 0;
2539}
2540
2541static inline void tlb_set_dirty(CPUState *env,
2542 unsigned long addr, target_ulong vaddr)
2543{
2544}
2545#endif /* defined(CONFIG_USER_ONLY) */
2546
2547#if !defined(CONFIG_USER_ONLY)
2548static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2549 ram_addr_t memory);
2550static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2551 ram_addr_t orig_memory);
2552#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2553 need_subpage) \
2554 do { \
2555 if (addr > start_addr) \
2556 start_addr2 = 0; \
2557 else { \
2558 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2559 if (start_addr2 > 0) \
2560 need_subpage = 1; \
2561 } \
2562 \
2563 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2564 end_addr2 = TARGET_PAGE_SIZE - 1; \
2565 else { \
2566 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2567 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2568 need_subpage = 1; \
2569 } \
2570 } while (0)
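/* Worked example (not part of the original file), assuming 4 KiB pages, i.e.
   TARGET_PAGE_SIZE = 0x1000 and ~TARGET_PAGE_MASK = 0xfff: registering
   start_addr = 0x100c00 with orig_size = 0x200 and evaluating the macro for
   the first page, addr = 0x100c00:
     - addr == start_addr, so start_addr2 = 0x100c00 & 0xfff = 0xc00
       (> 0, hence need_subpage = 1);
     - start_addr + orig_size - addr = 0x200 < TARGET_PAGE_SIZE, so
       end_addr2 = (0x100c00 + 0x200 - 1) & 0xfff = 0xdff
       (< 0xfff, hence need_subpage = 1 again).
   Only offsets 0xc00..0xdff of that page are backed by the new region, which
   is why cpu_register_physical_memory() below falls back to a subpage. */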
2571
2572
2573/* register physical memory. 'size' must be a multiple of the target
2574 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2575 io memory page */
2576void cpu_register_physical_memory(target_phys_addr_t start_addr,
2577 unsigned long size,
2578 unsigned long phys_offset)
2579{
2580 target_phys_addr_t addr, end_addr;
2581 PhysPageDesc *p;
2582 CPUState *env;
2583 ram_addr_t orig_size = size;
2584 void *subpage;
2585
2586#ifdef USE_KQEMU
2587 /* XXX: should not depend on cpu context */
2588 env = first_cpu;
2589 if (env->kqemu_enabled) {
2590 kqemu_set_phys_mem(start_addr, size, phys_offset);
2591 }
2592#endif
2593 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2594 end_addr = start_addr + (target_phys_addr_t)size;
2595 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2596 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2597 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2598 ram_addr_t orig_memory = p->phys_offset;
2599 target_phys_addr_t start_addr2, end_addr2;
2600 int need_subpage = 0;
2601
2602 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2603 need_subpage);
2604 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2605 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2606 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2607 &p->phys_offset, orig_memory);
2608 } else {
2609 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2610 >> IO_MEM_SHIFT];
2611 }
2612 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2613 } else {
2614 p->phys_offset = phys_offset;
2615 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2616 (phys_offset & IO_MEM_ROMD))
2617 phys_offset += TARGET_PAGE_SIZE;
2618 }
2619 } else {
2620 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2621 p->phys_offset = phys_offset;
2622 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2623 (phys_offset & IO_MEM_ROMD))
2624 phys_offset += TARGET_PAGE_SIZE;
2625 else {
2626 target_phys_addr_t start_addr2, end_addr2;
2627 int need_subpage = 0;
2628
2629 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2630 end_addr2, need_subpage);
2631
2632 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2633 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2634 &p->phys_offset, IO_MEM_UNASSIGNED);
2635 subpage_register(subpage, start_addr2, end_addr2,
2636 phys_offset);
2637 }
2638 }
2639 }
2640 }
2641 /* since each CPU stores ram addresses in its TLB cache, we must
2642 reset the modified entries */
2643 /* XXX: slow ! */
2644 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2645 tlb_flush(env, 1);
2646 }
2647}
2648
2649/* XXX: temporary until new memory mapping API */
2650uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2651{
2652 PhysPageDesc *p;
2653
2654 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2655 if (!p)
2656 return IO_MEM_UNASSIGNED;
2657 return p->phys_offset;
2658}
2659
2660#ifndef VBOX
2661/* XXX: better than nothing */
2662ram_addr_t qemu_ram_alloc(ram_addr_t size)
2663{
2664 ram_addr_t addr;
2665 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2666 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2667 (uint64_t)size, (uint64_t)phys_ram_size);
2668 abort();
2669 }
2670 addr = phys_ram_alloc_offset;
2671 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2672 return addr;
2673}
2674
2675void qemu_ram_free(ram_addr_t addr)
2676{
2677}
2678#endif
2679
2680
2681static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2682{
2683#ifdef DEBUG_UNASSIGNED
2684 printf("Unassigned mem read 0x%08x\n", (int)addr);
2685#endif
2686#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2687 do_unassigned_access(addr, 0, 0, 0, 1);
2688#endif
2689 return 0;
2690}
2691
2692static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2693{
2694#ifdef DEBUG_UNASSIGNED
2695 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2696#endif
2697#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2698 do_unassigned_access(addr, 0, 0, 0, 2);
2699#endif
2700 return 0;
2701}
2702
2703static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2704{
2705#ifdef DEBUG_UNASSIGNED
2706 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2707#endif
2708#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2709 do_unassigned_access(addr, 0, 0, 0, 4);
2710#endif
2711 return 0;
2712}
2713
2714static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2715{
2716#ifdef DEBUG_UNASSIGNED
2717 printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
2718#endif
2719}
2720
2721static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2722{
2723#ifdef DEBUG_UNASSIGNED
2724 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2725#endif
2726#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2727 do_unassigned_access(addr, 1, 0, 0, 2);
2728#endif
2729}
2730
2731static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2732{
2733#ifdef DEBUG_UNASSIGNED
2734 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2735#endif
2736#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2737 do_unassigned_access(addr, 1, 0, 0, 4);
2738#endif
2739}
2740static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2741 unassigned_mem_readb,
2742 unassigned_mem_readw,
2743 unassigned_mem_readl,
2744};
2745
2746static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2747 unassigned_mem_writeb,
2748 unassigned_mem_writew,
2749 unassigned_mem_writel,
2750};
2751
2752static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2753{
2754 unsigned long ram_addr;
2755 int dirty_flags;
2756#if defined(VBOX)
2757 ram_addr = addr;
2758#else
2759 ram_addr = addr - (unsigned long)phys_ram_base;
2760#endif
2761#ifdef VBOX
2762 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2763 dirty_flags = 0xff;
2764 else
2765#endif /* VBOX */
2766 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2767 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2768#if !defined(CONFIG_USER_ONLY)
2769 tb_invalidate_phys_page_fast(ram_addr, 1);
2770# ifdef VBOX
2771 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2772 dirty_flags = 0xff;
2773 else
2774# endif /* VBOX */
2775 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2776#endif
2777 }
2778#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2779 remR3PhysWriteU8(addr, val);
2780#else
2781 stb_p((uint8_t *)(long)addr, val);
2782#endif
2783#ifdef USE_KQEMU
2784 if (cpu_single_env->kqemu_enabled &&
2785 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2786 kqemu_modify_page(cpu_single_env, ram_addr);
2787#endif
2788 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2789#ifdef VBOX
2790 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
 2791#endif /* VBOX */
2792 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2793 /* we remove the notdirty callback only if the code has been
2794 flushed */
2795 if (dirty_flags == 0xff)
2796 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
2797}
2798
2799static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2800{
2801 unsigned long ram_addr;
2802 int dirty_flags;
2803#if defined(VBOX)
2804 ram_addr = addr;
2805#else
2806 ram_addr = addr - (unsigned long)phys_ram_base;
2807#endif
2808#ifdef VBOX
2809 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2810 dirty_flags = 0xff;
2811 else
2812#endif /* VBOX */
2813 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2814 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2815#if !defined(CONFIG_USER_ONLY)
2816 tb_invalidate_phys_page_fast(ram_addr, 2);
2817# ifdef VBOX
2818 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2819 dirty_flags = 0xff;
2820 else
2821# endif /* VBOX */
2822 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2823#endif
2824 }
2825#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2826 remR3PhysWriteU16(addr, val);
2827#else
2828 stw_p((uint8_t *)(long)addr, val);
2829#endif
2830
2831#ifdef USE_KQEMU
2832 if (cpu_single_env->kqemu_enabled &&
2833 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2834 kqemu_modify_page(cpu_single_env, ram_addr);
2835#endif
2836 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2837#ifdef VBOX
2838 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2839#endif
2840 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2841 /* we remove the notdirty callback only if the code has been
2842 flushed */
2843 if (dirty_flags == 0xff)
2844 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
2845}
2846
2847static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2848{
2849 unsigned long ram_addr;
2850 int dirty_flags;
2851#if defined(VBOX)
2852 ram_addr = addr;
2853#else
2854 ram_addr = addr - (unsigned long)phys_ram_base;
2855#endif
2856#ifdef VBOX
2857 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2858 dirty_flags = 0xff;
2859 else
2860#endif /* VBOX */
2861 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2862 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2863#if !defined(CONFIG_USER_ONLY)
2864 tb_invalidate_phys_page_fast(ram_addr, 4);
2865# ifdef VBOX
2866 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2867 dirty_flags = 0xff;
2868 else
2869# endif /* VBOX */
2870 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2871#endif
2872 }
2873#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2874 remR3PhysWriteU32(addr, val);
2875#else
2876 stl_p((uint8_t *)(long)addr, val);
2877#endif
2878#ifdef USE_KQEMU
2879 if (cpu_single_env->kqemu_enabled &&
2880 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2881 kqemu_modify_page(cpu_single_env, ram_addr);
2882#endif
2883 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2884#ifdef VBOX
2885 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2886#endif
2887 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2888 /* we remove the notdirty callback only if the code has been
2889 flushed */
2890 if (dirty_flags == 0xff)
2891 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_io_vaddr);
2892}
2893
2894static CPUReadMemoryFunc *error_mem_read[3] = {
2895 NULL, /* never used */
2896 NULL, /* never used */
2897 NULL, /* never used */
2898};
2899
2900static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2901 notdirty_mem_writeb,
2902 notdirty_mem_writew,
2903 notdirty_mem_writel,
2904};
2905
2906
2907/* Generate a debug exception if a watchpoint has been hit. */
2908static void check_watchpoint(int offset, int flags)
2909{
2910 CPUState *env = cpu_single_env;
2911 target_ulong vaddr;
2912 int i;
2913
2914 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2915 for (i = 0; i < env->nb_watchpoints; i++) {
2916 if (vaddr == env->watchpoint[i].vaddr
2917 && (env->watchpoint[i].type & flags)) {
2918 env->watchpoint_hit = i + 1;
2919 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2920 break;
2921 }
2922 }
2923}
2924
2925/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2926 so these check for a hit then pass through to the normal out-of-line
2927 phys routines. */
2928static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2929{
2930 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2931 return ldub_phys(addr);
2932}
2933
2934static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2935{
2936 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2937 return lduw_phys(addr);
2938}
2939
2940static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2941{
2942 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2943 return ldl_phys(addr);
2944}
2945
2946static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2947 uint32_t val)
2948{
2949 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2950 stb_phys(addr, val);
2951}
2952
2953static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2954 uint32_t val)
2955{
2956 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2957 stw_phys(addr, val);
2958}
2959
2960static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2961 uint32_t val)
2962{
2963 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2964 stl_phys(addr, val);
2965}
2966
2967static CPUReadMemoryFunc *watch_mem_read[3] = {
2968 watch_mem_readb,
2969 watch_mem_readw,
2970 watch_mem_readl,
2971};
2972
2973static CPUWriteMemoryFunc *watch_mem_write[3] = {
2974 watch_mem_writeb,
2975 watch_mem_writew,
2976 watch_mem_writel,
2977};
2978
2979static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2980 unsigned int len)
2981{
2982 uint32_t ret;
2983 unsigned int idx;
2984
2985 idx = SUBPAGE_IDX(addr - mmio->base);
2986#if defined(DEBUG_SUBPAGE)
2987 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2988 mmio, len, addr, idx);
2989#endif
2990 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
2991
2992 return ret;
2993}
2994
2995static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2996 uint32_t value, unsigned int len)
2997{
2998 unsigned int idx;
2999
3000 idx = SUBPAGE_IDX(addr - mmio->base);
3001#if defined(DEBUG_SUBPAGE)
3002 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
3003 mmio, len, addr, idx, value);
3004#endif
3005 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
3006}
3007
3008static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
3009{
3010#if defined(DEBUG_SUBPAGE)
3011 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3012#endif
3013
3014 return subpage_readlen(opaque, addr, 0);
3015}
3016
3017static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3018 uint32_t value)
3019{
3020#if defined(DEBUG_SUBPAGE)
3021 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3022#endif
3023 subpage_writelen(opaque, addr, value, 0);
3024}
3025
3026static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3027{
3028#if defined(DEBUG_SUBPAGE)
3029 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3030#endif
3031
3032 return subpage_readlen(opaque, addr, 1);
3033}
3034
3035static void subpage_writew (void *opaque, target_phys_addr_t addr,
3036 uint32_t value)
3037{
3038#if defined(DEBUG_SUBPAGE)
3039 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3040#endif
3041 subpage_writelen(opaque, addr, value, 1);
3042}
3043
3044static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3045{
3046#if defined(DEBUG_SUBPAGE)
3047 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3048#endif
3049
3050 return subpage_readlen(opaque, addr, 2);
3051}
3052
3053static void subpage_writel (void *opaque,
3054 target_phys_addr_t addr, uint32_t value)
3055{
3056#if defined(DEBUG_SUBPAGE)
3057 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3058#endif
3059 subpage_writelen(opaque, addr, value, 2);
3060}
3061
3062static CPUReadMemoryFunc *subpage_read[] = {
3063 &subpage_readb,
3064 &subpage_readw,
3065 &subpage_readl,
3066};
3067
3068static CPUWriteMemoryFunc *subpage_write[] = {
3069 &subpage_writeb,
3070 &subpage_writew,
3071 &subpage_writel,
3072};
3073
3074static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3075 ram_addr_t memory)
3076{
3077 int idx, eidx;
3078 unsigned int i;
3079
3080 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3081 return -1;
3082 idx = SUBPAGE_IDX(start);
3083 eidx = SUBPAGE_IDX(end);
3084#if defined(DEBUG_SUBPAGE)
3085 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
3086 mmio, start, end, idx, eidx, memory);
3087#endif
3088 memory >>= IO_MEM_SHIFT;
3089 for (; idx <= eidx; idx++) {
3090 for (i = 0; i < 4; i++) {
3091 if (io_mem_read[memory][i]) {
3092 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
3093 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
3094 }
3095 if (io_mem_write[memory][i]) {
3096 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
3097 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
3098 }
3099 }
3100 }
3101
3102 return 0;
3103}
3104
3105static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3106 ram_addr_t orig_memory)
3107{
3108 subpage_t *mmio;
3109 int subpage_memory;
3110
3111 mmio = qemu_mallocz(sizeof(subpage_t));
3112 if (mmio != NULL) {
3113 mmio->base = base;
3114 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
3115#if defined(DEBUG_SUBPAGE)
3116 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3117 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3118#endif
3119 *phys = subpage_memory | IO_MEM_SUBPAGE;
3120 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
3121 }
3122
3123 return mmio;
3124}
3125
3126static void io_mem_init(void)
3127{
3128 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
3129 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3130 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
3131 io_mem_nb = 5;
3132
3133 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
3134 watch_mem_write, NULL);
3135
3136#ifndef VBOX /* VBOX: we do this later when the RAM is allocated. */
3137 /* alloc dirty bits array */
3138 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3139 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
3140#endif /* !VBOX */
3141}
3142
3143/* mem_read and mem_write are arrays of functions containing the
3144 function to access byte (index 0), word (index 1) and dword (index
3145 2). Functions can be omitted with a NULL function pointer. The
3146 registered functions may be modified dynamically later.
3147 If io_index is non zero, the corresponding io zone is
3148 modified. If it is zero, a new io zone is allocated. The return
3149 value can be used with cpu_register_physical_memory(). (-1) is
 3150 returned on error. */
3151int cpu_register_io_memory(int io_index,
3152 CPUReadMemoryFunc **mem_read,
3153 CPUWriteMemoryFunc **mem_write,
3154 void *opaque)
3155{
3156 int i, subwidth = 0;
3157
3158 if (io_index <= 0) {
3159 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
3160 return -1;
3161 io_index = io_mem_nb++;
3162 } else {
3163 if (io_index >= IO_MEM_NB_ENTRIES)
3164 return -1;
3165 }
3166
3167 for(i = 0;i < 3; i++) {
3168 if (!mem_read[i] || !mem_write[i])
3169 subwidth = IO_MEM_SUBWIDTH;
3170 io_mem_read[io_index][i] = mem_read[i];
3171 io_mem_write[io_index][i] = mem_write[i];
3172 }
3173 io_mem_opaque[io_index] = opaque;
3174 return (io_index << IO_MEM_SHIFT) | subwidth;
3175}
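/* Illustrative sketch (not part of the original file): the usual pairing of
   cpu_register_io_memory() with cpu_register_physical_memory() when a device
   model exposes an MMIO window.  The callbacks, base address and window size
   are hypothetical. */
#if 0
static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr)
{
    return 0; /* read of the device register at this offset */
}

static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* write to the device register at this offset */
}

/* dword access only; the NULL byte/word slots make this a subwidth region */
static CPUReadMemoryFunc *mydev_read[3] = { NULL, NULL, mydev_readl };
static CPUWriteMemoryFunc *mydev_write[3] = { NULL, NULL, mydev_writel };

static void mydev_map(target_phys_addr_t base)
{
    /* io_index 0 requests a fresh I/O zone; the return value is then used
       as the phys_offset of the mapped range */
    int io_index = cpu_register_io_memory(0, mydev_read, mydev_write, NULL);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io_index);
}
#endif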
3176
3177CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
3178{
3179 return io_mem_write[io_index >> IO_MEM_SHIFT];
3180}
3181
3182CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
3183{
3184 return io_mem_read[io_index >> IO_MEM_SHIFT];
3185}
3186#endif /* !defined(CONFIG_USER_ONLY) */
3187
3188/* physical memory access (slow version, mainly for debug) */
3189#if defined(CONFIG_USER_ONLY)
3190void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3191 int len, int is_write)
3192{
3193 int l, flags;
3194 target_ulong page;
3195 void * p;
3196
3197 while (len > 0) {
3198 page = addr & TARGET_PAGE_MASK;
3199 l = (page + TARGET_PAGE_SIZE) - addr;
3200 if (l > len)
3201 l = len;
3202 flags = page_get_flags(page);
3203 if (!(flags & PAGE_VALID))
3204 return;
3205 if (is_write) {
3206 if (!(flags & PAGE_WRITE))
3207 return;
3208 /* XXX: this code should not depend on lock_user */
3209 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3210 /* FIXME - should this return an error rather than just fail? */
3211 return;
 3212 memcpy(p, buf, l);
 3213 unlock_user(p, addr, l);
3214 } else {
3215 if (!(flags & PAGE_READ))
3216 return;
3217 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3218 /* FIXME - should this return an error rather than just fail? */
3219 return;
 3220 memcpy(buf, p, l);
3221 unlock_user(p, addr, 0);
3222 }
3223 len -= l;
3224 buf += l;
3225 addr += l;
3226 }
3227}
3228
3229#else
3230void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3231 int len, int is_write)
3232{
3233 int l, io_index;
3234 uint8_t *ptr;
3235 uint32_t val;
3236 target_phys_addr_t page;
3237 unsigned long pd;
3238 PhysPageDesc *p;
3239
3240 while (len > 0) {
3241 page = addr & TARGET_PAGE_MASK;
3242 l = (page + TARGET_PAGE_SIZE) - addr;
3243 if (l > len)
3244 l = len;
3245 p = phys_page_find(page >> TARGET_PAGE_BITS);
3246 if (!p) {
3247 pd = IO_MEM_UNASSIGNED;
3248 } else {
3249 pd = p->phys_offset;
3250 }
3251
3252 if (is_write) {
3253 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3254 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3255 /* XXX: could force cpu_single_env to NULL to avoid
3256 potential bugs */
3257 if (l >= 4 && ((addr & 3) == 0)) {
3258 /* 32 bit write access */
3259#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3260 val = ldl_p(buf);
3261#else
3262 val = *(const uint32_t *)buf;
3263#endif
3264 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3265 l = 4;
3266 } else if (l >= 2 && ((addr & 1) == 0)) {
3267 /* 16 bit write access */
3268#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3269 val = lduw_p(buf);
3270#else
3271 val = *(const uint16_t *)buf;
3272#endif
3273 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
3274 l = 2;
3275 } else {
3276 /* 8 bit write access */
3277#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3278 val = ldub_p(buf);
3279#else
3280 val = *(const uint8_t *)buf;
3281#endif
3282 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
3283 l = 1;
3284 }
3285 } else {
3286 unsigned long addr1;
3287 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3288 /* RAM case */
3289#ifdef VBOX
3290 remR3PhysWrite(addr1, buf, l); NOREF(ptr);
3291#else
3292 ptr = phys_ram_base + addr1;
3293 memcpy(ptr, buf, l);
3294#endif
3295 if (!cpu_physical_memory_is_dirty(addr1)) {
3296 /* invalidate code */
3297 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3298 /* set dirty bit */
3299#ifdef VBOX
3300 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
3301#endif
3302 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3303 (0xff & ~CODE_DIRTY_FLAG);
3304 }
3305 }
3306 } else {
3307 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3308 !(pd & IO_MEM_ROMD)) {
3309 /* I/O case */
3310 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3311 if (l >= 4 && ((addr & 3) == 0)) {
3312 /* 32 bit read access */
3313 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3314#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3315 stl_p(buf, val);
3316#else
3317 *(uint32_t *)buf = val;
3318#endif
3319 l = 4;
3320 } else if (l >= 2 && ((addr & 1) == 0)) {
3321 /* 16 bit read access */
3322 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
3323#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3324 stw_p(buf, val);
3325#else
3326 *(uint16_t *)buf = val;
3327#endif
3328 l = 2;
3329 } else {
3330 /* 8 bit read access */
3331 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
3332#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3333 stb_p(buf, val);
3334#else
3335 *(uint8_t *)buf = val;
3336#endif
3337 l = 1;
3338 }
3339 } else {
3340 /* RAM case */
3341#ifdef VBOX
3342 remR3PhysRead((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), buf, l); NOREF(ptr);
3343#else
3344 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3345 (addr & ~TARGET_PAGE_MASK);
3346 memcpy(buf, ptr, l);
3347#endif
3348 }
3349 }
3350 len -= l;
3351 buf += l;
3352 addr += l;
3353 }
3354}
3355
3356#ifndef VBOX
3357/* used for ROM loading : can write in RAM and ROM */
3358void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3359 const uint8_t *buf, int len)
3360{
3361 int l;
3362 uint8_t *ptr;
3363 target_phys_addr_t page;
3364 unsigned long pd;
3365 PhysPageDesc *p;
3366
3367 while (len > 0) {
3368 page = addr & TARGET_PAGE_MASK;
3369 l = (page + TARGET_PAGE_SIZE) - addr;
3370 if (l > len)
3371 l = len;
3372 p = phys_page_find(page >> TARGET_PAGE_BITS);
3373 if (!p) {
3374 pd = IO_MEM_UNASSIGNED;
3375 } else {
3376 pd = p->phys_offset;
3377 }
3378
3379 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3380 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3381 !(pd & IO_MEM_ROMD)) {
3382 /* do nothing */
3383 } else {
3384 unsigned long addr1;
3385 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3386 /* ROM/RAM case */
3387 ptr = phys_ram_base + addr1;
3388 memcpy(ptr, buf, l);
3389 }
3390 len -= l;
3391 buf += l;
3392 addr += l;
3393 }
3394}
3395#endif /* !VBOX */
3396
3397
3398/* warning: addr must be aligned */
3399uint32_t ldl_phys(target_phys_addr_t addr)
3400{
3401 int io_index;
3402 uint8_t *ptr;
3403 uint32_t val;
3404 unsigned long pd;
3405 PhysPageDesc *p;
3406
3407 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3408 if (!p) {
3409 pd = IO_MEM_UNASSIGNED;
3410 } else {
3411 pd = p->phys_offset;
3412 }
3413
3414 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3415 !(pd & IO_MEM_ROMD)) {
3416 /* I/O case */
3417 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3418 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3419 } else {
3420 /* RAM case */
3421#ifndef VBOX
3422 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3423 (addr & ~TARGET_PAGE_MASK);
3424 val = ldl_p(ptr);
3425#else
3426 val = remR3PhysReadU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
3427#endif
3428 }
3429 return val;
3430}
3431
3432/* warning: addr must be aligned */
3433uint64_t ldq_phys(target_phys_addr_t addr)
3434{
3435 int io_index;
3436 uint8_t *ptr;
3437 uint64_t val;
3438 unsigned long pd;
3439 PhysPageDesc *p;
3440
3441 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3442 if (!p) {
3443 pd = IO_MEM_UNASSIGNED;
3444 } else {
3445 pd = p->phys_offset;
3446 }
3447
3448 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3449 !(pd & IO_MEM_ROMD)) {
3450 /* I/O case */
3451 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3452#ifdef TARGET_WORDS_BIGENDIAN
3453 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3454 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3455#else
3456 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3457 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3458#endif
3459 } else {
3460 /* RAM case */
3461#ifndef VBOX
3462 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3463 (addr & ~TARGET_PAGE_MASK);
3464 val = ldq_p(ptr);
3465#else
3466 val = remR3PhysReadU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
3467#endif
3468 }
3469 return val;
3470}
3471
3472/* XXX: optimize */
3473uint32_t ldub_phys(target_phys_addr_t addr)
3474{
3475 uint8_t val;
3476 cpu_physical_memory_read(addr, &val, 1);
3477 return val;
3478}
3479
3480/* XXX: optimize */
3481uint32_t lduw_phys(target_phys_addr_t addr)
3482{
3483 uint16_t val;
3484 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3485 return tswap16(val);
3486}
3487
 3488/* warning: addr must be aligned. The ram page is not marked as dirty
3489 and the code inside is not invalidated. It is useful if the dirty
3490 bits are used to track modified PTEs */
3491void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3492{
3493 int io_index;
3494 uint8_t *ptr;
3495 unsigned long pd;
3496 PhysPageDesc *p;
3497
3498 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3499 if (!p) {
3500 pd = IO_MEM_UNASSIGNED;
3501 } else {
3502 pd = p->phys_offset;
3503 }
3504
3505 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3506 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3507 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3508 } else {
3509#ifndef VBOX
 3510 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
 3511 ptr = phys_ram_base + addr1;
3512 stl_p(ptr, val);
3513#else
3514 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3515#endif
3516#ifndef VBOX
3517 if (unlikely(in_migration)) {
3518 if (!cpu_physical_memory_is_dirty(addr1)) {
3519 /* invalidate code */
3520 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3521 /* set dirty bit */
3522 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3523 (0xff & ~CODE_DIRTY_FLAG);
3524 }
3525 }
3526#endif
3527 }
3528}
3529
3530void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3531{
3532 int io_index;
3533 uint8_t *ptr;
3534 unsigned long pd;
3535 PhysPageDesc *p;
3536
3537 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3538 if (!p) {
3539 pd = IO_MEM_UNASSIGNED;
3540 } else {
3541 pd = p->phys_offset;
3542 }
3543
3544 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3545 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3546#ifdef TARGET_WORDS_BIGENDIAN
3547 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3548 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3549#else
3550 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3551 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3552#endif
3553 } else {
3554#ifndef VBOX
3555 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3556 (addr & ~TARGET_PAGE_MASK);
3557 stq_p(ptr, val);
3558#else
3559 remR3PhysWriteU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3560#endif
3561 }
3562}
3563
3564
3565/* warning: addr must be aligned */
3566void stl_phys(target_phys_addr_t addr, uint32_t val)
3567{
3568 int io_index;
3569 uint8_t *ptr;
3570 unsigned long pd;
3571 PhysPageDesc *p;
3572
3573 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3574 if (!p) {
3575 pd = IO_MEM_UNASSIGNED;
3576 } else {
3577 pd = p->phys_offset;
3578 }
3579
3580 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3581 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3582 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3583 } else {
3584 unsigned long addr1;
3585 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3586 /* RAM case */
3587#ifndef VBOX
3588 ptr = phys_ram_base + addr1;
3589 stl_p(ptr, val);
3590#else
3591 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3592#endif
3593 if (!cpu_physical_memory_is_dirty(addr1)) {
3594 /* invalidate code */
3595 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3596 /* set dirty bit */
3597#ifdef VBOX
3598 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
3599#endif
3600 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3601 (0xff & ~CODE_DIRTY_FLAG);
3602 }
3603 }
3604}
3605
3606/* XXX: optimize */
3607void stb_phys(target_phys_addr_t addr, uint32_t val)
3608{
3609 uint8_t v = val;
3610 cpu_physical_memory_write(addr, &v, 1);
3611}
3612
3613/* XXX: optimize */
3614void stw_phys(target_phys_addr_t addr, uint32_t val)
3615{
3616 uint16_t v = tswap16(val);
3617 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3618}
3619
3620/* XXX: optimize */
3621void stq_phys(target_phys_addr_t addr, uint64_t val)
3622{
3623 val = tswap64(val);
3624 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3625}
3626
3627#endif
3628
3629/* virtual memory access for debug */
3630int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3631 uint8_t *buf, int len, int is_write)
3632{
3633 int l;
3634 target_ulong page, phys_addr;
3635
3636 while (len > 0) {
3637 page = addr & TARGET_PAGE_MASK;
3638 phys_addr = cpu_get_phys_page_debug(env, page);
3639 /* if no physical page mapped, return an error */
3640 if (phys_addr == -1)
3641 return -1;
3642 l = (page + TARGET_PAGE_SIZE) - addr;
3643 if (l > len)
3644 l = len;
3645 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3646 buf, l, is_write);
3647 len -= l;
3648 buf += l;
3649 addr += l;
3650 }
3651 return 0;
3652}
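/* Illustrative sketch (not part of the original file): a debugger front end
   such as the gdb stub uses cpu_memory_rw_debug() to patch guest code, e.g.
   when planting a software breakpoint.  The x86 0xcc opcode and the helper
   name are illustrative assumptions. */
#if 0
static int insert_sw_breakpoint(CPUState *env, target_ulong pc, uint8_t *saved_byte)
{
    static const uint8_t int3 = 0xcc;                       /* x86 breakpoint opcode */
    if (cpu_memory_rw_debug(env, pc, saved_byte, 1, 0) < 0) /* save the original byte */
        return -1;
    return cpu_memory_rw_debug(env, pc, (uint8_t *)&int3, 1, 1);
}
#endif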
3653
3654/* in deterministic execution mode, instructions doing device I/Os
3655 must be at the end of the TB */
3656void cpu_io_recompile(CPUState *env, void *retaddr)
3657{
3658 TranslationBlock *tb;
3659 uint32_t n, cflags;
3660 target_ulong pc, cs_base;
3661 uint64_t flags;
3662
3663 tb = tb_find_pc((unsigned long)retaddr);
3664 if (!tb) {
3665 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3666 retaddr);
3667 }
3668 n = env->icount_decr.u16.low + tb->icount;
3669 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3670 /* Calculate how many instructions had been executed before the fault
3671 occurred. */
3672 n = n - env->icount_decr.u16.low;
3673 /* Generate a new TB ending on the I/O insn. */
3674 n++;
3675 /* On MIPS and SH, delay slot instructions can only be restarted if
3676 they were already the first instruction in the TB. If this is not
3677 the first instruction in a TB then re-execute the preceding
3678 branch. */
3679#if defined(TARGET_MIPS)
3680 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3681 env->active_tc.PC -= 4;
3682 env->icount_decr.u16.low++;
3683 env->hflags &= ~MIPS_HFLAG_BMASK;
3684 }
3685#elif defined(TARGET_SH4)
3686 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3687 && n > 1) {
3688 env->pc -= 2;
3689 env->icount_decr.u16.low++;
3690 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3691 }
3692#endif
3693 /* This should never happen. */
3694 if (n > CF_COUNT_MASK)
3695 cpu_abort(env, "TB too big during recompile");
3696
3697 cflags = n | CF_LAST_IO;
3698 pc = tb->pc;
3699 cs_base = tb->cs_base;
3700 flags = tb->flags;
3701 tb_phys_invalidate(tb, -1);
3702 /* FIXME: In theory this could raise an exception. In practice
3703 we have already translated the block once so it's probably ok. */
3704 tb_gen_code(env, pc, cs_base, flags, cflags);
3705 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3706 the first in the TB) then we end up generating a whole new TB and
3707 repeating the fault, which is horribly inefficient.
3708 Better would be to execute just this insn uncached, or generate a
3709 second new TB. */
3710 cpu_resume_from_signal(env, NULL);
3711}
3712
3713#ifndef VBOX
3714void dump_exec_info(FILE *f,
3715 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3716{
3717 int i, target_code_size, max_target_code_size;
3718 int direct_jmp_count, direct_jmp2_count, cross_page;
3719 TranslationBlock *tb;
3720
3721 target_code_size = 0;
3722 max_target_code_size = 0;
3723 cross_page = 0;
3724 direct_jmp_count = 0;
3725 direct_jmp2_count = 0;
3726 for(i = 0; i < nb_tbs; i++) {
3727 tb = &tbs[i];
3728 target_code_size += tb->size;
3729 if (tb->size > max_target_code_size)
3730 max_target_code_size = tb->size;
3731 if (tb->page_addr[1] != -1)
3732 cross_page++;
3733 if (tb->tb_next_offset[0] != 0xffff) {
3734 direct_jmp_count++;
3735 if (tb->tb_next_offset[1] != 0xffff) {
3736 direct_jmp2_count++;
3737 }
3738 }
3739 }
3740 /* XXX: avoid using doubles ? */
3741 cpu_fprintf(f, "Translation buffer state:\n");
3742 cpu_fprintf(f, "gen code size %ld/%ld\n",
3743 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3744 cpu_fprintf(f, "TB count %d/%d\n",
3745 nb_tbs, code_gen_max_blocks);
3746 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3747 nb_tbs ? target_code_size / nb_tbs : 0,
3748 max_target_code_size);
3749 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3750 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3751 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3752 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3753 cross_page,
3754 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3755 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3756 direct_jmp_count,
3757 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3758 direct_jmp2_count,
3759 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3760 cpu_fprintf(f, "\nStatistics:\n");
3761 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3762 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3763 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3764 tcg_dump_info(f, cpu_fprintf);
3765}
3766#endif /* !VBOX */
3767
3768#if !defined(CONFIG_USER_ONLY)
3769
3770#define MMUSUFFIX _cmmu
3771#define GETPC() NULL
3772#define env cpu_single_env
3773#define SOFTMMU_CODE_ACCESS
3774
3775#define SHIFT 0
3776#include "softmmu_template.h"
3777
3778#define SHIFT 1
3779#include "softmmu_template.h"
3780
3781#define SHIFT 2
3782#include "softmmu_template.h"
3783
3784#define SHIFT 3
3785#include "softmmu_template.h"
3786
3787#undef env
3788
3789#endif