VirtualBox

source: vbox/trunk/src/recompiler/exec.c@36141

Last change on this file since 36141 was 36140, checked in by vboxsync, 14 years ago

rem: Re-synced to svn://svn.savannah.nongnu.org/qemu/trunk@5495 (repo UUID c046a42c-6fe2-441c-8c8c-71466251a162).

  • Property svn:eol-style set to native
File size: 111.8 KB
 
1/*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29
30#include "config.h"
31#ifndef VBOX
32#ifdef _WIN32
33#define WIN32_LEAN_AND_MEAN
34#include <windows.h>
35#else
36#include <sys/types.h>
37#include <sys/mman.h>
38#endif
39#include <stdlib.h>
40#include <stdio.h>
41#include <stdarg.h>
42#include <string.h>
43#include <errno.h>
44#include <unistd.h>
45#include <inttypes.h>
46#else /* VBOX */
47# include <stdlib.h>
48# include <stdio.h>
49# include <iprt/alloc.h>
50# include <iprt/string.h>
51# include <iprt/param.h>
52# include <VBox/vmm/pgm.h> /* PGM_DYNAMIC_RAM_ALLOC */
53#endif /* VBOX */
54
55#include "cpu.h"
56#include "exec-all.h"
57#include "qemu-common.h"
58#include "tcg.h"
59#ifndef VBOX
60#include "hw/hw.h"
61#endif
62#include "osdep.h"
63#if defined(CONFIG_USER_ONLY)
64#include <qemu.h>
65#endif
66
67//#define DEBUG_TB_INVALIDATE
68//#define DEBUG_FLUSH
69//#define DEBUG_TLB
70//#define DEBUG_UNASSIGNED
71
72/* make various TB consistency checks */
73//#define DEBUG_TB_CHECK
74//#define DEBUG_TLB_CHECK
75
76//#define DEBUG_IOPORT
77//#define DEBUG_SUBPAGE
78
79#if !defined(CONFIG_USER_ONLY)
80/* TB consistency checks only implemented for usermode emulation. */
81#undef DEBUG_TB_CHECK
82#endif
83
84#define SMC_BITMAP_USE_THRESHOLD 10
85
86#define MMAP_AREA_START 0x00000000
87#define MMAP_AREA_END 0xa8000000
88
89#if defined(TARGET_SPARC64)
90#define TARGET_PHYS_ADDR_SPACE_BITS 41
91#elif defined(TARGET_SPARC)
92#define TARGET_PHYS_ADDR_SPACE_BITS 36
93#elif defined(TARGET_ALPHA)
94#define TARGET_PHYS_ADDR_SPACE_BITS 42
95#define TARGET_VIRT_ADDR_SPACE_BITS 42
96#elif defined(TARGET_PPC64)
97#define TARGET_PHYS_ADDR_SPACE_BITS 42
98#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
99#define TARGET_PHYS_ADDR_SPACE_BITS 42
100#elif defined(TARGET_I386) && !defined(USE_KQEMU)
101#define TARGET_PHYS_ADDR_SPACE_BITS 36
102#else
103/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
104#define TARGET_PHYS_ADDR_SPACE_BITS 32
105#endif
106
107static TranslationBlock *tbs;
108int code_gen_max_blocks;
109TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
110static int nb_tbs;
111/* any access to the tbs or the page table must use this lock */
112spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
113
114#ifndef VBOX
115#if defined(__arm__) || defined(__sparc_v9__)
116/* The prologue must be reachable with a direct jump. ARM and Sparc64
117 have limited branch ranges (possibly also PPC) so place it in a
118 section close to code segment. */
119#define code_gen_section \
120 __attribute__((__section__(".gen_code"))) \
121 __attribute__((aligned (32)))
122#else
123#define code_gen_section \
124 __attribute__((aligned (32)))
125#endif
126
127uint8_t code_gen_prologue[1024] code_gen_section;
128#else /* VBOX */
129extern uint8_t* code_gen_prologue;
130#endif /* VBOX */
131static uint8_t *code_gen_buffer;
132static unsigned long code_gen_buffer_size;
133/* threshold to flush the translated code buffer */
134static unsigned long code_gen_buffer_max_size;
135uint8_t *code_gen_ptr;
136
137#ifndef VBOX
138#if !defined(CONFIG_USER_ONLY)
139ram_addr_t phys_ram_size;
140int phys_ram_fd;
141uint8_t *phys_ram_base;
142uint8_t *phys_ram_dirty;
143static int in_migration;
144static ram_addr_t phys_ram_alloc_offset = 0;
145#endif
146#else /* VBOX */
147RTGCPHYS phys_ram_size;
148/* we have memory ranges (the high PC-BIOS mapping) which
149 cause some pages to fall outside the dirty map here. */
150RTGCPHYS phys_ram_dirty_size;
151#endif /* VBOX */
152#if !defined(VBOX)
153uint8_t *phys_ram_base;
154#endif
155uint8_t *phys_ram_dirty;
156
157CPUState *first_cpu;
158/* current CPU in the current thread. It is only valid inside
159 cpu_exec() */
160CPUState *cpu_single_env;
161/* 0 = Do not count executed instructions.
162 1 = Precise instruction counting.
163 2 = Adaptive rate instruction counting. */
164int use_icount = 0;
165/* Current instruction counter. While executing translated code this may
166 include some instructions that have not yet been executed. */
167int64_t qemu_icount;
168
169typedef struct PageDesc {
170 /* list of TBs intersecting this ram page */
171 TranslationBlock *first_tb;
172 /* in order to optimize self modifying code, we count the number
173 of lookups we do to a given page to use a bitmap */
174 unsigned int code_write_count;
175 uint8_t *code_bitmap;
176#if defined(CONFIG_USER_ONLY)
177 unsigned long flags;
178#endif
179} PageDesc;
180
181typedef struct PhysPageDesc {
182 /* offset in host memory of the page + io_index in the low bits */
183 ram_addr_t phys_offset;
184} PhysPageDesc;
185
186#define L2_BITS 10
187#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
188/* XXX: this is a temporary hack for alpha target.
189 * In the future, this is to be replaced by a multi-level table
190 * to actually be able to handle the complete 64-bit address space.
191 */
192#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
193#else
194#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
195#endif
196#ifdef VBOX
197#define L0_BITS (TARGET_PHYS_ADDR_SPACE_BITS - 32)
198#endif
199
200#ifdef VBOX
201#define L0_SIZE (1 << L0_BITS)
202#endif
203#define L1_SIZE (1 << L1_BITS)
204#define L2_SIZE (1 << L2_BITS)
205
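/*
 * Editorial sketch, not part of the upstream file: how page_l1_map() and
 * page_find() below split a page index (virtual address >> TARGET_PAGE_BITS)
 * across the levels defined above; the VBOX build adds an extra L0 level on
 * top for guest-physical spaces wider than 32 bits. Kept out of the build
 * with #if 0, and the helper name is purely illustrative.
 */
#if 0
static inline void example_split_page_index(target_ulong index,
                                            unsigned *pi1, unsigned *pi2)
{
    *pi1 = (index >> L2_BITS) & (L1_SIZE - 1); /* slot in the level-1 array */
    *pi2 = index & (L2_SIZE - 1);              /* PageDesc entry within level 2 */
}
#endif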
206unsigned long qemu_real_host_page_size;
207unsigned long qemu_host_page_bits;
208unsigned long qemu_host_page_size;
209unsigned long qemu_host_page_mask;
210
211/* XXX: for system emulation, it could just be an array */
212#ifndef VBOX
213static PageDesc *l1_map[L1_SIZE];
214static PhysPageDesc **l1_phys_map;
215#else
216static unsigned l0_map_max_used = 0;
217static PageDesc **l0_map[L0_SIZE];
218static void **l0_phys_map[L0_SIZE];
219#endif
220
221#if !defined(CONFIG_USER_ONLY)
222static void io_mem_init(void);
223
224/* io memory support */
225CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
226CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
227void *io_mem_opaque[IO_MEM_NB_ENTRIES];
228static int io_mem_nb;
229static int io_mem_watch;
230#endif
231
232#ifndef VBOX
233/* log support */
234static const char *logfilename = "/tmp/qemu.log";
235#endif /* !VBOX */
236FILE *logfile;
237int loglevel;
238#ifndef VBOX
239static int log_append = 0;
240#endif
241
242/* statistics */
243#ifndef VBOX
244static int tlb_flush_count;
245static int tb_flush_count;
246static int tb_phys_invalidate_count;
247#else /* VBOX - Resettable U32 stats, see VBoxRecompiler.c. */
248uint32_t tlb_flush_count;
249uint32_t tb_flush_count;
250uint32_t tb_phys_invalidate_count;
251#endif /* VBOX */
252
253#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
254typedef struct subpage_t {
255 target_phys_addr_t base;
256 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
257 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
258 void *opaque[TARGET_PAGE_SIZE][2][4];
259} subpage_t;
260
261#ifndef VBOX
262#ifdef _WIN32
263static void map_exec(void *addr, long size)
264{
265 DWORD old_protect;
266 VirtualProtect(addr, size,
267 PAGE_EXECUTE_READWRITE, &old_protect);
268
269}
270#else
271static void map_exec(void *addr, long size)
272{
273 unsigned long start, end, page_size;
274
275 page_size = getpagesize();
276 start = (unsigned long)addr;
277 start &= ~(page_size - 1);
278
279 end = (unsigned long)addr + size;
280 end += page_size - 1;
281 end &= ~(page_size - 1);
282
283 mprotect((void *)start, end - start,
284 PROT_READ | PROT_WRITE | PROT_EXEC);
285}
286#endif
287#else /* VBOX */
288static void map_exec(void *addr, long size)
289{
290 RTMemProtect(addr, size,
291 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
292}
293#endif /* VBOX */
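/*
 * Editorial example, not part of the upstream file: the non-VBOX Unix
 * map_exec() above rounds the range out to whole host pages before calling
 * mprotect(). With a 4 KiB page size, addr = 0x12345 and size = 0x100 give
 * start = 0x12000 and end = 0x13000, so the whole containing page becomes
 * read/write/execute; the VBOX variant leaves that work to RTMemProtect().
 * A hypothetical caller would look like the #if 0 sketch below.
 */
#if 0
static void example_map_exec_usage(void)
{
    uint8_t *buf = qemu_malloc(64 * 1024);  /* some freshly allocated buffer */
    map_exec(buf, 64 * 1024);               /* make the whole region RWX */
}
#endif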
294
295static void page_init(void)
296{
297 /* NOTE: we can always suppose that qemu_host_page_size >=
298 TARGET_PAGE_SIZE */
299#ifdef VBOX
300 RTMemProtect(code_gen_buffer, sizeof(code_gen_buffer),
301 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
302 qemu_real_host_page_size = PAGE_SIZE;
303#else /* !VBOX */
304#ifdef _WIN32
305 {
306 SYSTEM_INFO system_info;
307 DWORD old_protect;
308
309 GetSystemInfo(&system_info);
310 qemu_real_host_page_size = system_info.dwPageSize;
311 }
312#else
313 qemu_real_host_page_size = getpagesize();
314#endif
315#endif /* !VBOX */
316
317 if (qemu_host_page_size == 0)
318 qemu_host_page_size = qemu_real_host_page_size;
319 if (qemu_host_page_size < TARGET_PAGE_SIZE)
320 qemu_host_page_size = TARGET_PAGE_SIZE;
321 qemu_host_page_bits = 0;
322#ifndef VBOX
323 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
324#else
325 while ((1 << qemu_host_page_bits) < (int)qemu_host_page_size)
326#endif
327 qemu_host_page_bits++;
328 qemu_host_page_mask = ~(qemu_host_page_size - 1);
329#ifndef VBOX
330 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
331 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
332#endif
333
334#ifdef VBOX
335 /* We use other means to set reserved bit on our pages */
336#else /* !VBOX */
337#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
338 {
339 long long startaddr, endaddr;
340 FILE *f;
341 int n;
342
343 mmap_lock();
344 last_brk = (unsigned long)sbrk(0);
345 f = fopen("/proc/self/maps", "r");
346 if (f) {
347 do {
348 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
349 if (n == 2) {
350 startaddr = MIN(startaddr,
351 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
352 endaddr = MIN(endaddr,
353 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
354 page_set_flags(startaddr & TARGET_PAGE_MASK,
355 TARGET_PAGE_ALIGN(endaddr),
356 PAGE_RESERVED);
357 }
358 } while (!feof(f));
359 fclose(f);
360 }
361 mmap_unlock();
362 }
363#endif
364#endif /* !VBOX */
365}
366
367static inline PageDesc **page_l1_map(target_ulong index)
368{
369#ifndef VBOX
370#if TARGET_LONG_BITS > 32
371 /* Host memory outside guest VM. For 32-bit targets we have already
372 excluded high addresses. */
373 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
374 return NULL;
375#endif
376 return &l1_map[index >> L2_BITS];
377#else /* VBOX */
378 PageDesc **l1_map;
379 AssertMsgReturn(index < (target_ulong)L2_SIZE * L1_SIZE * L0_SIZE,
380 ("index=%RGp >= %RGp; L1_SIZE=%#x L2_SIZE=%#x L0_SIZE=%#x\n",
381 (RTGCPHYS)index, (RTGCPHYS)L2_SIZE * L1_SIZE, L1_SIZE, L2_SIZE, L0_SIZE),
382 NULL);
383 l1_map = l0_map[index >> (L1_BITS + L2_BITS)];
384 if (RT_UNLIKELY(!l1_map))
385 {
386 unsigned i0 = index >> (L1_BITS + L2_BITS);
387 l0_map[i0] = l1_map = qemu_mallocz(sizeof(PageDesc *) * L1_SIZE);
388 if (RT_UNLIKELY(!l1_map))
389 return NULL;
390 if (i0 >= l0_map_max_used)
391 l0_map_max_used = i0 + 1;
392 }
393 return &l1_map[(index >> L2_BITS) & (L1_SIZE - 1)];
394#endif /* VBOX */
395}
396
397static inline PageDesc *page_find_alloc(target_ulong index)
398{
399 PageDesc **lp, *p;
400 lp = page_l1_map(index);
401 if (!lp)
402 return NULL;
403
404 p = *lp;
405 if (!p) {
406 /* allocate if not found */
407#if defined(CONFIG_USER_ONLY)
408 unsigned long addr;
409 size_t len = sizeof(PageDesc) * L2_SIZE;
410 /* Don't use qemu_malloc because it may recurse. */
411 p = mmap(0, len, PROT_READ | PROT_WRITE,
412 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
413 *lp = p;
414 addr = h2g(p);
415 if (addr == (target_ulong)addr) {
416 page_set_flags(addr & TARGET_PAGE_MASK,
417 TARGET_PAGE_ALIGN(addr + len),
418 PAGE_RESERVED);
419 }
420#else
421 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
422 *lp = p;
423#endif
424 }
425 return p + (index & (L2_SIZE - 1));
426}
427
428static inline PageDesc *page_find(target_ulong index)
429{
430 PageDesc **lp, *p;
431 lp = page_l1_map(index);
432 if (!lp)
433 return NULL;
434
435 p = *lp;
436 if (!p)
437 return 0;
438 return p + (index & (L2_SIZE - 1));
439}
440
441static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
442{
443 void **lp, **p;
444 PhysPageDesc *pd;
445
446#ifndef VBOX
447 p = (void **)l1_phys_map;
448#if TARGET_PHYS_ADDR_SPACE_BITS > 32
449
450#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
451#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
452#endif
453 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
454 p = *lp;
455 if (!p) {
456 /* allocate if not found */
457 if (!alloc)
458 return NULL;
459 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
460 memset(p, 0, sizeof(void *) * L1_SIZE);
461 *lp = p;
462 }
463#endif
464#else /* VBOX */
465 /* level 0 lookup and lazy allocation of level 1 map. */
466 if (RT_UNLIKELY(index >= (target_phys_addr_t)L2_SIZE * L1_SIZE * L0_SIZE))
467 return NULL;
468 p = l0_phys_map[index >> (L1_BITS + L2_BITS)];
469 if (RT_UNLIKELY(!p)) {
470 if (!alloc)
471 return NULL;
472 p = qemu_vmalloc(sizeof(void **) * L1_SIZE);
473 memset(p, 0, sizeof(void **) * L1_SIZE);
474 l0_phys_map[index >> (L1_BITS + L2_BITS)] = p;
475 }
476
477 /* level 1 lookup and lazy allocation of level 2 map. */
478#endif /* VBOX */
479 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
480 pd = *lp;
481 if (!pd) {
482 int i;
483 /* allocate if not found */
484 if (!alloc)
485 return NULL;
486 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
487 *lp = pd;
488 for (i = 0; i < L2_SIZE; i++)
489 pd[i].phys_offset = IO_MEM_UNASSIGNED;
490 }
491 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
492}
493
494static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
495{
496 return phys_page_find_alloc(index, 0);
497}
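/*
 * Editorial sketch, not part of the upstream file: callers index the physical
 * page table by page number, i.e. guest-physical address >> TARGET_PAGE_BITS,
 * and treat a missing descriptor as unassigned I/O memory. Illustrative only
 * (#if 0), mirroring what e.g. breakpoint_invalidate() does further down.
 */
#if 0
static ram_addr_t example_phys_offset(target_phys_addr_t phys_addr)
{
    PhysPageDesc *p = phys_page_find(phys_addr >> TARGET_PAGE_BITS);
    return p ? p->phys_offset : IO_MEM_UNASSIGNED;
}
#endif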
498
499#if !defined(CONFIG_USER_ONLY)
500static void tlb_protect_code(ram_addr_t ram_addr);
501static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
502 target_ulong vaddr);
503#define mmap_lock() do { } while(0)
504#define mmap_unlock() do { } while(0)
505#endif
506
507#ifdef VBOX /* We don't need such a huge codegen buffer size, as we execute
508 most of the code in raw or hwacc mode. */
509#define DEFAULT_CODE_GEN_BUFFER_SIZE (8 * 1024 * 1024)
510#else /* !VBOX */
511#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
512#endif /* !VBOX */
513
514#if defined(CONFIG_USER_ONLY)
515/* Currently it is not recommended to allocate big chunks of data in
516 user mode. It will change when a dedicated libc is used */
517#define USE_STATIC_CODE_GEN_BUFFER
518#endif
519
520/* VBox allocates codegen buffer dynamically */
521#ifndef VBOX
522#ifdef USE_STATIC_CODE_GEN_BUFFER
523static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
524#endif
525#endif
526
527static void code_gen_alloc(unsigned long tb_size)
528{
529#ifdef USE_STATIC_CODE_GEN_BUFFER
530 code_gen_buffer = static_code_gen_buffer;
531 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
532 map_exec(code_gen_buffer, code_gen_buffer_size);
533#else
534#ifdef VBOX
535 /* We cannot use phys_ram_size here, as it's 0 now;
536 * it only gets initialized once the RAM registration callback
537 * (REMR3NotifyPhysRamRegister()) is called.
538 */
539 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
540#else
541 code_gen_buffer_size = tb_size;
542 if (code_gen_buffer_size == 0) {
543#if defined(CONFIG_USER_ONLY)
544 /* in user mode, phys_ram_size is not meaningful */
545 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
546#else
547 /* XXX: needs adjustments */
548 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
549#endif
550
551 }
552 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
553 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
554#endif /* VBOX */
555
556 /* The code gen buffer location may have constraints depending on
557 the host cpu and OS */
558#ifdef VBOX
559 code_gen_buffer = RTMemExecAlloc(code_gen_buffer_size);
560
561 if (!code_gen_buffer) {
562 LogRel(("REM: failed to allocate codegen buffer %lld\n",
563 code_gen_buffer_size));
564 return;
565 }
566#else //!VBOX
567#if defined(__linux__)
568 {
569 int flags;
570 void *start = NULL;
571
572 flags = MAP_PRIVATE | MAP_ANONYMOUS;
573#if defined(__x86_64__)
574 flags |= MAP_32BIT;
575 /* Cannot map more than that */
576 if (code_gen_buffer_size > (800 * 1024 * 1024))
577 code_gen_buffer_size = (800 * 1024 * 1024);
578#elif defined(__sparc_v9__)
579 // Map the buffer below 2G, so we can use direct calls and branches
580 flags |= MAP_FIXED;
581 start = (void *) 0x60000000UL;
582 if (code_gen_buffer_size > (512 * 1024 * 1024))
583 code_gen_buffer_size = (512 * 1024 * 1024);
584#endif
585 code_gen_buffer = mmap(start, code_gen_buffer_size,
586 PROT_WRITE | PROT_READ | PROT_EXEC,
587 flags, -1, 0);
588 if (code_gen_buffer == MAP_FAILED) {
589 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
590 exit(1);
591 }
592 }
593#elif defined(__FreeBSD__)
594 {
595 int flags;
596 void *addr = NULL;
597 flags = MAP_PRIVATE | MAP_ANONYMOUS;
598#if defined(__x86_64__)
599 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
600 * 0x40000000 is free */
601 flags |= MAP_FIXED;
602 addr = (void *)0x40000000;
603 /* Cannot map more than that */
604 if (code_gen_buffer_size > (800 * 1024 * 1024))
605 code_gen_buffer_size = (800 * 1024 * 1024);
606#endif
607 code_gen_buffer = mmap(addr, code_gen_buffer_size,
608 PROT_WRITE | PROT_READ | PROT_EXEC,
609 flags, -1, 0);
610 if (code_gen_buffer == MAP_FAILED) {
611 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
612 exit(1);
613 }
614 }
615#else
616 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
617 if (!code_gen_buffer) {
618 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
619 exit(1);
620 }
621 map_exec(code_gen_buffer, code_gen_buffer_size);
622#endif
623 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
624#endif /* !VBOX */
625#endif /* !USE_STATIC_CODE_GEN_BUFFER */
626#ifndef VBOX
627 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
628#else
629 map_exec(code_gen_prologue, _1K);
630#endif
631
632 code_gen_buffer_max_size = code_gen_buffer_size -
633 code_gen_max_block_size();
634 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
635 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
636}
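/*
 * Editorial note, not part of the upstream file: code_gen_buffer_max_size is a
 * flush threshold rather than an allocation size. Reserving
 * code_gen_max_block_size() bytes of headroom guarantees that the translation
 * block currently being generated always fits, so tb_alloc() only needs to
 * compare code_gen_ptr against this threshold before handing out a new TB.
 */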
637
638/* Must be called before using the QEMU cpus. 'tb_size' is the size
639 (in bytes) allocated to the translation buffer. Zero means default
640 size. */
641void cpu_exec_init_all(unsigned long tb_size)
642{
643 cpu_gen_init();
644 code_gen_alloc(tb_size);
645 code_gen_ptr = code_gen_buffer;
646 page_init();
647#if !defined(CONFIG_USER_ONLY)
648 io_mem_init();
649#endif
650}
651
652#ifndef VBOX
653#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
654
655#define CPU_COMMON_SAVE_VERSION 1
656
657static void cpu_common_save(QEMUFile *f, void *opaque)
658{
659 CPUState *env = opaque;
660
661 qemu_put_be32s(f, &env->halted);
662 qemu_put_be32s(f, &env->interrupt_request);
663}
664
665static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
666{
667 CPUState *env = opaque;
668
669 if (version_id != CPU_COMMON_SAVE_VERSION)
670 return -EINVAL;
671
672 qemu_get_be32s(f, &env->halted);
673 qemu_get_be32s(f, &env->interrupt_request);
674 tlb_flush(env, 1);
675
676 return 0;
677}
678#endif
679#endif //!VBOX
680
681void cpu_exec_init(CPUState *env)
682{
683 CPUState **penv;
684 int cpu_index;
685
686 env->next_cpu = NULL;
687 penv = &first_cpu;
688 cpu_index = 0;
689 while (*penv != NULL) {
690 penv = (CPUState **)&(*penv)->next_cpu;
691 cpu_index++;
692 }
693 env->cpu_index = cpu_index;
694 env->nb_watchpoints = 0;
695 *penv = env;
696#ifndef VBOX
697#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
698 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
699 cpu_common_save, cpu_common_load, env);
700 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
701 cpu_save, cpu_load, env);
702#endif
703#endif // !VBOX
704}
705
706static inline void invalidate_page_bitmap(PageDesc *p)
707{
708 if (p->code_bitmap) {
709 qemu_free(p->code_bitmap);
710 p->code_bitmap = NULL;
711 }
712 p->code_write_count = 0;
713}
714
715/* set to NULL all the 'first_tb' fields in all PageDescs */
716static void page_flush_tb(void)
717{
718 int i, j;
719 PageDesc *p;
720#ifdef VBOX
721 int k;
722#endif
723
724#ifdef VBOX
725 k = l0_map_max_used;
726 while (k-- > 0) {
727 PageDesc **l1_map = l0_map[k];
728 if (l1_map) {
729#endif
730 for(i = 0; i < L1_SIZE; i++) {
731 p = l1_map[i];
732 if (p) {
733 for(j = 0; j < L2_SIZE; j++) {
734 p->first_tb = NULL;
735 invalidate_page_bitmap(p);
736 p++;
737 }
738 }
739 }
740#ifdef VBOX
741 }
742 }
743#endif
744}
745
746/* flush all the translation blocks */
747/* XXX: tb_flush is currently not thread safe */
748void tb_flush(CPUState *env1)
749{
750 CPUState *env;
751#ifdef VBOX
752 STAM_PROFILE_START(&env1->StatTbFlush, a);
753#endif
754#if defined(DEBUG_FLUSH)
755 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
756 (unsigned long)(code_gen_ptr - code_gen_buffer),
757 nb_tbs, nb_tbs > 0 ?
758 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
759#endif
760 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
761 cpu_abort(env1, "Internal error: code buffer overflow\n");
762
763 nb_tbs = 0;
764
765 for(env = first_cpu; env != NULL; env = env->next_cpu) {
766 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
767 }
768
769 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
770 page_flush_tb();
771
772 code_gen_ptr = code_gen_buffer;
773 /* XXX: flush processor icache at this point if cache flush is
774 expensive */
775 tb_flush_count++;
776#ifdef VBOX
777 STAM_PROFILE_STOP(&env1->StatTbFlush, a);
778#endif
779}
780
781#ifdef DEBUG_TB_CHECK
782static void tb_invalidate_check(target_ulong address)
783{
784 TranslationBlock *tb;
785 int i;
786 address &= TARGET_PAGE_MASK;
787 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
788 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
789 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
790 address >= tb->pc + tb->size)) {
791 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
792 address, (long)tb->pc, tb->size);
793 }
794 }
795 }
796}
797
798/* verify that all the pages have correct rights for code */
799static void tb_page_check(void)
800{
801 TranslationBlock *tb;
802 int i, flags1, flags2;
803
804 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
805 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
806 flags1 = page_get_flags(tb->pc);
807 flags2 = page_get_flags(tb->pc + tb->size - 1);
808 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
809 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
810 (long)tb->pc, tb->size, flags1, flags2);
811 }
812 }
813 }
814}
815
816static void tb_jmp_check(TranslationBlock *tb)
817{
818 TranslationBlock *tb1;
819 unsigned int n1;
820
821 /* suppress any remaining jumps to this TB */
822 tb1 = tb->jmp_first;
823 for(;;) {
824 n1 = (long)tb1 & 3;
825 tb1 = (TranslationBlock *)((long)tb1 & ~3);
826 if (n1 == 2)
827 break;
828 tb1 = tb1->jmp_next[n1];
829 }
830 /* check end of list */
831 if (tb1 != tb) {
832 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
833 }
834}
835
836#endif
837
838/* invalidate one TB */
839static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
840 int next_offset)
841{
842 TranslationBlock *tb1;
843 for(;;) {
844 tb1 = *ptb;
845 if (tb1 == tb) {
846 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
847 break;
848 }
849 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
850 }
851}
852
853static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
854{
855 TranslationBlock *tb1;
856 unsigned int n1;
857
858 for(;;) {
859 tb1 = *ptb;
860 n1 = (long)tb1 & 3;
861 tb1 = (TranslationBlock *)((long)tb1 & ~3);
862 if (tb1 == tb) {
863 *ptb = tb1->page_next[n1];
864 break;
865 }
866 ptb = &tb1->page_next[n1];
867 }
868}
869
870static inline void tb_jmp_remove(TranslationBlock *tb, int n)
871{
872 TranslationBlock *tb1, **ptb;
873 unsigned int n1;
874
875 ptb = &tb->jmp_next[n];
876 tb1 = *ptb;
877 if (tb1) {
878 /* find tb(n) in circular list */
879 for(;;) {
880 tb1 = *ptb;
881 n1 = (long)tb1 & 3;
882 tb1 = (TranslationBlock *)((long)tb1 & ~3);
883 if (n1 == n && tb1 == tb)
884 break;
885 if (n1 == 2) {
886 ptb = &tb1->jmp_first;
887 } else {
888 ptb = &tb1->jmp_next[n1];
889 }
890 }
891 /* now we can suppress tb(n) from the list */
892 *ptb = tb->jmp_next[n];
893
894 tb->jmp_next[n] = NULL;
895 }
896}
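/*
 * Editorial note, not part of the upstream file: the jump lists walked above
 * store tagged pointers. The two low bits of a TranslationBlock pointer held
 * in jmp_first/jmp_next encode which outgoing jump slot (0 or 1) of that TB
 * links back here, and the value 2 marks the list head, stored as
 * (TranslationBlock *)((long)tb | 2). Hence the recurring idiom:
 *
 *     n1  = (long)tb1 & 3;                           slot number, or 2 = head
 *     tb1 = (TranslationBlock *)((long)tb1 & ~3);    the untagged pointer
 */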
897
898/* reset the jump entry 'n' of a TB so that it is not chained to
899 another TB */
900static inline void tb_reset_jump(TranslationBlock *tb, int n)
901{
902 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
903}
904
905void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
906{
907 CPUState *env;
908 PageDesc *p;
909 unsigned int h, n1;
910 target_phys_addr_t phys_pc;
911 TranslationBlock *tb1, *tb2;
912
913 /* remove the TB from the hash list */
914 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
915 h = tb_phys_hash_func(phys_pc);
916 tb_remove(&tb_phys_hash[h], tb,
917 offsetof(TranslationBlock, phys_hash_next));
918
919 /* remove the TB from the page list */
920 if (tb->page_addr[0] != page_addr) {
921 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
922 tb_page_remove(&p->first_tb, tb);
923 invalidate_page_bitmap(p);
924 }
925 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
926 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
927 tb_page_remove(&p->first_tb, tb);
928 invalidate_page_bitmap(p);
929 }
930
931 tb_invalidated_flag = 1;
932
933 /* remove the TB from the hash list */
934 h = tb_jmp_cache_hash_func(tb->pc);
935 for(env = first_cpu; env != NULL; env = env->next_cpu) {
936 if (env->tb_jmp_cache[h] == tb)
937 env->tb_jmp_cache[h] = NULL;
938 }
939
940 /* suppress this TB from the two jump lists */
941 tb_jmp_remove(tb, 0);
942 tb_jmp_remove(tb, 1);
943
944 /* suppress any remaining jumps to this TB */
945 tb1 = tb->jmp_first;
946 for(;;) {
947 n1 = (long)tb1 & 3;
948 if (n1 == 2)
949 break;
950 tb1 = (TranslationBlock *)((long)tb1 & ~3);
951 tb2 = tb1->jmp_next[n1];
952 tb_reset_jump(tb1, n1);
953 tb1->jmp_next[n1] = NULL;
954 tb1 = tb2;
955 }
956 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
957
958 tb_phys_invalidate_count++;
959}
960
961
962#ifdef VBOX
963
964void tb_invalidate_virt(CPUState *env, uint32_t eip)
965{
966# if 1
967 tb_flush(env);
968# else
969 uint8_t *cs_base, *pc;
970 unsigned int flags, h, phys_pc;
971 TranslationBlock *tb, **ptb;
972
973 flags = env->hflags;
974 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
975 cs_base = env->segs[R_CS].base;
976 pc = cs_base + eip;
977
978 tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base,
979 flags);
980
981 if(tb)
982 {
983# ifdef DEBUG
984 printf("invalidating TB (%08X) at %08X\n", tb, eip);
985# endif
986 tb_invalidate(tb);
987 //Note: this will leak TBs, but the whole cache will be flushed
988 // when it happens too often
989 tb->pc = 0;
990 tb->cs_base = 0;
991 tb->flags = 0;
992 }
993# endif
994}
995
996# ifdef VBOX_STRICT
997/**
998 * Gets the page offset.
999 */
1000unsigned long get_phys_page_offset(target_ulong addr)
1001{
1002 PhysPageDesc *p = phys_page_find(addr >> TARGET_PAGE_BITS);
1003 return p ? p->phys_offset : 0;
1004}
1005# endif /* VBOX_STRICT */
1006
1007#endif /* VBOX */
1008
1009static inline void set_bits(uint8_t *tab, int start, int len)
1010{
1011 int end, mask, end1;
1012
1013 end = start + len;
1014 tab += start >> 3;
1015 mask = 0xff << (start & 7);
1016 if ((start & ~7) == (end & ~7)) {
1017 if (start < end) {
1018 mask &= ~(0xff << (end & 7));
1019 *tab |= mask;
1020 }
1021 } else {
1022 *tab++ |= mask;
1023 start = (start + 8) & ~7;
1024 end1 = end & ~7;
1025 while (start < end1) {
1026 *tab++ = 0xff;
1027 start += 8;
1028 }
1029 if (start < end) {
1030 mask = ~(0xff << (end & 7));
1031 *tab |= mask;
1032 }
1033 }
1034}
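/*
 * Editorial example, not part of the upstream file: set_bits() marks the bit
 * range [start, start + len) in a byte-array bitmap. For instance
 * set_bits(tab, 5, 7) sets bits 5..11: 0xE0 is OR'ed into tab[0] (bits 5..7)
 * and 0x0F into tab[1] (bits 8..11). build_page_bitmap() below uses this to
 * record which bytes of a guest page are covered by translated code.
 */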
1035
1036static void build_page_bitmap(PageDesc *p)
1037{
1038 int n, tb_start, tb_end;
1039 TranslationBlock *tb;
1040
1041 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
1042 if (!p->code_bitmap)
1043 return;
1044
1045 tb = p->first_tb;
1046 while (tb != NULL) {
1047 n = (long)tb & 3;
1048 tb = (TranslationBlock *)((long)tb & ~3);
1049 /* NOTE: this is subtle as a TB may span two physical pages */
1050 if (n == 0) {
1051 /* NOTE: tb_end may be after the end of the page, but
1052 it is not a problem */
1053 tb_start = tb->pc & ~TARGET_PAGE_MASK;
1054 tb_end = tb_start + tb->size;
1055 if (tb_end > TARGET_PAGE_SIZE)
1056 tb_end = TARGET_PAGE_SIZE;
1057 } else {
1058 tb_start = 0;
1059 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1060 }
1061 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
1062 tb = tb->page_next[n];
1063 }
1064}
1065
1066TranslationBlock *tb_gen_code(CPUState *env,
1067 target_ulong pc, target_ulong cs_base,
1068 int flags, int cflags)
1069{
1070 TranslationBlock *tb;
1071 uint8_t *tc_ptr;
1072 target_ulong phys_pc, phys_page2, virt_page2;
1073 int code_gen_size;
1074
1075 phys_pc = get_phys_addr_code(env, pc);
1076 tb = tb_alloc(pc);
1077 if (!tb) {
1078 /* flush must be done */
1079 tb_flush(env);
1080 /* cannot fail at this point */
1081 tb = tb_alloc(pc);
1082 /* Don't forget to invalidate previous TB info. */
1083 tb_invalidated_flag = 1;
1084 }
1085 tc_ptr = code_gen_ptr;
1086 tb->tc_ptr = tc_ptr;
1087 tb->cs_base = cs_base;
1088 tb->flags = flags;
1089 tb->cflags = cflags;
1090 cpu_gen_code(env, tb, &code_gen_size);
1091 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
1092
1093 /* check next page if needed */
1094 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1095 phys_page2 = -1;
1096 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1097 phys_page2 = get_phys_addr_code(env, virt_page2);
1098 }
1099 tb_link_phys(tb, phys_pc, phys_page2);
1100 return tb;
1101}
1102
1103/* invalidate all TBs which intersect with the target physical page
1104 starting in range [start;end[. NOTE: start and end must refer to
1105 the same physical page. 'is_cpu_write_access' should be true if called
1106 from a real cpu write access: the virtual CPU will exit the current
1107 TB if code is modified inside this TB. */
1108void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
1109 int is_cpu_write_access)
1110{
1111 int n, current_tb_modified, current_tb_not_found, current_flags;
1112 CPUState *env = cpu_single_env;
1113 PageDesc *p;
1114 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
1115 target_ulong tb_start, tb_end;
1116 target_ulong current_pc, current_cs_base;
1117
1118 p = page_find(start >> TARGET_PAGE_BITS);
1119 if (!p)
1120 return;
1121 if (!p->code_bitmap &&
1122 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1123 is_cpu_write_access) {
1124 /* build code bitmap */
1125 build_page_bitmap(p);
1126 }
1127
1128 /* we remove all the TBs in the range [start, end[ */
1129 /* XXX: see if in some cases it could be faster to invalidate all the code */
1130 current_tb_not_found = is_cpu_write_access;
1131 current_tb_modified = 0;
1132 current_tb = NULL; /* avoid warning */
1133 current_pc = 0; /* avoid warning */
1134 current_cs_base = 0; /* avoid warning */
1135 current_flags = 0; /* avoid warning */
1136 tb = p->first_tb;
1137 while (tb != NULL) {
1138 n = (long)tb & 3;
1139 tb = (TranslationBlock *)((long)tb & ~3);
1140 tb_next = tb->page_next[n];
1141 /* NOTE: this is subtle as a TB may span two physical pages */
1142 if (n == 0) {
1143 /* NOTE: tb_end may be after the end of the page, but
1144 it is not a problem */
1145 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1146 tb_end = tb_start + tb->size;
1147 } else {
1148 tb_start = tb->page_addr[1];
1149 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1150 }
1151 if (!(tb_end <= start || tb_start >= end)) {
1152#ifdef TARGET_HAS_PRECISE_SMC
1153 if (current_tb_not_found) {
1154 current_tb_not_found = 0;
1155 current_tb = NULL;
1156 if (env->mem_io_pc) {
1157 /* now we have a real cpu fault */
1158 current_tb = tb_find_pc(env->mem_io_pc);
1159 }
1160 }
1161 if (current_tb == tb &&
1162 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1163 /* If we are modifying the current TB, we must stop
1164 its execution. We could be more precise by checking
1165 that the modification is after the current PC, but it
1166 would require a specialized function to partially
1167 restore the CPU state */
1168
1169 current_tb_modified = 1;
1170 cpu_restore_state(current_tb, env,
1171 env->mem_io_pc, NULL);
1172#if defined(TARGET_I386)
1173 current_flags = env->hflags;
1174 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1175 current_cs_base = (target_ulong)env->segs[R_CS].base;
1176 current_pc = current_cs_base + env->eip;
1177#else
1178#error unsupported CPU
1179#endif
1180 }
1181#endif /* TARGET_HAS_PRECISE_SMC */
1182 /* we need to do that to handle the case where a signal
1183 occurs while doing tb_phys_invalidate() */
1184 saved_tb = NULL;
1185 if (env) {
1186 saved_tb = env->current_tb;
1187 env->current_tb = NULL;
1188 }
1189 tb_phys_invalidate(tb, -1);
1190 if (env) {
1191 env->current_tb = saved_tb;
1192 if (env->interrupt_request && env->current_tb)
1193 cpu_interrupt(env, env->interrupt_request);
1194 }
1195 }
1196 tb = tb_next;
1197 }
1198#if !defined(CONFIG_USER_ONLY)
1199 /* if no code remaining, no need to continue to use slow writes */
1200 if (!p->first_tb) {
1201 invalidate_page_bitmap(p);
1202 if (is_cpu_write_access) {
1203 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1204 }
1205 }
1206#endif
1207#ifdef TARGET_HAS_PRECISE_SMC
1208 if (current_tb_modified) {
1209 /* we generate a block containing just the instruction
1210 modifying the memory. It will ensure that it cannot modify
1211 itself */
1212 env->current_tb = NULL;
1213 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1214 cpu_resume_from_signal(env, NULL);
1215 }
1216#endif
1217}
1218
1219
1220/* len must be <= 8 and start must be a multiple of len */
1221static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1222{
1223 PageDesc *p;
1224 int offset, b;
1225#if 0
1226 if (1) {
1227 if (loglevel) {
1228 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1229 cpu_single_env->mem_io_vaddr, len,
1230 cpu_single_env->eip,
1231 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1232 }
1233 }
1234#endif
1235 p = page_find(start >> TARGET_PAGE_BITS);
1236 if (!p)
1237 return;
1238 if (p->code_bitmap) {
1239 offset = start & ~TARGET_PAGE_MASK;
1240 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1241 if (b & ((1 << len) - 1))
1242 goto do_invalidate;
1243 } else {
1244 do_invalidate:
1245 tb_invalidate_phys_page_range(start, start + len, 1);
1246 }
1247}
1248
1249
1250#if !defined(CONFIG_SOFTMMU)
1251static void tb_invalidate_phys_page(target_phys_addr_t addr,
1252 unsigned long pc, void *puc)
1253{
1254 int n, current_flags, current_tb_modified;
1255 target_ulong current_pc, current_cs_base;
1256 PageDesc *p;
1257 TranslationBlock *tb, *current_tb;
1258#ifdef TARGET_HAS_PRECISE_SMC
1259 CPUState *env = cpu_single_env;
1260#endif
1261
1262 addr &= TARGET_PAGE_MASK;
1263 p = page_find(addr >> TARGET_PAGE_BITS);
1264 if (!p)
1265 return;
1266 tb = p->first_tb;
1267 current_tb_modified = 0;
1268 current_tb = NULL;
1269 current_pc = 0; /* avoid warning */
1270 current_cs_base = 0; /* avoid warning */
1271 current_flags = 0; /* avoid warning */
1272#ifdef TARGET_HAS_PRECISE_SMC
1273 if (tb && pc != 0) {
1274 current_tb = tb_find_pc(pc);
1275 }
1276#endif
1277 while (tb != NULL) {
1278 n = (long)tb & 3;
1279 tb = (TranslationBlock *)((long)tb & ~3);
1280#ifdef TARGET_HAS_PRECISE_SMC
1281 if (current_tb == tb &&
1282 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1283 /* If we are modifying the current TB, we must stop
1284 its execution. We could be more precise by checking
1285 that the modification is after the current PC, but it
1286 would require a specialized function to partially
1287 restore the CPU state */
1288
1289 current_tb_modified = 1;
1290 cpu_restore_state(current_tb, env, pc, puc);
1291#if defined(TARGET_I386)
1292 current_flags = env->hflags;
1293 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1294 current_cs_base = (target_ulong)env->segs[R_CS].base;
1295 current_pc = current_cs_base + env->eip;
1296#else
1297#error unsupported CPU
1298#endif
1299 }
1300#endif /* TARGET_HAS_PRECISE_SMC */
1301 tb_phys_invalidate(tb, addr);
1302 tb = tb->page_next[n];
1303 }
1304 p->first_tb = NULL;
1305#ifdef TARGET_HAS_PRECISE_SMC
1306 if (current_tb_modified) {
1307 /* we generate a block containing just the instruction
1308 modifying the memory. It will ensure that it cannot modify
1309 itself */
1310 env->current_tb = NULL;
1311 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1312 cpu_resume_from_signal(env, puc);
1313 }
1314#endif
1315}
1316#endif
1317
1318/* add the tb in the target page and protect it if necessary */
1319static inline void tb_alloc_page(TranslationBlock *tb,
1320 unsigned int n, target_ulong page_addr)
1321{
1322 PageDesc *p;
1323 TranslationBlock *last_first_tb;
1324
1325 tb->page_addr[n] = page_addr;
1326 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1327 tb->page_next[n] = p->first_tb;
1328 last_first_tb = p->first_tb;
1329 p->first_tb = (TranslationBlock *)((long)tb | n);
1330 invalidate_page_bitmap(p);
1331
1332#if defined(TARGET_HAS_SMC) || 1
1333
1334#if defined(CONFIG_USER_ONLY)
1335 if (p->flags & PAGE_WRITE) {
1336 target_ulong addr;
1337 PageDesc *p2;
1338 int prot;
1339
1340 /* force the host page as non writable (writes will have a
1341 page fault + mprotect overhead) */
1342 page_addr &= qemu_host_page_mask;
1343 prot = 0;
1344 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1345 addr += TARGET_PAGE_SIZE) {
1346
1347 p2 = page_find (addr >> TARGET_PAGE_BITS);
1348 if (!p2)
1349 continue;
1350 prot |= p2->flags;
1351 p2->flags &= ~PAGE_WRITE;
1352 page_get_flags(addr);
1353 }
1354 mprotect(g2h(page_addr), qemu_host_page_size,
1355 (prot & PAGE_BITS) & ~PAGE_WRITE);
1356#ifdef DEBUG_TB_INVALIDATE
1357 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1358 page_addr);
1359#endif
1360 }
1361#else
1362 /* if some code is already present, then the pages are already
1363 protected. So we handle the case where only the first TB is
1364 allocated in a physical page */
1365 if (!last_first_tb) {
1366 tlb_protect_code(page_addr);
1367 }
1368#endif
1369
1370#endif /* TARGET_HAS_SMC */
1371}
1372
1373/* Allocate a new translation block. Flush the translation buffer if
1374 too many translation blocks or too much generated code. */
1375TranslationBlock *tb_alloc(target_ulong pc)
1376{
1377 TranslationBlock *tb;
1378
1379 if (nb_tbs >= code_gen_max_blocks ||
1380#ifndef VBOX
1381 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1382#else
1383 (code_gen_ptr - code_gen_buffer) >= (int)code_gen_buffer_max_size)
1384#endif
1385 return NULL;
1386 tb = &tbs[nb_tbs++];
1387 tb->pc = pc;
1388 tb->cflags = 0;
1389 return tb;
1390}
1391
1392void tb_free(TranslationBlock *tb)
1393{
1394 /* In practice this is mostly used for a single-use temporary TB.
1395 Ignore the hard cases and just back up if this TB happens to
1396 be the last one generated. */
1397 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1398 code_gen_ptr = tb->tc_ptr;
1399 nb_tbs--;
1400 }
1401}
1402
1403/* add a new TB and link it to the physical page tables. phys_page2 is
1404 (-1) to indicate that only one page contains the TB. */
1405void tb_link_phys(TranslationBlock *tb,
1406 target_ulong phys_pc, target_ulong phys_page2)
1407{
1408 unsigned int h;
1409 TranslationBlock **ptb;
1410
1411 /* Grab the mmap lock to stop another thread invalidating this TB
1412 before we are done. */
1413 mmap_lock();
1414 /* add in the physical hash table */
1415 h = tb_phys_hash_func(phys_pc);
1416 ptb = &tb_phys_hash[h];
1417 tb->phys_hash_next = *ptb;
1418 *ptb = tb;
1419
1420 /* add in the page list */
1421 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1422 if (phys_page2 != -1)
1423 tb_alloc_page(tb, 1, phys_page2);
1424 else
1425 tb->page_addr[1] = -1;
1426
1427 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1428 tb->jmp_next[0] = NULL;
1429 tb->jmp_next[1] = NULL;
1430
1431 /* init original jump addresses */
1432 if (tb->tb_next_offset[0] != 0xffff)
1433 tb_reset_jump(tb, 0);
1434 if (tb->tb_next_offset[1] != 0xffff)
1435 tb_reset_jump(tb, 1);
1436
1437#ifdef DEBUG_TB_CHECK
1438 tb_page_check();
1439#endif
1440 mmap_unlock();
1441}
1442
1443/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1444 tb[1].tc_ptr. Return NULL if not found */
1445TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1446{
1447 int m_min, m_max, m;
1448 unsigned long v;
1449 TranslationBlock *tb;
1450
1451 if (nb_tbs <= 0)
1452 return NULL;
1453 if (tc_ptr < (unsigned long)code_gen_buffer ||
1454 tc_ptr >= (unsigned long)code_gen_ptr)
1455 return NULL;
1456 /* binary search (cf Knuth) */
1457 m_min = 0;
1458 m_max = nb_tbs - 1;
1459 while (m_min <= m_max) {
1460 m = (m_min + m_max) >> 1;
1461 tb = &tbs[m];
1462 v = (unsigned long)tb->tc_ptr;
1463 if (v == tc_ptr)
1464 return tb;
1465 else if (tc_ptr < v) {
1466 m_max = m - 1;
1467 } else {
1468 m_min = m + 1;
1469 }
1470 }
1471 return &tbs[m_max];
1472}
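/*
 * Editorial note, not part of the upstream file: tbs[] is filled in allocation
 * order and code_gen_ptr only ever grows between flushes, so the array is
 * sorted by tc_ptr and a plain binary search suffices. When no block starts
 * exactly at tc_ptr, the final &tbs[m_max] is the block whose generated code
 * contains tc_ptr.
 */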
1473
1474static void tb_reset_jump_recursive(TranslationBlock *tb);
1475
1476static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1477{
1478 TranslationBlock *tb1, *tb_next, **ptb;
1479 unsigned int n1;
1480
1481 tb1 = tb->jmp_next[n];
1482 if (tb1 != NULL) {
1483 /* find head of list */
1484 for(;;) {
1485 n1 = (long)tb1 & 3;
1486 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1487 if (n1 == 2)
1488 break;
1489 tb1 = tb1->jmp_next[n1];
1490 }
1491 /* we are now sure that tb jumps to tb1 */
1492 tb_next = tb1;
1493
1494 /* remove tb from the jmp_first list */
1495 ptb = &tb_next->jmp_first;
1496 for(;;) {
1497 tb1 = *ptb;
1498 n1 = (long)tb1 & 3;
1499 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1500 if (n1 == n && tb1 == tb)
1501 break;
1502 ptb = &tb1->jmp_next[n1];
1503 }
1504 *ptb = tb->jmp_next[n];
1505 tb->jmp_next[n] = NULL;
1506
1507 /* suppress the jump to next tb in generated code */
1508 tb_reset_jump(tb, n);
1509
1510 /* suppress jumps in the tb on which we could have jumped */
1511 tb_reset_jump_recursive(tb_next);
1512 }
1513}
1514
1515static void tb_reset_jump_recursive(TranslationBlock *tb)
1516{
1517 tb_reset_jump_recursive2(tb, 0);
1518 tb_reset_jump_recursive2(tb, 1);
1519}
1520
1521#if defined(TARGET_HAS_ICE)
1522static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1523{
1524 target_phys_addr_t addr;
1525 target_ulong pd;
1526 ram_addr_t ram_addr;
1527 PhysPageDesc *p;
1528
1529 addr = cpu_get_phys_page_debug(env, pc);
1530 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1531 if (!p) {
1532 pd = IO_MEM_UNASSIGNED;
1533 } else {
1534 pd = p->phys_offset;
1535 }
1536 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1537 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1538}
1539#endif
1540
1541/* Add a watchpoint. */
1542int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
1543{
1544 int i;
1545
1546 for (i = 0; i < env->nb_watchpoints; i++) {
1547 if (addr == env->watchpoint[i].vaddr)
1548 return 0;
1549 }
1550 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1551 return -1;
1552
1553 i = env->nb_watchpoints++;
1554 env->watchpoint[i].vaddr = addr;
1555 env->watchpoint[i].type = type;
1556 tlb_flush_page(env, addr);
1557 /* FIXME: This flush is needed because of the hack to make memory ops
1558 terminate the TB. It can be removed once the proper IO trap and
1559 re-execute bits are in. */
1560 tb_flush(env);
1561 return i;
1562}
1563
1564/* Remove a watchpoint. */
1565int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1566{
1567 int i;
1568
1569 for (i = 0; i < env->nb_watchpoints; i++) {
1570 if (addr == env->watchpoint[i].vaddr) {
1571 env->nb_watchpoints--;
1572 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1573 tlb_flush_page(env, addr);
1574 return 0;
1575 }
1576 }
1577 return -1;
1578}
1579
1580/* Remove all watchpoints. */
1581void cpu_watchpoint_remove_all(CPUState *env) {
1582 int i;
1583
1584 for (i = 0; i < env->nb_watchpoints; i++) {
1585 tlb_flush_page(env, env->watchpoint[i].vaddr);
1586 }
1587 env->nb_watchpoints = 0;
1588}
1589
1590/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1591 breakpoint is reached */
1592int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1593{
1594#if defined(TARGET_HAS_ICE)
1595 int i;
1596
1597 for(i = 0; i < env->nb_breakpoints; i++) {
1598 if (env->breakpoints[i] == pc)
1599 return 0;
1600 }
1601
1602 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1603 return -1;
1604 env->breakpoints[env->nb_breakpoints++] = pc;
1605
1606 breakpoint_invalidate(env, pc);
1607 return 0;
1608#else
1609 return -1;
1610#endif
1611}
1612
1613/* remove all breakpoints */
1614void cpu_breakpoint_remove_all(CPUState *env) {
1615#if defined(TARGET_HAS_ICE)
1616 int i;
1617 for(i = 0; i < env->nb_breakpoints; i++) {
1618 breakpoint_invalidate(env, env->breakpoints[i]);
1619 }
1620 env->nb_breakpoints = 0;
1621#endif
1622}
1623
1624/* remove a breakpoint */
1625int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1626{
1627#if defined(TARGET_HAS_ICE)
1628 int i;
1629 for(i = 0; i < env->nb_breakpoints; i++) {
1630 if (env->breakpoints[i] == pc)
1631 goto found;
1632 }
1633 return -1;
1634 found:
1635 env->nb_breakpoints--;
1636 if (i < env->nb_breakpoints)
1637 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1638
1639 breakpoint_invalidate(env, pc);
1640 return 0;
1641#else
1642 return -1;
1643#endif
1644}
1645
1646/* enable or disable single step mode. EXCP_DEBUG is returned by the
1647 CPU loop after each instruction */
1648void cpu_single_step(CPUState *env, int enabled)
1649{
1650#if defined(TARGET_HAS_ICE)
1651 if (env->singlestep_enabled != enabled) {
1652 env->singlestep_enabled = enabled;
1653 /* must flush all the translated code to avoid inconsistencies */
1654 /* XXX: only flush what is necessary */
1655 tb_flush(env);
1656 }
1657#endif
1658}
1659
1660#ifndef VBOX
1661/* enable or disable low levels log */
1662void cpu_set_log(int log_flags)
1663{
1664 loglevel = log_flags;
1665 if (loglevel && !logfile) {
1666 logfile = fopen(logfilename, log_append ? "a" : "w");
1667 if (!logfile) {
1668 perror(logfilename);
1669 _exit(1);
1670 }
1671#if !defined(CONFIG_SOFTMMU)
1672 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1673 {
1674 static char logfile_buf[4096];
1675 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1676 }
1677#else
1678 setvbuf(logfile, NULL, _IOLBF, 0);
1679#endif
1680 log_append = 1;
1681 }
1682 if (!loglevel && logfile) {
1683 fclose(logfile);
1684 logfile = NULL;
1685 }
1686}
1687
1688void cpu_set_log_filename(const char *filename)
1689{
1690 logfilename = strdup(filename);
1691 if (logfile) {
1692 fclose(logfile);
1693 logfile = NULL;
1694 }
1695 cpu_set_log(loglevel);
1696}
1697#endif /* !VBOX */
1698
1699/* mask must never be zero, except for A20 change call */
1700void cpu_interrupt(CPUState *env, int mask)
1701{
1702#if !defined(USE_NPTL)
1703 TranslationBlock *tb;
1704 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1705#endif
1706 int old_mask;
1707
1708 old_mask = env->interrupt_request;
1709#ifdef VBOX
1710 VM_ASSERT_EMT(env->pVM);
1711 ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, mask);
1712#else /* !VBOX */
1713 /* FIXME: This is probably not threadsafe. A different thread could
1714 be in the middle of a read-modify-write operation. */
1715 env->interrupt_request |= mask;
1716#endif /* !VBOX */
1717#if defined(USE_NPTL)
1718 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1719 problem and hope the cpu will stop of its own accord. For userspace
1720 emulation this often isn't actually as bad as it sounds. Often
1721 signals are used primarily to interrupt blocking syscalls. */
1722#else
1723 if (use_icount) {
1724 env->icount_decr.u16.high = 0xffff;
1725#ifndef CONFIG_USER_ONLY
1726 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1727 an async event happened and we need to process it. */
1728 if (!can_do_io(env)
1729 && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1730 cpu_abort(env, "Raised interrupt while not in I/O function");
1731 }
1732#endif
1733 } else {
1734 tb = env->current_tb;
1735 /* if the cpu is currently executing code, we must unlink it and
1736 all the potentially executing TB */
1737 if (tb && !testandset(&interrupt_lock)) {
1738 env->current_tb = NULL;
1739 tb_reset_jump_recursive(tb);
1740 resetlock(&interrupt_lock);
1741 }
1742 }
1743#endif
1744}
1745
1746void cpu_reset_interrupt(CPUState *env, int mask)
1747{
1748#ifdef VBOX
1749 /*
1750 * Note: the current implementation can be executed by another thread without problems; make sure this remains true
1751 * for future changes!
1752 */
1753 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~mask);
1754#else /* !VBOX */
1755 env->interrupt_request &= ~mask;
1756#endif /* !VBOX */
1757}
1758
1759#ifndef VBOX
1760CPULogItem cpu_log_items[] = {
1761 { CPU_LOG_TB_OUT_ASM, "out_asm",
1762 "show generated host assembly code for each compiled TB" },
1763 { CPU_LOG_TB_IN_ASM, "in_asm",
1764 "show target assembly code for each compiled TB" },
1765 { CPU_LOG_TB_OP, "op",
1766 "show micro ops for each compiled TB" },
1767 { CPU_LOG_TB_OP_OPT, "op_opt",
1768 "show micro ops "
1769#ifdef TARGET_I386
1770 "before eflags optimization and "
1771#endif
1772 "after liveness analysis" },
1773 { CPU_LOG_INT, "int",
1774 "show interrupts/exceptions in short format" },
1775 { CPU_LOG_EXEC, "exec",
1776 "show trace before each executed TB (lots of logs)" },
1777 { CPU_LOG_TB_CPU, "cpu",
1778 "show CPU state before block translation" },
1779#ifdef TARGET_I386
1780 { CPU_LOG_PCALL, "pcall",
1781 "show protected mode far calls/returns/exceptions" },
1782#endif
1783#ifdef DEBUG_IOPORT
1784 { CPU_LOG_IOPORT, "ioport",
1785 "show all i/o ports accesses" },
1786#endif
1787 { 0, NULL, NULL },
1788};
1789
1790static int cmp1(const char *s1, int n, const char *s2)
1791{
1792 if (strlen(s2) != n)
1793 return 0;
1794 return memcmp(s1, s2, n) == 0;
1795}
1796
1797/* takes a comma separated list of log masks. Return 0 if error. */
1798int cpu_str_to_log_mask(const char *str)
1799{
1800 const CPULogItem *item;
1801 int mask;
1802 const char *p, *p1;
1803
1804 p = str;
1805 mask = 0;
1806 for(;;) {
1807 p1 = strchr(p, ',');
1808 if (!p1)
1809 p1 = p + strlen(p);
1810 if(cmp1(p,p1-p,"all")) {
1811 for(item = cpu_log_items; item->mask != 0; item++) {
1812 mask |= item->mask;
1813 }
1814 } else {
1815 for(item = cpu_log_items; item->mask != 0; item++) {
1816 if (cmp1(p, p1 - p, item->name))
1817 goto found;
1818 }
1819 return 0;
1820 }
1821 found:
1822 mask |= item->mask;
1823 if (*p1 != ',')
1824 break;
1825 p = p1 + 1;
1826 }
1827 return mask;
1828}
1829#endif /* !VBOX */
1830
1831#ifndef VBOX /* VBOX: we have our own routine. */
1832void cpu_abort(CPUState *env, const char *fmt, ...)
1833{
1834 va_list ap;
1835 va_list ap2;
1836
1837 va_start(ap, fmt);
1838 va_copy(ap2, ap);
1839 fprintf(stderr, "qemu: fatal: ");
1840 vfprintf(stderr, fmt, ap);
1841 fprintf(stderr, "\n");
1842#ifdef TARGET_I386
1843 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1844#else
1845 cpu_dump_state(env, stderr, fprintf, 0);
1846#endif
1847 if (logfile) {
1848 fprintf(logfile, "qemu: fatal: ");
1849 vfprintf(logfile, fmt, ap2);
1850 fprintf(logfile, "\n");
1851#ifdef TARGET_I386
1852 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1853#else
1854 cpu_dump_state(env, logfile, fprintf, 0);
1855#endif
1856 fflush(logfile);
1857 fclose(logfile);
1858 }
1859 va_end(ap2);
1860 va_end(ap);
1861 abort();
1862}
1863#endif /* !VBOX */
1864
1865#ifndef VBOX
1866CPUState *cpu_copy(CPUState *env)
1867{
1868 CPUState *new_env = cpu_init(env->cpu_model_str);
1869 /* preserve chaining and index */
1870 CPUState *next_cpu = new_env->next_cpu;
1871 int cpu_index = new_env->cpu_index;
1872 memcpy(new_env, env, sizeof(CPUState));
1873 new_env->next_cpu = next_cpu;
1874 new_env->cpu_index = cpu_index;
1875 return new_env;
1876}
1877#endif /* !VBOX */
1878
1879#if !defined(CONFIG_USER_ONLY)
1880
1881static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1882{
1883 unsigned int i;
1884
1885 /* Discard jump cache entries for any tb which might potentially
1886 overlap the flushed page. */
1887 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1888 memset (&env->tb_jmp_cache[i], 0,
1889 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1890
1891 i = tb_jmp_cache_hash_page(addr);
1892 memset (&env->tb_jmp_cache[i], 0,
1893 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1894
1895#ifdef VBOX
1896 /* inform raw mode about TLB page flush */
1897 remR3FlushPage(env, addr);
1898#endif /* VBOX */
1899}
1900
1901#ifdef VBOX
1902static CPUTLBEntry s_cputlb_empty_entry = {
1903 .addr_read = -1,
1904 .addr_write = -1,
1905 .addr_code = -1,
1906 .addend = -1,
1907};
1908#endif /* VBOX */
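/*
 * Editorial note, not part of the upstream file: filling every field with -1
 * makes the entry unmatchable -- TLB lookups compare the address against
 * addr_read/addr_write/addr_code masked with TARGET_PAGE_MASK | TLB_INVALID_MASK,
 * and -1 never equals a page-aligned address -- so assigning
 * s_cputlb_empty_entry is equivalent to the per-field resets in the !VBOX
 * branch of tlb_flush() below.
 */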
1909
1910/* NOTE: if flush_global is true, also flush global entries (not
1911 implemented yet) */
1912void tlb_flush(CPUState *env, int flush_global)
1913{
1914 int i;
1915
1916#if defined(DEBUG_TLB)
1917 printf("tlb_flush:\n");
1918#endif
1919 /* must reset current TB so that interrupts cannot modify the
1920 links while we are modifying them */
1921 env->current_tb = NULL;
1922
1923 for(i = 0; i < CPU_TLB_SIZE; i++) {
1924#ifdef VBOX
1925 int mmu_idx;
1926 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1927 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1928 }
1929#else /* !VBOX */
1930 env->tlb_table[0][i].addr_read = -1;
1931 env->tlb_table[0][i].addr_write = -1;
1932 env->tlb_table[0][i].addr_code = -1;
1933 env->tlb_table[1][i].addr_read = -1;
1934 env->tlb_table[1][i].addr_write = -1;
1935 env->tlb_table[1][i].addr_code = -1;
1936#if (NB_MMU_MODES >= 3)
1937 env->tlb_table[2][i].addr_read = -1;
1938 env->tlb_table[2][i].addr_write = -1;
1939 env->tlb_table[2][i].addr_code = -1;
1940#if (NB_MMU_MODES == 4)
1941 env->tlb_table[3][i].addr_read = -1;
1942 env->tlb_table[3][i].addr_write = -1;
1943 env->tlb_table[3][i].addr_code = -1;
1944#endif
1945#endif
1946#endif /* !VBOX */
1947 }
1948
1949 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1950
1951#ifdef VBOX
1952 /* inform raw mode about TLB flush */
1953 remR3FlushTLB(env, flush_global);
1954#endif
1955#ifdef USE_KQEMU
1956 if (env->kqemu_enabled) {
1957 kqemu_flush(env, flush_global);
1958 }
1959#endif
1960 tlb_flush_count++;
1961}
1962
1963static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1964{
1965 if (addr == (tlb_entry->addr_read &
1966 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1967 addr == (tlb_entry->addr_write &
1968 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1969 addr == (tlb_entry->addr_code &
1970 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1971 tlb_entry->addr_read = -1;
1972 tlb_entry->addr_write = -1;
1973 tlb_entry->addr_code = -1;
1974 }
1975}
1976
1977void tlb_flush_page(CPUState *env, target_ulong addr)
1978{
1979 int i;
1980
1981#if defined(DEBUG_TLB)
1982 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1983#endif
1984 /* must reset current TB so that interrupts cannot modify the
1985 links while we are modifying them */
1986 env->current_tb = NULL;
1987
1988 addr &= TARGET_PAGE_MASK;
1989 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1990 tlb_flush_entry(&env->tlb_table[0][i], addr);
1991 tlb_flush_entry(&env->tlb_table[1][i], addr);
1992#if (NB_MMU_MODES >= 3)
1993 tlb_flush_entry(&env->tlb_table[2][i], addr);
1994#if (NB_MMU_MODES == 4)
1995 tlb_flush_entry(&env->tlb_table[3][i], addr);
1996#endif
1997#endif
1998
1999 tlb_flush_jmp_cache(env, addr);
2000
2001#ifdef USE_KQEMU
2002 if (env->kqemu_enabled) {
2003 kqemu_flush_page(env, addr);
2004 }
2005#endif
2006}
2007
2008/* update the TLBs so that writes to code in the virtual page 'addr'
2009 can be detected */
2010static void tlb_protect_code(ram_addr_t ram_addr)
2011{
2012 cpu_physical_memory_reset_dirty(ram_addr,
2013 ram_addr + TARGET_PAGE_SIZE,
2014 CODE_DIRTY_FLAG);
2015#if defined(VBOX) && defined(REM_MONITOR_CODE_PAGES)
2016 /** @todo Retest this? This function has changed... */
2017 remR3ProtectCode(cpu_single_env, ram_addr);
2018#endif
2019}
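
/* Informal summary of the write-to-code trap armed here: clearing
   CODE_DIRTY_FLAG makes cpu_physical_memory_is_dirty() return false for the
   page, so cpu_physical_memory_reset_dirty() tags existing write TLB entries
   with TLB_NOTDIRTY and tlb_set_page_exec() does the same for new ones.
   Guest stores then take the slow path through the notdirty_mem_write*
   handlers further below, which invalidate the translated code on the page
   before performing the store. */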
2020
2021/* update the TLB so that writes in physical page 'phys_addr' are no longer
2022 tested for self-modifying code */
2023static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
2024 target_ulong vaddr)
2025{
2026#ifdef VBOX
2027 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2028#endif
2029 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
2030}
2031
2032static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
2033 unsigned long start, unsigned long length)
2034{
2035 unsigned long addr;
2036
2037#ifdef VBOX
2038 if (start & 3)
2039 return;
2040#endif
2041 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2042 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2043 if ((addr - start) < length) {
2044 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
2045 }
2046 }
2047}
2048
2049void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
2050 int dirty_flags)
2051{
2052 CPUState *env;
2053 unsigned long length, start1;
2054 int i, mask, len;
2055 uint8_t *p;
2056
2057 start &= TARGET_PAGE_MASK;
2058 end = TARGET_PAGE_ALIGN(end);
2059
2060 length = end - start;
2061 if (length == 0)
2062 return;
2063 len = length >> TARGET_PAGE_BITS;
2064#ifdef USE_KQEMU
2065 /* XXX: should not depend on cpu context */
2066 env = first_cpu;
2067 if (env->kqemu_enabled) {
2068 ram_addr_t addr;
2069 addr = start;
2070 for(i = 0; i < len; i++) {
2071 kqemu_set_notdirty(env, addr);
2072 addr += TARGET_PAGE_SIZE;
2073 }
2074 }
2075#endif
2076 mask = ~dirty_flags;
2077 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
2078#ifdef VBOX
2079 if (RT_LIKELY((start >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2080#endif
2081 for(i = 0; i < len; i++)
2082 p[i] &= mask;
2083
2084 /* we modify the TLB cache so that the dirty bit will be set again
2085 when accessing the range */
2086#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2087 start1 = start;
2088#elif !defined(VBOX)
2089 start1 = start + (unsigned long)phys_ram_base;
2090#else
2091 start1 = (unsigned long)remR3TlbGCPhys2Ptr(first_cpu, start, 1 /*fWritable*/); /** @todo page replacing (sharing or read only) may cause trouble, fix interface/whatever. */
2092#endif
2093 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2094 for(i = 0; i < CPU_TLB_SIZE; i++)
2095 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
2096 for(i = 0; i < CPU_TLB_SIZE; i++)
2097 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
2098#if (NB_MMU_MODES >= 3)
2099 for(i = 0; i < CPU_TLB_SIZE; i++)
2100 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
2101#if (NB_MMU_MODES == 4)
2102 for(i = 0; i < CPU_TLB_SIZE; i++)
2103 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
2104#endif
2105#endif
2106 }
2107}
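
/* Worked example of the flag arithmetic above (illustrative): a call with
   dirty_flags == CODE_DIRTY_FLAG gives mask == ~CODE_DIRTY_FLAG, so the loop
   clears only the code-dirty bit of every page in [start, end) and leaves
   the other per-page dirty bits alone; the tlb_reset_dirty_range() pass that
   follows tags the matching write TLB entries with TLB_NOTDIRTY, so the next
   store to those pages goes through the slow path again. */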
2108
2109#ifndef VBOX
2110int cpu_physical_memory_set_dirty_tracking(int enable)
2111{
2112 in_migration = enable;
2113 return 0;
2114}
2115
2116int cpu_physical_memory_get_dirty_tracking(void)
2117{
2118 return in_migration;
2119}
2120#endif
2121
2122#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2123DECLINLINE(void) tlb_update_dirty(CPUTLBEntry *tlb_entry, target_phys_addr_t phys_addend)
2124#else
2125static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2126#endif
2127{
2128 ram_addr_t ram_addr;
2129
2130 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2131 /* RAM case */
2132#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2133 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2134#elif !defined(VBOX)
2135 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
2136 tlb_entry->addend - (unsigned long)phys_ram_base;
2137#else
2138 Assert(phys_addend != -1);
2139 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + phys_addend;
2140#endif
2141 if (!cpu_physical_memory_is_dirty(ram_addr)) {
2142 tlb_entry->addr_write |= TLB_NOTDIRTY;
2143 }
2144 }
2145}
2146
2147/* update the TLB according to the current state of the dirty bits */
2148void cpu_tlb_update_dirty(CPUState *env)
2149{
2150 int i;
2151#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2152 for(i = 0; i < CPU_TLB_SIZE; i++)
2153 tlb_update_dirty(&env->tlb_table[0][i], env->phys_addends[0][i]);
2154 for(i = 0; i < CPU_TLB_SIZE; i++)
2155 tlb_update_dirty(&env->tlb_table[1][i], env->phys_addends[1][i]);
2156#if (NB_MMU_MODES >= 3)
2157 for(i = 0; i < CPU_TLB_SIZE; i++)
2158 tlb_update_dirty(&env->tlb_table[2][i], env->phys_addends[2][i]);
2159#if (NB_MMU_MODES == 4)
2160 for(i = 0; i < CPU_TLB_SIZE; i++)
2161 tlb_update_dirty(&env->tlb_table[3][i], env->phys_addends[3][i]);
2162#endif
2163#endif
2164#else /* VBOX */
2165 for(i = 0; i < CPU_TLB_SIZE; i++)
2166 tlb_update_dirty(&env->tlb_table[0][i]);
2167 for(i = 0; i < CPU_TLB_SIZE; i++)
2168 tlb_update_dirty(&env->tlb_table[1][i]);
2169#if (NB_MMU_MODES >= 3)
2170 for(i = 0; i < CPU_TLB_SIZE; i++)
2171 tlb_update_dirty(&env->tlb_table[2][i]);
2172#if (NB_MMU_MODES == 4)
2173 for(i = 0; i < CPU_TLB_SIZE; i++)
2174 tlb_update_dirty(&env->tlb_table[3][i]);
2175#endif
2176#endif
2177#endif /* VBOX */
2178}
2179
2180static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2181{
2182 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2183 tlb_entry->addr_write = vaddr;
2184}
2185
2186/* update the TLB corresponding to virtual page vaddr
2187 so that it is no longer dirty */
2188static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2189{
2190 int i;
2191
2192 vaddr &= TARGET_PAGE_MASK;
2193 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2194 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
2195 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
2196#if (NB_MMU_MODES >= 3)
2197 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
2198#if (NB_MMU_MODES == 4)
2199 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
2200#endif
2201#endif
2202}
2203
2204/* add a new TLB entry. At most one entry for a given virtual address
2205 is permitted. Return 0 if OK or 2 if the page could not be mapped
2206 (can only happen in non SOFTMMU mode for I/O pages or pages
2207 conflicting with the host address space). */
2208int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2209 target_phys_addr_t paddr, int prot,
2210 int mmu_idx, int is_softmmu)
2211{
2212 PhysPageDesc *p;
2213 unsigned long pd;
2214 unsigned int index;
2215 target_ulong address;
2216 target_ulong code_address;
2217 target_phys_addr_t addend;
2218 int ret;
2219 CPUTLBEntry *te;
2220 int i;
2221 target_phys_addr_t iotlb;
2222#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2223 int read_mods = 0, write_mods = 0, code_mods = 0;
2224#endif
2225
2226 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2227 if (!p) {
2228 pd = IO_MEM_UNASSIGNED;
2229 } else {
2230 pd = p->phys_offset;
2231 }
2232#if defined(DEBUG_TLB)
2233 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2234 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2235#endif
2236
2237 ret = 0;
2238 address = vaddr;
2239 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2240 /* IO memory case (romd handled later) */
2241 address |= TLB_MMIO;
2242 }
2243#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2244 addend = pd & TARGET_PAGE_MASK;
2245#elif !defined(VBOX)
2246 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
2247#else
2248 /** @todo this is racing the phys_page_find call above since it may register
2249 * a new chunk of memory... */
2250 addend = (unsigned long)remR3TlbGCPhys2Ptr(env,
2251 pd & TARGET_PAGE_MASK,
2252 !!(prot & PAGE_WRITE));
2253#endif
2254
2255 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2256 /* Normal RAM. */
2257 iotlb = pd & TARGET_PAGE_MASK;
2258 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2259 iotlb |= IO_MEM_NOTDIRTY;
2260 else
2261 iotlb |= IO_MEM_ROM;
2262 } else {
2263 /* IO handlers are currently passed a physical address.
2264 It would be nice to pass an offset from the base address
2265 of that region. This would avoid having to special case RAM,
2266 and avoid full address decoding in every device.
2267 We can't use the high bits of pd for this because
2268 IO_MEM_ROMD uses these as a ram address. */
2269 iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
2270 }
2271
2272 code_address = address;
2273
2274#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2275 if (addend & 0x3)
2276 {
2277 if (addend & 0x2)
2278 {
2279 /* catch write */
2280 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
2281 write_mods |= TLB_MMIO;
2282 }
2283 else if (addend & 0x1)
2284 {
2285 /* catch all */
2286 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
2287 {
2288 read_mods |= TLB_MMIO;
2289 write_mods |= TLB_MMIO;
2290 code_mods |= TLB_MMIO;
2291 }
2292 }
2293 if ((iotlb & ~TARGET_PAGE_MASK) == 0)
2294 iotlb = env->pVM->rem.s.iHandlerMemType + paddr;
2295 addend &= ~(target_ulong)0x3;
2296 }
2297#endif
2298
2299 /* Make accesses to pages with watchpoints go via the
2300 watchpoint trap routines. */
2301 for (i = 0; i < env->nb_watchpoints; i++) {
2302 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
2303 iotlb = io_mem_watch + paddr;
2304 /* TODO: The memory case can be optimized by not trapping
2305 reads of pages with a write breakpoint. */
2306 address |= TLB_MMIO;
2307 }
2308 }
2309
2310 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2311 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2312 te = &env->tlb_table[mmu_idx][index];
2313 te->addend = addend - vaddr;
2314 if (prot & PAGE_READ) {
2315 te->addr_read = address;
2316 } else {
2317 te->addr_read = -1;
2318 }
2319
2320 if (prot & PAGE_EXEC) {
2321 te->addr_code = code_address;
2322 } else {
2323 te->addr_code = -1;
2324 }
2325 if (prot & PAGE_WRITE) {
2326 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2327 (pd & IO_MEM_ROMD)) {
2328 /* Write access calls the I/O callback. */
2329 te->addr_write = address | TLB_MMIO;
2330 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2331 !cpu_physical_memory_is_dirty(pd)) {
2332 te->addr_write = address | TLB_NOTDIRTY;
2333 } else {
2334 te->addr_write = address;
2335 }
2336 } else {
2337 te->addr_write = -1;
2338 }
2339
2340#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2341 if (prot & PAGE_READ)
2342 te->addr_read |= read_mods;
2343 if (prot & PAGE_EXEC)
2344 te->addr_code |= code_mods;
2345 if (prot & PAGE_WRITE)
2346 te->addr_write |= write_mods;
2347
2348 env->phys_addends[mmu_idx][index] = (pd & TARGET_PAGE_MASK) - vaddr;
2349#endif
2350
2351#ifdef VBOX
2352 /* inform raw mode about TLB page change */
2353 remR3FlushPage(env, vaddr);
2354#endif
2355 return ret;
2356}
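
/* Minimal usage sketch (hypothetical caller; the real callers are the
   per-target MMU fault handlers, which are not in this file).
   'example_install_mapping' and its arguments are made-up names. */
#if 0
static int example_install_mapping(CPUState *env, target_ulong vaddr,
                                   target_phys_addr_t paddr, int prot,
                                   int mmu_idx)
{
    /* one TLB entry per virtual page; both addresses must be page aligned */
    return tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                             paddr & TARGET_PAGE_MASK, prot,
                             mmu_idx, 1 /* is_softmmu */);
}
#endif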
2357
2358#else
2359
2360void tlb_flush(CPUState *env, int flush_global)
2361{
2362}
2363
2364void tlb_flush_page(CPUState *env, target_ulong addr)
2365{
2366}
2367
2368int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2369 target_phys_addr_t paddr, int prot,
2370 int mmu_idx, int is_softmmu)
2371{
2372 return 0;
2373}
2374
2375#ifndef VBOX
2376/* dump memory mappings */
2377void page_dump(FILE *f)
2378{
2379 unsigned long start, end;
2380 int i, j, prot, prot1;
2381 PageDesc *p;
2382
2383 fprintf(f, "%-8s %-8s %-8s %s\n",
2384 "start", "end", "size", "prot");
2385 start = -1;
2386 end = -1;
2387 prot = 0;
2388 for(i = 0; i <= L1_SIZE; i++) {
2389 if (i < L1_SIZE)
2390 p = l1_map[i];
2391 else
2392 p = NULL;
2393 for(j = 0;j < L2_SIZE; j++) {
2394 if (!p)
2395 prot1 = 0;
2396 else
2397 prot1 = p[j].flags;
2398 if (prot1 != prot) {
2399 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2400 if (start != -1) {
2401 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2402 start, end, end - start,
2403 prot & PAGE_READ ? 'r' : '-',
2404 prot & PAGE_WRITE ? 'w' : '-',
2405 prot & PAGE_EXEC ? 'x' : '-');
2406 }
2407 if (prot1 != 0)
2408 start = end;
2409 else
2410 start = -1;
2411 prot = prot1;
2412 }
2413 if (!p)
2414 break;
2415 }
2416 }
2417}
2418#endif /* !VBOX */
2419
2420int page_get_flags(target_ulong address)
2421{
2422 PageDesc *p;
2423
2424 p = page_find(address >> TARGET_PAGE_BITS);
2425 if (!p)
2426 return 0;
2427 return p->flags;
2428}
2429
2430/* modify the flags of a page and invalidate the code if
2431 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2432 depending on PAGE_WRITE */
2433void page_set_flags(target_ulong start, target_ulong end, int flags)
2434{
2435 PageDesc *p;
2436 target_ulong addr;
2437
2438 /* mmap_lock should already be held. */
2439 start = start & TARGET_PAGE_MASK;
2440 end = TARGET_PAGE_ALIGN(end);
2441 if (flags & PAGE_WRITE)
2442 flags |= PAGE_WRITE_ORG;
2443#ifdef VBOX
2444 AssertMsgFailed(("We shouldn't be here, and if we should, we must have an env to do the proper locking!\n"));
2445#endif
2446 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2447 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2448 /* We may be called for host regions that are outside guest
2449 address space. */
2450 if (!p)
2451 return;
2452 /* if the write protection is set, then we invalidate the code
2453 inside */
2454 if (!(p->flags & PAGE_WRITE) &&
2455 (flags & PAGE_WRITE) &&
2456 p->first_tb) {
2457 tb_invalidate_phys_page(addr, 0, NULL);
2458 }
2459 p->flags = flags;
2460 }
2461}
2462
2463int page_check_range(target_ulong start, target_ulong len, int flags)
2464{
2465 PageDesc *p;
2466 target_ulong end;
2467 target_ulong addr;
2468
2469 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2470 start = start & TARGET_PAGE_MASK;
2471
2472 if( end < start )
2473 /* we've wrapped around */
2474 return -1;
2475 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2476 p = page_find(addr >> TARGET_PAGE_BITS);
2477 if( !p )
2478 return -1;
2479 if( !(p->flags & PAGE_VALID) )
2480 return -1;
2481
2482 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2483 return -1;
2484 if (flags & PAGE_WRITE) {
2485 if (!(p->flags & PAGE_WRITE_ORG))
2486 return -1;
2487 /* unprotect the page if it was put read-only because it
2488 contains translated code */
2489 if (!(p->flags & PAGE_WRITE)) {
2490 if (!page_unprotect(addr, 0, NULL))
2491 return -1;
2492 }
2493 return 0;
2494 }
2495 }
2496 return 0;
2497}
2498
2499/* called from signal handler: invalidate the code and unprotect the
2500 page. Return TRUE if the fault was successfully handled. */
2501int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2502{
2503 unsigned int page_index, prot, pindex;
2504 PageDesc *p, *p1;
2505 target_ulong host_start, host_end, addr;
2506
2507 /* Technically this isn't safe inside a signal handler. However we
2508 know this only ever happens in a synchronous SEGV handler, so in
2509 practice it seems to be ok. */
2510 mmap_lock();
2511
2512 host_start = address & qemu_host_page_mask;
2513 page_index = host_start >> TARGET_PAGE_BITS;
2514 p1 = page_find(page_index);
2515 if (!p1) {
2516 mmap_unlock();
2517 return 0;
2518 }
2519 host_end = host_start + qemu_host_page_size;
2520 p = p1;
2521 prot = 0;
2522 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2523 prot |= p->flags;
2524 p++;
2525 }
2526 /* if the page was really writable, then we change its
2527 protection back to writable */
2528 if (prot & PAGE_WRITE_ORG) {
2529 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2530 if (!(p1[pindex].flags & PAGE_WRITE)) {
2531 mprotect((void *)g2h(host_start), qemu_host_page_size,
2532 (prot & PAGE_BITS) | PAGE_WRITE);
2533 p1[pindex].flags |= PAGE_WRITE;
2534 /* and since the content will be modified, we must invalidate
2535 the corresponding translated code. */
2536 tb_invalidate_phys_page(address, pc, puc);
2537#ifdef DEBUG_TB_CHECK
2538 tb_invalidate_check(address);
2539#endif
2540 mmap_unlock();
2541 return 1;
2542 }
2543 }
2544 mmap_unlock();
2545 return 0;
2546}
2547
2548static inline void tlb_set_dirty(CPUState *env,
2549 unsigned long addr, target_ulong vaddr)
2550{
2551}
2552#endif /* defined(CONFIG_USER_ONLY) */
2553
2554#if !defined(CONFIG_USER_ONLY)
2555static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2556 ram_addr_t memory);
2557static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2558 ram_addr_t orig_memory);
2559#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2560 need_subpage) \
2561 do { \
2562 if (addr > start_addr) \
2563 start_addr2 = 0; \
2564 else { \
2565 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2566 if (start_addr2 > 0) \
2567 need_subpage = 1; \
2568 } \
2569 \
2570 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2571 end_addr2 = TARGET_PAGE_SIZE - 1; \
2572 else { \
2573 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2574 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2575 need_subpage = 1; \
2576 } \
2577 } while (0)
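
/* Worked example (illustrative numbers, 4K target pages assumed): invoked
   with addr == start_addr == 0x10000800 and orig_size == 0x400, the macro
   yields start_addr2 = 0x800, end_addr2 = 0xBFF and sets need_subpage,
   i.e. only bytes 0x800..0xBFF of that page should be routed to the new
   handlers while the rest of the page keeps its previous mapping through
   the subpage machinery below. */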
2578
2579/* register physical memory. 'size' must be a multiple of the target
2580 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2581 io memory page */
2582void cpu_register_physical_memory(target_phys_addr_t start_addr,
2583 ram_addr_t size,
2584 ram_addr_t phys_offset)
2585{
2586 target_phys_addr_t addr, end_addr;
2587 PhysPageDesc *p;
2588 CPUState *env;
2589 ram_addr_t orig_size = size;
2590 void *subpage;
2591
2592#ifdef USE_KQEMU
2593 /* XXX: should not depend on cpu context */
2594 env = first_cpu;
2595 if (env->kqemu_enabled) {
2596 kqemu_set_phys_mem(start_addr, size, phys_offset);
2597 }
2598#endif
2599 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2600 end_addr = start_addr + (target_phys_addr_t)size;
2601 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2602 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2603 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2604 ram_addr_t orig_memory = p->phys_offset;
2605 target_phys_addr_t start_addr2, end_addr2;
2606 int need_subpage = 0;
2607
2608 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2609 need_subpage);
2610 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2611 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2612 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2613 &p->phys_offset, orig_memory);
2614 } else {
2615 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2616 >> IO_MEM_SHIFT];
2617 }
2618 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2619 } else {
2620 p->phys_offset = phys_offset;
2621 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2622 (phys_offset & IO_MEM_ROMD))
2623 phys_offset += TARGET_PAGE_SIZE;
2624 }
2625 } else {
2626 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2627 p->phys_offset = phys_offset;
2628 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2629 (phys_offset & IO_MEM_ROMD))
2630 phys_offset += TARGET_PAGE_SIZE;
2631 else {
2632 target_phys_addr_t start_addr2, end_addr2;
2633 int need_subpage = 0;
2634
2635 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2636 end_addr2, need_subpage);
2637
2638 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2639 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2640 &p->phys_offset, IO_MEM_UNASSIGNED);
2641 subpage_register(subpage, start_addr2, end_addr2,
2642 phys_offset);
2643 }
2644 }
2645 }
2646 }
2647
2648 /* since each CPU stores ram addresses in its TLB cache, we must
2649 reset the modified entries */
2650 /* XXX: slow ! */
2651 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2652 tlb_flush(env, 1);
2653 }
2654}
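
/* Usage sketch (hypothetical addresses and names; non-VBOX path shown):
   plain RAM passes a page-aligned ram_addr_t offset as phys_offset, while
   MMIO passes the value returned by cpu_register_io_memory() -- see the
   sketch after that function further below. */
#if 0
    /* back guest physical 0..1MB with freshly allocated RAM */
    ram_addr_t ram_off = qemu_ram_alloc(0x100000);
    cpu_register_physical_memory(0x00000000, 0x100000, ram_off | IO_MEM_RAM);

    /* map one page of device registers; 'demo_io' is a value previously
       returned by cpu_register_io_memory() */
    cpu_register_physical_memory(0xfee00000, 0x1000, demo_io);
#endif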
2655
2656/* XXX: temporary until new memory mapping API */
2657ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2658{
2659 PhysPageDesc *p;
2660
2661 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2662 if (!p)
2663 return IO_MEM_UNASSIGNED;
2664 return p->phys_offset;
2665}
2666
2667#ifndef VBOX
2668/* XXX: better than nothing */
2669ram_addr_t qemu_ram_alloc(ram_addr_t size)
2670{
2671 ram_addr_t addr;
2672 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2673 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2674 (uint64_t)size, (uint64_t)phys_ram_size);
2675 abort();
2676 }
2677 addr = phys_ram_alloc_offset;
2678 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2679 return addr;
2680}
2681
2682void qemu_ram_free(ram_addr_t addr)
2683{
2684}
2685#endif /* !VBOX */
2686
2687static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2688{
2689#ifdef DEBUG_UNASSIGNED
2690 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2691#endif
2692#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2693 do_unassigned_access(addr, 0, 0, 0, 1);
2694#endif
2695 return 0;
2696}
2697
2698static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2699{
2700#ifdef DEBUG_UNASSIGNED
2701 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2702#endif
2703#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2704 do_unassigned_access(addr, 0, 0, 0, 2);
2705#endif
2706 return 0;
2707}
2708
2709static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2710{
2711#ifdef DEBUG_UNASSIGNED
2712 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2713#endif
2714#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2715 do_unassigned_access(addr, 0, 0, 0, 4);
2716#endif
2717 return 0;
2718}
2719
2720static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2721{
2722#ifdef DEBUG_UNASSIGNED
2723 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2724#endif
2725#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2726 do_unassigned_access(addr, 1, 0, 0, 1);
2727#endif
2728}
2729
2730static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2731{
2732#ifdef DEBUG_UNASSIGNED
2733 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2734#endif
2735#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2736 do_unassigned_access(addr, 1, 0, 0, 2);
2737#endif
2738}
2739
2740static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2741{
2742#ifdef DEBUG_UNASSIGNED
2743 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2744#endif
2745#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2746 do_unassigned_access(addr, 1, 0, 0, 4);
2747#endif
2748}
2749
2750static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2751 unassigned_mem_readb,
2752 unassigned_mem_readw,
2753 unassigned_mem_readl,
2754};
2755
2756static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2757 unassigned_mem_writeb,
2758 unassigned_mem_writew,
2759 unassigned_mem_writel,
2760};
2761
2762static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2763 uint32_t val)
2764{
2765 int dirty_flags;
2766#ifdef VBOX
2767 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2768 dirty_flags = 0xff;
2769 else
2770#endif /* VBOX */
2771 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2772 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2773#if !defined(CONFIG_USER_ONLY)
2774 tb_invalidate_phys_page_fast(ram_addr, 1);
2775# ifdef VBOX
2776 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2777 dirty_flags = 0xff;
2778 else
2779# endif /* VBOX */
2780 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2781#endif
2782 }
2783#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2784 remR3PhysWriteU8(ram_addr, val);
2785#else
2786 stb_p(phys_ram_base + ram_addr, val);
2787#endif
2788#ifdef USE_KQEMU
2789 if (cpu_single_env->kqemu_enabled &&
2790 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2791 kqemu_modify_page(cpu_single_env, ram_addr);
2792#endif
2793 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2794#ifdef VBOX
2795 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2796#endif /* !VBOX */
2797 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2798 /* we remove the notdirty callback only if the code has been
2799 flushed */
2800 if (dirty_flags == 0xff)
2801 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2802}
2803
2804static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2805 uint32_t val)
2806{
2807 int dirty_flags;
2808#ifdef VBOX
2809 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2810 dirty_flags = 0xff;
2811 else
2812#endif /* VBOX */
2813 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2814 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2815#if !defined(CONFIG_USER_ONLY)
2816 tb_invalidate_phys_page_fast(ram_addr, 2);
2817# ifdef VBOX
2818 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2819 dirty_flags = 0xff;
2820 else
2821# endif /* VBOX */
2822 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2823#endif
2824 }
2825#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2826 remR3PhysWriteU16(ram_addr, val);
2827#else
2828 stw_p(phys_ram_base + ram_addr, val);
2829#endif
2830
2831#ifdef USE_KQEMU
2832 if (cpu_single_env->kqemu_enabled &&
2833 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2834 kqemu_modify_page(cpu_single_env, ram_addr);
2835#endif
2836 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2837#ifdef VBOX
2838 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2839#endif
2840 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2841 /* we remove the notdirty callback only if the code has been
2842 flushed */
2843 if (dirty_flags == 0xff)
2844 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2845}
2846
2847static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2848 uint32_t val)
2849{
2850 int dirty_flags;
2851#ifdef VBOX
2852 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2853 dirty_flags = 0xff;
2854 else
2855#endif /* VBOX */
2856 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2857 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2858#if !defined(CONFIG_USER_ONLY)
2859 tb_invalidate_phys_page_fast(ram_addr, 4);
2860# ifdef VBOX
2861 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2862 dirty_flags = 0xff;
2863 else
2864# endif /* VBOX */
2865 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2866#endif
2867 }
2868#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2869 remR3PhysWriteU32(ram_addr, val);
2870#else
2871 stl_p(phys_ram_base + ram_addr, val);
2872#endif
2873#ifdef USE_KQEMU
2874 if (cpu_single_env->kqemu_enabled &&
2875 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2876 kqemu_modify_page(cpu_single_env, ram_addr);
2877#endif
2878 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2879#ifdef VBOX
2880 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2881#endif
2882 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2883 /* we remove the notdirty callback only if the code has been
2884 flushed */
2885 if (dirty_flags == 0xff)
2886 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2887}
2888
2889static CPUReadMemoryFunc *error_mem_read[3] = {
2890 NULL, /* never used */
2891 NULL, /* never used */
2892 NULL, /* never used */
2893};
2894
2895static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2896 notdirty_mem_writeb,
2897 notdirty_mem_writew,
2898 notdirty_mem_writel,
2899};
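
/* Note on the handlers above: stores to RAM pages whose dirty bits are
   clear are tagged TLB_NOTDIRTY (see tlb_set_page_exec() and
   tlb_reset_dirty_range()) and reach these functions through the
   IO_MEM_NOTDIRTY iotlb entry.  Each handler invalidates the translated
   code on the page, performs the store into guest RAM, restores the dirty
   bits, and drops the slow-path entry via tlb_set_dirty() only once the
   page is fully dirty, i.e. once its code has been flushed. */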
2900
2901/* Generate a debug exception if a watchpoint has been hit. */
2902static void check_watchpoint(int offset, int flags)
2903{
2904 CPUState *env = cpu_single_env;
2905 target_ulong vaddr;
2906 int i;
2907
2908 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2909 for (i = 0; i < env->nb_watchpoints; i++) {
2910 if (vaddr == env->watchpoint[i].vaddr
2911 && (env->watchpoint[i].type & flags)) {
2912 env->watchpoint_hit = i + 1;
2913 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2914 break;
2915 }
2916 }
2917}
2918
2919/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2920 so these check for a hit then pass through to the normal out-of-line
2921 phys routines. */
2922static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2923{
2924 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2925 return ldub_phys(addr);
2926}
2927
2928static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2929{
2930 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2931 return lduw_phys(addr);
2932}
2933
2934static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2935{
2936 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2937 return ldl_phys(addr);
2938}
2939
2940static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2941 uint32_t val)
2942{
2943 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2944 stb_phys(addr, val);
2945}
2946
2947static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2948 uint32_t val)
2949{
2950 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2951 stw_phys(addr, val);
2952}
2953
2954static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2955 uint32_t val)
2956{
2957 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2958 stl_phys(addr, val);
2959}
2960
2961static CPUReadMemoryFunc *watch_mem_read[3] = {
2962 watch_mem_readb,
2963 watch_mem_readw,
2964 watch_mem_readl,
2965};
2966
2967static CPUWriteMemoryFunc *watch_mem_write[3] = {
2968 watch_mem_writeb,
2969 watch_mem_writew,
2970 watch_mem_writel,
2971};
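
/* Note on the watchpoint routines above: tlb_set_page_exec() points the
   iotlb entry of a watched page at io_mem_watch and marks the address
   TLB_MMIO, so every access to that page is funnelled through these
   handlers; check_watchpoint() records the hit and raises
   CPU_INTERRUPT_DEBUG, after which the access completes through the
   ordinary ld*_phys/st*_phys helpers. */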
2972
2973static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2974 unsigned int len)
2975{
2976 uint32_t ret;
2977 unsigned int idx;
2978
2979 idx = SUBPAGE_IDX(addr - mmio->base);
2980#if defined(DEBUG_SUBPAGE)
2981 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2982 mmio, len, addr, idx);
2983#endif
2984 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
2985
2986 return ret;
2987}
2988
2989static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2990 uint32_t value, unsigned int len)
2991{
2992 unsigned int idx;
2993
2994 idx = SUBPAGE_IDX(addr - mmio->base);
2995#if defined(DEBUG_SUBPAGE)
2996 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2997 mmio, len, addr, idx, value);
2998#endif
2999 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
3000}
3001
3002static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
3003{
3004#if defined(DEBUG_SUBPAGE)
3005 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3006#endif
3007
3008 return subpage_readlen(opaque, addr, 0);
3009}
3010
3011static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3012 uint32_t value)
3013{
3014#if defined(DEBUG_SUBPAGE)
3015 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3016#endif
3017 subpage_writelen(opaque, addr, value, 0);
3018}
3019
3020static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3021{
3022#if defined(DEBUG_SUBPAGE)
3023 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3024#endif
3025
3026 return subpage_readlen(opaque, addr, 1);
3027}
3028
3029static void subpage_writew (void *opaque, target_phys_addr_t addr,
3030 uint32_t value)
3031{
3032#if defined(DEBUG_SUBPAGE)
3033 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3034#endif
3035 subpage_writelen(opaque, addr, value, 1);
3036}
3037
3038static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3039{
3040#if defined(DEBUG_SUBPAGE)
3041 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3042#endif
3043
3044 return subpage_readlen(opaque, addr, 2);
3045}
3046
3047static void subpage_writel (void *opaque,
3048 target_phys_addr_t addr, uint32_t value)
3049{
3050#if defined(DEBUG_SUBPAGE)
3051 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3052#endif
3053 subpage_writelen(opaque, addr, value, 2);
3054}
3055
3056static CPUReadMemoryFunc *subpage_read[] = {
3057 &subpage_readb,
3058 &subpage_readw,
3059 &subpage_readl,
3060};
3061
3062static CPUWriteMemoryFunc *subpage_write[] = {
3063 &subpage_writeb,
3064 &subpage_writew,
3065 &subpage_writel,
3066};
3067
3068static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3069 ram_addr_t memory)
3070{
3071 int idx, eidx;
3072 unsigned int i;
3073
3074 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3075 return -1;
3076 idx = SUBPAGE_IDX(start);
3077 eidx = SUBPAGE_IDX(end);
3078#if defined(DEBUG_SUBPAGE)
3079 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
3080 mmio, start, end, idx, eidx, memory);
3081#endif
3082 memory >>= IO_MEM_SHIFT;
3083 for (; idx <= eidx; idx++) {
3084 for (i = 0; i < 4; i++) {
3085 if (io_mem_read[memory][i]) {
3086 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
3087 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
3088 }
3089 if (io_mem_write[memory][i]) {
3090 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
3091 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
3092 }
3093 }
3094 }
3095
3096 return 0;
3097}
3098
3099static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3100 ram_addr_t orig_memory)
3101{
3102 subpage_t *mmio;
3103 int subpage_memory;
3104
3105 mmio = qemu_mallocz(sizeof(subpage_t));
3106 if (mmio != NULL) {
3107 mmio->base = base;
3108 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
3109#if defined(DEBUG_SUBPAGE)
3110 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3111 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3112#endif
3113 *phys = subpage_memory | IO_MEM_SUBPAGE;
3114 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
3115 }
3116
3117 return mmio;
3118}
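
/* Subpage summary: subpage_init() allocates a fresh io zone for the page
   and tags the PhysPageDesc with IO_MEM_SUBPAGE; subpage_register() then
   fills the per-range dispatch tables, so a single target page can mix,
   for example, RAM-backed bytes with device registers.  At access time
   subpage_readlen()/subpage_writelen() pick the handler via
   SUBPAGE_IDX(addr - mmio->base). */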
3119
3120static void io_mem_init(void)
3121{
3122 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
3123 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3124 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
3125 io_mem_nb = 5;
3126
3127 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
3128 watch_mem_write, NULL);
3129
3130#ifndef VBOX /* VBOX: we do this later when the RAM is allocated. */
3131 /* alloc dirty bits array */
3132 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3133 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
3134#endif /* !VBOX */
3135}
3136
3137/* mem_read and mem_write are arrays of functions containing the
3138 function to access byte (index 0), word (index 1) and dword (index
3139 2). Functions can be omitted with a NULL function pointer. The
3140 registered functions may be modified dynamically later.
3141 If io_index is non zero, the corresponding io zone is
3142 modified. If it is zero, a new io zone is allocated. The return
3143 value can be used with cpu_register_physical_memory(). (-1) is
3144 returned if error. */
3145int cpu_register_io_memory(int io_index,
3146 CPUReadMemoryFunc **mem_read,
3147 CPUWriteMemoryFunc **mem_write,
3148 void *opaque)
3149{
3150 int i, subwidth = 0;
3151
3152 if (io_index <= 0) {
3153 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
3154 return -1;
3155 io_index = io_mem_nb++;
3156 } else {
3157 if (io_index >= IO_MEM_NB_ENTRIES)
3158 return -1;
3159 }
3160
3161 for(i = 0;i < 3; i++) {
3162 if (!mem_read[i] || !mem_write[i])
3163 subwidth = IO_MEM_SUBWIDTH;
3164 io_mem_read[io_index][i] = mem_read[i];
3165 io_mem_write[io_index][i] = mem_write[i];
3166 }
3167 io_mem_opaque[io_index] = opaque;
3168 return (io_index << IO_MEM_SHIFT) | subwidth;
3169}
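
/* Registration sketch (hypothetical device; every 'demo_*' name is made up
   for illustration -- the handler signatures are the CPUReadMemoryFunc /
   CPUWriteMemoryFunc types used throughout this file). */
#if 0
static uint32_t demo_mmio_readl(void *opaque, target_phys_addr_t addr)
{
    return 0; /* pretend register value */
}

static void demo_mmio_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    /* latch 'val' into device state reachable through 'opaque' */
}

static CPUReadMemoryFunc *demo_mmio_read[3] = {
    NULL, NULL, demo_mmio_readl, /* 32-bit access only => IO_MEM_SUBWIDTH */
};
static CPUWriteMemoryFunc *demo_mmio_write[3] = {
    NULL, NULL, demo_mmio_writel,
};

static void demo_register(void)
{
    /* io_index 0 allocates a new zone; the return value is what
       cpu_register_physical_memory() expects as phys_offset */
    int demo_io = cpu_register_io_memory(0, demo_mmio_read,
                                         demo_mmio_write, NULL);
    cpu_register_physical_memory(0xfee00000, 0x1000, demo_io);
}
#endif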
3170
3171CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
3172{
3173 return io_mem_write[io_index >> IO_MEM_SHIFT];
3174}
3175
3176CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
3177{
3178 return io_mem_read[io_index >> IO_MEM_SHIFT];
3179}
3180
3181#endif /* !defined(CONFIG_USER_ONLY) */
3182
3183/* physical memory access (slow version, mainly for debug) */
3184#if defined(CONFIG_USER_ONLY)
3185void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3186 int len, int is_write)
3187{
3188 int l, flags;
3189 target_ulong page;
3190 void * p;
3191
3192 while (len > 0) {
3193 page = addr & TARGET_PAGE_MASK;
3194 l = (page + TARGET_PAGE_SIZE) - addr;
3195 if (l > len)
3196 l = len;
3197 flags = page_get_flags(page);
3198 if (!(flags & PAGE_VALID))
3199 return;
3200 if (is_write) {
3201 if (!(flags & PAGE_WRITE))
3202 return;
3203 /* XXX: this code should not depend on lock_user */
3204 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3205 /* FIXME - should this return an error rather than just fail? */
3206 return;
3207 memcpy(p, buf, l);
3208 unlock_user(p, addr, l);
3209 } else {
3210 if (!(flags & PAGE_READ))
3211 return;
3212 /* XXX: this code should not depend on lock_user */
3213 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3214 /* FIXME - should this return an error rather than just fail? */
3215 return;
3216 memcpy(buf, p, l);
3217 unlock_user(p, addr, 0);
3218 }
3219 len -= l;
3220 buf += l;
3221 addr += l;
3222 }
3223}
3224
3225#else
3226void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3227 int len, int is_write)
3228{
3229 int l, io_index;
3230 uint8_t *ptr;
3231 uint32_t val;
3232 target_phys_addr_t page;
3233 unsigned long pd;
3234 PhysPageDesc *p;
3235
3236 while (len > 0) {
3237 page = addr & TARGET_PAGE_MASK;
3238 l = (page + TARGET_PAGE_SIZE) - addr;
3239 if (l > len)
3240 l = len;
3241 p = phys_page_find(page >> TARGET_PAGE_BITS);
3242 if (!p) {
3243 pd = IO_MEM_UNASSIGNED;
3244 } else {
3245 pd = p->phys_offset;
3246 }
3247
3248 if (is_write) {
3249 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3250 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3251 /* XXX: could force cpu_single_env to NULL to avoid
3252 potential bugs */
3253 if (l >= 4 && ((addr & 3) == 0)) {
3254 /* 32 bit write access */
3255#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3256 val = ldl_p(buf);
3257#else
3258 val = *(const uint32_t *)buf;
3259#endif
3260 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3261 l = 4;
3262 } else if (l >= 2 && ((addr & 1) == 0)) {
3263 /* 16 bit write access */
3264#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3265 val = lduw_p(buf);
3266#else
3267 val = *(const uint16_t *)buf;
3268#endif
3269 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
3270 l = 2;
3271 } else {
3272 /* 8 bit write access */
3273#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3274 val = ldub_p(buf);
3275#else
3276 val = *(const uint8_t *)buf;
3277#endif
3278 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
3279 l = 1;
3280 }
3281 } else {
3282 unsigned long addr1;
3283 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3284 /* RAM case */
3285#ifdef VBOX
3286 remR3PhysWrite(addr1, buf, l); NOREF(ptr);
3287#else
3288 ptr = phys_ram_base + addr1;
3289 memcpy(ptr, buf, l);
3290#endif
3291 if (!cpu_physical_memory_is_dirty(addr1)) {
3292 /* invalidate code */
3293 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3294 /* set dirty bit */
3295#ifdef VBOX
3296 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
3297#endif
3298 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3299 (0xff & ~CODE_DIRTY_FLAG);
3300 }
3301 }
3302 } else {
3303 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3304 !(pd & IO_MEM_ROMD)) {
3305 /* I/O case */
3306 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3307 if (l >= 4 && ((addr & 3) == 0)) {
3308 /* 32 bit read access */
3309 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3310#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3311 stl_p(buf, val);
3312#else
3313 *(uint32_t *)buf = val;
3314#endif
3315 l = 4;
3316 } else if (l >= 2 && ((addr & 1) == 0)) {
3317 /* 16 bit read access */
3318 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
3319#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3320 stw_p(buf, val);
3321#else
3322 *(uint16_t *)buf = val;
3323#endif
3324 l = 2;
3325 } else {
3326 /* 8 bit read access */
3327 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
3328#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3329 stb_p(buf, val);
3330#else
3331 *(uint8_t *)buf = val;
3332#endif
3333 l = 1;
3334 }
3335 } else {
3336 /* RAM case */
3337#ifdef VBOX
3338 remR3PhysRead((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), buf, l); NOREF(ptr);
3339#else
3340 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3341 (addr & ~TARGET_PAGE_MASK);
3342 memcpy(buf, ptr, l);
3343#endif
3344 }
3345 }
3346 len -= l;
3347 buf += l;
3348 addr += l;
3349 }
3350}
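
/* Usage sketch (illustrative; 'gpa' is a made-up variable): read one
   32-bit value from guest physical memory through the slow path.  The
   cpu_physical_memory_read()/write() helpers used later in this file are
   thin wrappers around this function. */
#if 0
    uint32_t tmp;
    cpu_physical_memory_rw(gpa, (uint8_t *)&tmp, sizeof(tmp), 0 /* read */);
#endif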
3351
3352#ifndef VBOX
3353/* used for ROM loading : can write in RAM and ROM */
3354void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3355 const uint8_t *buf, int len)
3356{
3357 int l;
3358 uint8_t *ptr;
3359 target_phys_addr_t page;
3360 unsigned long pd;
3361 PhysPageDesc *p;
3362
3363 while (len > 0) {
3364 page = addr & TARGET_PAGE_MASK;
3365 l = (page + TARGET_PAGE_SIZE) - addr;
3366 if (l > len)
3367 l = len;
3368 p = phys_page_find(page >> TARGET_PAGE_BITS);
3369 if (!p) {
3370 pd = IO_MEM_UNASSIGNED;
3371 } else {
3372 pd = p->phys_offset;
3373 }
3374
3375 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3376 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3377 !(pd & IO_MEM_ROMD)) {
3378 /* do nothing */
3379 } else {
3380 unsigned long addr1;
3381 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3382 /* ROM/RAM case */
3383 ptr = phys_ram_base + addr1;
3384 memcpy(ptr, buf, l);
3385 }
3386 len -= l;
3387 buf += l;
3388 addr += l;
3389 }
3390}
3391#endif /* !VBOX */
3392
3393
3394/* warning: addr must be aligned */
3395uint32_t ldl_phys(target_phys_addr_t addr)
3396{
3397 int io_index;
3398 uint8_t *ptr;
3399 uint32_t val;
3400 unsigned long pd;
3401 PhysPageDesc *p;
3402
3403 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3404 if (!p) {
3405 pd = IO_MEM_UNASSIGNED;
3406 } else {
3407 pd = p->phys_offset;
3408 }
3409
3410 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3411 !(pd & IO_MEM_ROMD)) {
3412 /* I/O case */
3413 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3414 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3415 } else {
3416 /* RAM case */
3417#ifndef VBOX
3418 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3419 (addr & ~TARGET_PAGE_MASK);
3420 val = ldl_p(ptr);
3421#else
3422 val = remR3PhysReadU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
3423#endif
3424 }
3425 return val;
3426}
3427
3428/* warning: addr must be aligned */
3429uint64_t ldq_phys(target_phys_addr_t addr)
3430{
3431 int io_index;
3432 uint8_t *ptr;
3433 uint64_t val;
3434 unsigned long pd;
3435 PhysPageDesc *p;
3436
3437 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3438 if (!p) {
3439 pd = IO_MEM_UNASSIGNED;
3440 } else {
3441 pd = p->phys_offset;
3442 }
3443
3444 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3445 !(pd & IO_MEM_ROMD)) {
3446 /* I/O case */
3447 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3448#ifdef TARGET_WORDS_BIGENDIAN
3449 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3450 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3451#else
3452 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3453 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3454#endif
3455 } else {
3456 /* RAM case */
3457#ifndef VBOX
3458 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3459 (addr & ~TARGET_PAGE_MASK);
3460 val = ldq_p(ptr);
3461#else
3462 val = remR3PhysReadU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
3463#endif
3464 }
3465 return val;
3466}
3467
3468/* XXX: optimize */
3469uint32_t ldub_phys(target_phys_addr_t addr)
3470{
3471 uint8_t val;
3472 cpu_physical_memory_read(addr, &val, 1);
3473 return val;
3474}
3475
3476/* XXX: optimize */
3477uint32_t lduw_phys(target_phys_addr_t addr)
3478{
3479 uint16_t val;
3480 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3481 return tswap16(val);
3482}
3483
3484/* warning: addr must be aligned. The ram page is not marked as dirty
3485 and the code inside is not invalidated. It is useful if the dirty
3486 bits are used to track modified PTEs */
3487void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3488{
3489 int io_index;
3490 uint8_t *ptr;
3491 unsigned long pd;
3492 PhysPageDesc *p;
3493
3494 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3495 if (!p) {
3496 pd = IO_MEM_UNASSIGNED;
3497 } else {
3498 pd = p->phys_offset;
3499 }
3500
3501 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3502 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3503 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3504 } else {
3505#ifndef VBOX
3506 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3507 ptr = phys_ram_base + addr1;
3508 stl_p(ptr, val);
3509#else
3510 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3511#endif
3512#ifndef VBOX
3513 if (unlikely(in_migration)) {
3514 if (!cpu_physical_memory_is_dirty(addr1)) {
3515 /* invalidate code */
3516 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3517 /* set dirty bit */
3518 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3519 (0xff & ~CODE_DIRTY_FLAG);
3520 }
3521 }
3522#endif /* !VBOX */
3523 }
3524}
3525
3526void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3527{
3528 int io_index;
3529 uint8_t *ptr;
3530 unsigned long pd;
3531 PhysPageDesc *p;
3532
3533 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3534 if (!p) {
3535 pd = IO_MEM_UNASSIGNED;
3536 } else {
3537 pd = p->phys_offset;
3538 }
3539
3540 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3541 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3542#ifdef TARGET_WORDS_BIGENDIAN
3543 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3544 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3545#else
3546 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3547 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3548#endif
3549 } else {
3550#ifndef VBOX
3551 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3552 (addr & ~TARGET_PAGE_MASK);
3553 stq_p(ptr, val);
3554#else
3555 remR3PhysWriteU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3556#endif
3557 }
3558}
3559
3560/* warning: addr must be aligned */
3561void stl_phys(target_phys_addr_t addr, uint32_t val)
3562{
3563 int io_index;
3564 uint8_t *ptr;
3565 unsigned long pd;
3566 PhysPageDesc *p;
3567
3568 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3569 if (!p) {
3570 pd = IO_MEM_UNASSIGNED;
3571 } else {
3572 pd = p->phys_offset;
3573 }
3574
3575 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3576 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3577 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3578 } else {
3579 unsigned long addr1;
3580 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3581 /* RAM case */
3582#ifndef VBOX
3583 ptr = phys_ram_base + addr1;
3584 stl_p(ptr, val);
3585#else
3586 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3587#endif
3588 if (!cpu_physical_memory_is_dirty(addr1)) {
3589 /* invalidate code */
3590 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3591 /* set dirty bit */
3592#ifdef VBOX
3593 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
3594#endif
3595 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3596 (0xff & ~CODE_DIRTY_FLAG);
3597 }
3598 }
3599}
3600
3601/* XXX: optimize */
3602void stb_phys(target_phys_addr_t addr, uint32_t val)
3603{
3604 uint8_t v = val;
3605 cpu_physical_memory_write(addr, &v, 1);
3606}
3607
3608/* XXX: optimize */
3609void stw_phys(target_phys_addr_t addr, uint32_t val)
3610{
3611 uint16_t v = tswap16(val);
3612 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3613}
3614
3615/* XXX: optimize */
3616void stq_phys(target_phys_addr_t addr, uint64_t val)
3617{
3618 val = tswap64(val);
3619 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3620}
3621
3622#endif
3623
3624/* virtual memory access for debug */
3625int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3626 uint8_t *buf, int len, int is_write)
3627{
3628 int l;
3629 target_phys_addr_t phys_addr;
3630 target_ulong page;
3631
3632 while (len > 0) {
3633 page = addr & TARGET_PAGE_MASK;
3634 phys_addr = cpu_get_phys_page_debug(env, page);
3635 /* if no physical page mapped, return an error */
3636 if (phys_addr == -1)
3637 return -1;
3638 l = (page + TARGET_PAGE_SIZE) - addr;
3639 if (l > len)
3640 l = len;
3641 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3642 buf, l, is_write);
3643 len -= l;
3644 buf += l;
3645 addr += l;
3646 }
3647 return 0;
3648}
3649
3650/* in deterministic execution mode, instructions doing device I/Os
3651 must be at the end of the TB */
3652void cpu_io_recompile(CPUState *env, void *retaddr)
3653{
3654 TranslationBlock *tb;
3655 uint32_t n, cflags;
3656 target_ulong pc, cs_base;
3657 uint64_t flags;
3658
3659 tb = tb_find_pc((unsigned long)retaddr);
3660 if (!tb) {
3661 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3662 retaddr);
3663 }
3664 n = env->icount_decr.u16.low + tb->icount;
3665 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3666 /* Calculate how many instructions had been executed before the fault
3667 occurred. */
3668 n = n - env->icount_decr.u16.low;
3669 /* Generate a new TB ending on the I/O insn. */
3670 n++;
3671 /* On MIPS and SH, delay slot instructions can only be restarted if
3672 they were already the first instruction in the TB. If this is not
3673 the first instruction in a TB then re-execute the preceding
3674 branch. */
3675#if defined(TARGET_MIPS)
3676 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3677 env->active_tc.PC -= 4;
3678 env->icount_decr.u16.low++;
3679 env->hflags &= ~MIPS_HFLAG_BMASK;
3680 }
3681#elif defined(TARGET_SH4)
3682 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3683 && n > 1) {
3684 env->pc -= 2;
3685 env->icount_decr.u16.low++;
3686 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3687 }
3688#endif
3689 /* This should never happen. */
3690 if (n > CF_COUNT_MASK)
3691 cpu_abort(env, "TB too big during recompile");
3692
3693 cflags = n | CF_LAST_IO;
3694 pc = tb->pc;
3695 cs_base = tb->cs_base;
3696 flags = tb->flags;
3697 tb_phys_invalidate(tb, -1);
3698 /* FIXME: In theory this could raise an exception. In practice
3699 we have already translated the block once so it's probably ok. */
3700 tb_gen_code(env, pc, cs_base, flags, cflags);
3701 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3702 the first in the TB) then we end up generating a whole new TB and
3703 repeating the fault, which is horribly inefficient.
3704 Better would be to execute just this insn uncached, or generate a
3705 second new TB. */
3706 cpu_resume_from_signal(env, NULL);
3707}
3708
3709#ifndef VBOX
3710void dump_exec_info(FILE *f,
3711 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3712{
3713 int i, target_code_size, max_target_code_size;
3714 int direct_jmp_count, direct_jmp2_count, cross_page;
3715 TranslationBlock *tb;
3716
3717 target_code_size = 0;
3718 max_target_code_size = 0;
3719 cross_page = 0;
3720 direct_jmp_count = 0;
3721 direct_jmp2_count = 0;
3722 for(i = 0; i < nb_tbs; i++) {
3723 tb = &tbs[i];
3724 target_code_size += tb->size;
3725 if (tb->size > max_target_code_size)
3726 max_target_code_size = tb->size;
3727 if (tb->page_addr[1] != -1)
3728 cross_page++;
3729 if (tb->tb_next_offset[0] != 0xffff) {
3730 direct_jmp_count++;
3731 if (tb->tb_next_offset[1] != 0xffff) {
3732 direct_jmp2_count++;
3733 }
3734 }
3735 }
3736 /* XXX: avoid using doubles ? */
3737 cpu_fprintf(f, "Translation buffer state:\n");
3738 cpu_fprintf(f, "gen code size %ld/%ld\n",
3739 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3740 cpu_fprintf(f, "TB count %d/%d\n",
3741 nb_tbs, code_gen_max_blocks);
3742 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3743 nb_tbs ? target_code_size / nb_tbs : 0,
3744 max_target_code_size);
3745 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3746 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3747 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3748 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3749 cross_page,
3750 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3751 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3752 direct_jmp_count,
3753 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3754 direct_jmp2_count,
3755 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3756 cpu_fprintf(f, "\nStatistics:\n");
3757 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3758 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3759 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3760 tcg_dump_info(f, cpu_fprintf);
3761}
3762#endif /* !VBOX */
3763
3764#if !defined(CONFIG_USER_ONLY)
3765
3766#define MMUSUFFIX _cmmu
3767#define GETPC() NULL
3768#define env cpu_single_env
3769#define SOFTMMU_CODE_ACCESS
3770
3771#define SHIFT 0
3772#include "softmmu_template.h"
3773
3774#define SHIFT 1
3775#include "softmmu_template.h"
3776
3777#define SHIFT 2
3778#include "softmmu_template.h"
3779
3780#define SHIFT 3
3781#include "softmmu_template.h"
3782
3783#undef env
3784
3785#endif