VirtualBox

source: vbox/trunk/src/recompiler/exec.c@36170

Last change on this file since 36170 was 36170, checked in by vboxsync, 14 years ago

rem: synced up to svn://svn.savannah.nongnu.org/qemu/trunk@6686 (repo UUID c046a42c-6fe2-441c-8c8c-71466251a162).

  • Property svn:eol-style set to native
File size: 121.9 KB
 
1/*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
19 */
20
21/*
22 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29
30#include "config.h"
31#ifndef VBOX
32#ifdef _WIN32
33#define WIN32_LEAN_AND_MEAN
34#include <windows.h>
35#else
36#include <sys/types.h>
37#include <sys/mman.h>
38#endif
39#include <stdlib.h>
40#include <stdio.h>
41#include <stdarg.h>
42#include <string.h>
43#include <errno.h>
44#include <unistd.h>
45#include <inttypes.h>
46#else /* VBOX */
47# include <stdlib.h>
48# include <stdio.h>
49# include <iprt/alloc.h>
50# include <iprt/string.h>
51# include <iprt/param.h>
52# include <VBox/vmm/pgm.h> /* PGM_DYNAMIC_RAM_ALLOC */
53#endif /* VBOX */
54
55#include "cpu.h"
56#include "exec-all.h"
57#include "qemu-common.h"
58#include "tcg.h"
59#ifndef VBOX
60#include "hw/hw.h"
61#endif
62#include "osdep.h"
63#include "kvm.h"
64#if defined(CONFIG_USER_ONLY)
65#include <qemu.h>
66#endif
67
68//#define DEBUG_TB_INVALIDATE
69//#define DEBUG_FLUSH
70//#define DEBUG_TLB
71//#define DEBUG_UNASSIGNED
72
73/* make various TB consistency checks */
74//#define DEBUG_TB_CHECK
75//#define DEBUG_TLB_CHECK
76
77//#define DEBUG_IOPORT
78//#define DEBUG_SUBPAGE
79
80#if !defined(CONFIG_USER_ONLY)
81/* TB consistency checks only implemented for usermode emulation. */
82#undef DEBUG_TB_CHECK
83#endif
84
85#define SMC_BITMAP_USE_THRESHOLD 10
86
87#define MMAP_AREA_START 0x00000000
88#define MMAP_AREA_END 0xa8000000
89
90#if defined(TARGET_SPARC64)
91#define TARGET_PHYS_ADDR_SPACE_BITS 41
92#elif defined(TARGET_SPARC)
93#define TARGET_PHYS_ADDR_SPACE_BITS 36
94#elif defined(TARGET_ALPHA)
95#define TARGET_PHYS_ADDR_SPACE_BITS 42
96#define TARGET_VIRT_ADDR_SPACE_BITS 42
97#elif defined(TARGET_PPC64)
98#define TARGET_PHYS_ADDR_SPACE_BITS 42
99#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
100#define TARGET_PHYS_ADDR_SPACE_BITS 42
101#elif defined(TARGET_I386) && !defined(USE_KQEMU)
102#define TARGET_PHYS_ADDR_SPACE_BITS 36
103#else
104/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
105#define TARGET_PHYS_ADDR_SPACE_BITS 32
106#endif
107
108static TranslationBlock *tbs;
109int code_gen_max_blocks;
110TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
111static int nb_tbs;
112/* any access to the tbs or the page table must use this lock */
113spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
114
115#ifndef VBOX
116#if defined(__arm__) || defined(__sparc_v9__)
117/* The prologue must be reachable with a direct jump. ARM and Sparc64
118 have limited branch ranges (possibly also PPC), so place it in a
119 section close to the code segment. */
120#define code_gen_section \
121 __attribute__((__section__(".gen_code"))) \
122 __attribute__((aligned (32)))
123#else
124#define code_gen_section \
125 __attribute__((aligned (32)))
126#endif
127
128uint8_t code_gen_prologue[1024] code_gen_section;
129#else /* VBOX */
130extern uint8_t* code_gen_prologue;
131#endif /* VBOX */
132static uint8_t *code_gen_buffer;
133static unsigned long code_gen_buffer_size;
134/* threshold to flush the translated code buffer */
135static unsigned long code_gen_buffer_max_size;
136uint8_t *code_gen_ptr;
137
138#ifndef VBOX
139#if !defined(CONFIG_USER_ONLY)
140ram_addr_t phys_ram_size;
141int phys_ram_fd;
142uint8_t *phys_ram_base;
143uint8_t *phys_ram_dirty;
144static int in_migration;
145static ram_addr_t phys_ram_alloc_offset = 0;
146#endif
147#else /* VBOX */
148RTGCPHYS phys_ram_size;
149/* we have memory ranges (the high PC-BIOS mapping) which
150 cause some pages to fall outside the dirty map here. */
151RTGCPHYS phys_ram_dirty_size;
152uint8_t *phys_ram_dirty;
153#endif /* VBOX */
154
155CPUState *first_cpu;
156/* current CPU in the current thread. It is only valid inside
157 cpu_exec() */
158CPUState *cpu_single_env;
159/* 0 = Do not count executed instructions.
160 1 = Precise instruction counting.
161 2 = Adaptive rate instruction counting. */
162int use_icount = 0;
163/* Current instruction counter. While executing translated code this may
164 include some instructions that have not yet been executed. */
165int64_t qemu_icount;
166
167typedef struct PageDesc {
168 /* list of TBs intersecting this ram page */
169 TranslationBlock *first_tb;
170 /* in order to optimize self modifying code, we count the number
171 of lookups we do to a given page to use a bitmap */
172 unsigned int code_write_count;
173 uint8_t *code_bitmap;
174#if defined(CONFIG_USER_ONLY)
175 unsigned long flags;
176#endif
177} PageDesc;
178
179typedef struct PhysPageDesc {
180 /* offset in host memory of the page + io_index in the low bits */
181 ram_addr_t phys_offset;
182 ram_addr_t region_offset;
183} PhysPageDesc;
184
185#define L2_BITS 10
186#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
187/* XXX: this is a temporary hack for alpha target.
188 * In the future, this is to be replaced by a multi-level table
189 * to actually be able to handle the complete 64 bits address space.
190 */
191#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
192#else
193#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
194#endif
195#ifdef VBOX
196#define L0_BITS (TARGET_PHYS_ADDR_SPACE_BITS - 32)
197#endif
198
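/* A page index (address >> TARGET_PAGE_BITS) is looked up through a
   multi-level table: the VBox build adds an L0 level (the top
   TARGET_PHYS_ADDR_SPACE_BITS - 32 bits) so physical addresses above 4GB
   are covered as well, the next L1_BITS select an L1 entry, and the low
   L2_BITS select the descriptor inside an L2 array of L2_SIZE entries. */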
199#ifdef VBOX
200#define L0_SIZE (1 << L0_BITS)
201#endif
202#define L1_SIZE (1 << L1_BITS)
203#define L2_SIZE (1 << L2_BITS)
204
205unsigned long qemu_real_host_page_size;
206unsigned long qemu_host_page_bits;
207unsigned long qemu_host_page_size;
208unsigned long qemu_host_page_mask;
209
210/* XXX: for system emulation, it could just be an array */
211#ifndef VBOX
212static PageDesc *l1_map[L1_SIZE];
213static PhysPageDesc **l1_phys_map;
214#else
215static unsigned l0_map_max_used = 0;
216static PageDesc **l0_map[L0_SIZE];
217static void **l0_phys_map[L0_SIZE];
218#endif
219
220#if !defined(CONFIG_USER_ONLY)
221static void io_mem_init(void);
222
223/* io memory support */
224CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
225CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
226void *io_mem_opaque[IO_MEM_NB_ENTRIES];
227char io_mem_used[IO_MEM_NB_ENTRIES];
228static int io_mem_watch;
229#endif
230
231#ifndef VBOX
232/* log support */
233static const char *logfilename = "/tmp/qemu.log";
234#endif /* !VBOX */
235FILE *logfile;
236int loglevel;
237#ifndef VBOX
238static int log_append = 0;
239#endif
240
241/* statistics */
242#ifndef VBOX
243static int tlb_flush_count;
244static int tb_flush_count;
245static int tb_phys_invalidate_count;
246#else /* VBOX - Resettable U32 stats, see VBoxRecompiler.c. */
247uint32_t tlb_flush_count;
248uint32_t tb_flush_count;
249uint32_t tb_phys_invalidate_count;
250#endif /* VBOX */
251
252#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
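/* A subpage splits one target page into byte ranges with their own memory
   callbacks, so several MMIO regions smaller than TARGET_PAGE_SIZE can share
   a page; accesses are dispatched through the per-offset tables below. */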
253typedef struct subpage_t {
254 target_phys_addr_t base;
255 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
256 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
257 void *opaque[TARGET_PAGE_SIZE][2][4];
258 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
259} subpage_t;
260
261#ifndef VBOX
262#ifdef _WIN32
263static void map_exec(void *addr, long size)
264{
265 DWORD old_protect;
266 VirtualProtect(addr, size,
267 PAGE_EXECUTE_READWRITE, &old_protect);
268
269}
270#else
271static void map_exec(void *addr, long size)
272{
273 unsigned long start, end, page_size;
274
275 page_size = getpagesize();
276 start = (unsigned long)addr;
277 start &= ~(page_size - 1);
278
279 end = (unsigned long)addr + size;
280 end += page_size - 1;
281 end &= ~(page_size - 1);
282
283 mprotect((void *)start, end - start,
284 PROT_READ | PROT_WRITE | PROT_EXEC);
285}
286#endif
287#else /* VBOX */
288static void map_exec(void *addr, long size)
289{
290 RTMemProtect(addr, size,
291 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
292}
293#endif /* VBOX */
294
295static void page_init(void)
296{
297 /* NOTE: we can always suppose that qemu_host_page_size >=
298 TARGET_PAGE_SIZE */
299#ifdef VBOX
300 RTMemProtect(code_gen_buffer, sizeof(code_gen_buffer),
301 RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE);
302 qemu_real_host_page_size = PAGE_SIZE;
303#else /* !VBOX */
304#ifdef _WIN32
305 {
306 SYSTEM_INFO system_info;
307
308 GetSystemInfo(&system_info);
309 qemu_real_host_page_size = system_info.dwPageSize;
310 }
311#else
312 qemu_real_host_page_size = getpagesize();
313#endif
314#endif /* !VBOX */
315 if (qemu_host_page_size == 0)
316 qemu_host_page_size = qemu_real_host_page_size;
317 if (qemu_host_page_size < TARGET_PAGE_SIZE)
318 qemu_host_page_size = TARGET_PAGE_SIZE;
319 qemu_host_page_bits = 0;
320#ifndef VBOX
321 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
322#else
323 while ((1 << qemu_host_page_bits) < (int)qemu_host_page_size)
324#endif
325 qemu_host_page_bits++;
326 qemu_host_page_mask = ~(qemu_host_page_size - 1);
327#ifndef VBOX
328 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
329 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
330#endif
331
332#ifdef VBOX
333 /* We use other means to set the reserved bit on our pages */
334#else /* !VBOX */
335#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
336 {
337 long long startaddr, endaddr;
338 FILE *f;
339 int n;
340
341 mmap_lock();
342 last_brk = (unsigned long)sbrk(0);
343 f = fopen("/proc/self/maps", "r");
344 if (f) {
345 do {
346 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
347 if (n == 2) {
348 startaddr = MIN(startaddr,
349 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
350 endaddr = MIN(endaddr,
351 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
352 page_set_flags(startaddr & TARGET_PAGE_MASK,
353 TARGET_PAGE_ALIGN(endaddr),
354 PAGE_RESERVED);
355 }
356 } while (!feof(f));
357 fclose(f);
358 }
359 mmap_unlock();
360 }
361#endif
362#endif /* !VBOX */
363}
364
365static inline PageDesc **page_l1_map(target_ulong index)
366{
367#ifndef VBOX
368#if TARGET_LONG_BITS > 32
369 /* Host memory outside guest VM. For 32-bit targets we have already
370 excluded high addresses. */
371 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
372 return NULL;
373#endif
374 return &l1_map[index >> L2_BITS];
375#else /* VBOX */
376 PageDesc **l1_map;
377 AssertMsgReturn(index < (target_ulong)L2_SIZE * L1_SIZE * L0_SIZE,
378 ("index=%RGp >= %RGp; L1_SIZE=%#x L2_SIZE=%#x L0_SIZE=%#x\n",
379 (RTGCPHYS)index, (RTGCPHYS)L2_SIZE * L1_SIZE, L1_SIZE, L2_SIZE, L0_SIZE),
380 NULL);
381 l1_map = l0_map[index >> (L1_BITS + L2_BITS)];
382 if (RT_UNLIKELY(!l1_map))
383 {
384 unsigned i0 = index >> (L1_BITS + L2_BITS);
385 l0_map[i0] = l1_map = qemu_mallocz(sizeof(PageDesc *) * L1_SIZE);
386 if (RT_UNLIKELY(!l1_map))
387 return NULL;
388 if (i0 >= l0_map_max_used)
389 l0_map_max_used = i0 + 1;
390 }
391 return &l1_map[(index >> L2_BITS) & (L1_SIZE - 1)];
392#endif /* VBOX */
393}
394
395static inline PageDesc *page_find_alloc(target_ulong index)
396{
397 PageDesc **lp, *p;
398 lp = page_l1_map(index);
399 if (!lp)
400 return NULL;
401
402 p = *lp;
403 if (!p) {
404 /* allocate if not found */
405#if defined(CONFIG_USER_ONLY)
406 size_t len = sizeof(PageDesc) * L2_SIZE;
407 /* Don't use qemu_malloc because it may recurse. */
408 p = mmap(0, len, PROT_READ | PROT_WRITE,
409 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
410 *lp = p;
411 if (h2g_valid(p)) {
412 unsigned long addr = h2g(p);
413 page_set_flags(addr & TARGET_PAGE_MASK,
414 TARGET_PAGE_ALIGN(addr + len),
415 PAGE_RESERVED);
416 }
417#else
418 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
419 *lp = p;
420#endif
421 }
422 return p + (index & (L2_SIZE - 1));
423}
424
425static inline PageDesc *page_find(target_ulong index)
426{
427 PageDesc **lp, *p;
428 lp = page_l1_map(index);
429 if (!lp)
430 return NULL;
431
432 p = *lp;
433 if (!p)
434 return 0;
435 return p + (index & (L2_SIZE - 1));
436}
437
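/* Return the PhysPageDesc for a physical page index, optionally allocating
   the intermediate map levels and the L2 descriptor array on demand. Fresh
   descriptors start out as IO_MEM_UNASSIGNED with region_offset set to the
   page's own physical address. */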
438static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
439{
440 void **lp, **p;
441 PhysPageDesc *pd;
442
443#ifndef VBOX
444 p = (void **)l1_phys_map;
445#if TARGET_PHYS_ADDR_SPACE_BITS > 32
446
447#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
448#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
449#endif
450 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
451 p = *lp;
452 if (!p) {
453 /* allocate if not found */
454 if (!alloc)
455 return NULL;
456 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
457 memset(p, 0, sizeof(void *) * L1_SIZE);
458 *lp = p;
459 }
460#endif
461#else /* VBOX */
462 /* level 0 lookup and lazy allocation of level 1 map. */
463 if (RT_UNLIKELY(index >= (target_phys_addr_t)L2_SIZE * L1_SIZE * L0_SIZE))
464 return NULL;
465 p = l0_phys_map[index >> (L1_BITS + L2_BITS)];
466 if (RT_UNLIKELY(!p)) {
467 if (!alloc)
468 return NULL;
469 p = qemu_vmalloc(sizeof(void **) * L1_SIZE);
470 memset(p, 0, sizeof(void **) * L1_SIZE);
471 l0_phys_map[index >> (L1_BITS + L2_BITS)] = p;
472 }
473
474 /* level 1 lookup and lazy allocation of level 2 map. */
475#endif /* VBOX */
476 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
477 pd = *lp;
478 if (!pd) {
479 int i;
480 /* allocate if not found */
481 if (!alloc)
482 return NULL;
483 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
484 *lp = pd;
485 for (i = 0; i < L2_SIZE; i++) {
486 pd[i].phys_offset = IO_MEM_UNASSIGNED;
487 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
488 }
489 }
490 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
491}
492
493static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
494{
495 return phys_page_find_alloc(index, 0);
496}
497
498#if !defined(CONFIG_USER_ONLY)
499static void tlb_protect_code(ram_addr_t ram_addr);
500static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
501 target_ulong vaddr);
502#define mmap_lock() do { } while(0)
503#define mmap_unlock() do { } while(0)
504#endif
505
506#ifdef VBOX /* We don't need such a huge codegen buffer size, as we execute
507 most of the code in raw or hwacc mode. */
508#define DEFAULT_CODE_GEN_BUFFER_SIZE (8 * 1024 * 1024)
509#else /* !VBOX */
510#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
511#endif /* !VBOX */
512
513#if defined(CONFIG_USER_ONLY)
514/* Currently it is not recommended to allocate big chunks of data in
515 user mode. It will change when a dedicated libc is used */
516#define USE_STATIC_CODE_GEN_BUFFER
517#endif
518
519/* VBox allocates codegen buffer dynamically */
520#ifndef VBOX
521#ifdef USE_STATIC_CODE_GEN_BUFFER
522static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
523#endif
524#endif
525
526static void code_gen_alloc(unsigned long tb_size)
527{
528#ifdef USE_STATIC_CODE_GEN_BUFFER
529 code_gen_buffer = static_code_gen_buffer;
530 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
531 map_exec(code_gen_buffer, code_gen_buffer_size);
532#else
533#ifdef VBOX
534 /* We cannot use phys_ram_size here, as it's 0 now,
535 * it only gets initialized once the RAM registration callback
536 * (REMR3NotifyPhysRamRegister()) has been called.
537 */
538 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
539#else
540 code_gen_buffer_size = tb_size;
541 if (code_gen_buffer_size == 0) {
542#if defined(CONFIG_USER_ONLY)
543 /* in user mode, phys_ram_size is not meaningful */
544 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
545#else
546 /* XXX: needs adjustments */
547 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
548#endif
549 }
550 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
551 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
552#endif /* VBOX */
553 /* The code gen buffer location may have constraints depending on
554 the host cpu and OS */
555#ifdef VBOX
556 code_gen_buffer = RTMemExecAlloc(code_gen_buffer_size);
557
558 if (!code_gen_buffer) {
559 LogRel(("REM: failed to allocate codegen buffer %lld\n",
560 code_gen_buffer_size));
561 return;
562 }
563#else /* !VBOX */
564#if defined(__linux__)
565 {
566 int flags;
567 void *start = NULL;
568
569 flags = MAP_PRIVATE | MAP_ANONYMOUS;
570#if defined(__x86_64__)
571 flags |= MAP_32BIT;
572 /* Cannot map more than that */
573 if (code_gen_buffer_size > (800 * 1024 * 1024))
574 code_gen_buffer_size = (800 * 1024 * 1024);
575#elif defined(__sparc_v9__)
576 // Map the buffer below 2G, so we can use direct calls and branches
577 flags |= MAP_FIXED;
578 start = (void *) 0x60000000UL;
579 if (code_gen_buffer_size > (512 * 1024 * 1024))
580 code_gen_buffer_size = (512 * 1024 * 1024);
581#elif defined(__arm__)
582 /* Map the buffer below 32M, so we can use direct calls and branches */
583 flags |= MAP_FIXED;
584 start = (void *) 0x01000000UL;
585 if (code_gen_buffer_size > 16 * 1024 * 1024)
586 code_gen_buffer_size = 16 * 1024 * 1024;
587#endif
588 code_gen_buffer = mmap(start, code_gen_buffer_size,
589 PROT_WRITE | PROT_READ | PROT_EXEC,
590 flags, -1, 0);
591 if (code_gen_buffer == MAP_FAILED) {
592 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
593 exit(1);
594 }
595 }
596#elif defined(__FreeBSD__)
597 {
598 int flags;
599 void *addr = NULL;
600 flags = MAP_PRIVATE | MAP_ANONYMOUS;
601#if defined(__x86_64__)
602 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
603 * 0x40000000 is free */
604 flags |= MAP_FIXED;
605 addr = (void *)0x40000000;
606 /* Cannot map more than that */
607 if (code_gen_buffer_size > (800 * 1024 * 1024))
608 code_gen_buffer_size = (800 * 1024 * 1024);
609#endif
610 code_gen_buffer = mmap(addr, code_gen_buffer_size,
611 PROT_WRITE | PROT_READ | PROT_EXEC,
612 flags, -1, 0);
613 if (code_gen_buffer == MAP_FAILED) {
614 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
615 exit(1);
616 }
617 }
618#else
619 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
620 map_exec(code_gen_buffer, code_gen_buffer_size);
621#endif
622#endif /* !VBOX */
623#endif /* !USE_STATIC_CODE_GEN_BUFFER */
624#ifndef VBOX
625 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
626#else
627 map_exec(code_gen_prologue, _1K);
628#endif
629 code_gen_buffer_max_size = code_gen_buffer_size -
630 code_gen_max_block_size();
631 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
632 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
633}
634
635/* Must be called before using the QEMU cpus. 'tb_size' is the size
636 (in bytes) allocated to the translation buffer. Zero means default
637 size. */
638void cpu_exec_init_all(unsigned long tb_size)
639{
640 cpu_gen_init();
641 code_gen_alloc(tb_size);
642 code_gen_ptr = code_gen_buffer;
643 page_init();
644#if !defined(CONFIG_USER_ONLY)
645 io_mem_init();
646#endif
647}
648
649#ifndef VBOX
650#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
651
652#define CPU_COMMON_SAVE_VERSION 1
653
654static void cpu_common_save(QEMUFile *f, void *opaque)
655{
656 CPUState *env = opaque;
657
658 qemu_put_be32s(f, &env->halted);
659 qemu_put_be32s(f, &env->interrupt_request);
660}
661
662static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
663{
664 CPUState *env = opaque;
665
666 if (version_id != CPU_COMMON_SAVE_VERSION)
667 return -EINVAL;
668
669 qemu_get_be32s(f, &env->halted);
670 qemu_get_be32s(f, &env->interrupt_request);
671 tlb_flush(env, 1);
672
673 return 0;
674}
675#endif
676#endif /* !VBOX */
677
678void cpu_exec_init(CPUState *env)
679{
680 CPUState **penv;
681 int cpu_index;
682
683 env->next_cpu = NULL;
684 penv = &first_cpu;
685 cpu_index = 0;
686 while (*penv != NULL) {
687 penv = (CPUState **)&(*penv)->next_cpu;
688 cpu_index++;
689 }
690 env->cpu_index = cpu_index;
691 TAILQ_INIT(&env->breakpoints);
692 TAILQ_INIT(&env->watchpoints);
693 *penv = env;
694#ifndef VBOX
695#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
696 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
697 cpu_common_save, cpu_common_load, env);
698 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
699 cpu_save, cpu_load, env);
700#endif
701#endif /* !VBOX */
702}
703
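/* Drop the self-modifying-code bitmap of a page and reset its write counter;
   the bitmap is rebuilt lazily once the page again crosses
   SMC_BITMAP_USE_THRESHOLD write accesses. */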
704static inline void invalidate_page_bitmap(PageDesc *p)
705{
706 if (p->code_bitmap) {
707 qemu_free(p->code_bitmap);
708 p->code_bitmap = NULL;
709 }
710 p->code_write_count = 0;
711}
712
713/* set to NULL all the 'first_tb' fields in all PageDescs */
714static void page_flush_tb(void)
715{
716 int i, j;
717 PageDesc *p;
718#ifdef VBOX
719 int k;
720#endif
721
722#ifdef VBOX
723 k = l0_map_max_used;
724 while (k-- > 0) {
725 PageDesc **l1_map = l0_map[k];
726 if (l1_map) {
727#endif
728 for(i = 0; i < L1_SIZE; i++) {
729 p = l1_map[i];
730 if (p) {
731 for(j = 0; j < L2_SIZE; j++) {
732 p->first_tb = NULL;
733 invalidate_page_bitmap(p);
734 p++;
735 }
736 }
737 }
738#ifdef VBOX
739 }
740 }
741#endif
742}
743
744/* flush all the translation blocks */
745/* XXX: tb_flush is currently not thread safe */
746void tb_flush(CPUState *env1)
747{
748 CPUState *env;
749#ifdef VBOX
750 STAM_PROFILE_START(&env1->StatTbFlush, a);
751#endif
752#if defined(DEBUG_FLUSH)
753 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
754 (unsigned long)(code_gen_ptr - code_gen_buffer),
755 nb_tbs, nb_tbs > 0 ?
756 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
757#endif
758 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
759 cpu_abort(env1, "Internal error: code buffer overflow\n");
760
761 nb_tbs = 0;
762
763 for(env = first_cpu; env != NULL; env = env->next_cpu) {
764 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
765 }
766
767 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
768 page_flush_tb();
769
770 code_gen_ptr = code_gen_buffer;
771 /* XXX: flush processor icache at this point if cache flush is
772 expensive */
773 tb_flush_count++;
774#ifdef VBOX
775 STAM_PROFILE_STOP(&env1->StatTbFlush, a);
776#endif
777}
778
779#ifdef DEBUG_TB_CHECK
780
781static void tb_invalidate_check(target_ulong address)
782{
783 TranslationBlock *tb;
784 int i;
785 address &= TARGET_PAGE_MASK;
786 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
787 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
788 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
789 address >= tb->pc + tb->size)) {
790 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
791 address, (long)tb->pc, tb->size);
792 }
793 }
794 }
795}
796
797/* verify that all the pages have correct rights for code */
798static void tb_page_check(void)
799{
800 TranslationBlock *tb;
801 int i, flags1, flags2;
802
803 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
804 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
805 flags1 = page_get_flags(tb->pc);
806 flags2 = page_get_flags(tb->pc + tb->size - 1);
807 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
808 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
809 (long)tb->pc, tb->size, flags1, flags2);
810 }
811 }
812 }
813}
814
815static void tb_jmp_check(TranslationBlock *tb)
816{
817 TranslationBlock *tb1;
818 unsigned int n1;
819
820 /* suppress any remaining jumps to this TB */
821 tb1 = tb->jmp_first;
822 for(;;) {
823 n1 = (long)tb1 & 3;
824 tb1 = (TranslationBlock *)((long)tb1 & ~3);
825 if (n1 == 2)
826 break;
827 tb1 = tb1->jmp_next[n1];
828 }
829 /* check end of list */
830 if (tb1 != tb) {
831 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
832 }
833}
834
835#endif
836
837/* invalidate one TB */
838static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
839 int next_offset)
840{
841 TranslationBlock *tb1;
842 for(;;) {
843 tb1 = *ptb;
844 if (tb1 == tb) {
845 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
846 break;
847 }
848 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
849 }
850}
851
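/* TB list pointers are tagged in their two low bits: a value of 0 or 1 tells
   which slot (page_next[n] or jmp_next[n]) of the pointed-to TB continues the
   list, and 2 marks the end of the circular jump list (jmp_first points back
   to the owning TB with tag 2). Mask the tag off before dereferencing. */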
852static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
853{
854 TranslationBlock *tb1;
855 unsigned int n1;
856
857 for(;;) {
858 tb1 = *ptb;
859 n1 = (long)tb1 & 3;
860 tb1 = (TranslationBlock *)((long)tb1 & ~3);
861 if (tb1 == tb) {
862 *ptb = tb1->page_next[n1];
863 break;
864 }
865 ptb = &tb1->page_next[n1];
866 }
867}
868
869static inline void tb_jmp_remove(TranslationBlock *tb, int n)
870{
871 TranslationBlock *tb1, **ptb;
872 unsigned int n1;
873
874 ptb = &tb->jmp_next[n];
875 tb1 = *ptb;
876 if (tb1) {
877 /* find tb(n) in circular list */
878 for(;;) {
879 tb1 = *ptb;
880 n1 = (long)tb1 & 3;
881 tb1 = (TranslationBlock *)((long)tb1 & ~3);
882 if (n1 == n && tb1 == tb)
883 break;
884 if (n1 == 2) {
885 ptb = &tb1->jmp_first;
886 } else {
887 ptb = &tb1->jmp_next[n1];
888 }
889 }
890 /* now we can suppress tb(n) from the list */
891 *ptb = tb->jmp_next[n];
892
893 tb->jmp_next[n] = NULL;
894 }
895}
896
897/* reset the jump entry 'n' of a TB so that it is not chained to
898 another TB */
899static inline void tb_reset_jump(TranslationBlock *tb, int n)
900{
901 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
902}
903
904void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
905{
906 CPUState *env;
907 PageDesc *p;
908 unsigned int h, n1;
909 target_phys_addr_t phys_pc;
910 TranslationBlock *tb1, *tb2;
911
912 /* remove the TB from the hash list */
913 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
914 h = tb_phys_hash_func(phys_pc);
915 tb_remove(&tb_phys_hash[h], tb,
916 offsetof(TranslationBlock, phys_hash_next));
917
918 /* remove the TB from the page list */
919 if (tb->page_addr[0] != page_addr) {
920 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
921 tb_page_remove(&p->first_tb, tb);
922 invalidate_page_bitmap(p);
923 }
924 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
925 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
926 tb_page_remove(&p->first_tb, tb);
927 invalidate_page_bitmap(p);
928 }
929
930 tb_invalidated_flag = 1;
931
932 /* remove the TB from the hash list */
933 h = tb_jmp_cache_hash_func(tb->pc);
934 for(env = first_cpu; env != NULL; env = env->next_cpu) {
935 if (env->tb_jmp_cache[h] == tb)
936 env->tb_jmp_cache[h] = NULL;
937 }
938
939 /* suppress this TB from the two jump lists */
940 tb_jmp_remove(tb, 0);
941 tb_jmp_remove(tb, 1);
942
943 /* suppress any remaining jumps to this TB */
944 tb1 = tb->jmp_first;
945 for(;;) {
946 n1 = (long)tb1 & 3;
947 if (n1 == 2)
948 break;
949 tb1 = (TranslationBlock *)((long)tb1 & ~3);
950 tb2 = tb1->jmp_next[n1];
951 tb_reset_jump(tb1, n1);
952 tb1->jmp_next[n1] = NULL;
953 tb1 = tb2;
954 }
955 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
956
957 tb_phys_invalidate_count++;
958}
959
960
961#ifdef VBOX
962
963void tb_invalidate_virt(CPUState *env, uint32_t eip)
964{
965# if 1
966 tb_flush(env);
967# else
968 uint8_t *cs_base, *pc;
969 unsigned int flags, h, phys_pc;
970 TranslationBlock *tb, **ptb;
971
972 flags = env->hflags;
973 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
974 cs_base = env->segs[R_CS].base;
975 pc = cs_base + eip;
976
977 tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base,
978 flags);
979
980 if(tb)
981 {
982# ifdef DEBUG
983 printf("invalidating TB (%08X) at %08X\n", tb, eip);
984# endif
985 tb_invalidate(tb);
986 //Note: this will leak TBs, but the whole cache will be flushed
987 // when it happens too often
988 tb->pc = 0;
989 tb->cs_base = 0;
990 tb->flags = 0;
991 }
992# endif
993}
994
995# ifdef VBOX_STRICT
996/**
997 * Gets the page offset.
998 */
999unsigned long get_phys_page_offset(target_ulong addr)
1000{
1001 PhysPageDesc *p = phys_page_find(addr >> TARGET_PAGE_BITS);
1002 return p ? p->phys_offset : 0;
1003}
1004# endif /* VBOX_STRICT */
1005
1006#endif /* VBOX */
1007
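/* Set bits [start, start + len) in the bit array 'tab', handling partially
   covered first and last bytes separately. */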
1008static inline void set_bits(uint8_t *tab, int start, int len)
1009{
1010 int end, mask, end1;
1011
1012 end = start + len;
1013 tab += start >> 3;
1014 mask = 0xff << (start & 7);
1015 if ((start & ~7) == (end & ~7)) {
1016 if (start < end) {
1017 mask &= ~(0xff << (end & 7));
1018 *tab |= mask;
1019 }
1020 } else {
1021 *tab++ |= mask;
1022 start = (start + 8) & ~7;
1023 end1 = end & ~7;
1024 while (start < end1) {
1025 *tab++ = 0xff;
1026 start += 8;
1027 }
1028 if (start < end) {
1029 mask = ~(0xff << (end & 7));
1030 *tab |= mask;
1031 }
1032 }
1033}
1034
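/* Build the bitmap marking which bytes of a guest page are covered by
   translated code; tb_invalidate_phys_page_fast() consults it so that guest
   writes which do not overlap any TB can skip the costly invalidation. */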
1035static void build_page_bitmap(PageDesc *p)
1036{
1037 int n, tb_start, tb_end;
1038 TranslationBlock *tb;
1039
1040 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
1041
1042 tb = p->first_tb;
1043 while (tb != NULL) {
1044 n = (long)tb & 3;
1045 tb = (TranslationBlock *)((long)tb & ~3);
1046 /* NOTE: this is subtle as a TB may span two physical pages */
1047 if (n == 0) {
1048 /* NOTE: tb_end may be after the end of the page, but
1049 it is not a problem */
1050 tb_start = tb->pc & ~TARGET_PAGE_MASK;
1051 tb_end = tb_start + tb->size;
1052 if (tb_end > TARGET_PAGE_SIZE)
1053 tb_end = TARGET_PAGE_SIZE;
1054 } else {
1055 tb_start = 0;
1056 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1057 }
1058 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
1059 tb = tb->page_next[n];
1060 }
1061}
1062
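/* Translate one block starting at guest address 'pc', register it in the
   physical page tables and return it. If the TB array or code buffer is
   exhausted, everything is flushed first, so the call itself cannot fail. */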
1063TranslationBlock *tb_gen_code(CPUState *env,
1064 target_ulong pc, target_ulong cs_base,
1065 int flags, int cflags)
1066{
1067 TranslationBlock *tb;
1068 uint8_t *tc_ptr;
1069 target_ulong phys_pc, phys_page2, virt_page2;
1070 int code_gen_size;
1071
1072 phys_pc = get_phys_addr_code(env, pc);
1073 tb = tb_alloc(pc);
1074 if (!tb) {
1075 /* flush must be done */
1076 tb_flush(env);
1077 /* cannot fail at this point */
1078 tb = tb_alloc(pc);
1079 /* Don't forget to invalidate previous TB info. */
1080 tb_invalidated_flag = 1;
1081 }
1082 tc_ptr = code_gen_ptr;
1083 tb->tc_ptr = tc_ptr;
1084 tb->cs_base = cs_base;
1085 tb->flags = flags;
1086 tb->cflags = cflags;
1087 cpu_gen_code(env, tb, &code_gen_size);
1088 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
1089
1090 /* check next page if needed */
1091 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1092 phys_page2 = -1;
1093 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1094 phys_page2 = get_phys_addr_code(env, virt_page2);
1095 }
1096 tb_link_phys(tb, phys_pc, phys_page2);
1097 return tb;
1098}
1099
1100/* invalidate all TBs which intersect with the target physical page
1101 starting in range [start;end[. NOTE: start and end must refer to
1102 the same physical page. 'is_cpu_write_access' should be true if called
1103 from a real cpu write access: the virtual CPU will exit the current
1104 TB if code is modified inside this TB. */
1105void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
1106 int is_cpu_write_access)
1107{
1108 TranslationBlock *tb, *tb_next, *saved_tb;
1109 CPUState *env = cpu_single_env;
1110 target_ulong tb_start, tb_end;
1111 PageDesc *p;
1112 int n;
1113#ifdef TARGET_HAS_PRECISE_SMC
1114 int current_tb_not_found = is_cpu_write_access;
1115 TranslationBlock *current_tb = NULL;
1116 int current_tb_modified = 0;
1117 target_ulong current_pc = 0;
1118 target_ulong current_cs_base = 0;
1119 int current_flags = 0;
1120#endif /* TARGET_HAS_PRECISE_SMC */
1121
1122 p = page_find(start >> TARGET_PAGE_BITS);
1123 if (!p)
1124 return;
1125 if (!p->code_bitmap &&
1126 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1127 is_cpu_write_access) {
1128 /* build code bitmap */
1129 build_page_bitmap(p);
1130 }
1131
1132 /* we remove all the TBs in the range [start, end[ */
1133 /* XXX: see if in some cases it could be faster to invalidate all the code */
1134 tb = p->first_tb;
1135 while (tb != NULL) {
1136 n = (long)tb & 3;
1137 tb = (TranslationBlock *)((long)tb & ~3);
1138 tb_next = tb->page_next[n];
1139 /* NOTE: this is subtle as a TB may span two physical pages */
1140 if (n == 0) {
1141 /* NOTE: tb_end may be after the end of the page, but
1142 it is not a problem */
1143 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1144 tb_end = tb_start + tb->size;
1145 } else {
1146 tb_start = tb->page_addr[1];
1147 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1148 }
1149 if (!(tb_end <= start || tb_start >= end)) {
1150#ifdef TARGET_HAS_PRECISE_SMC
1151 if (current_tb_not_found) {
1152 current_tb_not_found = 0;
1153 current_tb = NULL;
1154 if (env->mem_io_pc) {
1155 /* now we have a real cpu fault */
1156 current_tb = tb_find_pc(env->mem_io_pc);
1157 }
1158 }
1159 if (current_tb == tb &&
1160 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1161 /* If we are modifying the current TB, we must stop
1162 its execution. We could be more precise by checking
1163 that the modification is after the current PC, but it
1164 would require a specialized function to partially
1165 restore the CPU state */
1166
1167 current_tb_modified = 1;
1168 cpu_restore_state(current_tb, env,
1169 env->mem_io_pc, NULL);
1170 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1171 &current_flags);
1172 }
1173#endif /* TARGET_HAS_PRECISE_SMC */
1174 /* we need to do that to handle the case where a signal
1175 occurs while doing tb_phys_invalidate() */
1176 saved_tb = NULL;
1177 if (env) {
1178 saved_tb = env->current_tb;
1179 env->current_tb = NULL;
1180 }
1181 tb_phys_invalidate(tb, -1);
1182 if (env) {
1183 env->current_tb = saved_tb;
1184 if (env->interrupt_request && env->current_tb)
1185 cpu_interrupt(env, env->interrupt_request);
1186 }
1187 }
1188 tb = tb_next;
1189 }
1190#if !defined(CONFIG_USER_ONLY)
1191 /* if no code remaining, no need to continue to use slow writes */
1192 if (!p->first_tb) {
1193 invalidate_page_bitmap(p);
1194 if (is_cpu_write_access) {
1195 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1196 }
1197 }
1198#endif
1199#ifdef TARGET_HAS_PRECISE_SMC
1200 if (current_tb_modified) {
1201 /* we generate a block containing just the instruction
1202 modifying the memory. It will ensure that it cannot modify
1203 itself */
1204 env->current_tb = NULL;
1205 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1206 cpu_resume_from_signal(env, NULL);
1207 }
1208#endif
1209}
1210
1211/* len must be <= 8 and start must be a multiple of len */
1212static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1213{
1214 PageDesc *p;
1215 int offset, b;
1216#if 0
1217 if (1) {
1218 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1219 cpu_single_env->mem_io_vaddr, len,
1220 cpu_single_env->eip,
1221 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1222 }
1223#endif
1224 p = page_find(start >> TARGET_PAGE_BITS);
1225 if (!p)
1226 return;
1227 if (p->code_bitmap) {
1228 offset = start & ~TARGET_PAGE_MASK;
1229 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1230 if (b & ((1 << len) - 1))
1231 goto do_invalidate;
1232 } else {
1233 do_invalidate:
1234 tb_invalidate_phys_page_range(start, start + len, 1);
1235 }
1236}
1237
1238#if !defined(CONFIG_SOFTMMU)
1239static void tb_invalidate_phys_page(target_phys_addr_t addr,
1240 unsigned long pc, void *puc)
1241{
1242 TranslationBlock *tb;
1243 PageDesc *p;
1244 int n;
1245#ifdef TARGET_HAS_PRECISE_SMC
1246 TranslationBlock *current_tb = NULL;
1247 CPUState *env = cpu_single_env;
1248 int current_tb_modified = 0;
1249 target_ulong current_pc = 0;
1250 target_ulong current_cs_base = 0;
1251 int current_flags = 0;
1252#endif
1253
1254 addr &= TARGET_PAGE_MASK;
1255 p = page_find(addr >> TARGET_PAGE_BITS);
1256 if (!p)
1257 return;
1258 tb = p->first_tb;
1259#ifdef TARGET_HAS_PRECISE_SMC
1260 if (tb && pc != 0) {
1261 current_tb = tb_find_pc(pc);
1262 }
1263#endif
1264 while (tb != NULL) {
1265 n = (long)tb & 3;
1266 tb = (TranslationBlock *)((long)tb & ~3);
1267#ifdef TARGET_HAS_PRECISE_SMC
1268 if (current_tb == tb &&
1269 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1270 /* If we are modifying the current TB, we must stop
1271 its execution. We could be more precise by checking
1272 that the modification is after the current PC, but it
1273 would require a specialized function to partially
1274 restore the CPU state */
1275
1276 current_tb_modified = 1;
1277 cpu_restore_state(current_tb, env, pc, puc);
1278 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1279 &current_flags);
1280 }
1281#endif /* TARGET_HAS_PRECISE_SMC */
1282 tb_phys_invalidate(tb, addr);
1283 tb = tb->page_next[n];
1284 }
1285 p->first_tb = NULL;
1286#ifdef TARGET_HAS_PRECISE_SMC
1287 if (current_tb_modified) {
1288 /* we generate a block containing just the instruction
1289 modifying the memory. It will ensure that it cannot modify
1290 itself */
1291 env->current_tb = NULL;
1292 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1293 cpu_resume_from_signal(env, puc);
1294 }
1295#endif
1296}
1297#endif
1298
1299/* add the tb in the target page and protect it if necessary */
1300static inline void tb_alloc_page(TranslationBlock *tb,
1301 unsigned int n, target_ulong page_addr)
1302{
1303 PageDesc *p;
1304 TranslationBlock *last_first_tb;
1305
1306 tb->page_addr[n] = page_addr;
1307 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1308 tb->page_next[n] = p->first_tb;
1309 last_first_tb = p->first_tb;
1310 p->first_tb = (TranslationBlock *)((long)tb | n);
1311 invalidate_page_bitmap(p);
1312
1313#if defined(TARGET_HAS_SMC) || 1
1314
1315#if defined(CONFIG_USER_ONLY)
1316 if (p->flags & PAGE_WRITE) {
1317 target_ulong addr;
1318 PageDesc *p2;
1319 int prot;
1320
1321 /* force the host page as non writable (writes will have a
1322 page fault + mprotect overhead) */
1323 page_addr &= qemu_host_page_mask;
1324 prot = 0;
1325 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1326 addr += TARGET_PAGE_SIZE) {
1327
1328 p2 = page_find (addr >> TARGET_PAGE_BITS);
1329 if (!p2)
1330 continue;
1331 prot |= p2->flags;
1332 p2->flags &= ~PAGE_WRITE;
1333 page_get_flags(addr);
1334 }
1335 mprotect(g2h(page_addr), qemu_host_page_size,
1336 (prot & PAGE_BITS) & ~PAGE_WRITE);
1337#ifdef DEBUG_TB_INVALIDATE
1338 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1339 page_addr);
1340#endif
1341 }
1342#else
1343 /* if some code is already present, then the pages are already
1344 protected. So we handle the case where only the first TB is
1345 allocated in a physical page */
1346 if (!last_first_tb) {
1347 tlb_protect_code(page_addr);
1348 }
1349#endif
1350
1351#endif /* TARGET_HAS_SMC */
1352}
1353
1354/* Allocate a new translation block. Flush the translation buffer if
1355 too many translation blocks or too much generated code. */
1356TranslationBlock *tb_alloc(target_ulong pc)
1357{
1358 TranslationBlock *tb;
1359
1360 if (nb_tbs >= code_gen_max_blocks ||
1361#ifndef VBOX
1362 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1363#else
1364 (code_gen_ptr - code_gen_buffer) >= (int)code_gen_buffer_max_size)
1365#endif
1366 return NULL;
1367 tb = &tbs[nb_tbs++];
1368 tb->pc = pc;
1369 tb->cflags = 0;
1370 return tb;
1371}
1372
1373void tb_free(TranslationBlock *tb)
1374{
1375 /* In practice this is mostly used for single-use temporary TBs.
1376 Ignore the hard cases and just back up if this TB happens to
1377 be the last one generated. */
1378 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1379 code_gen_ptr = tb->tc_ptr;
1380 nb_tbs--;
1381 }
1382}
1383
1384/* add a new TB and link it to the physical page tables. phys_page2 is
1385 (-1) to indicate that only one page contains the TB. */
1386void tb_link_phys(TranslationBlock *tb,
1387 target_ulong phys_pc, target_ulong phys_page2)
1388{
1389 unsigned int h;
1390 TranslationBlock **ptb;
1391
1392 /* Grab the mmap lock to stop another thread invalidating this TB
1393 before we are done. */
1394 mmap_lock();
1395 /* add in the physical hash table */
1396 h = tb_phys_hash_func(phys_pc);
1397 ptb = &tb_phys_hash[h];
1398 tb->phys_hash_next = *ptb;
1399 *ptb = tb;
1400
1401 /* add in the page list */
1402 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1403 if (phys_page2 != -1)
1404 tb_alloc_page(tb, 1, phys_page2);
1405 else
1406 tb->page_addr[1] = -1;
1407
1408 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1409 tb->jmp_next[0] = NULL;
1410 tb->jmp_next[1] = NULL;
1411
1412 /* init original jump addresses */
1413 if (tb->tb_next_offset[0] != 0xffff)
1414 tb_reset_jump(tb, 0);
1415 if (tb->tb_next_offset[1] != 0xffff)
1416 tb_reset_jump(tb, 1);
1417
1418#ifdef DEBUG_TB_CHECK
1419 tb_page_check();
1420#endif
1421 mmap_unlock();
1422}
1423
1424/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1425 tb[1].tc_ptr. Return NULL if not found */
1426TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1427{
1428 int m_min, m_max, m;
1429 unsigned long v;
1430 TranslationBlock *tb;
1431
1432 if (nb_tbs <= 0)
1433 return NULL;
1434 if (tc_ptr < (unsigned long)code_gen_buffer ||
1435 tc_ptr >= (unsigned long)code_gen_ptr)
1436 return NULL;
1437 /* binary search (cf Knuth) */
1438 m_min = 0;
1439 m_max = nb_tbs - 1;
1440 while (m_min <= m_max) {
1441 m = (m_min + m_max) >> 1;
1442 tb = &tbs[m];
1443 v = (unsigned long)tb->tc_ptr;
1444 if (v == tc_ptr)
1445 return tb;
1446 else if (tc_ptr < v) {
1447 m_max = m - 1;
1448 } else {
1449 m_min = m + 1;
1450 }
1451 }
1452 return &tbs[m_max];
1453}
1454
1455static void tb_reset_jump_recursive(TranslationBlock *tb);
1456
1457static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1458{
1459 TranslationBlock *tb1, *tb_next, **ptb;
1460 unsigned int n1;
1461
1462 tb1 = tb->jmp_next[n];
1463 if (tb1 != NULL) {
1464 /* find head of list */
1465 for(;;) {
1466 n1 = (long)tb1 & 3;
1467 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1468 if (n1 == 2)
1469 break;
1470 tb1 = tb1->jmp_next[n1];
1471 }
1472 /* we are now sure that tb jumps to tb1 */
1473 tb_next = tb1;
1474
1475 /* remove tb from the jmp_first list */
1476 ptb = &tb_next->jmp_first;
1477 for(;;) {
1478 tb1 = *ptb;
1479 n1 = (long)tb1 & 3;
1480 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1481 if (n1 == n && tb1 == tb)
1482 break;
1483 ptb = &tb1->jmp_next[n1];
1484 }
1485 *ptb = tb->jmp_next[n];
1486 tb->jmp_next[n] = NULL;
1487
1488 /* suppress the jump to next tb in generated code */
1489 tb_reset_jump(tb, n);
1490
1491 /* suppress jumps in the tb on which we could have jumped */
1492 tb_reset_jump_recursive(tb_next);
1493 }
1494}
1495
1496static void tb_reset_jump_recursive(TranslationBlock *tb)
1497{
1498 tb_reset_jump_recursive2(tb, 0);
1499 tb_reset_jump_recursive2(tb, 1);
1500}
1501
1502#if defined(TARGET_HAS_ICE)
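/* Invalidate the TB(s) covering the breakpoint address so the code is
   retranslated and the translator sees the newly added or removed
   breakpoint. */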
1503static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1504{
1505 target_phys_addr_t addr;
1506 target_ulong pd;
1507 ram_addr_t ram_addr;
1508 PhysPageDesc *p;
1509
1510 addr = cpu_get_phys_page_debug(env, pc);
1511 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1512 if (!p) {
1513 pd = IO_MEM_UNASSIGNED;
1514 } else {
1515 pd = p->phys_offset;
1516 }
1517 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1518 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1519}
1520#endif
1521
1522/* Add a watchpoint. */
1523int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1524 int flags, CPUWatchpoint **watchpoint)
1525{
1526 target_ulong len_mask = ~(len - 1);
1527 CPUWatchpoint *wp;
1528
1529 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1530 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1531 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1532 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1533#ifndef VBOX
1534 return -EINVAL;
1535#else
1536 return VERR_INVALID_PARAMETER;
1537#endif
1538 }
1539 wp = qemu_malloc(sizeof(*wp));
1540
1541 wp->vaddr = addr;
1542 wp->len_mask = len_mask;
1543 wp->flags = flags;
1544
1545 /* keep all GDB-injected watchpoints in front */
1546 if (flags & BP_GDB)
1547 TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1548 else
1549 TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1550
1551 tlb_flush_page(env, addr);
1552
1553 if (watchpoint)
1554 *watchpoint = wp;
1555 return 0;
1556}
1557
1558/* Remove a specific watchpoint. */
1559int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1560 int flags)
1561{
1562 target_ulong len_mask = ~(len - 1);
1563 CPUWatchpoint *wp;
1564
1565 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1566 if (addr == wp->vaddr && len_mask == wp->len_mask
1567 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1568 cpu_watchpoint_remove_by_ref(env, wp);
1569 return 0;
1570 }
1571 }
1572#ifndef VBOX
1573 return -ENOENT;
1574#else
1575 return VERR_NOT_FOUND;
1576#endif
1577}
1578
1579/* Remove a specific watchpoint by reference. */
1580void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1581{
1582 TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1583
1584 tlb_flush_page(env, watchpoint->vaddr);
1585
1586 qemu_free(watchpoint);
1587}
1588
1589/* Remove all matching watchpoints. */
1590void cpu_watchpoint_remove_all(CPUState *env, int mask)
1591{
1592 CPUWatchpoint *wp, *next;
1593
1594 TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1595 if (wp->flags & mask)
1596 cpu_watchpoint_remove_by_ref(env, wp);
1597 }
1598}
1599
1600/* Add a breakpoint. */
1601int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1602 CPUBreakpoint **breakpoint)
1603{
1604#if defined(TARGET_HAS_ICE)
1605 CPUBreakpoint *bp;
1606
1607 bp = qemu_malloc(sizeof(*bp));
1608
1609 bp->pc = pc;
1610 bp->flags = flags;
1611
1612 /* keep all GDB-injected breakpoints in front */
1613 if (flags & BP_GDB)
1614 TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1615 else
1616 TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1617
1618 breakpoint_invalidate(env, pc);
1619
1620 if (breakpoint)
1621 *breakpoint = bp;
1622 return 0;
1623#else
1624 return -ENOSYS;
1625#endif
1626}
1627
1628/* Remove a specific breakpoint. */
1629int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1630{
1631#if defined(TARGET_HAS_ICE)
1632 CPUBreakpoint *bp;
1633
1634 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1635 if (bp->pc == pc && bp->flags == flags) {
1636 cpu_breakpoint_remove_by_ref(env, bp);
1637 return 0;
1638 }
1639 }
1640# ifndef VBOX
1641 return -ENOENT;
1642# else
1643 return VERR_NOT_FOUND;
1644# endif
1645#else
1646 return -ENOSYS;
1647#endif
1648}
1649
1650/* Remove a specific breakpoint by reference. */
1651void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1652{
1653#if defined(TARGET_HAS_ICE)
1654 TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1655
1656 breakpoint_invalidate(env, breakpoint->pc);
1657
1658 qemu_free(breakpoint);
1659#endif
1660}
1661
1662/* Remove all matching breakpoints. */
1663void cpu_breakpoint_remove_all(CPUState *env, int mask)
1664{
1665#if defined(TARGET_HAS_ICE)
1666 CPUBreakpoint *bp, *next;
1667
1668 TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1669 if (bp->flags & mask)
1670 cpu_breakpoint_remove_by_ref(env, bp);
1671 }
1672#endif
1673}
1674
1675/* enable or disable single step mode. EXCP_DEBUG is returned by the
1676 CPU loop after each instruction */
1677void cpu_single_step(CPUState *env, int enabled)
1678{
1679#if defined(TARGET_HAS_ICE)
1680 if (env->singlestep_enabled != enabled) {
1681 env->singlestep_enabled = enabled;
1682 /* must flush all the translated code to avoid inconsistencies */
1683 /* XXX: only flush what is necessary */
1684 tb_flush(env);
1685 }
1686#endif
1687}
1688
1689#ifndef VBOX
1690/* enable or disable low-level logging */
1691void cpu_set_log(int log_flags)
1692{
1693 loglevel = log_flags;
1694 if (loglevel && !logfile) {
1695 logfile = fopen(logfilename, log_append ? "a" : "w");
1696 if (!logfile) {
1697 perror(logfilename);
1698 _exit(1);
1699 }
1700#if !defined(CONFIG_SOFTMMU)
1701 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1702 {
1703 static char logfile_buf[4096];
1704 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1705 }
1706#else
1707 setvbuf(logfile, NULL, _IOLBF, 0);
1708#endif
1709 log_append = 1;
1710 }
1711 if (!loglevel && logfile) {
1712 fclose(logfile);
1713 logfile = NULL;
1714 }
1715}
1716
1717void cpu_set_log_filename(const char *filename)
1718{
1719 logfilename = strdup(filename);
1720 if (logfile) {
1721 fclose(logfile);
1722 logfile = NULL;
1723 }
1724 cpu_set_log(loglevel);
1725}
1726#endif /* !VBOX */
1727
1728/* mask must never be zero, except for A20 change call */
1729void cpu_interrupt(CPUState *env, int mask)
1730{
1731#if !defined(USE_NPTL)
1732 TranslationBlock *tb;
1733 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1734#endif
1735 int old_mask;
1736
1737 old_mask = env->interrupt_request;
1738#ifdef VBOX
1739 VM_ASSERT_EMT(env->pVM);
1740 ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, mask);
1741#else /* !VBOX */
1742 /* FIXME: This is probably not threadsafe. A different thread could
1743 be in the middle of a read-modify-write operation. */
1744 env->interrupt_request |= mask;
1745#endif /* !VBOX */
1746#if defined(USE_NPTL)
1747 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1748 problem and hope the cpu will stop of its own accord. For userspace
1749 emulation this often isn't actually as bad as it sounds. Often
1750 signals are used primarily to interrupt blocking syscalls. */
1751#else
1752 if (use_icount) {
1753 env->icount_decr.u16.high = 0xffff;
1754#ifndef CONFIG_USER_ONLY
1755 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1756 an async event happened and we need to process it. */
1757 if (!can_do_io(env)
1758 && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1759 cpu_abort(env, "Raised interrupt while not in I/O function");
1760 }
1761#endif
1762 } else {
1763 tb = env->current_tb;
1764 /* if the cpu is currently executing code, we must unlink it and
1765 all the potentially executing TB */
1766 if (tb && !testandset(&interrupt_lock)) {
1767 env->current_tb = NULL;
1768 tb_reset_jump_recursive(tb);
1769 resetlock(&interrupt_lock);
1770 }
1771 }
1772#endif
1773}
1774
1775void cpu_reset_interrupt(CPUState *env, int mask)
1776{
1777#ifdef VBOX
1778 /*
1779 * Note: the current implementation can be executed by another thread without problems; make sure this remains true
1780 * for future changes!
1781 */
1782 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~mask);
1783#else /* !VBOX */
1784 env->interrupt_request &= ~mask;
1785#endif /* !VBOX */
1786}
1787
1788#ifndef VBOX
1789const CPULogItem cpu_log_items[] = {
1790 { CPU_LOG_TB_OUT_ASM, "out_asm",
1791 "show generated host assembly code for each compiled TB" },
1792 { CPU_LOG_TB_IN_ASM, "in_asm",
1793 "show target assembly code for each compiled TB" },
1794 { CPU_LOG_TB_OP, "op",
1795 "show micro ops for each compiled TB" },
1796 { CPU_LOG_TB_OP_OPT, "op_opt",
1797 "show micro ops "
1798#ifdef TARGET_I386
1799 "before eflags optimization and "
1800#endif
1801 "after liveness analysis" },
1802 { CPU_LOG_INT, "int",
1803 "show interrupts/exceptions in short format" },
1804 { CPU_LOG_EXEC, "exec",
1805 "show trace before each executed TB (lots of logs)" },
1806 { CPU_LOG_TB_CPU, "cpu",
1807 "show CPU state before block translation" },
1808#ifdef TARGET_I386
1809 { CPU_LOG_PCALL, "pcall",
1810 "show protected mode far calls/returns/exceptions" },
1811 { CPU_LOG_RESET, "cpu_reset",
1812 "show CPU state before CPU resets" },
1813#endif
1814#ifdef DEBUG_IOPORT
1815 { CPU_LOG_IOPORT, "ioport",
1816 "show all i/o ports accesses" },
1817#endif
1818 { 0, NULL, NULL },
1819};
1820
1821static int cmp1(const char *s1, int n, const char *s2)
1822{
1823 if (strlen(s2) != n)
1824 return 0;
1825 return memcmp(s1, s2, n) == 0;
1826}
1827
1828/* takes a comma-separated list of log masks. Returns 0 on error. */
1829int cpu_str_to_log_mask(const char *str)
1830{
1831 const CPULogItem *item;
1832 int mask;
1833 const char *p, *p1;
1834
1835 p = str;
1836 mask = 0;
1837 for(;;) {
1838 p1 = strchr(p, ',');
1839 if (!p1)
1840 p1 = p + strlen(p);
1841 if(cmp1(p,p1-p,"all")) {
1842 for(item = cpu_log_items; item->mask != 0; item++) {
1843 mask |= item->mask;
1844 }
1845 } else {
1846 for(item = cpu_log_items; item->mask != 0; item++) {
1847 if (cmp1(p, p1 - p, item->name))
1848 goto found;
1849 }
1850 return 0;
1851 }
1852 found:
1853 mask |= item->mask;
1854 if (*p1 != ',')
1855 break;
1856 p = p1 + 1;
1857 }
1858 return mask;
1859}
1860#endif /* !VBOX */
1861
1862#ifndef VBOX /* VBOX: we have our own routine. */
1863void cpu_abort(CPUState *env, const char *fmt, ...)
1864{
1865 va_list ap;
1866 va_list ap2;
1867
1868 va_start(ap, fmt);
1869 va_copy(ap2, ap);
1870 fprintf(stderr, "qemu: fatal: ");
1871 vfprintf(stderr, fmt, ap);
1872 fprintf(stderr, "\n");
1873#ifdef TARGET_I386
1874 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1875#else
1876 cpu_dump_state(env, stderr, fprintf, 0);
1877#endif
1878 if (qemu_log_enabled()) {
1879 qemu_log("qemu: fatal: ");
1880 qemu_log_vprintf(fmt, ap2);
1881 qemu_log("\n");
1882#ifdef TARGET_I386
1883 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1884#else
1885 log_cpu_state(env, 0);
1886#endif
1887 qemu_log_flush();
1888 qemu_log_close();
1889 }
1890 va_end(ap2);
1891 va_end(ap);
1892 abort();
1893}
1894#endif /* !VBOX */
1895
1896#ifndef VBOX
1897CPUState *cpu_copy(CPUState *env)
1898{
1899 CPUState *new_env = cpu_init(env->cpu_model_str);
1900 CPUState *next_cpu = new_env->next_cpu;
1901 int cpu_index = new_env->cpu_index;
1902#if defined(TARGET_HAS_ICE)
1903 CPUBreakpoint *bp;
1904 CPUWatchpoint *wp;
1905#endif
1906
1907 memcpy(new_env, env, sizeof(CPUState));
1908
1909 /* Preserve chaining and index. */
1910 new_env->next_cpu = next_cpu;
1911 new_env->cpu_index = cpu_index;
1912
1913 /* Clone all break/watchpoints.
1914 Note: Once we support ptrace with hw-debug register access, make sure
1915 BP_CPU break/watchpoints are handled correctly on clone. */
1916 TAILQ_INIT(&env->breakpoints);
1917 TAILQ_INIT(&env->watchpoints);
1918#if defined(TARGET_HAS_ICE)
1919 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1920 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1921 }
1922 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1923 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1924 wp->flags, NULL);
1925 }
1926#endif
1927
1928 return new_env;
1929}
1930#endif /* !VBOX */
1931
1932#if !defined(CONFIG_USER_ONLY)
1933
1934static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1935{
1936 unsigned int i;
1937
1938 /* Discard jump cache entries for any tb which might potentially
1939 overlap the flushed page. */
1940 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1941 memset (&env->tb_jmp_cache[i], 0,
1942 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1943
1944 i = tb_jmp_cache_hash_page(addr);
1945 memset (&env->tb_jmp_cache[i], 0,
1946 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1947
1948#ifdef VBOX
1949 /* inform raw mode about TLB page flush */
1950 remR3FlushPage(env, addr);
1951#endif /* VBOX */
1952}
1953
1954#ifdef VBOX
1955static CPUTLBEntry s_cputlb_empty_entry = {
1956 .addr_read = -1,
1957 .addr_write = -1,
1958 .addr_code = -1,
1959 .addend = -1,
1960};
1961#endif /* VBOX */
1962
1963/* NOTE: if flush_global is true, also flush global entries (not
1964 implemented yet) */
1965void tlb_flush(CPUState *env, int flush_global)
1966{
1967 int i;
1968
1969#if defined(DEBUG_TLB)
1970 printf("tlb_flush:\n");
1971#endif
1972 /* must reset current TB so that interrupts cannot modify the
1973 links while we are modifying them */
1974 env->current_tb = NULL;
1975
1976 for(i = 0; i < CPU_TLB_SIZE; i++) {
1977#ifdef VBOX
1978 int mmu_idx;
1979 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1980 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1981 }
1982#else /* !VBOX */
1983 env->tlb_table[0][i].addr_read = -1;
1984 env->tlb_table[0][i].addr_write = -1;
1985 env->tlb_table[0][i].addr_code = -1;
1986 env->tlb_table[1][i].addr_read = -1;
1987 env->tlb_table[1][i].addr_write = -1;
1988 env->tlb_table[1][i].addr_code = -1;
1989#if (NB_MMU_MODES >= 3)
1990 env->tlb_table[2][i].addr_read = -1;
1991 env->tlb_table[2][i].addr_write = -1;
1992 env->tlb_table[2][i].addr_code = -1;
1993#if (NB_MMU_MODES == 4)
1994 env->tlb_table[3][i].addr_read = -1;
1995 env->tlb_table[3][i].addr_write = -1;
1996 env->tlb_table[3][i].addr_code = -1;
1997#endif
1998#endif
1999#endif /* !VBOX */
2000 }
2001
2002 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
2003
2004#ifdef VBOX
2005 /* inform raw mode about TLB flush */
2006 remR3FlushTLB(env, flush_global);
2007#endif
2008#ifdef USE_KQEMU
2009 if (env->kqemu_enabled) {
2010 kqemu_flush(env, flush_global);
2011 }
2012#endif
2013 tlb_flush_count++;
2014}
2015
2016static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
2017{
2018 if (addr == (tlb_entry->addr_read &
2019 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
2020 addr == (tlb_entry->addr_write &
2021 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
2022 addr == (tlb_entry->addr_code &
2023 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
2024 tlb_entry->addr_read = -1;
2025 tlb_entry->addr_write = -1;
2026 tlb_entry->addr_code = -1;
2027 }
2028}
2029
2030void tlb_flush_page(CPUState *env, target_ulong addr)
2031{
2032 int i;
2033
2034#if defined(DEBUG_TLB)
2035 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
2036#endif
2037 /* must reset current TB so that interrupts cannot modify the
2038 links while we are modifying them */
2039 env->current_tb = NULL;
2040
2041 addr &= TARGET_PAGE_MASK;
2042 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2043 tlb_flush_entry(&env->tlb_table[0][i], addr);
2044 tlb_flush_entry(&env->tlb_table[1][i], addr);
2045#if (NB_MMU_MODES >= 3)
2046 tlb_flush_entry(&env->tlb_table[2][i], addr);
2047#if (NB_MMU_MODES == 4)
2048 tlb_flush_entry(&env->tlb_table[3][i], addr);
2049#endif
2050#endif
2051
2052 tlb_flush_jmp_cache(env, addr);
2053
2054#ifdef USE_KQEMU
2055 if (env->kqemu_enabled) {
2056 kqemu_flush_page(env, addr);
2057 }
2058#endif
2059}
2060
2061/* update the TLBs so that writes to code in the virtual page 'addr'
2062 can be detected */
2063static void tlb_protect_code(ram_addr_t ram_addr)
2064{
2065 cpu_physical_memory_reset_dirty(ram_addr,
2066 ram_addr + TARGET_PAGE_SIZE,
2067 CODE_DIRTY_FLAG);
2068#if defined(VBOX) && defined(REM_MONITOR_CODE_PAGES)
2069 /** @todo Retest this? This function has changed... */
2070 remR3ProtectCode(cpu_single_env, ram_addr);
2071#endif
2072}
2073
2074/* update the TLB so that writes in physical page 'phys_addr' are no longer
2075    tested for self-modifying code */
2076static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
2077 target_ulong vaddr)
2078{
2079#ifdef VBOX
2080 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2081#endif
2082 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
2083}
2084
2085static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
2086 unsigned long start, unsigned long length)
2087{
2088 unsigned long addr;
2089
2090#ifdef VBOX
2091 if (start & 3)
2092 return;
2093#endif
2094 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2095 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2096 if ((addr - start) < length) {
2097 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
2098 }
2099 }
2100}
2101
2102void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
2103 int dirty_flags)
2104{
2105 CPUState *env;
2106 unsigned long length, start1;
2107 int i, mask, len;
2108 uint8_t *p;
2109
2110 start &= TARGET_PAGE_MASK;
2111 end = TARGET_PAGE_ALIGN(end);
2112
2113 length = end - start;
2114 if (length == 0)
2115 return;
2116 len = length >> TARGET_PAGE_BITS;
2117#ifdef USE_KQEMU
2118 /* XXX: should not depend on cpu context */
2119 env = first_cpu;
2120 if (env->kqemu_enabled) {
2121 ram_addr_t addr;
2122 addr = start;
2123 for(i = 0; i < len; i++) {
2124 kqemu_set_notdirty(env, addr);
2125 addr += TARGET_PAGE_SIZE;
2126 }
2127 }
2128#endif
2129 mask = ~dirty_flags;
2130 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
2131#ifdef VBOX
2132 if (RT_LIKELY((start >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2133#endif
2134 for(i = 0; i < len; i++)
2135 p[i] &= mask;
2136
2137 /* we modify the TLB cache so that the dirty bit will be set again
2138 when accessing the range */
2139#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2140 start1 = start;
2141#elif !defined(VBOX)
2142 start1 = start + (unsigned long)phys_ram_base;
2143#else
2144 start1 = (unsigned long)remR3TlbGCPhys2Ptr(first_cpu, start, 1 /*fWritable*/); /** @todo page replacing (sharing or read only) may cause trouble, fix interface/whatever. */
2145#endif
2146 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2147 for(i = 0; i < CPU_TLB_SIZE; i++)
2148 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
2149 for(i = 0; i < CPU_TLB_SIZE; i++)
2150 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
2151#if (NB_MMU_MODES >= 3)
2152 for(i = 0; i < CPU_TLB_SIZE; i++)
2153 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
2154#if (NB_MMU_MODES == 4)
2155 for(i = 0; i < CPU_TLB_SIZE; i++)
2156 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
2157#endif
2158#endif
2159 }
2160}
2161
2162#ifndef VBOX
2163int cpu_physical_memory_set_dirty_tracking(int enable)
2164{
2165 in_migration = enable;
2166 return 0;
2167}
2168
2169int cpu_physical_memory_get_dirty_tracking(void)
2170{
2171 return in_migration;
2172}
2173#endif /* !VBOX */
2174
2175void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
2176{
2177 if (kvm_enabled())
2178 kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
2179}
2180
2181#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2182DECLINLINE(void) tlb_update_dirty(CPUTLBEntry *tlb_entry, target_phys_addr_t phys_addend)
2183#else
2184static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2185#endif
2186{
2187 ram_addr_t ram_addr;
2188
2189 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2190 /* RAM case */
2191#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2192 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2193#elif !defined(VBOX)
2194 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
2195 tlb_entry->addend - (unsigned long)phys_ram_base;
2196#else
2197 Assert(phys_addend != -1);
2198 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + phys_addend;
2199#endif
2200 if (!cpu_physical_memory_is_dirty(ram_addr)) {
2201 tlb_entry->addr_write |= TLB_NOTDIRTY;
2202 }
2203 }
2204}
2205
2206/* update the TLB according to the current state of the dirty bits */
2207void cpu_tlb_update_dirty(CPUState *env)
2208{
2209 int i;
2210#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2211 for(i = 0; i < CPU_TLB_SIZE; i++)
2212 tlb_update_dirty(&env->tlb_table[0][i], env->phys_addends[0][i]);
2213 for(i = 0; i < CPU_TLB_SIZE; i++)
2214 tlb_update_dirty(&env->tlb_table[1][i], env->phys_addends[1][i]);
2215# if (NB_MMU_MODES >= 3)
2216 for(i = 0; i < CPU_TLB_SIZE; i++)
2217 tlb_update_dirty(&env->tlb_table[2][i], env->phys_addends[2][i]);
2218# if (NB_MMU_MODES == 4)
2219 for(i = 0; i < CPU_TLB_SIZE; i++)
2220 tlb_update_dirty(&env->tlb_table[3][i], env->phys_addends[3][i]);
2221# endif
2222# endif
2223#else /* VBOX */
2224 for(i = 0; i < CPU_TLB_SIZE; i++)
2225 tlb_update_dirty(&env->tlb_table[0][i]);
2226 for(i = 0; i < CPU_TLB_SIZE; i++)
2227 tlb_update_dirty(&env->tlb_table[1][i]);
2228#if (NB_MMU_MODES >= 3)
2229 for(i = 0; i < CPU_TLB_SIZE; i++)
2230 tlb_update_dirty(&env->tlb_table[2][i]);
2231#if (NB_MMU_MODES == 4)
2232 for(i = 0; i < CPU_TLB_SIZE; i++)
2233 tlb_update_dirty(&env->tlb_table[3][i]);
2234#endif
2235#endif
2236#endif /* VBOX */
2237}
2238
2239static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2240{
2241 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2242 tlb_entry->addr_write = vaddr;
2243}
2244
2245/* update the TLB corresponding to virtual page vaddr
2246 so that it is no longer dirty */
2247static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2248{
2249 int i;
2250
2251 vaddr &= TARGET_PAGE_MASK;
2252 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2253 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
2254 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
2255#if (NB_MMU_MODES >= 3)
2256 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
2257#if (NB_MMU_MODES == 4)
2258 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
2259#endif
2260#endif
2261}
2262
2263/* add a new TLB entry. At most one entry for a given virtual address
2264 is permitted. Return 0 if OK or 2 if the page could not be mapped
2265 (can only happen in non SOFTMMU mode for I/O pages or pages
2266 conflicting with the host address space). */
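/* Schematic example (not a call site in this file; softmmu targets reach
   this from their MMU fault handling once the guest page-table walk has
   produced a physical address), roughly:

       tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                         paddr & TARGET_PAGE_MASK,
                         prot, mmu_idx, 1);

   where 'prot' is a PAGE_READ/PAGE_WRITE/PAGE_EXEC mask and 'mmu_idx'
   selects one of the NB_MMU_MODES TLB tables (e.g. kernel vs. user). */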
2267int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2268 target_phys_addr_t paddr, int prot,
2269 int mmu_idx, int is_softmmu)
2270{
2271 PhysPageDesc *p;
2272 unsigned long pd;
2273 unsigned int index;
2274 target_ulong address;
2275 target_ulong code_address;
2276 target_phys_addr_t addend;
2277 int ret;
2278 CPUTLBEntry *te;
2279 CPUWatchpoint *wp;
2280 target_phys_addr_t iotlb;
2281#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2282 int read_mods = 0, write_mods = 0, code_mods = 0;
2283#endif
2284
2285 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2286 if (!p) {
2287 pd = IO_MEM_UNASSIGNED;
2288 } else {
2289 pd = p->phys_offset;
2290 }
2291#if defined(DEBUG_TLB)
2292 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2293 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2294#endif
2295
2296 ret = 0;
2297 address = vaddr;
2298 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2299 /* IO memory case (romd handled later) */
2300 address |= TLB_MMIO;
2301 }
2302#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
2303 addend = pd & TARGET_PAGE_MASK;
2304#elif !defined(VBOX)
2305 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
2306#else
2307 /** @todo this is racing the phys_page_find call above since it may register
2308 * a new chunk of memory... */
2309 addend = (unsigned long)remR3TlbGCPhys2Ptr(env,
2310 pd & TARGET_PAGE_MASK,
2311 !!(prot & PAGE_WRITE));
2312#endif
2313
2314 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2315 /* Normal RAM. */
2316 iotlb = pd & TARGET_PAGE_MASK;
2317 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2318 iotlb |= IO_MEM_NOTDIRTY;
2319 else
2320 iotlb |= IO_MEM_ROM;
2321 } else {
2322 /* IO handlers are currently passed a physical address.
2323 It would be nice to pass an offset from the base address
2324 of that region. This would avoid having to special case RAM,
2325 and avoid full address decoding in every device.
2326 We can't use the high bits of pd for this because
2327 IO_MEM_ROMD uses these as a ram address. */
2328 iotlb = (pd & ~TARGET_PAGE_MASK);
2329#ifndef VBOX
2330 if (p) {
2331#else
2332 if ( p->phys_offset
2333 && (pd & ~TARGET_PAGE_MASK) != env->pVM->rem.s.iMMIOMemType
2334 && (pd & ~TARGET_PAGE_MASK) != env->pVM->rem.s.iHandlerMemType) {
2335#endif
2336 iotlb += p->region_offset;
2337 } else {
2338 iotlb += paddr;
2339 }
2340 }
2341
2342 code_address = address;
2343
2344#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2345 if (addend & 0x3)
2346 {
2347 if (addend & 0x2)
2348 {
2349 /* catch write */
2350 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
2351 write_mods |= TLB_MMIO;
2352 }
2353 else if (addend & 0x1)
2354 {
2355 /* catch all */
2356 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
2357 {
2358 read_mods |= TLB_MMIO;
2359 write_mods |= TLB_MMIO;
2360 code_mods |= TLB_MMIO;
2361 }
2362 }
2363 if ((iotlb & ~TARGET_PAGE_MASK) == 0)
2364 iotlb = env->pVM->rem.s.iHandlerMemType + paddr;
2365 addend &= ~(target_ulong)0x3;
2366 }
2367#endif
2368
2369 /* Make accesses to pages with watchpoints go via the
2370 watchpoint trap routines. */
2371 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2372 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2373 iotlb = io_mem_watch + paddr;
2374 /* TODO: The memory case can be optimized by not trapping
2375 reads of pages with a write breakpoint. */
2376 address |= TLB_MMIO;
2377 }
2378 }
2379
2380 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2381 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2382 te = &env->tlb_table[mmu_idx][index];
2383 te->addend = addend - vaddr;
2384 if (prot & PAGE_READ) {
2385 te->addr_read = address;
2386 } else {
2387 te->addr_read = -1;
2388 }
2389
2390 if (prot & PAGE_EXEC) {
2391 te->addr_code = code_address;
2392 } else {
2393 te->addr_code = -1;
2394 }
2395 if (prot & PAGE_WRITE) {
2396 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2397 (pd & IO_MEM_ROMD)) {
2398 /* Write access calls the I/O callback. */
2399 te->addr_write = address | TLB_MMIO;
2400 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2401 !cpu_physical_memory_is_dirty(pd)) {
2402 te->addr_write = address | TLB_NOTDIRTY;
2403 } else {
2404 te->addr_write = address;
2405 }
2406 } else {
2407 te->addr_write = -1;
2408 }
2409
2410#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2411 if (prot & PAGE_READ)
2412 te->addr_read |= read_mods;
2413 if (prot & PAGE_EXEC)
2414 te->addr_code |= code_mods;
2415 if (prot & PAGE_WRITE)
2416 te->addr_write |= write_mods;
2417
2418     env->phys_addends[mmu_idx][index] = (pd & TARGET_PAGE_MASK) - vaddr;
2419#endif
2420
2421#ifdef VBOX
2422 /* inform raw mode about TLB page change */
2423 remR3FlushPage(env, vaddr);
2424#endif
2425 return ret;
2426}
2427
2428#else
2429
2430void tlb_flush(CPUState *env, int flush_global)
2431{
2432}
2433
2434void tlb_flush_page(CPUState *env, target_ulong addr)
2435{
2436}
2437
2438int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2439 target_phys_addr_t paddr, int prot,
2440 int mmu_idx, int is_softmmu)
2441{
2442 return 0;
2443}
2444
2445#ifndef VBOX
2446/* dump memory mappings */
2447void page_dump(FILE *f)
2448{
2449 unsigned long start, end;
2450 int i, j, prot, prot1;
2451 PageDesc *p;
2452
2453 fprintf(f, "%-8s %-8s %-8s %s\n",
2454 "start", "end", "size", "prot");
2455 start = -1;
2456 end = -1;
2457 prot = 0;
2458 for(i = 0; i <= L1_SIZE; i++) {
2459 if (i < L1_SIZE)
2460 p = l1_map[i];
2461 else
2462 p = NULL;
2463 for(j = 0;j < L2_SIZE; j++) {
2464 if (!p)
2465 prot1 = 0;
2466 else
2467 prot1 = p[j].flags;
2468 if (prot1 != prot) {
2469 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2470 if (start != -1) {
2471 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2472 start, end, end - start,
2473 prot & PAGE_READ ? 'r' : '-',
2474 prot & PAGE_WRITE ? 'w' : '-',
2475 prot & PAGE_EXEC ? 'x' : '-');
2476 }
2477 if (prot1 != 0)
2478 start = end;
2479 else
2480 start = -1;
2481 prot = prot1;
2482 }
2483 if (!p)
2484 break;
2485 }
2486 }
2487}
2488#endif /* !VBOX */
2489
2490int page_get_flags(target_ulong address)
2491{
2492 PageDesc *p;
2493
2494 p = page_find(address >> TARGET_PAGE_BITS);
2495 if (!p)
2496 return 0;
2497 return p->flags;
2498}
2499
2500/* modify the flags of a page and invalidate the code if
2501    necessary. The flag PAGE_WRITE_ORG is set automatically
2502 depending on PAGE_WRITE */
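/* Schematic example (user-mode emulation; not a call site in this file):
   the mmap/mprotect emulation marks a freshly mapped guest range with

       page_set_flags(start, start + len, prot | PAGE_VALID);

   where 'prot' is built from PAGE_READ/PAGE_WRITE/PAGE_EXEC. Pages that
   held translated code and become writable are invalidated below. */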
2503void page_set_flags(target_ulong start, target_ulong end, int flags)
2504{
2505 PageDesc *p;
2506 target_ulong addr;
2507
2508 /* mmap_lock should already be held. */
2509 start = start & TARGET_PAGE_MASK;
2510 end = TARGET_PAGE_ALIGN(end);
2511 if (flags & PAGE_WRITE)
2512 flags |= PAGE_WRITE_ORG;
2513#ifdef VBOX
2514 AssertMsgFailed(("We shouldn't be here, and if we should, we must have an env to do the proper locking!\n"));
2515#endif
2516 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2517 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2518 /* We may be called for host regions that are outside guest
2519 address space. */
2520 if (!p)
2521 return;
2522 /* if the write protection is set, then we invalidate the code
2523 inside */
2524 if (!(p->flags & PAGE_WRITE) &&
2525 (flags & PAGE_WRITE) &&
2526 p->first_tb) {
2527 tb_invalidate_phys_page(addr, 0, NULL);
2528 }
2529 p->flags = flags;
2530 }
2531}
2532
2533int page_check_range(target_ulong start, target_ulong len, int flags)
2534{
2535 PageDesc *p;
2536 target_ulong end;
2537 target_ulong addr;
2538
2539 if (start + len < start)
2540 /* we've wrapped around */
2541 return -1;
2542
2543     end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2544 start = start & TARGET_PAGE_MASK;
2545
2546 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2547 p = page_find(addr >> TARGET_PAGE_BITS);
2548 if( !p )
2549 return -1;
2550 if( !(p->flags & PAGE_VALID) )
2551 return -1;
2552
2553 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2554 return -1;
2555 if (flags & PAGE_WRITE) {
2556 if (!(p->flags & PAGE_WRITE_ORG))
2557 return -1;
2558 /* unprotect the page if it was put read-only because it
2559 contains translated code */
2560 if (!(p->flags & PAGE_WRITE)) {
2561 if (!page_unprotect(addr, 0, NULL))
2562 return -1;
2563 }
2564 return 0;
2565 }
2566 }
2567 return 0;
2568}
2569
2570/* called from signal handler: invalidate the code and unprotect the
2571    page. Return TRUE if the fault was successfully handled. */
2572int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2573{
2574 unsigned int page_index, prot, pindex;
2575 PageDesc *p, *p1;
2576 target_ulong host_start, host_end, addr;
2577
2578 /* Technically this isn't safe inside a signal handler. However we
2579 know this only ever happens in a synchronous SEGV handler, so in
2580 practice it seems to be ok. */
2581 mmap_lock();
2582
2583 host_start = address & qemu_host_page_mask;
2584 page_index = host_start >> TARGET_PAGE_BITS;
2585 p1 = page_find(page_index);
2586 if (!p1) {
2587 mmap_unlock();
2588 return 0;
2589 }
2590 host_end = host_start + qemu_host_page_size;
2591 p = p1;
2592 prot = 0;
2593 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2594 prot |= p->flags;
2595 p++;
2596 }
2597 /* if the page was really writable, then we change its
2598 protection back to writable */
2599 if (prot & PAGE_WRITE_ORG) {
2600 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2601 if (!(p1[pindex].flags & PAGE_WRITE)) {
2602 mprotect((void *)g2h(host_start), qemu_host_page_size,
2603 (prot & PAGE_BITS) | PAGE_WRITE);
2604 p1[pindex].flags |= PAGE_WRITE;
2605 /* and since the content will be modified, we must invalidate
2606 the corresponding translated code. */
2607 tb_invalidate_phys_page(address, pc, puc);
2608#ifdef DEBUG_TB_CHECK
2609 tb_invalidate_check(address);
2610#endif
2611 mmap_unlock();
2612 return 1;
2613 }
2614 }
2615 mmap_unlock();
2616 return 0;
2617}
2618
2619static inline void tlb_set_dirty(CPUState *env,
2620 unsigned long addr, target_ulong vaddr)
2621{
2622}
2623#endif /* defined(CONFIG_USER_ONLY) */
2624
2625#if !defined(CONFIG_USER_ONLY)
2626
2627static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2628 ram_addr_t memory, ram_addr_t region_offset);
2629static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2630 ram_addr_t orig_memory, ram_addr_t region_offset);
2631#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2632 need_subpage) \
2633 do { \
2634 if (addr > start_addr) \
2635 start_addr2 = 0; \
2636 else { \
2637 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2638 if (start_addr2 > 0) \
2639 need_subpage = 1; \
2640 } \
2641 \
2642 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2643 end_addr2 = TARGET_PAGE_SIZE - 1; \
2644 else { \
2645 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2646 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2647 need_subpage = 1; \
2648 } \
2649 } while (0)
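/* Worked example (assuming TARGET_PAGE_SIZE == 0x1000): when registering
   the range [0x1200, 0x1800) and evaluating the macro for the page at
   addr == 0x1000, both else branches are taken, giving start_addr2 == 0x200,
   end_addr2 == 0x7ff and need_subpage == 1, i.e. only bytes 0x200..0x7ff of
   that page belong to the registered region, so a subpage descriptor is
   required. */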
2650
2651/* register physical memory. 'size' must be a multiple of the target
2652 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2653 io memory page. The address used when calling the IO function is
2654 the offset from the start of the region, plus region_offset. Both
2655    start_addr and region_offset are rounded down to a page boundary
2656 before calculating this offset. This should not be a problem unless
2657 the low bits of start_addr and region_offset differ. */
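/* Schematic usage (handler names and addresses are illustrative): a device
   or board model typically registers RAM and MMIO like

       ram_addr_t ram_off = qemu_ram_alloc(0x100000);
       cpu_register_physical_memory(0x00000000, 0x100000, ram_off);

       int io = cpu_register_io_memory(0, my_read, my_write, opaque);
       cpu_register_physical_memory(0x10000000, TARGET_PAGE_SIZE, io);

   cpu_register_physical_memory() is the plain wrapper that calls this
   function with region_offset == 0. */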
2658void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2659 ram_addr_t size,
2660 ram_addr_t phys_offset,
2661 ram_addr_t region_offset)
2662{
2663 target_phys_addr_t addr, end_addr;
2664 PhysPageDesc *p;
2665 CPUState *env;
2666 ram_addr_t orig_size = size;
2667 void *subpage;
2668
2669#ifdef USE_KQEMU
2670 /* XXX: should not depend on cpu context */
2671 env = first_cpu;
2672 if (env->kqemu_enabled) {
2673 kqemu_set_phys_mem(start_addr, size, phys_offset);
2674 }
2675#endif
2676 if (kvm_enabled())
2677 kvm_set_phys_mem(start_addr, size, phys_offset);
2678
2679 if (phys_offset == IO_MEM_UNASSIGNED) {
2680 region_offset = start_addr;
2681 }
2682 region_offset &= TARGET_PAGE_MASK;
2683 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2684 end_addr = start_addr + (target_phys_addr_t)size;
2685 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2686 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2687 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2688 ram_addr_t orig_memory = p->phys_offset;
2689 target_phys_addr_t start_addr2, end_addr2;
2690 int need_subpage = 0;
2691
2692 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2693 need_subpage);
2694 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2695 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2696 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2697 &p->phys_offset, orig_memory,
2698 p->region_offset);
2699 } else {
2700 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2701 >> IO_MEM_SHIFT];
2702 }
2703 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2704 region_offset);
2705 p->region_offset = 0;
2706 } else {
2707 p->phys_offset = phys_offset;
2708 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2709 (phys_offset & IO_MEM_ROMD))
2710 phys_offset += TARGET_PAGE_SIZE;
2711 }
2712 } else {
2713 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2714 p->phys_offset = phys_offset;
2715 p->region_offset = region_offset;
2716 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2717 (phys_offset & IO_MEM_ROMD)) {
2718 phys_offset += TARGET_PAGE_SIZE;
2719 } else {
2720 target_phys_addr_t start_addr2, end_addr2;
2721 int need_subpage = 0;
2722
2723 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2724 end_addr2, need_subpage);
2725
2726 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2727 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2728 &p->phys_offset, IO_MEM_UNASSIGNED,
2729 addr & TARGET_PAGE_MASK);
2730 subpage_register(subpage, start_addr2, end_addr2,
2731 phys_offset, region_offset);
2732 p->region_offset = 0;
2733 }
2734 }
2735 }
2736 region_offset += TARGET_PAGE_SIZE;
2737 }
2738
2739 /* since each CPU stores ram addresses in its TLB cache, we must
2740 reset the modified entries */
2741 /* XXX: slow ! */
2742 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2743 tlb_flush(env, 1);
2744 }
2745}
2746
2747/* XXX: temporary until new memory mapping API */
2748ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2749{
2750 PhysPageDesc *p;
2751
2752 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2753 if (!p)
2754 return IO_MEM_UNASSIGNED;
2755 return p->phys_offset;
2756}
2757
2758#ifndef VBOX
2759void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2760{
2761 if (kvm_enabled())
2762 kvm_coalesce_mmio_region(addr, size);
2763}
2764
2765void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2766{
2767 if (kvm_enabled())
2768 kvm_uncoalesce_mmio_region(addr, size);
2769}
2770
2771/* XXX: better than nothing */
2772ram_addr_t qemu_ram_alloc(ram_addr_t size)
2773{
2774 ram_addr_t addr;
2775 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2776 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2777 (uint64_t)size, (uint64_t)phys_ram_size);
2778 abort();
2779 }
2780 addr = phys_ram_alloc_offset;
2781 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2782 return addr;
2783}
2784
2785void qemu_ram_free(ram_addr_t addr)
2786{
2787}
2788#endif /* !VBOX */
2789
2790static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2791{
2792#ifdef DEBUG_UNASSIGNED
2793 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2794#endif
2795#if defined(TARGET_SPARC)
2796 do_unassigned_access(addr, 0, 0, 0, 1);
2797#endif
2798 return 0;
2799}
2800
2801static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2802{
2803#ifdef DEBUG_UNASSIGNED
2804 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2805#endif
2806#if defined(TARGET_SPARC)
2807 do_unassigned_access(addr, 0, 0, 0, 2);
2808#endif
2809 return 0;
2810}
2811
2812static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2813{
2814#ifdef DEBUG_UNASSIGNED
2815 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2816#endif
2817#if defined(TARGET_SPARC)
2818 do_unassigned_access(addr, 0, 0, 0, 4);
2819#endif
2820 return 0;
2821}
2822
2823static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2824{
2825#ifdef DEBUG_UNASSIGNED
2826 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2827#endif
2828#if defined(TARGET_SPARC)
2829 do_unassigned_access(addr, 1, 0, 0, 1);
2830#endif
2831}
2832
2833static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2834{
2835#ifdef DEBUG_UNASSIGNED
2836 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2837#endif
2838#if defined(TARGET_SPARC)
2839 do_unassigned_access(addr, 1, 0, 0, 2);
2840#endif
2841}
2842
2843static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2844{
2845#ifdef DEBUG_UNASSIGNED
2846 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2847#endif
2848#if defined(TARGET_SPARC)
2849 do_unassigned_access(addr, 1, 0, 0, 4);
2850#endif
2851}
2852
2853static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2854 unassigned_mem_readb,
2855 unassigned_mem_readw,
2856 unassigned_mem_readl,
2857};
2858
2859static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2860 unassigned_mem_writeb,
2861 unassigned_mem_writew,
2862 unassigned_mem_writel,
2863};
2864
2865static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2866 uint32_t val)
2867{
2868 int dirty_flags;
2869#ifdef VBOX
2870 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2871 dirty_flags = 0xff;
2872 else
2873#endif /* VBOX */
2874 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2875 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2876#if !defined(CONFIG_USER_ONLY)
2877 tb_invalidate_phys_page_fast(ram_addr, 1);
2878# ifdef VBOX
2879 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2880 dirty_flags = 0xff;
2881 else
2882# endif /* VBOX */
2883 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2884#endif
2885 }
2886#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2887 remR3PhysWriteU8(ram_addr, val);
2888#else
2889 stb_p(phys_ram_base + ram_addr, val);
2890#endif
2891#ifdef USE_KQEMU
2892 if (cpu_single_env->kqemu_enabled &&
2893 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2894 kqemu_modify_page(cpu_single_env, ram_addr);
2895#endif
2896 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2897#ifdef VBOX
2898 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2899 #endif /* VBOX */
2900 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2901 /* we remove the notdirty callback only if the code has been
2902 flushed */
2903 if (dirty_flags == 0xff)
2904 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2905}
2906
2907static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2908 uint32_t val)
2909{
2910 int dirty_flags;
2911#ifdef VBOX
2912 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2913 dirty_flags = 0xff;
2914 else
2915#endif /* VBOX */
2916 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2917 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2918#if !defined(CONFIG_USER_ONLY)
2919 tb_invalidate_phys_page_fast(ram_addr, 2);
2920# ifdef VBOX
2921 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2922 dirty_flags = 0xff;
2923 else
2924# endif /* VBOX */
2925 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2926#endif
2927 }
2928#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2929 remR3PhysWriteU16(ram_addr, val);
2930#else
2931 stw_p(phys_ram_base + ram_addr, val);
2932#endif
2933#ifdef USE_KQEMU
2934 if (cpu_single_env->kqemu_enabled &&
2935 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2936 kqemu_modify_page(cpu_single_env, ram_addr);
2937#endif
2938 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2939#ifdef VBOX
2940 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2941#endif
2942 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2943 /* we remove the notdirty callback only if the code has been
2944 flushed */
2945 if (dirty_flags == 0xff)
2946 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2947}
2948
2949static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2950 uint32_t val)
2951{
2952 int dirty_flags;
2953#ifdef VBOX
2954 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2955 dirty_flags = 0xff;
2956 else
2957#endif /* VBOX */
2958 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2959 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2960#if !defined(CONFIG_USER_ONLY)
2961 tb_invalidate_phys_page_fast(ram_addr, 4);
2962# ifdef VBOX
2963 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
2964 dirty_flags = 0xff;
2965 else
2966# endif /* VBOX */
2967 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2968#endif
2969 }
2970#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
2971 remR3PhysWriteU32(ram_addr, val);
2972#else
2973 stl_p(phys_ram_base + ram_addr, val);
2974#endif
2975#ifdef USE_KQEMU
2976 if (cpu_single_env->kqemu_enabled &&
2977 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2978 kqemu_modify_page(cpu_single_env, ram_addr);
2979#endif
2980 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2981#ifdef VBOX
2982 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
2983#endif
2984 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2985 /* we remove the notdirty callback only if the code has been
2986 flushed */
2987 if (dirty_flags == 0xff)
2988 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2989}
2990
2991static CPUReadMemoryFunc *error_mem_read[3] = {
2992 NULL, /* never used */
2993 NULL, /* never used */
2994 NULL, /* never used */
2995};
2996
2997static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2998 notdirty_mem_writeb,
2999 notdirty_mem_writew,
3000 notdirty_mem_writel,
3001};
3002
3003/* Generate a debug exception if a watchpoint has been hit. */
3004static void check_watchpoint(int offset, int len_mask, int flags)
3005{
3006 CPUState *env = cpu_single_env;
3007 target_ulong pc, cs_base;
3008 TranslationBlock *tb;
3009 target_ulong vaddr;
3010 CPUWatchpoint *wp;
3011 int cpu_flags;
3012
3013 if (env->watchpoint_hit) {
3014 /* We re-entered the check after replacing the TB. Now raise
3015      * the debug interrupt so that it will trigger after the
3016 * current instruction. */
3017 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3018 return;
3019 }
3020 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
3021 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
3022 if ((vaddr == (wp->vaddr & len_mask) ||
3023 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
3024 wp->flags |= BP_WATCHPOINT_HIT;
3025 if (!env->watchpoint_hit) {
3026 env->watchpoint_hit = wp;
3027 tb = tb_find_pc(env->mem_io_pc);
3028 if (!tb) {
3029 cpu_abort(env, "check_watchpoint: could not find TB for "
3030 "pc=%p", (void *)env->mem_io_pc);
3031 }
3032 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
3033 tb_phys_invalidate(tb, -1);
3034 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3035 env->exception_index = EXCP_DEBUG;
3036 } else {
3037 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3038 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3039 }
3040 cpu_resume_from_signal(env, NULL);
3041 }
3042 } else {
3043 wp->flags &= ~BP_WATCHPOINT_HIT;
3044 }
3045 }
3046}
3047
3048/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3049 so these check for a hit then pass through to the normal out-of-line
3050 phys routines. */
3051static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
3052{
3053 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
3054 return ldub_phys(addr);
3055}
3056
3057static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
3058{
3059 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
3060 return lduw_phys(addr);
3061}
3062
3063static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
3064{
3065 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
3066 return ldl_phys(addr);
3067}
3068
3069static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
3070 uint32_t val)
3071{
3072 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
3073 stb_phys(addr, val);
3074}
3075
3076static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
3077 uint32_t val)
3078{
3079 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
3080 stw_phys(addr, val);
3081}
3082
3083static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
3084 uint32_t val)
3085{
3086 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
3087 stl_phys(addr, val);
3088}
3089
3090static CPUReadMemoryFunc *watch_mem_read[3] = {
3091 watch_mem_readb,
3092 watch_mem_readw,
3093 watch_mem_readl,
3094};
3095
3096static CPUWriteMemoryFunc *watch_mem_write[3] = {
3097 watch_mem_writeb,
3098 watch_mem_writew,
3099 watch_mem_writel,
3100};
3101
3102static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
3103 unsigned int len)
3104{
3105 uint32_t ret;
3106 unsigned int idx;
3107
3108 idx = SUBPAGE_IDX(addr);
3109#if defined(DEBUG_SUBPAGE)
3110 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3111 mmio, len, addr, idx);
3112#endif
3113 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
3114 addr + mmio->region_offset[idx][0][len]);
3115
3116 return ret;
3117}
3118
3119static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
3120 uint32_t value, unsigned int len)
3121{
3122 unsigned int idx;
3123
3124 idx = SUBPAGE_IDX(addr);
3125#if defined(DEBUG_SUBPAGE)
3126 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
3127 mmio, len, addr, idx, value);
3128#endif
3129 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
3130 addr + mmio->region_offset[idx][1][len],
3131 value);
3132}
3133
3134static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
3135{
3136#if defined(DEBUG_SUBPAGE)
3137 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3138#endif
3139
3140 return subpage_readlen(opaque, addr, 0);
3141}
3142
3143static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3144 uint32_t value)
3145{
3146#if defined(DEBUG_SUBPAGE)
3147 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3148#endif
3149 subpage_writelen(opaque, addr, value, 0);
3150}
3151
3152static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3153{
3154#if defined(DEBUG_SUBPAGE)
3155 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3156#endif
3157
3158 return subpage_readlen(opaque, addr, 1);
3159}
3160
3161static void subpage_writew (void *opaque, target_phys_addr_t addr,
3162 uint32_t value)
3163{
3164#if defined(DEBUG_SUBPAGE)
3165 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3166#endif
3167 subpage_writelen(opaque, addr, value, 1);
3168}
3169
3170static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3171{
3172#if defined(DEBUG_SUBPAGE)
3173 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3174#endif
3175
3176 return subpage_readlen(opaque, addr, 2);
3177}
3178
3179static void subpage_writel (void *opaque,
3180 target_phys_addr_t addr, uint32_t value)
3181{
3182#if defined(DEBUG_SUBPAGE)
3183 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3184#endif
3185 subpage_writelen(opaque, addr, value, 2);
3186}
3187
3188static CPUReadMemoryFunc *subpage_read[] = {
3189 &subpage_readb,
3190 &subpage_readw,
3191 &subpage_readl,
3192};
3193
3194static CPUWriteMemoryFunc *subpage_write[] = {
3195 &subpage_writeb,
3196 &subpage_writew,
3197 &subpage_writel,
3198};
3199
3200static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3201 ram_addr_t memory, ram_addr_t region_offset)
3202{
3203 int idx, eidx;
3204 unsigned int i;
3205
3206 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3207 return -1;
3208 idx = SUBPAGE_IDX(start);
3209 eidx = SUBPAGE_IDX(end);
3210#if defined(DEBUG_SUBPAGE)
3211 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
3212 mmio, start, end, idx, eidx, memory);
3213#endif
3214 memory >>= IO_MEM_SHIFT;
3215 for (; idx <= eidx; idx++) {
3216 for (i = 0; i < 4; i++) {
3217 if (io_mem_read[memory][i]) {
3218 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
3219 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
3220 mmio->region_offset[idx][0][i] = region_offset;
3221 }
3222 if (io_mem_write[memory][i]) {
3223 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
3224 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
3225 mmio->region_offset[idx][1][i] = region_offset;
3226 }
3227 }
3228 }
3229
3230 return 0;
3231}
3232
3233static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3234 ram_addr_t orig_memory, ram_addr_t region_offset)
3235{
3236 subpage_t *mmio;
3237 int subpage_memory;
3238
3239 mmio = qemu_mallocz(sizeof(subpage_t));
3240
3241 mmio->base = base;
3242 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
3243#if defined(DEBUG_SUBPAGE)
3244 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3245 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3246#endif
3247 *phys = subpage_memory | IO_MEM_SUBPAGE;
3248 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
3249 region_offset);
3250
3251 return mmio;
3252}
3253
3254static int get_free_io_mem_idx(void)
3255{
3256 int i;
3257
3258 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3259 if (!io_mem_used[i]) {
3260 io_mem_used[i] = 1;
3261 return i;
3262 }
3263
3264 return -1;
3265}
3266
3267static void io_mem_init(void)
3268{
3269 int i;
3270
3271 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
3272 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3273 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
3274 for (i=0; i<5; i++)
3275 io_mem_used[i] = 1;
3276
3277 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
3278 watch_mem_write, NULL);
3279
3280#ifndef VBOX /* VBOX: we do this later when the RAM is allocated. */
3281 /* alloc dirty bits array */
3282 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3283 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
3284#endif /* !VBOX */
3285}
3286
3287 /* mem_read and mem_write are arrays of function pointers for byte
3288    (index 0), word (index 1) and dword (index 2) accesses. Functions
3289    can be omitted with a NULL function pointer. The registered
3290    functions may be modified dynamically later.
3291    If io_index is non-zero, the corresponding I/O zone is
3292    modified. If it is zero, a new I/O zone is allocated. The return
3293    value can be used with cpu_register_physical_memory(). (-1) is
3294    returned on error. */
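/* Schematic usage (handler names are illustrative):

       static CPUReadMemoryFunc *my_read[3] = {
           my_readb, my_readw, my_readl,
       };
       static CPUWriteMemoryFunc *my_write[3] = {
           my_writeb, my_writew, my_writel,
       };

       int io = cpu_register_io_memory(0, my_read, my_write, state);
       cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);

   A NULL entry makes the returned value carry IO_MEM_SUBWIDTH, which
   forces registration through the subpage machinery above. */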
3295int cpu_register_io_memory(int io_index,
3296 CPUReadMemoryFunc **mem_read,
3297 CPUWriteMemoryFunc **mem_write,
3298 void *opaque)
3299{
3300 int i, subwidth = 0;
3301
3302 if (io_index <= 0) {
3303 io_index = get_free_io_mem_idx();
3304 if (io_index == -1)
3305 return io_index;
3306 } else {
3307 if (io_index >= IO_MEM_NB_ENTRIES)
3308 return -1;
3309 }
3310
3311 for(i = 0;i < 3; i++) {
3312 if (!mem_read[i] || !mem_write[i])
3313 subwidth = IO_MEM_SUBWIDTH;
3314 io_mem_read[io_index][i] = mem_read[i];
3315 io_mem_write[io_index][i] = mem_write[i];
3316 }
3317 io_mem_opaque[io_index] = opaque;
3318 return (io_index << IO_MEM_SHIFT) | subwidth;
3319}
3320
3321void cpu_unregister_io_memory(int io_table_address)
3322{
3323 int i;
3324 int io_index = io_table_address >> IO_MEM_SHIFT;
3325
3326 for (i=0;i < 3; i++) {
3327 io_mem_read[io_index][i] = unassigned_mem_read[i];
3328 io_mem_write[io_index][i] = unassigned_mem_write[i];
3329 }
3330 io_mem_opaque[io_index] = NULL;
3331 io_mem_used[io_index] = 0;
3332}
3333
3334CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
3335{
3336 return io_mem_write[io_index >> IO_MEM_SHIFT];
3337}
3338
3339CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
3340{
3341 return io_mem_read[io_index >> IO_MEM_SHIFT];
3342}
3343
3344#endif /* !defined(CONFIG_USER_ONLY) */
3345
3346/* physical memory access (slow version, mainly for debug) */
3347#if defined(CONFIG_USER_ONLY)
3348void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3349 int len, int is_write)
3350{
3351 int l, flags;
3352 target_ulong page;
3353 void * p;
3354
3355 while (len > 0) {
3356 page = addr & TARGET_PAGE_MASK;
3357 l = (page + TARGET_PAGE_SIZE) - addr;
3358 if (l > len)
3359 l = len;
3360 flags = page_get_flags(page);
3361 if (!(flags & PAGE_VALID))
3362 return;
3363 if (is_write) {
3364 if (!(flags & PAGE_WRITE))
3365 return;
3366 /* XXX: this code should not depend on lock_user */
3367 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3368 /* FIXME - should this return an error rather than just fail? */
3369 return;
3370 memcpy(p, buf, l);
3371 unlock_user(p, addr, l);
3372 } else {
3373 if (!(flags & PAGE_READ))
3374 return;
3375 /* XXX: this code should not depend on lock_user */
3376 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3377 /* FIXME - should this return an error rather than just fail? */
3378 return;
3379 memcpy(buf, p, l);
3380 unlock_user(p, addr, 0);
3381 }
3382 len -= l;
3383 buf += l;
3384 addr += l;
3385 }
3386}
3387
3388#else
3389void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3390 int len, int is_write)
3391{
3392 int l, io_index;
3393 uint8_t *ptr;
3394 uint32_t val;
3395 target_phys_addr_t page;
3396 unsigned long pd;
3397 PhysPageDesc *p;
3398
3399 while (len > 0) {
3400 page = addr & TARGET_PAGE_MASK;
3401 l = (page + TARGET_PAGE_SIZE) - addr;
3402 if (l > len)
3403 l = len;
3404 p = phys_page_find(page >> TARGET_PAGE_BITS);
3405 if (!p) {
3406 pd = IO_MEM_UNASSIGNED;
3407 } else {
3408 pd = p->phys_offset;
3409 }
3410
3411 if (is_write) {
3412 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3413 target_phys_addr_t addr1 = addr;
3414 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3415 if (p)
3416 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3417 /* XXX: could force cpu_single_env to NULL to avoid
3418 potential bugs */
3419 if (l >= 4 && ((addr1 & 3) == 0)) {
3420 /* 32 bit write access */
3421#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3422 val = ldl_p(buf);
3423#else
3424 val = *(const uint32_t *)buf;
3425#endif
3426 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3427 l = 4;
3428 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3429 /* 16 bit write access */
3430#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3431 val = lduw_p(buf);
3432#else
3433 val = *(const uint16_t *)buf;
3434#endif
3435 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3436 l = 2;
3437 } else {
3438 /* 8 bit write access */
3439#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3440 val = ldub_p(buf);
3441#else
3442 val = *(const uint8_t *)buf;
3443#endif
3444 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3445 l = 1;
3446 }
3447 } else {
3448 unsigned long addr1;
3449 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3450 /* RAM case */
3451#ifdef VBOX
3452 remR3PhysWrite(addr1, buf, l); NOREF(ptr);
3453#else
3454 ptr = phys_ram_base + addr1;
3455 memcpy(ptr, buf, l);
3456#endif
3457 if (!cpu_physical_memory_is_dirty(addr1)) {
3458 /* invalidate code */
3459 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3460 /* set dirty bit */
3461#ifdef VBOX
3462 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
3463#endif
3464 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3465 (0xff & ~CODE_DIRTY_FLAG);
3466 }
3467 }
3468 } else {
3469 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3470 !(pd & IO_MEM_ROMD)) {
3471 target_phys_addr_t addr1 = addr;
3472 /* I/O case */
3473 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3474 if (p)
3475 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3476 if (l >= 4 && ((addr1 & 3) == 0)) {
3477 /* 32 bit read access */
3478 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3479#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3480 stl_p(buf, val);
3481#else
3482 *(uint32_t *)buf = val;
3483#endif
3484 l = 4;
3485 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3486 /* 16 bit read access */
3487 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3488#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3489 stw_p(buf, val);
3490#else
3491 *(uint16_t *)buf = val;
3492#endif
3493 l = 2;
3494 } else {
3495 /* 8 bit read access */
3496 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3497#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
3498 stb_p(buf, val);
3499#else
3500 *(uint8_t *)buf = val;
3501#endif
3502 l = 1;
3503 }
3504 } else {
3505 /* RAM case */
3506#ifdef VBOX
3507 remR3PhysRead((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), buf, l); NOREF(ptr);
3508#else
3509 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3510 (addr & ~TARGET_PAGE_MASK);
3511 memcpy(buf, ptr, l);
3512#endif
3513 }
3514 }
3515 len -= l;
3516 buf += l;
3517 addr += l;
3518 }
3519}
3520
3521#ifndef VBOX
3522
3523/* used for ROM loading : can write in RAM and ROM */
3524void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3525 const uint8_t *buf, int len)
3526{
3527 int l;
3528 uint8_t *ptr;
3529 target_phys_addr_t page;
3530 unsigned long pd;
3531 PhysPageDesc *p;
3532
3533 while (len > 0) {
3534 page = addr & TARGET_PAGE_MASK;
3535 l = (page + TARGET_PAGE_SIZE) - addr;
3536 if (l > len)
3537 l = len;
3538 p = phys_page_find(page >> TARGET_PAGE_BITS);
3539 if (!p) {
3540 pd = IO_MEM_UNASSIGNED;
3541 } else {
3542 pd = p->phys_offset;
3543 }
3544
3545 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3546 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3547 !(pd & IO_MEM_ROMD)) {
3548 /* do nothing */
3549 } else {
3550 unsigned long addr1;
3551 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3552 /* ROM/RAM case */
3553 ptr = phys_ram_base + addr1;
3554 memcpy(ptr, buf, l);
3555 }
3556 len -= l;
3557 buf += l;
3558 addr += l;
3559 }
3560}
3561
3562typedef struct {
3563 void *buffer;
3564 target_phys_addr_t addr;
3565 target_phys_addr_t len;
3566} BounceBuffer;
3567
3568static BounceBuffer bounce;
3569
3570typedef struct MapClient {
3571 void *opaque;
3572 void (*callback)(void *opaque);
3573 LIST_ENTRY(MapClient) link;
3574} MapClient;
3575
3576static LIST_HEAD(map_client_list, MapClient) map_client_list
3577 = LIST_HEAD_INITIALIZER(map_client_list);
3578
3579void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3580{
3581 MapClient *client = qemu_malloc(sizeof(*client));
3582
3583 client->opaque = opaque;
3584 client->callback = callback;
3585 LIST_INSERT_HEAD(&map_client_list, client, link);
3586 return client;
3587}
3588
3589void cpu_unregister_map_client(void *_client)
3590{
3591 MapClient *client = (MapClient *)_client;
3592
3593 LIST_REMOVE(client, link);
3594}
3595
3596static void cpu_notify_map_clients(void)
3597{
3598 MapClient *client;
3599
3600 while (!LIST_EMPTY(&map_client_list)) {
3601 client = LIST_FIRST(&map_client_list);
3602 client->callback(client->opaque);
3603 LIST_REMOVE(client, link);
3604 }
3605}
3606
3607/* Map a physical memory region into a host virtual address.
3608 * May map a subset of the requested range, given by and returned in *plen.
3609 * May return NULL if resources needed to perform the mapping are exhausted.
3610 * Use only for reads OR writes - not for read-modify-write operations.
3611 * Use cpu_register_map_client() to know when retrying the map operation is
3612 * likely to succeed.
3613 */
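/* Minimal usage sketch (names are illustrative; error handling and the
   retry path are elided):

       target_phys_addr_t plen = len;
       uint8_t *host = cpu_physical_memory_map(addr, &plen, is_write);
       if (host) {
           ... transfer up to plen bytes directly at 'host' ...
           cpu_physical_memory_unmap(host, plen, is_write, plen);
       } else {
           cpu_register_map_client(opaque, retry_callback);
       }
 */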
3614void *cpu_physical_memory_map(target_phys_addr_t addr,
3615 target_phys_addr_t *plen,
3616 int is_write)
3617{
3618 target_phys_addr_t len = *plen;
3619 target_phys_addr_t done = 0;
3620 int l;
3621 uint8_t *ret = NULL;
3622 uint8_t *ptr;
3623 target_phys_addr_t page;
3624 unsigned long pd;
3625 PhysPageDesc *p;
3626 unsigned long addr1;
3627
3628 while (len > 0) {
3629 page = addr & TARGET_PAGE_MASK;
3630 l = (page + TARGET_PAGE_SIZE) - addr;
3631 if (l > len)
3632 l = len;
3633 p = phys_page_find(page >> TARGET_PAGE_BITS);
3634 if (!p) {
3635 pd = IO_MEM_UNASSIGNED;
3636 } else {
3637 pd = p->phys_offset;
3638 }
3639
3640 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3641 if (done || bounce.buffer) {
3642 break;
3643 }
3644 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3645 bounce.addr = addr;
3646 bounce.len = l;
3647 if (!is_write) {
3648 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3649 }
3650 ptr = bounce.buffer;
3651 } else {
3652 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3653 ptr = phys_ram_base + addr1;
3654 }
3655 if (!done) {
3656 ret = ptr;
3657 } else if (ret + done != ptr) {
3658 break;
3659 }
3660
3661 len -= l;
3662 addr += l;
3663 done += l;
3664 }
3665 *plen = done;
3666 return ret;
3667}
3668
3669/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3670 * Will also mark the memory as dirty if is_write == 1. access_len gives
3671 * the amount of memory that was actually read or written by the caller.
3672 */
3673void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3674 int is_write, target_phys_addr_t access_len)
3675{
3676 if (buffer != bounce.buffer) {
3677 if (is_write) {
3678 unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
3679 while (access_len) {
3680 unsigned l;
3681 l = TARGET_PAGE_SIZE;
3682 if (l > access_len)
3683 l = access_len;
3684 if (!cpu_physical_memory_is_dirty(addr1)) {
3685 /* invalidate code */
3686 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3687 /* set dirty bit */
3688 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3689 (0xff & ~CODE_DIRTY_FLAG);
3690 }
3691 addr1 += l;
3692 access_len -= l;
3693 }
3694 }
3695 return;
3696 }
3697 if (is_write) {
3698 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3699 }
3700 qemu_free(bounce.buffer);
3701 bounce.buffer = NULL;
3702 cpu_notify_map_clients();
3703}
3704
3705#endif /* !VBOX */
3706
3707/* warning: addr must be aligned */
3708uint32_t ldl_phys(target_phys_addr_t addr)
3709{
3710 int io_index;
3711 uint8_t *ptr;
3712 uint32_t val;
3713 unsigned long pd;
3714 PhysPageDesc *p;
3715
3716 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3717 if (!p) {
3718 pd = IO_MEM_UNASSIGNED;
3719 } else {
3720 pd = p->phys_offset;
3721 }
3722
3723 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3724 !(pd & IO_MEM_ROMD)) {
3725 /* I/O case */
3726 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3727 if (p)
3728 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3729 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3730 } else {
3731 /* RAM case */
3732#ifndef VBOX
3733 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3734 (addr & ~TARGET_PAGE_MASK);
3735 val = ldl_p(ptr);
3736#else
3737 val = remR3PhysReadU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
3738#endif
3739 }
3740 return val;
3741}
3742
3743/* warning: addr must be aligned */
3744uint64_t ldq_phys(target_phys_addr_t addr)
3745{
3746 int io_index;
3747 uint8_t *ptr;
3748 uint64_t val;
3749 unsigned long pd;
3750 PhysPageDesc *p;
3751
3752 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3753 if (!p) {
3754 pd = IO_MEM_UNASSIGNED;
3755 } else {
3756 pd = p->phys_offset;
3757 }
3758
3759 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3760 !(pd & IO_MEM_ROMD)) {
3761 /* I/O case */
3762 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3763 if (p)
3764 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3765#ifdef TARGET_WORDS_BIGENDIAN
3766 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3767 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3768#else
3769 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3770 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3771#endif
3772 } else {
3773 /* RAM case */
3774#ifndef VBOX
3775 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3776 (addr & ~TARGET_PAGE_MASK);
3777 val = ldq_p(ptr);
3778#else
3779 val = remR3PhysReadU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK)); NOREF(ptr);
3780#endif
3781 }
3782 return val;
3783}
3784
3785/* XXX: optimize */
3786uint32_t ldub_phys(target_phys_addr_t addr)
3787{
3788 uint8_t val;
3789 cpu_physical_memory_read(addr, &val, 1);
3790 return val;
3791}
3792
3793/* XXX: optimize */
3794uint32_t lduw_phys(target_phys_addr_t addr)
3795{
3796 uint16_t val;
3797 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3798 return tswap16(val);
3799}
3800
3801 /* warning: addr must be aligned. The ram page is not marked as dirty
3802 and the code inside is not invalidated. It is useful if the dirty
3803 bits are used to track modified PTEs */
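/* For example, the target-i386 page-table walk updates accessed/dirty bits
   in guest page-table entries this way (schematic sketch, not code from
   this file):

       pde |= PG_ACCESSED_MASK;
       stl_phys_notdirty(pde_addr, pde);

   so the write neither marks the RAM page dirty nor invalidates any
   translated code in it. */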
3804void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3805{
3806 int io_index;
3807 uint8_t *ptr;
3808 unsigned long pd;
3809 PhysPageDesc *p;
3810
3811 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3812 if (!p) {
3813 pd = IO_MEM_UNASSIGNED;
3814 } else {
3815 pd = p->phys_offset;
3816 }
3817
3818 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3819 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3820 if (p)
3821 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3822 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3823 } else {
3824#ifndef VBOX
3825 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3826 ptr = phys_ram_base + addr1;
3827 stl_p(ptr, val);
3828#else
3829 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3830#endif
3831
3832#ifndef VBOX
3833 if (unlikely(in_migration)) {
3834 if (!cpu_physical_memory_is_dirty(addr1)) {
3835 /* invalidate code */
3836 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3837 /* set dirty bit */
3838 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3839 (0xff & ~CODE_DIRTY_FLAG);
3840 }
3841 }
3842#endif /* !VBOX */
3843 }
3844}
3845
3846void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3847{
3848 int io_index;
3849 uint8_t *ptr;
3850 unsigned long pd;
3851 PhysPageDesc *p;
3852
3853 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3854 if (!p) {
3855 pd = IO_MEM_UNASSIGNED;
3856 } else {
3857 pd = p->phys_offset;
3858 }
3859
3860 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3861 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3862 if (p)
3863 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3864#ifdef TARGET_WORDS_BIGENDIAN
3865 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3866 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3867#else
3868 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3869 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3870#endif
3871 } else {
3872#ifndef VBOX
3873 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3874 (addr & ~TARGET_PAGE_MASK);
3875 stq_p(ptr, val);
3876#else
3877 remR3PhysWriteU64((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3878#endif
3879 }
3880}
3881
3882/* warning: addr must be aligned */
3883void stl_phys(target_phys_addr_t addr, uint32_t val)
3884{
3885 int io_index;
3886 uint8_t *ptr;
3887 unsigned long pd;
3888 PhysPageDesc *p;
3889
3890 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3891 if (!p) {
3892 pd = IO_MEM_UNASSIGNED;
3893 } else {
3894 pd = p->phys_offset;
3895 }
3896
3897 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3898 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3899 if (p)
3900 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3901 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3902 } else {
3903 unsigned long addr1;
3904 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3905 /* RAM case */
3906#ifndef VBOX
3907 ptr = phys_ram_base + addr1;
3908 stl_p(ptr, val);
3909#else
3910 remR3PhysWriteU32((pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK), val); NOREF(ptr);
3911#endif
3912 if (!cpu_physical_memory_is_dirty(addr1)) {
3913 /* invalidate code */
3914 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3915 /* set dirty bit */
3916#ifdef VBOX
3917 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size))
3918#endif
3919 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3920 (0xff & ~CODE_DIRTY_FLAG);
3921 }
3922 }
3923}
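/* Unlike the _notdirty variant, stl_phys() performs the full bookkeeping: if
   the page may still contain translated code it invalidates the affected TBs,
   then sets every flag except CODE_DIRTY_FLAG in the page's entry of
   phys_ram_dirty[].  A sketch of reading that entry back (illustration only,
   kept out of the build; the example_ name is an assumption): */
#if 0 /* illustrative sketch */
static int example_page_may_contain_code(unsigned long ram_offset)
{
    uint8_t flags = phys_ram_dirty[ram_offset >> TARGET_PAGE_BITS];
    /* CODE_DIRTY_FLAG clear => translated code may exist for this page,
       so a RAM write must go through tb_invalidate_phys_page_range(). */
    return (flags & CODE_DIRTY_FLAG) == 0;
}
#endif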
3924
3925/* XXX: optimize */
3926void stb_phys(target_phys_addr_t addr, uint32_t val)
3927{
3928 uint8_t v = val;
3929 cpu_physical_memory_write(addr, &v, 1);
3930}
3931
3932/* XXX: optimize */
3933void stw_phys(target_phys_addr_t addr, uint32_t val)
3934{
3935 uint16_t v = tswap16(val);
3936 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3937}
3938
3939/* XXX: optimize */
3940void stq_phys(target_phys_addr_t addr, uint64_t val)
3941{
3942 val = tswap64(val);
3943 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3944}
3945
3946#endif
3947
3948/* virtual memory access for debug */
3949int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3950 uint8_t *buf, int len, int is_write)
3951{
3952 int l;
3953 target_phys_addr_t phys_addr;
3954 target_ulong page;
3955
3956 while (len > 0) {
3957 page = addr & TARGET_PAGE_MASK;
3958 phys_addr = cpu_get_phys_page_debug(env, page);
3959 /* if no physical page mapped, return an error */
3960 if (phys_addr == -1)
3961 return -1;
3962 l = (page + TARGET_PAGE_SIZE) - addr;
3963 if (l > len)
3964 l = len;
3965 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3966 buf, l, is_write);
3967 len -= l;
3968 buf += l;
3969 addr += l;
3970 }
3971 return 0;
3972}
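/* cpu_memory_rw_debug() is the building block used by debugger-style code:
   it translates one guest-virtual page at a time via
   cpu_get_phys_page_debug() and copies at most up to the next page boundary
   per iteration, so callers can hand it arbitrarily large, unaligned ranges.
   A minimal caller sketch (illustration only, kept out of the build; the
   example_ helper is an assumption, not an existing API): read a
   NUL-terminated guest string into a host buffer. */
#if 0 /* illustrative sketch */
static int example_read_guest_string(CPUState *env, target_ulong vaddr,
                                     char *buf, int buf_size)
{
    int i;
    for (i = 0; i < buf_size - 1; i++) {
        uint8_t c;
        if (cpu_memory_rw_debug(env, vaddr + i, &c, 1, 0) != 0)
            return -1;              /* page not mapped */
        buf[i] = (char)c;
        if (c == '\0')
            return i;               /* length, excluding the terminator */
    }
    buf[i] = '\0';
    return i;
}
#endif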
3973
3974/* In deterministic execution mode (icount), an instruction that performs
3975   device I/O must be the last instruction of its TB. */
3976void cpu_io_recompile(CPUState *env, void *retaddr)
3977{
3978 TranslationBlock *tb;
3979 uint32_t n, cflags;
3980 target_ulong pc, cs_base;
3981 uint64_t flags;
3982
3983 tb = tb_find_pc((unsigned long)retaddr);
3984 if (!tb) {
3985 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3986 retaddr);
3987 }
3988 n = env->icount_decr.u16.low + tb->icount;
3989 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3990 /* Calculate how many instructions had been executed before the fault
3991 occurred. */
3992 n = n - env->icount_decr.u16.low;
3993 /* Generate a new TB ending on the I/O insn. */
3994 n++;
3995 /* On MIPS and SH, delay slot instructions can only be restarted if
3996 they were already the first instruction in the TB. If this is not
3997 the first instruction in a TB then re-execute the preceding
3998 branch. */
3999#if defined(TARGET_MIPS)
4000 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4001 env->active_tc.PC -= 4;
4002 env->icount_decr.u16.low++;
4003 env->hflags &= ~MIPS_HFLAG_BMASK;
4004 }
4005#elif defined(TARGET_SH4)
4006 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4007 && n > 1) {
4008 env->pc -= 2;
4009 env->icount_decr.u16.low++;
4010 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4011 }
4012#endif
4013 /* This should never happen. */
4014 if (n > CF_COUNT_MASK)
4015 cpu_abort(env, "TB too big during recompile");
4016
4017 cflags = n | CF_LAST_IO;
4018 pc = tb->pc;
4019 cs_base = tb->cs_base;
4020 flags = tb->flags;
4021 tb_phys_invalidate(tb, -1);
4022 /* FIXME: In theory this could raise an exception. In practice
4023 we have already translated the block once so it's probably ok. */
4024 tb_gen_code(env, pc, cs_base, flags, cflags);
4025 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
4026 the first in the TB) then we end up generating a whole new TB and
4027 repeating the fault, which is horribly inefficient.
4028 Better would be to execute just this insn uncached, or generate a
4029 second new TB. */
4030 cpu_resume_from_signal(env, NULL);
4031}
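/* The cflags value assembled above packs two things into one field:
   (cflags & CF_COUNT_MASK) is the number of guest instructions the
   regenerated TB may execute, and CF_LAST_IO marks that its final instruction
   is the device access and must be generated with I/O enabled.  A decoding
   sketch (illustration only, kept out of the build; the example_ name is an
   assumption): */
#if 0 /* illustrative sketch */
static void example_decode_cflags(const TranslationBlock *tb)
{
    unsigned budget  = tb->cflags & CF_COUNT_MASK;
    int      last_io = (tb->cflags & CF_LAST_IO) != 0;
    printf("tb pc=" TARGET_FMT_lx ": insn budget=%u, last insn is I/O=%d\n",
           tb->pc, budget, last_io);
}
#endif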
4032
4033#ifndef VBOX
4034void dump_exec_info(FILE *f,
4035 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
4036{
4037 int i, target_code_size, max_target_code_size;
4038 int direct_jmp_count, direct_jmp2_count, cross_page;
4039 TranslationBlock *tb;
4040
4041 target_code_size = 0;
4042 max_target_code_size = 0;
4043 cross_page = 0;
4044 direct_jmp_count = 0;
4045 direct_jmp2_count = 0;
4046 for(i = 0; i < nb_tbs; i++) {
4047 tb = &tbs[i];
4048 target_code_size += tb->size;
4049 if (tb->size > max_target_code_size)
4050 max_target_code_size = tb->size;
4051 if (tb->page_addr[1] != -1)
4052 cross_page++;
4053 if (tb->tb_next_offset[0] != 0xffff) {
4054 direct_jmp_count++;
4055 if (tb->tb_next_offset[1] != 0xffff) {
4056 direct_jmp2_count++;
4057 }
4058 }
4059 }
4060 /* XXX: avoid using doubles? */
4061 cpu_fprintf(f, "Translation buffer state:\n");
4062 cpu_fprintf(f, "gen code size %ld/%ld\n",
4063 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4064 cpu_fprintf(f, "TB count %d/%d\n",
4065 nb_tbs, code_gen_max_blocks);
4066 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
4067 nb_tbs ? target_code_size / nb_tbs : 0,
4068 max_target_code_size);
4069 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
4070 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4071 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
4072 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4073 cross_page,
4074 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4075 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
4076 direct_jmp_count,
4077 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4078 direct_jmp2_count,
4079 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
4080 cpu_fprintf(f, "\nStatistics:\n");
4081 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4082 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4083 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
4084 tcg_dump_info(f, cpu_fprintf);
4085}
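/* dump_exec_info() only needs an fprintf()-compatible callback, so the
   simplest way to get the translation-cache statistics onto stderr is to pass
   fprintf itself.  A usage sketch (illustration only, kept out of the build;
   the example_ wrapper is an assumption, not an existing entry point): */
#if 0 /* illustrative sketch */
static void example_dump_tb_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif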
4086#endif /* !VBOX */
4087
4088#if !defined(CONFIG_USER_ONLY)
4089
4090#define MMUSUFFIX _cmmu
4091#define GETPC() NULL
4092#define env cpu_single_env
4093#define SOFTMMU_CODE_ACCESS
4094
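/* softmmu_template.h is a multiple-inclusion template: each SHIFT value below
   (0..3, i.e. 1-, 2-, 4- and 8-byte accesses) expands it into another set of
   slow-path load helpers.  With MMUSUFFIX set to _cmmu and
   SOFTMMU_CODE_ACCESS defined, these instantiations are the code-fetch
   variants used while translating guest instructions, as opposed to the
   data-access set instantiated per target elsewhere. */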
4095#define SHIFT 0
4096#include "softmmu_template.h"
4097
4098#define SHIFT 1
4099#include "softmmu_template.h"
4100
4101#define SHIFT 2
4102#include "softmmu_template.h"
4103
4104#define SHIFT 3
4105#include "softmmu_template.h"
4106
4107#undef env
4108
4109#endif