/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>
#include <sys/mman.h>

#include "config.h"
#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;

typedef struct PageDesc {
    /* offset in memory of the page + io_index in the low 12 bits */
    unsigned long phys_offset;
    /* list of TBs intersecting this physical page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of writes to a given page; past a threshold a code bitmap is used */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    unsigned int prot;
#endif
} VirtPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
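
/* Illustration of the two level lookup used by page_find_alloc() and
   page_find() below, assuming TARGET_PAGE_BITS == 12 (4 KB target pages):
   L1_BITS is then 10 and a target page index splits as

       l1_index = page_index >> L2_BITS;       selects one of L1_SIZE == 1024
                                               L2 tables (allocated on demand)
       l2_index = page_index & (L2_SIZE - 1);  selects the PageDesc inside it

   so each l1_map[] entry covers 2^(L2_BITS + TARGET_PAGE_BITS) = 4 MB of
   target address space. */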

static void io_mem_init(void);

unsigned long real_host_page_size;
unsigned long host_page_bits;
unsigned long host_page_size;
unsigned long host_page_mask;

static PageDesc *l1_map[L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
static VirtPageDesc *l1_virt_map[L1_SIZE];
static unsigned int virt_valid_tag;
#endif

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

static void page_init(void)
{
    /* NOTE: we can always suppose that host_page_size >=
       TARGET_PAGE_SIZE */
    real_host_page_size = getpagesize();
    if (host_page_size == 0)
        host_page_size = real_host_page_size;
    if (host_page_size < TARGET_PAGE_SIZE)
        host_page_size = TARGET_PAGE_SIZE;
    host_page_bits = 0;
    while ((1 << host_page_bits) < host_page_size)
        host_page_bits++;
    host_page_mask = ~(host_page_size - 1);
#if !defined(CONFIG_USER_ONLY)
    virt_valid_tag = 1;
#endif
}

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(CPUState *env, uint32_t addr);
static void tlb_unprotect_code(CPUState *env, uint32_t addr);
static void tlb_unprotect_code_phys(CPUState *env, uint32_t phys_addr);

static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
{
    VirtPageDesc **lp, *p;

    lp = &l1_virt_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = malloc(sizeof(VirtPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline VirtPageDesc *virt_page_find(unsigned int index)
{
    VirtPageDesc *p;

    p = l1_virt_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static void virt_page_flush(void)
{
    int i, j;
    VirtPageDesc *p;

    virt_valid_tag++;

    if (virt_valid_tag == 0) {
        virt_valid_tag = 1;
        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                for(j = 0; j < L2_SIZE; j++)
                    p[j].valid_tag = 0;
            }
        }
    }
}
#else
static void virt_page_flush(void)
{
}
#endif

void cpu_exec_init(void)
{
    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env)
{
    int i;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    nb_tbs = 0;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;
    virt_page_flush();

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++)
        tb_phys_hash[i] = NULL;
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
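
/* Note on the pointer tagging used by the page lists: first_tb and
   page_next[] store a TranslationBlock pointer with the page index n
   (0 or 1, i.e. which of the at most two pages spanned by the TB) encoded
   in the two low bits, as set up by tb_alloc_page() further down.  The tag
   is recovered with '(long)tb & 3' and stripped with '(long)tb & ~3', as in
   tb_page_remove() above; the value 2 marks the head of the circular
   jmp_first list. */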

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_invalidate(TranslationBlock *tb)
{
    unsigned int h, n1;
    TranslationBlock *tb1, *tb2, **ptb;

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    ptb = &tb_hash[h];
    for(;;) {
        tb1 = *ptb;
        /* NOTE: the TB is not necessarily linked in the hash. It
           indicates that it is not currently used */
        if (tb1 == NULL)
            return;
        if (tb1 == tb) {
            *ptb = tb1->hash_next;
            break;
        }
        ptb = &tb1->hash_next;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    PageDesc *p;
    unsigned int h;
    target_ulong phys_pc;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidate(tb);
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
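
/* Worked example (illustrative): set_bits(tab, 3, 7) marks bits 3..9,
   i.e. it ORs 0xf8 into tab[0] (bits 3..7) and 0x03 into tab[1]
   (bits 0..1); bits are numbered LSB first inside each byte. */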

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page */
static void tb_invalidate_phys_page_range(target_ulong start, target_ulong end)
{
    int n;
    PageDesc *p;
    TranslationBlock *tb, *tb_next;
    target_ulong tb_start, tb_end;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code_phys(cpu_single_env, start);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len);
    }
}
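
/* The two functions above implement the self modifying code heuristic:
   a page containing translated code initially takes the slow path
   (tb_invalidate_phys_page_range) on every write; once code_write_count
   reaches SMC_BITMAP_USE_THRESHOLD, a bitmap with one bit per byte of the
   page is built, and subsequent small writes only trigger an invalidation
   if they actually touch a byte that belongs to a translated block. */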

/* invalidate all TBs which intersect with the target virtual page
   starting in range [start;end[. This function is usually used when
   the target processor flushes its I-cache. NOTE: start and end must
   refer to the same physical page */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    int n;
    PageDesc *p;
    TranslationBlock *tb, *tb_next;
    target_ulong pc;
    target_ulong phys_start;

#if !defined(CONFIG_USER_ONLY)
    {
        VirtPageDesc *vp;
        vp = virt_page_find(start >> TARGET_PAGE_BITS);
        if (!vp)
            return;
        if (vp->valid_tag != virt_valid_tag)
            return;
        phys_start = vp->phys_addr + (start & ~TARGET_PAGE_MASK);
    }
#else
    phys_start = start;
#endif
    p = page_find(phys_start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        pc = tb->pc;
        if (!((pc + tb->size) <= start || pc >= end)) {
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb)
        tlb_unprotect_code(cpu_single_env, start);
#endif
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr)
{
    int n;
    PageDesc *p;
    TranslationBlock *tb;

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, unsigned int page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        unsigned long host_start, host_end, addr;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        host_start = page_addr & host_page_mask;
        host_end = host_start + host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        target_ulong virt_addr;

        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
        tlb_protect_code(cpu_single_env, virt_addr);
    }
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(unsigned long pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;
#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* link the tb with the other TBs */
void tb_link(TranslationBlock *tb)
{
#if !defined(CONFIG_USER_ONLY)
    {
        VirtPageDesc *vp;
        target_ulong addr;

        /* save the code memory mappings (needed to invalidate the code) */
        addr = tb->pc & TARGET_PAGE_MASK;
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
        vp->phys_addr = tb->page_addr[0];
        vp->valid_tag = virt_valid_tag;

        if (tb->page_addr[1] != -1) {
            addr += TARGET_PAGE_SIZE;
            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
            vp->phys_addr = tb->page_addr[1];
            vp->valid_tag = virt_valid_tag;
        }
    }
#endif

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
}
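
/* Rough usage sketch (simplified; the real caller is the CPU execution
   loop, and code generation details are elided here):

       tb = tb_alloc(pc);
       if (!tb) {
           tb_flush(env);                  // buffer full: drop everything
           tb = tb_alloc(pc);
       }
       tb->tc_ptr = code_gen_ptr;
       // ... generate host code for the block at tb->tc_ptr ...
       tb_link_phys(tb, phys_pc, phys_page2);  // hash table + per-page lists
       tb_link(tb);                            // virtual mappings + jump lists

   phys_page2 is -1 unless the translated block straddles a second physical
   page. */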

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
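
/* The binary search above relies on TBs being allocated sequentially from
   code_gen_buffer, so tbs[0..nb_tbs-1] is naturally sorted by tc_ptr.
   tb_find_pc() is typically used to map a host code pointer (e.g. taken
   from a signal context while generated code was running) back to the TB,
   and hence to the guest PC, being executed. */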

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, uint32_t pc)
{
#if defined(TARGET_I386)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;
    tb_invalidate_page_range(pc, pc + 1);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, uint32_t pc)
{
#if defined(TARGET_I386)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
            (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
    env->nb_breakpoints--;
    tb_invalidate_page_range(pc, pc + 1);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_I386)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb) {
        tb_reset_jump_recursive(tb);
    }
}


void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_x86_dump_state(env, stderr, X86_DUMP_FPU | X86_DUMP_CCOP);
#endif
    va_end(ap);
    abort();
}

#if !defined(CONFIG_USER_ONLY)

void tlb_flush(CPUState *env)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }

    virt_page_flush();
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, uint32_t addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}

void tlb_flush_page(CPUState *env, uint32_t addr)
{
    int i, n;
    VirtPageDesc *vp;
    PageDesc *p;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: 0x%08x\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    /* remove from the virtual pc hash table all the TB at this
       virtual address */

    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (vp && vp->valid_tag == virt_valid_tag) {
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
        if (p) {
            /* we remove all the links to the TBs in this virtual page */
            tb = p->first_tb;
            while (tb != NULL) {
                n = (long)tb & 3;
                tb = (TranslationBlock *)((long)tb & ~3);
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
                    tb_invalidate(tb);
                }
                tb = tb->page_next[n];
            }
        }
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
}

static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, uint32_t addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
        tlb_entry->address |= IO_MEM_CODE;
        tlb_entry->addend -= (unsigned long)phys_ram_base;
    }
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(CPUState *env, uint32_t addr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_protect_code1(&env->tlb_write[0][i], addr);
    tlb_protect_code1(&env->tlb_write[1][i], addr);
#if !defined(CONFIG_SOFTMMU)
    /* NOTE: as we generated the code for this page, it is already at
       least readable */
    if (addr < MMAP_AREA_END)
        mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
#endif
}
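
/* How code write detection works in softmmu mode: tlb_protect_code() tags
   the write TLB entries of a page containing translated code with
   IO_MEM_CODE and rebases the addend to a physical RAM offset.  Guest
   stores to that page then leave the fast path and are routed to
   code_mem_writeb/w/l() further down, which call
   tb_invalidate_phys_page_fast() before performing the store;
   tlb_unprotect_code() undoes the tagging once no TB remains on the
   page. */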

static inline void tlb_unprotect_code1(CPUTLBEntry *tlb_entry, uint32_t addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE) {
        tlb_entry->address &= TARGET_PAGE_MASK;
        tlb_entry->addend += (unsigned long)phys_ram_base;
    }
}

/* update the TLB so that writes in virtual page 'addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code(CPUState *env, uint32_t addr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_unprotect_code1(&env->tlb_write[0][i], addr);
    tlb_unprotect_code1(&env->tlb_write[1][i], addr);
}

static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
                                       uint32_t phys_addr)
{
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
        ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
        tlb_entry->address &= TARGET_PAGE_MASK;
        tlb_entry->addend += (unsigned long)phys_ram_base;
    }
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
/* XXX: find a way to improve it */
static void tlb_unprotect_code_phys(CPUState *env, uint32_t phys_addr)
{
    int i;

    phys_addr &= TARGET_PAGE_MASK;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
}

/* add a new TLB entry. At most a single entry for a given virtual
   address is permitted. */
int tlb_set_page(CPUState *env, uint32_t vaddr, uint32_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    PageDesc *p;
    target_ulong pd;
    TranslationBlock *first_tb;
    unsigned int index;
    target_ulong address, addend;
    int ret;

    p = page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
        first_tb = NULL;
    } else {
        pd = p->phys_offset;
        first_tb = p->first_tb;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
           vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        if (prot & PROT_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PROT_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend - (unsigned long)phys_ram_base;
            } else if (first_tb) {
                /* if code is present, we use a specific memory
                   handler. It works only for physical memory access */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
                env->tlb_write[is_user][index].addend = addend - (unsigned long)phys_ram_base;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;
            if (prot & PROT_WRITE) {
                if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM || first_tb) {
                    /* ROM: we do as if code was inside */
                    /* if code is present, we only map as read only and save the
                       original mapping */
                    VirtPageDesc *vp;

                    vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
                    vp->phys_addr = pd;
                    vp->prot = prot;
                    vp->valid_tag = virt_valid_tag;
                    prot &= ~PAGE_WRITE;
                }
            }
            map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                            MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
            if (map_addr == MAP_FAILED) {
                cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                          paddr, vaddr);
            }
        }
    }
#endif
    return ret;
}
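
/* Example of the addend encoding set up above (illustrative numbers): for
   a standard RAM page, addend = phys_ram_base + (pd & TARGET_PAGE_MASK) -
   vaddr, so a guest access can be turned into a host access as
   'vaddr + addend'.  E.g. with phys_ram_base == 0x40000000,
   pd == 0x00123000 and vaddr == 0x08048000, addend is 0x380db000 and a
   guest access to 0x08048010 touches host address 0x40123010. */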

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    tb_invalidate_phys_page(vp->phys_addr);
    mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env)
{
}

void tlb_flush_page(CPUState *env, uint32_t addr)
{
}

void tlb_flush_page_write(CPUState *env, uint32_t addr)
{
}

int tlb_set_page(CPUState *env, uint32_t vaddr, uint32_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}
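
/* Summary of the user mode self modifying code path: tb_alloc_page()
   write protects (mprotect) any host page holding translated code.  A
   later guest write to such a page faults, the host signal handler calls
   page_unprotect() above, which restores PAGE_WRITE (remembered in
   PAGE_WRITE_ORG), invalidates the TBs of that page and lets the faulting
   write be restarted. */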

/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr);
    }
}

#endif /* defined(CONFIG_USER_ONLY) */

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(unsigned long start_addr, unsigned long size,
                                  long phys_offset)
{
    unsigned long addr, end_addr;
    PageDesc *p;

    end_addr = start_addr + size;
    for(addr = start_addr; addr < end_addr; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}

static uint32_t unassigned_mem_readb(uint32_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(uint32_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

/* self modifying code support in soft mmu mode : writing to a page
   containing code comes to these functions */

static void code_mem_writeb(uint32_t addr, uint32_t val)
{
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(addr, 1);
#endif
    stb_raw(phys_ram_base + addr, val);
}

static void code_mem_writew(uint32_t addr, uint32_t val)
{
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(addr, 2);
#endif
    stw_raw(phys_ram_base + addr, val);
}

static void code_mem_writel(uint32_t addr, uint32_t val)
{
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(addr, 4);
#endif
    stl_raw(phys_ram_base + addr, val);
}

static CPUReadMemoryFunc *code_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *code_mem_write[3] = {
    code_mem_writeb,
    code_mem_writew,
    code_mem_writel,
};

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write);
    cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write);
    io_mem_nb = 4;
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write)
{
    int i;

    if (io_index <= 0) {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    return io_index << IO_MEM_SHIFT;
}
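
/* Hypothetical usage sketch; 'mydev_*' and the address range are made up
   for illustration only:

       static CPUReadMemoryFunc *mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,
       };
       static CPUWriteMemoryFunc *mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };

       int iomemtype = cpu_register_io_memory(0, mydev_read, mydev_write);
       cpu_register_physical_memory(0xfc000000, 0x1000, iomemtype);

   Passing 0 as io_index allocates a new slot; the returned value already
   has the IO_MEM_SHIFT encoding expected by the phys_offset argument of
   cpu_register_physical_memory(). */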

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif