blob: 4bf87e37e7d34f49e7ab362c87b870863973505b [file] [log] [blame]
bellard54936002003-05-13 00:25:15 +00001/*
bellardfd6ce8f2003-05-14 19:00:11 +00002 * virtual page mapping and translated block handling
bellard54936002003-05-13 00:25:15 +00003 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20#include <stdlib.h>
21#include <stdio.h>
22#include <stdarg.h>
23#include <string.h>
24#include <errno.h>
25#include <unistd.h>
26#include <inttypes.h>
bellardfd6ce8f2003-05-14 19:00:11 +000027#include <sys/mman.h>
bellard54936002003-05-13 00:25:15 +000028
bellardea041c02003-06-25 16:16:50 +000029#include "config.h"
bellard6180a182003-09-30 21:04:53 +000030#include "cpu.h"
31#include "exec-all.h"
bellard54936002003-05-13 00:25:15 +000032
bellardfd6ce8f2003-05-14 19:00:11 +000033//#define DEBUG_TB_INVALIDATE
bellard66e85a22003-06-24 13:28:12 +000034//#define DEBUG_FLUSH
bellard9fa3e852004-01-04 18:06:42 +000035//#define DEBUG_TLB
bellardfd6ce8f2003-05-14 19:00:11 +000036
37/* make various TB consistency checks */
38//#define DEBUG_TB_CHECK
bellard98857882004-01-18 21:52:14 +000039//#define DEBUG_TLB_CHECK
bellardfd6ce8f2003-05-14 19:00:11 +000040
41/* threshold to flush the translated code buffer */
42#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
43
bellard9fa3e852004-01-04 18:06:42 +000044#define SMC_BITMAP_USE_THRESHOLD 10
45
46#define MMAP_AREA_START 0x00000000
47#define MMAP_AREA_END 0xa8000000
bellardfd6ce8f2003-05-14 19:00:11 +000048
49TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
50TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
bellard9fa3e852004-01-04 18:06:42 +000051TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
bellardfd6ce8f2003-05-14 19:00:11 +000052int nb_tbs;
bellardeb51d102003-05-14 21:51:13 +000053/* any access to the tbs or the page table must use this lock */
54spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
bellardfd6ce8f2003-05-14 19:00:11 +000055
56uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
57uint8_t *code_gen_ptr;
58
bellard9fa3e852004-01-04 18:06:42 +000059int phys_ram_size;
60int phys_ram_fd;
61uint8_t *phys_ram_base;
62
/* Per physical page bookkeeping: RAM offset, the translated blocks
   that intersect the page, and self-modifying-code tracking state. */
typedef struct PageDesc {
    /* offset in memory of the page + io_index in the low 12 bits */
    unsigned long phys_offset;
    /* list of TBs intersecting this physical page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    /* one bit per byte of the page, set where translated code lies
       (built lazily by build_page_bitmap) */
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    /* PAGE_* protection flags, user-mode emulation only */
    unsigned long flags;
#endif
} PageDesc;
76
/* Cached virtual->physical mapping of a code page.  All entries are
   invalidated at once by bumping the global 'virt_valid_tag' (see
   virt_page_flush), so each entry stores the tag it was filled with. */
typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    unsigned int prot;
#endif
} VirtPageDesc;
88
bellard54936002003-05-13 00:25:15 +000089#define L2_BITS 10
90#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
91
92#define L1_SIZE (1 << L1_BITS)
93#define L2_SIZE (1 << L2_BITS)
94
bellard33417e72003-08-10 21:47:01 +000095static void io_mem_init(void);
bellardfd6ce8f2003-05-14 19:00:11 +000096
bellard54936002003-05-13 00:25:15 +000097unsigned long real_host_page_size;
98unsigned long host_page_bits;
99unsigned long host_page_size;
100unsigned long host_page_mask;
101
102static PageDesc *l1_map[L1_SIZE];
103
bellard9fa3e852004-01-04 18:06:42 +0000104#if !defined(CONFIG_USER_ONLY)
105static VirtPageDesc *l1_virt_map[L1_SIZE];
106static unsigned int virt_valid_tag;
107#endif
108
bellard33417e72003-08-10 21:47:01 +0000109/* io memory support */
bellard33417e72003-08-10 21:47:01 +0000110CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
111CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
112static int io_mem_nb;
113
bellard34865132003-10-05 14:28:56 +0000114/* log support */
115char *logfilename = "/tmp/qemu.log";
116FILE *logfile;
117int loglevel;
118
bellardb346ff42003-06-15 20:05:50 +0000119static void page_init(void)
bellard54936002003-05-13 00:25:15 +0000120{
121 /* NOTE: we can always suppose that host_page_size >=
122 TARGET_PAGE_SIZE */
123 real_host_page_size = getpagesize();
124 if (host_page_size == 0)
125 host_page_size = real_host_page_size;
126 if (host_page_size < TARGET_PAGE_SIZE)
127 host_page_size = TARGET_PAGE_SIZE;
128 host_page_bits = 0;
129 while ((1 << host_page_bits) < host_page_size)
130 host_page_bits++;
131 host_page_mask = ~(host_page_size - 1);
bellard9fa3e852004-01-04 18:06:42 +0000132#if !defined(CONFIG_USER_ONLY)
133 virt_valid_tag = 1;
134#endif
bellard54936002003-05-13 00:25:15 +0000135}
136
bellardfd6ce8f2003-05-14 19:00:11 +0000137static inline PageDesc *page_find_alloc(unsigned int index)
bellard54936002003-05-13 00:25:15 +0000138{
bellard54936002003-05-13 00:25:15 +0000139 PageDesc **lp, *p;
140
bellard54936002003-05-13 00:25:15 +0000141 lp = &l1_map[index >> L2_BITS];
142 p = *lp;
143 if (!p) {
144 /* allocate if not found */
145 p = malloc(sizeof(PageDesc) * L2_SIZE);
bellardfd6ce8f2003-05-14 19:00:11 +0000146 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
bellard54936002003-05-13 00:25:15 +0000147 *lp = p;
148 }
149 return p + (index & (L2_SIZE - 1));
150}
151
bellardfd6ce8f2003-05-14 19:00:11 +0000152static inline PageDesc *page_find(unsigned int index)
bellard54936002003-05-13 00:25:15 +0000153{
bellard54936002003-05-13 00:25:15 +0000154 PageDesc *p;
155
bellard54936002003-05-13 00:25:15 +0000156 p = l1_map[index >> L2_BITS];
157 if (!p)
158 return 0;
bellardfd6ce8f2003-05-14 19:00:11 +0000159 return p + (index & (L2_SIZE - 1));
bellard54936002003-05-13 00:25:15 +0000160}
161
bellard9fa3e852004-01-04 18:06:42 +0000162#if !defined(CONFIG_USER_ONLY)
163static void tlb_protect_code(CPUState *env, uint32_t addr);
164static void tlb_unprotect_code(CPUState *env, uint32_t addr);
165static void tlb_unprotect_code_phys(CPUState *env, uint32_t phys_addr);
bellardfd6ce8f2003-05-14 19:00:11 +0000166
bellard9fa3e852004-01-04 18:06:42 +0000167static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
168{
169 VirtPageDesc **lp, *p;
170
171 lp = &l1_virt_map[index >> L2_BITS];
172 p = *lp;
173 if (!p) {
174 /* allocate if not found */
175 p = malloc(sizeof(VirtPageDesc) * L2_SIZE);
176 memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
177 *lp = p;
178 }
179 return p + (index & (L2_SIZE - 1));
180}
181
182static inline VirtPageDesc *virt_page_find(unsigned int index)
183{
184 VirtPageDesc *p;
185
186 p = l1_virt_map[index >> L2_BITS];
bellardfd6ce8f2003-05-14 19:00:11 +0000187 if (!p)
188 return 0;
bellard9fa3e852004-01-04 18:06:42 +0000189 return p + (index & (L2_SIZE - 1));
bellardfd6ce8f2003-05-14 19:00:11 +0000190}
191
bellard9fa3e852004-01-04 18:06:42 +0000192static void virt_page_flush(void)
bellard54936002003-05-13 00:25:15 +0000193{
bellard9fa3e852004-01-04 18:06:42 +0000194 int i, j;
195 VirtPageDesc *p;
196
197 virt_valid_tag++;
bellard54936002003-05-13 00:25:15 +0000198
bellard9fa3e852004-01-04 18:06:42 +0000199 if (virt_valid_tag == 0) {
200 virt_valid_tag = 1;
201 for(i = 0; i < L1_SIZE; i++) {
202 p = l1_virt_map[i];
203 if (p) {
204 for(j = 0; j < L2_SIZE; j++)
205 p[j].valid_tag = 0;
206 }
bellardfd6ce8f2003-05-14 19:00:11 +0000207 }
bellard54936002003-05-13 00:25:15 +0000208 }
209}
bellard9fa3e852004-01-04 18:06:42 +0000210#else
211static void virt_page_flush(void)
212{
213}
214#endif
bellardfd6ce8f2003-05-14 19:00:11 +0000215
bellardb346ff42003-06-15 20:05:50 +0000216void cpu_exec_init(void)
bellardfd6ce8f2003-05-14 19:00:11 +0000217{
218 if (!code_gen_ptr) {
219 code_gen_ptr = code_gen_buffer;
bellardb346ff42003-06-15 20:05:50 +0000220 page_init();
bellard33417e72003-08-10 21:47:01 +0000221 io_mem_init();
bellardfd6ce8f2003-05-14 19:00:11 +0000222 }
223}
224
bellard9fa3e852004-01-04 18:06:42 +0000225static inline void invalidate_page_bitmap(PageDesc *p)
226{
227 if (p->code_bitmap) {
228 free(p->code_bitmap);
229 p->code_bitmap = NULL;
230 }
231 p->code_write_count = 0;
232}
233
bellardfd6ce8f2003-05-14 19:00:11 +0000234/* set to NULL all the 'first_tb' fields in all PageDescs */
235static void page_flush_tb(void)
236{
237 int i, j;
238 PageDesc *p;
239
240 for(i = 0; i < L1_SIZE; i++) {
241 p = l1_map[i];
242 if (p) {
bellard9fa3e852004-01-04 18:06:42 +0000243 for(j = 0; j < L2_SIZE; j++) {
244 p->first_tb = NULL;
245 invalidate_page_bitmap(p);
246 p++;
247 }
bellardfd6ce8f2003-05-14 19:00:11 +0000248 }
249 }
250}
251
252/* flush all the translation blocks */
bellardd4e81642003-05-25 16:46:15 +0000253/* XXX: tb_flush is currently not thread safe */
bellard01243112004-01-04 15:48:17 +0000254void tb_flush(CPUState *env)
bellardfd6ce8f2003-05-14 19:00:11 +0000255{
256 int i;
bellard01243112004-01-04 15:48:17 +0000257#if defined(DEBUG_FLUSH)
bellardfd6ce8f2003-05-14 19:00:11 +0000258 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
259 code_gen_ptr - code_gen_buffer,
260 nb_tbs,
bellard01243112004-01-04 15:48:17 +0000261 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
bellardfd6ce8f2003-05-14 19:00:11 +0000262#endif
263 nb_tbs = 0;
264 for(i = 0;i < CODE_GEN_HASH_SIZE; i++)
265 tb_hash[i] = NULL;
bellard9fa3e852004-01-04 18:06:42 +0000266 virt_page_flush();
267
268 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++)
269 tb_phys_hash[i] = NULL;
bellardfd6ce8f2003-05-14 19:00:11 +0000270 page_flush_tb();
bellard9fa3e852004-01-04 18:06:42 +0000271
bellardfd6ce8f2003-05-14 19:00:11 +0000272 code_gen_ptr = code_gen_buffer;
bellardd4e81642003-05-25 16:46:15 +0000273 /* XXX: flush processor icache at this point if cache flush is
274 expensive */
bellardfd6ce8f2003-05-14 19:00:11 +0000275}
276
277#ifdef DEBUG_TB_CHECK
278
279static void tb_invalidate_check(unsigned long address)
280{
281 TranslationBlock *tb;
282 int i;
283 address &= TARGET_PAGE_MASK;
284 for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
285 for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
286 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
287 address >= tb->pc + tb->size)) {
288 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
289 address, tb->pc, tb->size);
290 }
291 }
292 }
293}
294
295/* verify that all the pages have correct rights for code */
296static void tb_page_check(void)
297{
298 TranslationBlock *tb;
299 int i, flags1, flags2;
300
301 for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
302 for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
303 flags1 = page_get_flags(tb->pc);
304 flags2 = page_get_flags(tb->pc + tb->size - 1);
305 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
306 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
307 tb->pc, tb->size, flags1, flags2);
308 }
309 }
310 }
311}
312
/* Consistency check on the circular jump list of 'tb': following the
   jmp_next links from jmp_first must lead back to 'tb' itself.  The
   low 2 bits of each stored pointer are a tag: 0/1 identify which jump
   slot of the referencing TB is used, 2 marks the list head. */
void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* walk the circular list until the head tag (2) is reached */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list: the head must be 'tb' itself */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}
332
bellardfd6ce8f2003-05-14 19:00:11 +0000333#endif
334
335/* invalidate one TB */
336static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
337 int next_offset)
338{
339 TranslationBlock *tb1;
340 for(;;) {
341 tb1 = *ptb;
342 if (tb1 == tb) {
343 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
344 break;
345 }
346 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
347 }
348}
349
/* Unlink 'tb' from a page's TB list.  Every page_next link carries a
   tag in its low 2 bits telling which of the TB's two pages the link
   belongs to, so each pointer must be untagged before use. */
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        /* low bits = page slot (0 or 1) of the referenced TB */
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            /* splice 'tb' out using the link of its own slot */
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
366
/* Remove jump slot 'n' of 'tb' from the circular list of incoming
   jumps kept by the TB it currently chains to.  List pointers are
   tagged in their low 2 bits: 0/1 = jump slot of the referencing TB,
   2 = the list head (the target TB's jmp_first). */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                /* reached the head: continue through jmp_first */
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
394
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB: the generated jump is patched to fall through to the
   TB's own epilogue code (tc_ptr + tb_next_offset[n]). */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
401
/* Invalidate one TB: remove it from the pc hash table, detach its two
   outgoing jumps, and redirect every TB that chained to it back to its
   own epilogue.  The TB's storage is not reclaimed here (that only
   happens in tb_flush). */
static inline void tb_invalidate(TranslationBlock *tb)
{
    unsigned int h, n1;
    TranslationBlock *tb1, *tb2, **ptb;

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    ptb = &tb_hash[h];
    for(;;) {
        tb1 = *ptb;
        /* NOTE: the TB is not necessarily linked in the hash. It
           indicates that it is not currently used */
        if (tb1 == NULL)
            return;
        if (tb1 == tb) {
            *ptb = tb1->hash_next;
            break;
        }
        ptb = &tb1->hash_next;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        /* low 2 bits of the link: jump slot of tb1, or 2 = list head */
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        /* patch tb1's jump 'n1' back to its own epilogue */
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}
443
/* Invalidate 'tb' and unregister it from the physical page tables.
   'page_addr' is the page whose TB list the caller is currently
   walking (so it must not be modified here), or -1 when the caller
   holds no list. */
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    PageDesc *p;
    unsigned int h;
    target_ulong phys_pc;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list (skipping the page the caller
       is iterating over) */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    /* page_addr[1] == -1 means the TB spans a single page */
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidate(tb);
}
470
/* Set 'len' consecutive bits starting at bit index 'start' in the bit
   array 'tab' (bit i lives in byte i/8, at bit position i%8). */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end = start + len;
    uint8_t *byte = tab + (start >> 3);
    int head_mask = 0xff << (start & 7);

    if ((start & ~7) == (end & ~7)) {
        /* the whole range lies within a single byte */
        if (start < end)
            *byte |= head_mask & ~(0xff << (end & 7));
        return;
    }
    /* partial leading byte */
    *byte++ |= head_mask;
    /* full bytes in the middle */
    start = (start + 8) & ~7;
    while (start < (end & ~7)) {
        *byte++ = 0xff;
        start += 8;
    }
    /* partial trailing byte */
    if (start < end)
        *byte |= ~(0xff << (end & 7));
}
497
/* Build the SMC code bitmap of page 'p': one bit per byte of the page,
   set over every range occupied by a TB.  Silently does nothing on
   allocation failure (callers then fall back to the slow path). */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        /* low bits of the link = which of the TB's pages this is */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            /* second page of a spanning TB: it covers [0, end) */
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
528
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page */
static void tb_invalidate_phys_page_range(target_ulong start, target_ulong end)
{
    int n;
    PageDesc *p;
    TranslationBlock *tb, *tb_next;
    target_ulong tb_start, tb_end;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    /* after SMC_BITMAP_USE_THRESHOLD writes to this page, start
       maintaining the bitmap so later writes can be filtered fast */
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        /* low bits of the link = which of the TB's pages this is */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* fetch the next link before the TB is unlinked below */
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code_phys(cpu_single_env, start);
    }
#endif
}
578
579/* len must be <= 8 and start must be a multiple of len */
580static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
581{
582 PageDesc *p;
583 int offset, b;
584
585 p = page_find(start >> TARGET_PAGE_BITS);
586 if (!p)
587 return;
588 if (p->code_bitmap) {
589 offset = start & ~TARGET_PAGE_MASK;
590 b = p->code_bitmap[offset >> 3] >> (offset & 7);
591 if (b & ((1 << len) - 1))
592 goto do_invalidate;
593 } else {
594 do_invalidate:
595 tb_invalidate_phys_page_range(start, start + len);
596 }
597}
598
/* invalidate all TBs which intersect with the target virtual page
   starting in range [start;end[. This function is usually used when
   the target processor flushes its I-cache. NOTE: start and end must
   refer to the same physical page */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    int n;
    PageDesc *p;
    TranslationBlock *tb, *tb_next;
    target_ulong pc;
    target_ulong phys_start;

#if !defined(CONFIG_USER_ONLY)
    {
        /* translate the virtual address through the cached code page
           mappings; an absent or stale entry means no code was ever
           translated there, so there is nothing to invalidate */
        VirtPageDesc *vp;
        vp = virt_page_find(start >> TARGET_PAGE_BITS);
        if (!vp)
            return;
        if (vp->valid_tag != virt_valid_tag)
            return;
        phys_start = vp->phys_addr + (start & ~TARGET_PAGE_MASK);
    }
#else
    /* user mode: virtual and physical addresses coincide */
    phys_start = start;
#endif
    p = page_find(phys_start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        /* low bits of the link = which of the TB's pages this is */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* fetch the next link before the TB is unlinked below */
        tb_next = tb->page_next[n];
        pc = tb->pc;
        if (!((pc + tb->size) <= start || pc >= end)) {
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb)
        tlb_unprotect_code(cpu_single_env, start);
#endif
}
646
647#if !defined(CONFIG_SOFTMMU)
/* Invalidate every TB intersecting the physical page containing
   'addr' and empty the page's TB list. */
static void tb_invalidate_phys_page(target_ulong addr)
{
    int n;
    PageDesc *p;
    TranslationBlock *tb;

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    while (tb != NULL) {
        /* low bits of the link = which of the TB's pages this is */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* pass 'addr' so this page's list is left untouched while we
           are walking it */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
}
bellard9fa3e852004-01-04 18:06:42 +0000667#endif
bellardfd6ce8f2003-05-14 19:00:11 +0000668
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, unsigned int page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find(page_addr >> TARGET_PAGE_BITS);
    /* push the TB on the page's list, tagging the link with the page
       slot number 'n' in its low bits */
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        unsigned long host_start, host_end, addr;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        host_start = page_addr & host_page_mask;
        host_end = host_start + host_page_size;
        /* the host page may cover several target pages: keep the union
           of their protections, minus write permission */
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        target_ulong virt_addr;

        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
        tlb_protect_code(cpu_single_env, virt_addr);
    }
#endif
}
715
716/* Allocate a new translation block. Flush the translation buffer if
717 too many translation blocks or too much generated code. */
bellardd4e81642003-05-25 16:46:15 +0000718TranslationBlock *tb_alloc(unsigned long pc)
bellardfd6ce8f2003-05-14 19:00:11 +0000719{
720 TranslationBlock *tb;
bellardfd6ce8f2003-05-14 19:00:11 +0000721
722 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
723 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
bellardd4e81642003-05-25 16:46:15 +0000724 return NULL;
bellardfd6ce8f2003-05-14 19:00:11 +0000725 tb = &tbs[nb_tbs++];
726 tb->pc = pc;
bellardd4e81642003-05-25 16:46:15 +0000727 return tb;
728}
729
bellard9fa3e852004-01-04 18:06:42 +0000730/* add a new TB and link it to the physical page tables. phys_page2 is
731 (-1) to indicate that only one page contains the TB. */
732void tb_link_phys(TranslationBlock *tb,
733 target_ulong phys_pc, target_ulong phys_page2)
bellardd4e81642003-05-25 16:46:15 +0000734{
bellard9fa3e852004-01-04 18:06:42 +0000735 unsigned int h;
736 TranslationBlock **ptb;
737
738 /* add in the physical hash table */
739 h = tb_phys_hash_func(phys_pc);
740 ptb = &tb_phys_hash[h];
741 tb->phys_hash_next = *ptb;
742 *ptb = tb;
bellardfd6ce8f2003-05-14 19:00:11 +0000743
744 /* add in the page list */
bellard9fa3e852004-01-04 18:06:42 +0000745 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
746 if (phys_page2 != -1)
747 tb_alloc_page(tb, 1, phys_page2);
748 else
749 tb->page_addr[1] = -1;
bellard61382a52003-10-27 21:22:23 +0000750#ifdef DEBUG_TB_CHECK
751 tb_page_check();
752#endif
bellard9fa3e852004-01-04 18:06:42 +0000753}
754
/* link the tb with the other TBs */
void tb_link(TranslationBlock *tb)
{
#if !defined(CONFIG_USER_ONLY)
    {
        VirtPageDesc *vp;
        target_ulong addr;

        /* save the code memory mappings (needed to invalidate the code) */
        addr = tb->pc & TARGET_PAGE_MASK;
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
        /* a still-valid entry must agree with the TB's physical page */
        if (vp->valid_tag == virt_valid_tag &&
            vp->phys_addr != tb->page_addr[0]) {
            printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                   addr, tb->page_addr[0], vp->phys_addr);
        }
#endif
        vp->phys_addr = tb->page_addr[0];
        vp->valid_tag = virt_valid_tag;

        /* also record the mapping of the second page, if the TB spans
           two pages (page_addr[1] == -1 otherwise) */
        if (tb->page_addr[1] != -1) {
            addr += TARGET_PAGE_SIZE;
            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
            if (vp->valid_tag == virt_valid_tag &&
                vp->phys_addr != tb->page_addr[1]) {
                printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                       addr, tb->page_addr[1], vp->phys_addr);
            }
#endif
            vp->phys_addr = tb->page_addr[1];
            vp->valid_tag = virt_valid_tag;
        }
    }
#endif

    /* mark the TB as the head of an empty circular jump list (tag 2) */
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
}
802
bellarda513fe12003-05-27 23:29:48 +0000803/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
804 tb[1].tc_ptr. Return NULL if not found */
805TranslationBlock *tb_find_pc(unsigned long tc_ptr)
806{
807 int m_min, m_max, m;
808 unsigned long v;
809 TranslationBlock *tb;
810
811 if (nb_tbs <= 0)
812 return NULL;
813 if (tc_ptr < (unsigned long)code_gen_buffer ||
814 tc_ptr >= (unsigned long)code_gen_ptr)
815 return NULL;
816 /* binary search (cf Knuth) */
817 m_min = 0;
818 m_max = nb_tbs - 1;
819 while (m_min <= m_max) {
820 m = (m_min + m_max) >> 1;
821 tb = &tbs[m];
822 v = (unsigned long)tb->tc_ptr;
823 if (v == tc_ptr)
824 return tb;
825 else if (tc_ptr < v) {
826 m_max = m - 1;
827 } else {
828 m_min = m + 1;
829 }
830 }
831 return &tbs[m_max];
832}
bellard75012672003-06-21 13:11:07 +0000833
bellardea041c02003-06-25 16:16:50 +0000834static void tb_reset_jump_recursive(TranslationBlock *tb);
835
/* Break jump slot 'n' of 'tb': unregister it from the target TB's
   incoming-jump list, patch the generated code back to the epilogue,
   and recurse into the target so its own chains are broken too. */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list (tag 2 marks the target TB itself) */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
874
875static void tb_reset_jump_recursive(TranslationBlock *tb)
876{
877 tb_reset_jump_recursive2(tb, 0);
878 tb_reset_jump_recursive2(tb, 1);
879}
880
bellardc33a3462003-07-29 20:50:33 +0000881/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
882 breakpoint is reached */
bellard4c3a88a2003-07-26 12:06:08 +0000883int cpu_breakpoint_insert(CPUState *env, uint32_t pc)
884{
885#if defined(TARGET_I386)
886 int i;
887
888 for(i = 0; i < env->nb_breakpoints; i++) {
889 if (env->breakpoints[i] == pc)
890 return 0;
891 }
892
893 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
894 return -1;
895 env->breakpoints[env->nb_breakpoints++] = pc;
bellard9fa3e852004-01-04 18:06:42 +0000896 tb_invalidate_page_range(pc, pc + 1);
bellard4c3a88a2003-07-26 12:06:08 +0000897 return 0;
898#else
899 return -1;
900#endif
901}
902
903/* remove a breakpoint */
904int cpu_breakpoint_remove(CPUState *env, uint32_t pc)
905{
906#if defined(TARGET_I386)
907 int i;
908 for(i = 0; i < env->nb_breakpoints; i++) {
909 if (env->breakpoints[i] == pc)
910 goto found;
911 }
912 return -1;
913 found:
914 memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
915 (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
916 env->nb_breakpoints--;
bellard9fa3e852004-01-04 18:06:42 +0000917 tb_invalidate_page_range(pc, pc + 1);
bellard4c3a88a2003-07-26 12:06:08 +0000918 return 0;
919#else
920 return -1;
921#endif
922}
923
bellardc33a3462003-07-29 20:50:33 +0000924/* enable or disable single step mode. EXCP_DEBUG is returned by the
925 CPU loop after each instruction */
926void cpu_single_step(CPUState *env, int enabled)
927{
928#if defined(TARGET_I386)
929 if (env->singlestep_enabled != enabled) {
930 env->singlestep_enabled = enabled;
931 /* must flush all the translated code to avoid inconsistancies */
bellard9fa3e852004-01-04 18:06:42 +0000932 /* XXX: only flush what is necessary */
bellard01243112004-01-04 15:48:17 +0000933 tb_flush(env);
bellardc33a3462003-07-29 20:50:33 +0000934 }
935#endif
936}
937
/* enable or disable low levels log.  The log file is opened lazily
   the first time a non-zero log level is set. */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            /* static so the buffer outlives this call for the stream's
               whole lifetime */
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        /* line-buffered so log output appears promptly */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}
959
/* set the file name later opened by cpu_set_log(); the string is copied.
   NOTE(review): the strdup() result is unchecked and any previous name is
   not freed -- presumably acceptable for a once-per-run setter; confirm */
void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}
bellardc33a3462003-07-29 20:50:33 +0000964
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;  /* guards the TB unlink against concurrent entry */

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    /* testandset() returns non-zero if the lock was already held, in
       which case another caller is already doing the unlink */
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}
981
982
bellard75012672003-06-21 13:11:07 +0000983void cpu_abort(CPUState *env, const char *fmt, ...)
984{
985 va_list ap;
986
987 va_start(ap, fmt);
988 fprintf(stderr, "qemu: fatal: ");
989 vfprintf(stderr, fmt, ap);
990 fprintf(stderr, "\n");
991#ifdef TARGET_I386
992 cpu_x86_dump_state(env, stderr, X86_DUMP_FPU | X86_DUMP_CCOP);
993#endif
994 va_end(ap);
995 abort();
996}
997
bellard01243112004-01-04 15:48:17 +0000998#if !defined(CONFIG_USER_ONLY)
999
bellardee8b7022004-02-03 23:35:10 +00001000/* NOTE: if flush_global is true, also flush global entries (not
1001 implemented yet) */
1002void tlb_flush(CPUState *env, int flush_global)
bellard33417e72003-08-10 21:47:01 +00001003{
bellard33417e72003-08-10 21:47:01 +00001004 int i;
bellard01243112004-01-04 15:48:17 +00001005
bellard9fa3e852004-01-04 18:06:42 +00001006#if defined(DEBUG_TLB)
1007 printf("tlb_flush:\n");
1008#endif
bellard01243112004-01-04 15:48:17 +00001009 /* must reset current TB so that interrupts cannot modify the
1010 links while we are modifying them */
1011 env->current_tb = NULL;
1012
bellard33417e72003-08-10 21:47:01 +00001013 for(i = 0; i < CPU_TLB_SIZE; i++) {
1014 env->tlb_read[0][i].address = -1;
1015 env->tlb_write[0][i].address = -1;
1016 env->tlb_read[1][i].address = -1;
1017 env->tlb_write[1][i].address = -1;
1018 }
bellard9fa3e852004-01-04 18:06:42 +00001019
1020 virt_page_flush();
1021 for(i = 0;i < CODE_GEN_HASH_SIZE; i++)
1022 tb_hash[i] = NULL;
1023
1024#if !defined(CONFIG_SOFTMMU)
1025 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1026#endif
bellard33417e72003-08-10 21:47:01 +00001027}
1028
bellard61382a52003-10-27 21:22:23 +00001029static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, uint32_t addr)
1030{
1031 if (addr == (tlb_entry->address &
1032 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
1033 tlb_entry->address = -1;
1034}
1035
/* flush all TLB entries and translated blocks for the virtual page 'addr' */
void tlb_flush_page(CPUState *env, uint32_t addr)
{
    int i, n;
    VirtPageDesc *vp;
    PageDesc *p;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: 0x%08x\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    /* invalidate the matching slot in all four TLBs (read/write x 2 modes) */
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    /* remove from the virtual pc hash table all the TB at this
       virtual address */

    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (vp && vp->valid_tag == virt_valid_tag) {
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
        if (p) {
            /* we remove all the links to the TBs in this virtual page */
            tb = p->first_tb;
            while (tb != NULL) {
                /* the low 2 bits of the list pointer encode which of the
                   TB's (up to two) pages this list link belongs to */
                n = (long)tb & 3;
                tb = (TranslationBlock *)((long)tb & ~3);
                /* invalidate the TB if either its first or last byte
                   lies in this page */
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
                    tb_invalidate(tb);
                }
                tb = tb->page_next[n];
            }
        }
        vp->valid_tag = 0;
    }

#if !defined(CONFIG_SOFTMMU)
    /* drop the host mapping so the next access faults and remaps */
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
}
1084
/* redirect one write-TLB entry mapping 'addr' to the IO_MEM_CODE
   handlers so writes to translated code are trapped; no-op if the
   entry is already a code or ROM entry */
static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, uint32_t addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
        tlb_entry->address |= IO_MEM_CODE;
        /* the code write handlers take a phys_ram_base-relative offset,
           so strip the base from the addend */
        tlb_entry->addend -= (unsigned long)phys_ram_base;
    }
}
1095
bellard9fa3e852004-01-04 18:06:42 +00001096/* update the TLBs so that writes to code in the virtual page 'addr'
1097 can be detected */
1098static void tlb_protect_code(CPUState *env, uint32_t addr)
bellard61382a52003-10-27 21:22:23 +00001099{
bellard61382a52003-10-27 21:22:23 +00001100 int i;
1101
1102 addr &= TARGET_PAGE_MASK;
1103 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
bellard9fa3e852004-01-04 18:06:42 +00001104 tlb_protect_code1(&env->tlb_write[0][i], addr);
1105 tlb_protect_code1(&env->tlb_write[1][i], addr);
1106#if !defined(CONFIG_SOFTMMU)
1107 /* NOTE: as we generated the code for this page, it is already at
1108 least readable */
1109 if (addr < MMAP_AREA_END)
1110 mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
1111#endif
1112}
1113
1114static inline void tlb_unprotect_code1(CPUTLBEntry *tlb_entry, uint32_t addr)
1115{
1116 if (addr == (tlb_entry->address &
1117 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
1118 (tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE) {
1119 tlb_entry->address &= TARGET_PAGE_MASK;
1120 tlb_entry->addend += (unsigned long)phys_ram_base;
1121 }
1122}
1123
1124/* update the TLB so that writes in virtual page 'addr' are no longer
1125 tested self modifying code */
1126static void tlb_unprotect_code(CPUState *env, uint32_t addr)
1127{
1128 int i;
1129
1130 addr &= TARGET_PAGE_MASK;
1131 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1132 tlb_unprotect_code1(&env->tlb_write[0][i], addr);
1133 tlb_unprotect_code1(&env->tlb_write[1][i], addr);
1134}
1135
/* like tlb_unprotect_code1, but matches on the physical address: for an
   IO_MEM_CODE entry the addend is phys_ram_base-relative, so
   (virtual page + addend) reconstructs the physical page */
static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
                                       uint32_t phys_addr)
{
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
        ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
        tlb_entry->address &= TARGET_PAGE_MASK;
        tlb_entry->addend += (unsigned long)phys_ram_base;
    }
}
1145
1146/* update the TLB so that writes in physical page 'phys_addr' are no longer
1147 tested self modifying code */
1148/* XXX: find a way to improve it */
1149static void tlb_unprotect_code_phys(CPUState *env, uint32_t phys_addr)
1150{
1151 int i;
1152
1153 phys_addr &= TARGET_PAGE_MASK;
1154 for(i = 0; i < CPU_TLB_SIZE; i++)
1155 tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
1156 for(i = 0; i < CPU_TLB_SIZE; i++)
1157 tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
1158}
1159
/* add a new TLB entry. At most one entry for a given virtual
   address is permitted. Returns 0 on success, 2 if the access must be
   handled by the soft MMU (user-mmu IO case). */
int tlb_set_page(CPUState *env, uint32_t vaddr, uint32_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    PageDesc *p;
    target_ulong pd;
    TranslationBlock *first_tb;
    unsigned int index;
    target_ulong address, addend;
    int ret;

    /* fetch the physical page descriptor: its kind (RAM/ROM/IO) and
       whether translated code exists in it */
    p = page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
        first_tb = NULL;
    } else {
        pd = p->phys_offset;
        first_tb = p->first_tb;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
           vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        /* soft-MMU path: fill the software TLB entry */
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
        /* store the addend relative to vaddr: host addr = guest addr + addend */
        addend -= vaddr;
        if (prot & PROT_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PROT_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend - (unsigned long)phys_ram_base;
            } else if (first_tb) {
                /* if code is present, we use a specific memory
                   handler. It works only for physical memory access */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
                env->tlb_write[is_user][index].addend = addend - (unsigned long)phys_ram_base;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        /* user-mmu path: map the guest page directly with mmap() */
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;
            if (prot & PROT_WRITE) {
                if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM || first_tb) {
                    /* ROM: we do as if code was inside */
                    /* if code is present, we only map as read only and save the
                       original mapping */
                    VirtPageDesc *vp;

                    vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
                    vp->phys_addr = pd;
                    vp->prot = prot;
                    vp->valid_tag = virt_valid_tag;
                    prot &= ~PAGE_WRITE;
                }
            }
            map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                            MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
            if (map_addr == MAP_FAILED) {
                cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
                          paddr, vaddr);
            }
        }
    }
#endif
    return ret;
}
1262
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;
    /* only pages recorded by tlb_set_page() as write-protected code
       pages can be recovered here */
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    /* drop the stale translated code, then restore the saved protection */
    tb_invalidate_phys_page(vp->phys_addr);
    mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot);
    return 1;
#else
    return 0;
#endif
}
1294
bellard01243112004-01-04 15:48:17 +00001295#else
1296
/* user-mode emulation has no software TLB: nothing to flush */
void tlb_flush(CPUState *env, int flush_global)
{
}
1300
/* user-mode emulation has no software TLB: nothing to flush */
void tlb_flush_page(CPUState *env, uint32_t addr)
{
}
1304
/* user-mode emulation has no software TLB: nothing to flush */
void tlb_flush_page_write(CPUState *env, uint32_t addr)
{
}
1308
/* user-mode stub: no TLB entry to install; always succeeds */
int tlb_set_page(CPUState *env, uint32_t vaddr, uint32_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}
bellard33417e72003-08-10 21:47:01 +00001314
/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    /* walk the two-level page table, coalescing runs of pages with
       identical protection into one output line each */
    start = -1;
    end = -1;
    prot = 0;
    /* iterate one L1 slot past the end (p == NULL) so that the final
       run is always flushed */
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                /* protection changed: emit the run that just ended */
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                /* a zero-protection run is a gap: restart tracking */
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
1357
bellard9fa3e852004-01-04 18:06:42 +00001358int page_get_flags(unsigned long address)
bellard33417e72003-08-10 21:47:01 +00001359{
bellard9fa3e852004-01-04 18:06:42 +00001360 PageDesc *p;
1361
1362 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00001363 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00001364 return 0;
1365 return p->flags;
bellard33417e72003-08-10 21:47:01 +00001366}
1367
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    /* remember that the page was originally writable so it can be
       restored after write-protection for code detection */
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
1394
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    /* one host page may span several target pages when the host page
       size exceeds TARGET_PAGE_SIZE: operate on the whole host page */
    host_start = address & host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + host_page_size;
    p = p1;
    prot = 0;
    /* union of the flags of every target page inside the host page */
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    /* the fault was not caused by our write protection */
    return 0;
}
1434
1435/* call this function when system calls directly modify a memory area */
1436void page_unprotect_range(uint8_t *data, unsigned long data_size)
1437{
1438 unsigned long start, end, addr;
1439
1440 start = (unsigned long)data;
1441 end = start + data_size;
1442 start &= TARGET_PAGE_MASK;
1443 end = TARGET_PAGE_ALIGN(end);
1444 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1445 page_unprotect(addr);
1446 }
1447}
1448
1449#endif /* defined(CONFIG_USER_ONLY) */
1450
bellard33417e72003-08-10 21:47:01 +00001451/* register physical memory. 'size' must be a multiple of the target
1452 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1453 io memory page */
1454void cpu_register_physical_memory(unsigned long start_addr, unsigned long size,
1455 long phys_offset)
1456{
1457 unsigned long addr, end_addr;
bellard9fa3e852004-01-04 18:06:42 +00001458 PageDesc *p;
bellard33417e72003-08-10 21:47:01 +00001459
1460 end_addr = start_addr + size;
1461 for(addr = start_addr; addr < end_addr; addr += TARGET_PAGE_SIZE) {
bellard9fa3e852004-01-04 18:06:42 +00001462 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1463 p->phys_offset = phys_offset;
1464 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
bellard33417e72003-08-10 21:47:01 +00001465 phys_offset += TARGET_PAGE_SIZE;
1466 }
1467}
1468
/* reads from unassigned memory return 0 */
static uint32_t unassigned_mem_readb(uint32_t addr)
{
    return 0;
}
1473
/* writes to unassigned memory are ignored */
static void unassigned_mem_writeb(uint32_t addr, uint32_t val)
{
}
1477
/* dispatch tables for unassigned memory; index 0 = byte, 1 = word,
   2 = dword -- the same byte handler serves all three widths since
   the result is constant */
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};
1489
/* self modifying code support in soft mmu mode : writing to a page
   containing code comes to these functions */

static void code_mem_writeb(uint32_t addr, uint32_t val)
{
#if !defined(CONFIG_USER_ONLY)
    /* invalidate any TB overlapping the 1 byte about to be written;
       'addr' here is an offset relative to phys_ram_base */
    tb_invalidate_phys_page_fast(addr, 1);
#endif
    stb_raw(phys_ram_base + addr, val);
}
1500
/* 16-bit variant of code_mem_writeb */
static void code_mem_writew(uint32_t addr, uint32_t val)
{
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(addr, 2);
#endif
    stw_raw(phys_ram_base + addr, val);
}
1508
/* 32-bit variant of code_mem_writeb */
static void code_mem_writel(uint32_t addr, uint32_t val)
{
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(addr, 4);
#endif
    stl_raw(phys_ram_base + addr, val);
}
1516
/* dispatch tables for code pages: reads go through the normal RAM
   path so the read handlers are never invoked */
static CPUReadMemoryFunc *code_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *code_mem_write[3] = {
    code_mem_writeb,
    code_mem_writew,
    code_mem_writel,
};
bellard33417e72003-08-10 21:47:01 +00001528
/* register the built-in io memory handlers.
   NOTE(review): io_mem_nb starts at 4, reserving slots 0-3; slot 0 is
   presumably plain RAM (never dispatched) -- confirm against cpu.h */
static void io_mem_init(void)
{
    /* ROM reads are plain RAM reads (handlers unused), writes ignored */
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write);
    cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write);
    io_mem_nb = 4;
}
1536
1537/* mem_read and mem_write are arrays of functions containing the
1538 function to access byte (index 0), word (index 1) and dword (index
1539 2). All functions must be supplied. If io_index is non zero, the
1540 corresponding io zone is modified. If it is zero, a new io zone is
1541 allocated. The return value can be used with
1542 cpu_register_physical_memory(). (-1) is returned if error. */
1543int cpu_register_io_memory(int io_index,
1544 CPUReadMemoryFunc **mem_read,
1545 CPUWriteMemoryFunc **mem_write)
1546{
1547 int i;
1548
1549 if (io_index <= 0) {
1550 if (io_index >= IO_MEM_NB_ENTRIES)
1551 return -1;
1552 io_index = io_mem_nb++;
1553 } else {
1554 if (io_index >= IO_MEM_NB_ENTRIES)
1555 return -1;
1556 }
1557
1558 for(i = 0;i < 3; i++) {
1559 io_mem_read[io_index][i] = mem_read[i];
1560 io_mem_write[io_index][i] = mem_write[i];
1561 }
1562 return io_index << IO_MEM_SHIFT;
1563}
bellard61382a52003-10-27 21:22:23 +00001564
bellard13eb76e2004-01-24 15:23:36 +00001565/* physical memory access (slow version, mainly for debug) */
1566#if defined(CONFIG_USER_ONLY)
1567void cpu_physical_memory_rw(CPUState *env, uint8_t *buf, target_ulong addr,
1568 int len, int is_write)
1569{
1570 int l, flags;
1571 target_ulong page;
1572
1573 while (len > 0) {
1574 page = addr & TARGET_PAGE_MASK;
1575 l = (page + TARGET_PAGE_SIZE) - addr;
1576 if (l > len)
1577 l = len;
1578 flags = page_get_flags(page);
1579 if (!(flags & PAGE_VALID))
1580 return;
1581 if (is_write) {
1582 if (!(flags & PAGE_WRITE))
1583 return;
1584 memcpy((uint8_t *)addr, buf, len);
1585 } else {
1586 if (!(flags & PAGE_READ))
1587 return;
1588 memcpy(buf, (uint8_t *)addr, len);
1589 }
1590 len -= l;
1591 buf += l;
1592 addr += l;
1593 }
1594}
1595#else
/* physical memory access, system (softmmu) version: dispatch each
   per-page chunk either to the registered IO handlers or directly to
   host RAM via phys_ram_base */
void cpu_physical_memory_rw(CPUState *env, uint8_t *buf, target_ulong addr,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_ulong page, pd;
    PageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* number of bytes remaining in the current page */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            /* any non-RAM page (including ROM/code) goes through the
               registered write handlers */
            if ((pd & ~TARGET_PAGE_MASK) != 0) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_raw(buf);
                    io_mem_write[io_index][2](addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_raw(buf);
                    io_mem_write[io_index][1](addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_raw(buf);
                    io_mem_write[io_index][0](addr, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(ptr, buf, l);
            }
        } else {
            /* ROM and code pages read like plain RAM */
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](addr);
                    stl_raw(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](addr);
                    stw_raw(buf, val);
                    l = 2;
                } else {
                    /* 8 bit access */
                    val = io_mem_read[io_index][0](addr);
                    stb_raw(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
1675#endif
1676
1677/* virtual memory access for debug */
1678int cpu_memory_rw_debug(CPUState *env,
1679 uint8_t *buf, target_ulong addr, int len, int is_write)
1680{
1681 int l;
1682 target_ulong page, phys_addr;
1683
1684 while (len > 0) {
1685 page = addr & TARGET_PAGE_MASK;
1686 phys_addr = cpu_get_phys_page_debug(env, page);
1687 /* if no physical page mapped, return an error */
1688 if (phys_addr == -1)
1689 return -1;
1690 l = (page + TARGET_PAGE_SIZE) - addr;
1691 if (l > len)
1692 l = len;
1693 cpu_physical_memory_rw(env, buf,
1694 phys_addr + (addr & ~TARGET_PAGE_MASK), l,
1695 is_write);
1696 len -= l;
1697 buf += l;
1698 addr += l;
1699 }
1700 return 0;
1701}
1702
bellard61382a52003-10-27 21:22:23 +00001703#if !defined(CONFIG_USER_ONLY)
1704
1705#define MMUSUFFIX _cmmu
1706#define GETPC() NULL
1707#define env cpu_single_env
1708
1709#define SHIFT 0
1710#include "softmmu_template.h"
1711
1712#define SHIFT 1
1713#include "softmmu_template.h"
1714
1715#define SHIFT 2
1716#include "softmmu_template.h"
1717
1718#define SHIFT 3
1719#include "softmmu_template.h"
1720
1721#undef env
1722
1723#endif