/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stddef.h>
#include <unistd.h>
#include <sys/mman.h>

#include "cpu-i386.h"  /* TranslationBlock, TARGET_PAGE_*, PAGE_*, spinlock_t
                          (project-internal header; exact name assumed) */

//#define DEBUG_TB_INVALIDATE

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define CODE_GEN_MAX_BLOCKS (CODE_GEN_BUFFER_SIZE / 64)
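
/* Translated blocks and generated code live in fixed-size static arrays;
   when either the TB table or the code buffer approaches its limit, the
   whole translation cache is flushed (see tb_flush()/tb_alloc() below). */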
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
uint8_t *code_gen_ptr;

/* XXX: pack the flags in the low bits of the pointer ? */
typedef struct PageDesc {
    unsigned long flags;
    TranslationBlock *first_tb;
} PageDesc;

#define L2_BITS 10   /* second-level table index width; value assumed here */
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
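
/* The guest address space is described by a two-level table: the top
   L1_BITS of a page index select an entry of l1_map, which points to a
   second-level array of L2_SIZE PageDesc entries. Second-level arrays
   are allocated lazily by page_find_alloc(). */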

static void tb_invalidate_page(unsigned long address);

unsigned long real_host_page_size;
unsigned long host_page_bits;
unsigned long host_page_size;
unsigned long host_page_mask;

static PageDesc *l1_map[L1_SIZE];
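
/* The init routine below (page_init) derives host_page_bits (log2 of the
   host page size) and host_page_mask from getpagesize(); write protection
   is always applied at host-page granularity, which may cover several
   target pages. */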
void page_init(void)
{
    /* NOTE: we can always suppose that host_page_size >=
       TARGET_PAGE_SIZE */
    real_host_page_size = getpagesize();
    if (host_page_size == 0)
        host_page_size = real_host_page_size;
    if (host_page_size < TARGET_PAGE_SIZE)
        host_page_size = TARGET_PAGE_SIZE;
    host_page_bits = 0;
    while ((1 << host_page_bits) < host_page_size)
        host_page_bits++;
    host_page_mask = ~(host_page_size - 1);
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    /* coalesce consecutive pages with identical protection into one line;
       the extra iteration at i == L1_SIZE flushes the final range */
    start = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        p = (i < L1_SIZE) ? l1_map[i] : NULL;
        for(j = 0; j < L2_SIZE; j++) {
            prot1 = p ? p[j].flags : 0;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                start = (prot1 != 0) ? end : -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
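
/* look up the PageDesc for a target page index, allocating the
   second-level table on demand */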
static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}
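
/* same lookup without allocation: returns NULL if the second-level
   table does not exist yet */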
static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return NULL;
    return p + (index & (L2_SIZE - 1));
}

int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_page(addr);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
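
/* Usage sketch (hypothetical caller, not part of this file): the
   user-mode loader would typically register a freshly mapped code
   segment with something like
       page_set_flags(seg_start, seg_start + seg_len,
                      PAGE_READ | PAGE_WRITE | PAGE_EXEC);
   so that later writes to it are caught and the translated code is
   invalidated. */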

void cpu_x86_tblocks_init(void)
{
    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
    }
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++)
                p[j].first_tb = NULL;
        }
    }
}

/* flush all the translation blocks */
void tb_flush(void)
{
    int i;

    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
    nb_tbs = 0;
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;
    page_flush_tb();
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point */
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif
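
/* tb_remove() below unlinks a TB from a singly linked list. The list is
   generic: next_offset is the byte offset (obtained with offsetof) of the
   relevant 'next' pointer inside TranslationBlock, so the same helper
   serves both the hash chain and the per-page chains. */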

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;

    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
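
/* A TB may straddle two consecutive target pages; it is linked into each
   page's list through page_next[page_index & 1], so the two memberships
   use different link slots. When a whole page list is being discarded,
   tb_invalidate() is called with the opposite parity: only the hash chain
   and the membership in the neighbouring page (the other parity) need
   explicit unlinking, and the invalidated page's own list is cleared in
   one go by its caller. */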
static inline void tb_invalidate(TranslationBlock *tb, int parity)
{
    PageDesc *p;
    unsigned int page_index1, page_index2;
    unsigned int h;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    tb_remove(&tb_hash[h], tb,
              offsetof(TranslationBlock, hash_next));
    /* remove the TB from the page list */
    page_index1 = tb->pc >> TARGET_PAGE_BITS;
    if ((page_index1 & 1) == parity) {
        p = page_find(page_index1);
        tb_remove(&p->first_tb, tb,
                  offsetof(TranslationBlock, page_next[page_index1 & 1]));
    }
    page_index2 = (tb->pc + tb->size - 1) >> TARGET_PAGE_BITS;
    if ((page_index2 & 1) == parity) {
        p = page_find(page_index2);
        tb_remove(&p->first_tb, tb,
                  offsetof(TranslationBlock, page_next[page_index2 & 1]));
    }
}

/* invalidate all TBs which intersect with the target page starting at addr */
static void tb_invalidate_page(unsigned long address)
{
    TranslationBlock *tb_next, *tb;
    unsigned int page_index;
    int parity1, parity2;
    PageDesc *p;
#ifdef DEBUG_TB_INVALIDATE
    printf("tb_invalidate_page: %lx\n", address);
#endif

    page_index = address >> TARGET_PAGE_BITS;
    p = page_find(page_index);
    if (!p)
        return;
    tb = p->first_tb;
    parity1 = page_index & 1;
    parity2 = parity1 ^ 1;
    while (tb != NULL) {
        tb_next = tb->page_next[parity1];
        tb_invalidate(tb, parity2);
        tb = tb_next;
    }
    p->first_tb = NULL;
}

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb, unsigned int page_index)
{
    PageDesc *p;
    unsigned long host_start, host_end, addr, page_addr;
    int prot;

    p = page_find_alloc(page_index);
    tb->page_next[page_index & 1] = p->first_tb;
    p->first_tb = tb;
    if (p->flags & PAGE_WRITE) {
        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr = (page_index << TARGET_PAGE_BITS);
        host_start = page_addr & host_page_mask;
        host_end = host_start + host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
#ifdef DEBUG_TB_CHECK
        tb_page_check();
#endif
    }
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(unsigned long pc,
                           unsigned long size)
{
    TranslationBlock *tb;
    unsigned int page_index1, page_index2;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        tb_flush();
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->size = size;

    /* add in the page list */
    page_index1 = pc >> TARGET_PAGE_BITS;
    tb_alloc_page(tb, page_index1);
    page_index2 = (pc + size - 1) >> TARGET_PAGE_BITS;
    if (page_index2 != page_index1) {
        tb_alloc_page(tb, page_index2);
    }
    return tb;
}
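
/* Usage sketch (assumed caller behaviour, outside this file): the
   translator looks the pc up in tb_hash and on a miss does roughly
       tb = tb_alloc(pc, size);
       generate host code at code_gen_ptr for this block;
       code_gen_ptr += size of the generated code;
       link tb into tb_hash[tb_hash_func(pc)];
   tb_alloc() may flush the whole cache first, so any previously cached
   TB pointers must be considered invalid after calling it. */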

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address)
{
    unsigned int page_index, prot;
    PageDesc *p;
    unsigned long host_start, host_end, addr;

    page_index = address >> TARGET_PAGE_BITS;
    p = page_find(page_index);
    if (!p)
        return 0;
    if ((p->flags & (PAGE_WRITE_ORG | PAGE_WRITE)) == PAGE_WRITE_ORG) {
        /* if the page was really writable, then we change its
           protection back to writable */
        host_start = address & host_page_mask;
        host_end = host_start + host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, host_page_size,
                 (prot & PAGE_BITS) | PAGE_WRITE);
        p->flags |= PAGE_WRITE;
        /* and since the content will be modified, we must invalidate
           the corresponding translated code. */
        tb_invalidate_page(address);
#ifdef DEBUG_TB_CHECK
        tb_invalidate_check(address);
#endif
        return 1;
    } else {
        return 0;
    }
}
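
/* Usage sketch (assumed handler wiring, outside this file): a host
   SIGSEGV handler in the user-mode emulator would first try
       if (page_unprotect((unsigned long)fault_address))
           return;
   i.e. a write to a write-protected code page is handled here and the
   faulting instruction can simply be restarted; otherwise the fault is
   treated as a real guest error. */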

/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr);
    }
}