Revert "Add predictability in CType initialization."
/* lib/bcheck.c */
/*
 *  Tiny C Memory and bounds checker
 *
 *  Copyright (c) 2002 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#if !defined(__FreeBSD__) && !defined(__FreeBSD_kernel__) \
    && !defined(__DragonFly__) && !defined(__OpenBSD__)
#include <malloc.h>
#endif
#if !defined(_WIN32)
#include <unistd.h>
#endif

//#define BOUND_DEBUG

/* define so that the bound array is static (faster, but uses memory even
   if bound checking is not used) */
//#define BOUND_STATIC

/* use malloc hooks. Currently the code is not reliable without them */
#define CONFIG_TCC_MALLOC_HOOKS
#define HAVE_MEMALIGN

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__dietlibc__) \
    || defined(__UCLIBC__) || defined(__OpenBSD__) || defined(_WIN32)
#warning Bound checking does not support malloc (etc.) in this environment.
#undef CONFIG_TCC_MALLOC_HOOKS
#undef HAVE_MEMALIGN
#endif

#define BOUND_T1_BITS 13
#define BOUND_T2_BITS 11
#define BOUND_T3_BITS (32 - BOUND_T1_BITS - BOUND_T2_BITS)

#define BOUND_T1_SIZE (1 << BOUND_T1_BITS)
#define BOUND_T2_SIZE (1 << BOUND_T2_BITS)
#define BOUND_T3_SIZE (1 << BOUND_T3_BITS)
#define BOUND_E_BITS  4

#define BOUND_T23_BITS (BOUND_T2_BITS + BOUND_T3_BITS)
#define BOUND_T23_SIZE (1 << BOUND_T23_BITS)

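/* Sketch of how the constants above are used by the lookup code below,
   assuming the default values (13 + 11 + 8 = 32 bits) on a 32-bit
   target: the top BOUND_T1_BITS bits of an address index the
   __bound_t1 page table, the next BOUND_T2_BITS bits select a
   BoundEntry inside a T2 page, and the low BOUND_T3_BITS bits fall
   inside the 256-byte range covered by one entry. BOUND_E_BITS is 4
   because sizeof(BoundEntry) is 16 on such a target, so
   (addr >> (BOUND_T3_BITS - BOUND_E_BITS)) masked with
   ((BOUND_T2_SIZE - 1) << BOUND_E_BITS) directly yields the byte
   offset of the entry inside its page. */
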
/* this pointer is generated when bound check is incorrect */
#define INVALID_POINTER ((void *)(-2))
/* size of an empty region */
#define EMPTY_SIZE        0xffffffff
/* size of an invalid region */
#define INVALID_SIZE      0

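/* The way these two sizes seem to be used by the lookup code: an
   "empty" entry has start == 0 and size == EMPTY_SIZE, so the check
   (addr - start <= size) accepts any 32-bit address and pointers in
   unregistered memory pass through unchecked; an "invalid" entry has
   size == INVALID_SIZE (0), so virtually any access through such a
   pointer fails the check and INVALID_POINTER is returned. */
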
typedef struct BoundEntry {
    unsigned long start;
    unsigned long size;
    struct BoundEntry *next;
    unsigned long is_invalid; /* true if pointers outside region are invalid */
} BoundEntry;

/* external interface */
void __bound_init(void);
void __bound_new_region(void *p, unsigned long size);
int __bound_delete_region(void *p);

#define FASTCALL __attribute__((regparm(3)))

void *__bound_malloc(size_t size, const void *caller);
void *__bound_memalign(size_t size, size_t align, const void *caller);
void __bound_free(void *ptr, const void *caller);
void *__bound_realloc(void *ptr, size_t size, const void *caller);
static void *libc_malloc(size_t size);
static void libc_free(void *ptr);
static void install_malloc_hooks(void);
static void restore_malloc_hooks(void);

#ifdef CONFIG_TCC_MALLOC_HOOKS
static void *saved_malloc_hook;
static void *saved_free_hook;
static void *saved_realloc_hook;
static void *saved_memalign_hook;
#endif

/* TCC definitions */
extern char __bounds_start; /* start of static bounds table */
/* error message, just for TCC */
const char *__bound_error_msg;

/* runtime error output */
extern void rt_error(unsigned long pc, const char *fmt, ...);

#ifdef BOUND_STATIC
static BoundEntry *__bound_t1[BOUND_T1_SIZE]; /* page table */
#else
static BoundEntry **__bound_t1; /* page table */
#endif
static BoundEntry *__bound_empty_t2;   /* empty page, for unused pages */
static BoundEntry *__bound_invalid_t2; /* invalid page, for invalid pointers */

static BoundEntry *__bound_find_region(BoundEntry *e1, void *p)
{
    unsigned long addr, tmp;
    BoundEntry *e;

    e = e1;
    while (e != NULL) {
        addr = (unsigned long)p;
        addr -= e->start;
        if (addr <= e->size) {
            /* put region at the head */
            tmp = e1->start;
            e1->start = e->start;
            e->start = tmp;
            tmp = e1->size;
            e1->size = e->size;
            e->size = tmp;
            return e1;
        }
        e = e->next;
    }
    /* no entry found: return empty entry or invalid entry */
    if (e1->is_invalid)
        return __bound_invalid_t2;
    else
        return __bound_empty_t2;
}

/* print a bound error message */
static void bound_error(const char *fmt, ...)
{
    __bound_error_msg = fmt;
    *(int *)0 = 0; /* force a runtime error */
}

static void bound_alloc_error(void)
{
    bound_error("not enough memory for bound checking code");
}

/* return '(p + offset)' for pointer arithmetic (a pointer can reach
   the end of a region in this case) */
void * FASTCALL __bound_ptr_add(void *p, int offset)
{
    unsigned long addr = (unsigned long)p;
    BoundEntry *e;
#if defined(BOUND_DEBUG)
    printf("add: 0x%x %d\n", (int)p, offset);
#endif

    e = __bound_t1[addr >> (BOUND_T2_BITS + BOUND_T3_BITS)];
    e = (BoundEntry *)((char *)e +
                       ((addr >> (BOUND_T3_BITS - BOUND_E_BITS)) &
                        ((BOUND_T2_SIZE - 1) << BOUND_E_BITS)));
    addr -= e->start;
    if (addr > e->size) {
        e = __bound_find_region(e, p);
        addr = (unsigned long)p - e->start;
    }
    addr += offset;
    if (addr > e->size)
        return INVALID_POINTER; /* return an invalid pointer */
    return p + offset;
}

/* return '(p + offset)' for pointer indirection (the resulting pointer
   must be strictly inside the region) */
#define BOUND_PTR_INDIR(dsize)                                          \
void * FASTCALL __bound_ptr_indir ## dsize (void *p, int offset)        \
{                                                                       \
    unsigned long addr = (unsigned long)p;                              \
    BoundEntry *e;                                                      \
                                                                        \
    e = __bound_t1[addr >> (BOUND_T2_BITS + BOUND_T3_BITS)];            \
    e = (BoundEntry *)((char *)e +                                      \
                       ((addr >> (BOUND_T3_BITS - BOUND_E_BITS)) &      \
                        ((BOUND_T2_SIZE - 1) << BOUND_E_BITS)));        \
    addr -= e->start;                                                   \
    if (addr > e->size) {                                               \
        e = __bound_find_region(e, p);                                  \
        addr = (unsigned long)p - e->start;                             \
    }                                                                   \
    addr += offset + dsize;                                             \
    if (addr > e->size)                                                 \
        return INVALID_POINTER; /* return an invalid pointer */         \
    return p + offset;                                                  \
}

BOUND_PTR_INDIR(1)
BOUND_PTR_INDIR(2)
BOUND_PTR_INDIR(4)
BOUND_PTR_INDIR(8)
BOUND_PTR_INDIR(12)
BOUND_PTR_INDIR(16)

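/* Rough sketch of how these entry points are meant to be used: when
   compiling with `tcc -b`, pointer dereferences are rewritten so that
   the address is validated for the size of the access before being
   used, conceptually turning something like
       *(int *)p = 0;
   into
       *(int *)__bound_ptr_indir4(p, 0) = 0;
   An out-of-bounds address makes the helper return INVALID_POINTER,
   and dereferencing that pointer then faults, which the tcc runtime
   can catch and report. */
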
/* return the frame pointer of the caller */
#define GET_CALLER_FP(fp)\
{\
    fp = (unsigned long)__builtin_frame_address(1);\
}

/* called when entering a function to add all the local regions */
void FASTCALL __bound_local_new(void *p1)
{
    unsigned long addr, size, fp, *p = p1;
    GET_CALLER_FP(fp);
    for(;;) {
        addr = p[0];
        if (addr == 0)
            break;
        addr += fp;
        size = p[1];
        p += 2;
        __bound_new_region((void *)addr, size);
    }
}

/* called when leaving a function to delete all the local regions */
void FASTCALL __bound_local_delete(void *p1)
{
    unsigned long addr, fp, *p = p1;
    GET_CALLER_FP(fp);
    for(;;) {
        addr = p[0];
        if (addr == 0)
            break;
        addr += fp;
        p += 2;
        __bound_delete_region((void *)addr);
    }
}

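/* As the two loops above imply, the table passed to __bound_local_new()
   and __bound_local_delete() is a sequence of (frame offset, size) word
   pairs describing the bounded local variables of the caller,
   terminated by a zero offset; the offsets are relative to the caller's
   frame pointer obtained with GET_CALLER_FP(). */
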
static BoundEntry *__bound_new_page(void)
{
    BoundEntry *page;
    int i;

    page = libc_malloc(sizeof(BoundEntry) * BOUND_T2_SIZE);
    if (!page)
        bound_alloc_error();
    for(i=0;i<BOUND_T2_SIZE;i++) {
        /* put empty entries */
        page[i].start = 0;
        page[i].size = EMPTY_SIZE;
        page[i].next = NULL;
        page[i].is_invalid = 0;
    }
    return page;
}

/* currently we use malloc(). Should use bound_new_page() */
static BoundEntry *bound_new_entry(void)
{
    BoundEntry *e;
    e = libc_malloc(sizeof(BoundEntry));
    return e;
}

static void bound_free_entry(BoundEntry *e)
{
    libc_free(e);
}

static inline BoundEntry *get_page(int index)
{
    BoundEntry *page;
    page = __bound_t1[index];
    if (page == __bound_empty_t2 || page == __bound_invalid_t2) {
        /* create a new page if necessary */
        page = __bound_new_page();
        __bound_t1[index] = page;
    }
    return page;
}

/* mark a region as being invalid (can only be used during init) */
static void mark_invalid(unsigned long addr, unsigned long size)
{
    unsigned long start, end;
    BoundEntry *page;
    int t1_start, t1_end, i, j, t2_start, t2_end;

    start = addr;
    end = addr + size;

    t2_start = (start + BOUND_T3_SIZE - 1) >> BOUND_T3_BITS;
    if (end != 0)
        t2_end = end >> BOUND_T3_BITS;
    else
        t2_end = 1 << (BOUND_T1_BITS + BOUND_T2_BITS);

#if 0
    printf("mark_invalid: start = %x %x\n", t2_start, t2_end);
#endif

    /* first we handle full pages */
    t1_start = (t2_start + BOUND_T2_SIZE - 1) >> BOUND_T2_BITS;
    t1_end = t2_end >> BOUND_T2_BITS;

    i = t2_start & (BOUND_T2_SIZE - 1);
    j = t2_end & (BOUND_T2_SIZE - 1);

    if (t1_start == t1_end) {
        page = get_page(t2_start >> BOUND_T2_BITS);
        for(; i < j; i++) {
            page[i].size = INVALID_SIZE;
            page[i].is_invalid = 1;
        }
    } else {
        if (i > 0) {
            page = get_page(t2_start >> BOUND_T2_BITS);
            for(; i < BOUND_T2_SIZE; i++) {
                page[i].size = INVALID_SIZE;
                page[i].is_invalid = 1;
            }
        }
        for(i = t1_start; i < t1_end; i++) {
            __bound_t1[i] = __bound_invalid_t2;
        }
        if (j != 0) {
            page = get_page(t1_end);
            for(i = 0; i < j; i++) {
                page[i].size = INVALID_SIZE;
                page[i].is_invalid = 1;
            }
        }
    }
}

void __bound_init(void)
{
    int i;
    BoundEntry *page;
    unsigned long start, size;
    int *p;

    /* save malloc hooks and install bound check hooks */
    install_malloc_hooks();

#ifndef BOUND_STATIC
    __bound_t1 = libc_malloc(BOUND_T1_SIZE * sizeof(BoundEntry *));
    if (!__bound_t1)
        bound_alloc_error();
#endif
    __bound_empty_t2 = __bound_new_page();
    for(i=0;i<BOUND_T1_SIZE;i++) {
        __bound_t1[i] = __bound_empty_t2;
    }

    page = __bound_new_page();
    for(i=0;i<BOUND_T2_SIZE;i++) {
        /* put invalid entries */
        page[i].start = 0;
        page[i].size = INVALID_SIZE;
        page[i].next = NULL;
        page[i].is_invalid = 1;
    }
    __bound_invalid_t2 = page;

    /* invalid pointer zone */
    start = (unsigned long)INVALID_POINTER & ~(BOUND_T23_SIZE - 1);
    size = BOUND_T23_SIZE;
    mark_invalid(start, size);

#if !defined(__TINYC__) && defined(CONFIG_TCC_MALLOC_HOOKS)
    /* malloc zone is also marked invalid. can only use that with
     * hooks because all libs should use the same malloc. The solution
     * would be to build a new malloc for tcc.
     *
     * usually heap (= malloc zone) comes right after bss, i.e. after _end, but
     * not always - either if we are running from under `tcc -b -run`, or if
     * address space randomization is turned on(a), heap start will be separated
     * from bss end.
     *
     * So sbrk(0) will be a good approximation for start_brk:
     *
     * - if we are a separately compiled program, __bound_init() runs early,
     *   and sbrk(0) should be equal or very near to start_brk(b) (in case other
     *   constructors malloc something), or
     *
     * - if we are running from under `tcc -b -run`, sbrk(0) will return
     *   start of heap portion which is under this program control, and not
     *   mark as invalid earlier allocated memory.
     *
     * (a) /proc/sys/kernel/randomize_va_space = 2, on Linux;
     *     usually turned on by default.
     *
     * (b) on Linux >= v3.3, the alternative is to read
     *     start_brk from /proc/self/stat
     */
    start = (unsigned long)sbrk(0);
    size = 128 * 0x100000;
    mark_invalid(start, size);
#endif

    /* add all static bound check values */
    p = (int *)&__bounds_start;
    while (p[0] != 0) {
        __bound_new_region((void *)p[0], p[1]);
        p += 2;
    }
}

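/* As the loop above implies, the static bounds table emitted by tcc at
   __bounds_start appears to be a flat array of int pairs
   (start address, size), one pair per bounded static object,
   terminated by a zero address. */
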
void __bound_exit(void)
{
    restore_malloc_hooks();
}

static inline void add_region(BoundEntry *e,
                              unsigned long start, unsigned long size)
{
    BoundEntry *e1;
    if (e->start == 0) {
        /* no region: add it */
        e->start = start;
        e->size = size;
    } else {
        /* already regions in the list: add it at the head */
        e1 = bound_new_entry();
        e1->start = e->start;
        e1->size = e->size;
        e1->next = e->next;
        e->start = start;
        e->size = size;
        e->next = e1;
    }
}

/* create a new region. It should not already exist in the region list */
void __bound_new_region(void *p, unsigned long size)
{
    unsigned long start, end;
    BoundEntry *page, *e, *e2;
    int t1_start, t1_end, i, t2_start, t2_end;

    start = (unsigned long)p;
    end = start + size;
    t1_start = start >> (BOUND_T2_BITS + BOUND_T3_BITS);
    t1_end = end >> (BOUND_T2_BITS + BOUND_T3_BITS);

    /* start */
    page = get_page(t1_start);
    t2_start = (start >> (BOUND_T3_BITS - BOUND_E_BITS)) &
               ((BOUND_T2_SIZE - 1) << BOUND_E_BITS);
    t2_end = (end >> (BOUND_T3_BITS - BOUND_E_BITS)) &
             ((BOUND_T2_SIZE - 1) << BOUND_E_BITS);
#ifdef BOUND_DEBUG
    printf("new %lx %lx %x %x %x %x\n",
           start, end, t1_start, t1_end, t2_start, t2_end);
#endif

    e = (BoundEntry *)((char *)page + t2_start);
    add_region(e, start, size);

    if (t1_end == t1_start) {
        /* same ending page */
        e2 = (BoundEntry *)((char *)page + t2_end);
        if (e2 > e) {
            e++;
            for(;e<e2;e++) {
                e->start = start;
                e->size = size;
            }
            add_region(e, start, size);
        }
    } else {
        /* mark until end of page */
        e2 = page + BOUND_T2_SIZE;
        e++;
        for(;e<e2;e++) {
            e->start = start;
            e->size = size;
        }
        /* mark intermediate pages, if any */
        for(i=t1_start+1;i<t1_end;i++) {
            page = get_page(i);
            e2 = page + BOUND_T2_SIZE;
            for(e=page;e<e2;e++) {
                e->start = start;
                e->size = size;
            }
        }
        /* last page */
        page = get_page(t1_end);
        e2 = (BoundEntry *)((char *)page + t2_end);
        for(e=page;e<e2;e++) {
            e->start = start;
            e->size = size;
        }
        add_region(e, start, size);
    }
}

/* delete a region */
static inline void delete_region(BoundEntry *e,
                                 void *p, unsigned long empty_size)
{
    unsigned long addr;
    BoundEntry *e1;

    addr = (unsigned long)p;
    addr -= e->start;
    if (addr <= e->size) {
        /* region found is first one */
        e1 = e->next;
        if (e1 == NULL) {
            /* no more region: mark it empty */
            e->start = 0;
            e->size = empty_size;
        } else {
            /* copy next region in head */
            e->start = e1->start;
            e->size = e1->size;
            e->next = e1->next;
            bound_free_entry(e1);
        }
    } else {
        /* find the matching region */
        for(;;) {
            e1 = e;
            e = e->next;
            /* region not found: do nothing */
            if (e == NULL)
                break;
            addr = (unsigned long)p - e->start;
            if (addr <= e->size) {
                /* found: remove entry */
                e1->next = e->next;
                bound_free_entry(e);
                break;
            }
        }
    }
}

/* WARNING: 'p' must be the starting point of the region. */
/* return non-zero if error */
int __bound_delete_region(void *p)
{
    unsigned long start, end, addr, size, empty_size;
    BoundEntry *page, *e, *e2;
    int t1_start, t1_end, t2_start, t2_end, i;

    start = (unsigned long)p;
    t1_start = start >> (BOUND_T2_BITS + BOUND_T3_BITS);
    t2_start = (start >> (BOUND_T3_BITS - BOUND_E_BITS)) &
               ((BOUND_T2_SIZE - 1) << BOUND_E_BITS);

    /* find region size */
    page = __bound_t1[t1_start];
    e = (BoundEntry *)((char *)page + t2_start);
    addr = start - e->start;
    if (addr > e->size)
        e = __bound_find_region(e, p);
    /* test if invalid region */
    if (e->size == EMPTY_SIZE || (unsigned long)p != e->start)
        return -1;
    /* compute the size we put in invalid regions */
    if (e->is_invalid)
        empty_size = INVALID_SIZE;
    else
        empty_size = EMPTY_SIZE;
    size = e->size;
    end = start + size;

    /* now we can free each entry */
    t1_end = end >> (BOUND_T2_BITS + BOUND_T3_BITS);
    t2_end = (end >> (BOUND_T3_BITS - BOUND_E_BITS)) &
             ((BOUND_T2_SIZE - 1) << BOUND_E_BITS);

    delete_region(e, p, empty_size);
    if (t1_end == t1_start) {
        /* same ending page */
        e2 = (BoundEntry *)((char *)page + t2_end);
        if (e2 > e) {
            e++;
            for(;e<e2;e++) {
                e->start = 0;
                e->size = empty_size;
            }
            delete_region(e, p, empty_size);
        }
    } else {
        /* mark until end of page */
        e2 = page + BOUND_T2_SIZE;
        e++;
        for(;e<e2;e++) {
            e->start = 0;
            e->size = empty_size;
        }
        /* mark intermediate pages, if any */
        /* XXX: should free them */
        for(i=t1_start+1;i<t1_end;i++) {
            page = get_page(i);
            e2 = page + BOUND_T2_SIZE;
            for(e=page;e<e2;e++) {
                e->start = 0;
                e->size = empty_size;
            }
        }
        /* last page */
        page = get_page(t1_end);
        e2 = (BoundEntry *)((char *)page + t2_end);
        for(e=page;e<e2;e++) {
            e->start = 0;
            e->size = empty_size;
        }
        delete_region(e, p, empty_size);
    }
    return 0;
}

/* return the size of the region starting at p, or EMPTY_SIZE if the
   region does not exist. */
static unsigned long get_region_size(void *p)
{
    unsigned long addr = (unsigned long)p;
    BoundEntry *e;

    e = __bound_t1[addr >> (BOUND_T2_BITS + BOUND_T3_BITS)];
    e = (BoundEntry *)((char *)e +
                       ((addr >> (BOUND_T3_BITS - BOUND_E_BITS)) &
                        ((BOUND_T2_SIZE - 1) << BOUND_E_BITS)));
    addr -= e->start;
    if (addr > e->size)
        e = __bound_find_region(e, p);
    if (e->start != (unsigned long)p)
        return EMPTY_SIZE;
    return e->size;
}

/* patched memory functions */

/* force compiler to perform stores coded up to this point */
#define barrier()   __asm__ __volatile__ ("": : : "memory")

static void install_malloc_hooks(void)
{
#ifdef CONFIG_TCC_MALLOC_HOOKS
    saved_malloc_hook = __malloc_hook;
    saved_free_hook = __free_hook;
    saved_realloc_hook = __realloc_hook;
    saved_memalign_hook = __memalign_hook;
    __malloc_hook = __bound_malloc;
    __free_hook = __bound_free;
    __realloc_hook = __bound_realloc;
    __memalign_hook = __bound_memalign;

    barrier();
#endif
}

static void restore_malloc_hooks(void)
{
#ifdef CONFIG_TCC_MALLOC_HOOKS
    __malloc_hook = saved_malloc_hook;
    __free_hook = saved_free_hook;
    __realloc_hook = saved_realloc_hook;
    __memalign_hook = saved_memalign_hook;

    barrier();
#endif
}

static void *libc_malloc(size_t size)
{
    void *ptr;
    restore_malloc_hooks();
    ptr = malloc(size);
    install_malloc_hooks();
    return ptr;
}

static void libc_free(void *ptr)
{
    restore_malloc_hooks();
    free(ptr);
    install_malloc_hooks();
}

/* XXX: we should use a malloc which ensures that it is unlikely that
   two malloc'ed blocks get the same address if frees happen in
   between. */
void *__bound_malloc(size_t size, const void *caller)
{
    void *ptr;

    /* we allocate one more byte to ensure the regions will be
       separated by at least one byte. With the glibc malloc, it may
       in fact not be necessary */
    ptr = libc_malloc(size + 1);

    if (!ptr)
        return NULL;
    __bound_new_region(ptr, size);
    return ptr;
}

void *__bound_memalign(size_t size, size_t align, const void *caller)
{
    void *ptr;

    restore_malloc_hooks();

#ifndef HAVE_MEMALIGN
    if (align > 4) {
        /* XXX: handle it ? */
        ptr = NULL;
    } else {
        /* we suppose that malloc aligns to at least four bytes */
        ptr = malloc(size + 1);
    }
#else
    /* we allocate one more byte to ensure the regions will be
       separated by at least one byte. With the glibc malloc, it may
       in fact not be necessary */
    ptr = memalign(align, size + 1);
#endif

    install_malloc_hooks();

    if (!ptr)
        return NULL;
    __bound_new_region(ptr, size);
    return ptr;
}

void __bound_free(void *ptr, const void *caller)
{
    if (ptr == NULL)
        return;
    if (__bound_delete_region(ptr) != 0)
        bound_error("freeing invalid region");

    libc_free(ptr);
}

void *__bound_realloc(void *ptr, size_t size, const void *caller)
{
    void *ptr1;
    unsigned long old_size;

    if (size == 0) {
        __bound_free(ptr, caller);
        return NULL;
    } else {
        ptr1 = __bound_malloc(size, caller);
        if (ptr == NULL || ptr1 == NULL)
            return ptr1;
        old_size = get_region_size(ptr);
        if (old_size == EMPTY_SIZE)
            bound_error("realloc'ing invalid pointer");
        memcpy(ptr1, ptr, old_size);
        __bound_free(ptr, caller);
        return ptr1;
    }
}

#ifndef CONFIG_TCC_MALLOC_HOOKS
void *__bound_calloc(size_t nmemb, size_t size)
{
    void *ptr;
    size = size * nmemb;
    ptr = __bound_malloc(size, NULL);
    if (!ptr)
        return NULL;
    memset(ptr, 0, size);
    return ptr;
}
#endif

#if 0
static void bound_dump(void)
{
    BoundEntry *page, *e;
    int i, j;

    printf("region dump:\n");
    for(i=0;i<BOUND_T1_SIZE;i++) {
        page = __bound_t1[i];
        for(j=0;j<BOUND_T2_SIZE;j++) {
            e = page + j;
            /* do not print invalid or empty entries */
            if (e->size != EMPTY_SIZE && e->start != 0) {
                printf("%08x:",
                       (i << (BOUND_T2_BITS + BOUND_T3_BITS)) +
                       (j << BOUND_T3_BITS));
                do {
                    printf(" %08lx:%08lx", e->start, e->start + e->size);
                    e = e->next;
                } while (e != NULL);
                printf("\n");
            }
        }
    }
}
#endif

/* some useful checked functions */

/* check that (p ... p + size - 1) lies inside 'p' region, if any */
static void __bound_check(const void *p, size_t size)
{
    if (size == 0)
        return;
    p = __bound_ptr_add((void *)p, size);
    if (p == INVALID_POINTER)
        bound_error("invalid pointer");
}

void *__bound_memcpy(void *dst, const void *src, size_t size)
{
    __bound_check(dst, size);
    __bound_check(src, size);
    /* check also region overlap */
    if (src >= dst && src < dst + size)
        bound_error("overlapping regions in memcpy()");
    return memcpy(dst, src, size);
}

void *__bound_memmove(void *dst, const void *src, size_t size)
{
    __bound_check(dst, size);
    __bound_check(src, size);
    return memmove(dst, src, size);
}

void *__bound_memset(void *dst, int c, size_t size)
{
    __bound_check(dst, size);
    return memset(dst, c, size);
}

/* XXX: could be optimized */
int __bound_strlen(const char *s)
{
    const char *p;
    int len;

    len = 0;
    for(;;) {
        p = __bound_ptr_indir1((char *)s, len);
        if (p == INVALID_POINTER)
            bound_error("bad pointer in strlen()");
        if (*p == '\0')
            break;
        len++;
    }
    return len;
}

char *__bound_strcpy(char *dst, const char *src)
{
    int len;
    len = __bound_strlen(src);
    return __bound_memcpy(dst, src, len + 1);
}
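
/* Minimal usage sketch, assuming the usual `tcc -b` workflow: a program
   built with bound checking enabled is linked against these helpers,
   and an out-of-bounds access such as

       int a[10];
       a[10] = 1;    (rewritten to go through __bound_ptr_indir4())

   makes the bounds lookup return INVALID_POINTER, so the store faults
   and the error is reported at run time. */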