/*
 *  Tiny C Memory and bounds checker
 *
 *  Copyright (c) 2002 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#if !defined(__FreeBSD__) && !defined(__FreeBSD_kernel__) \
    && !defined(__DragonFly__) && !defined(__OpenBSD__)
#include <malloc.h>
#endif
//#define BOUND_DEBUG

/* define so that the bound array is static (faster, but uses memory
   even if bound checking is not used) */
//#define BOUND_STATIC

/* use malloc hooks. Currently the code is not reliable without them */
#define CONFIG_TCC_MALLOC_HOOKS
#define HAVE_MEMALIGN

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__dietlibc__) \
    || defined(__UCLIBC__) || defined(__OpenBSD__) || defined(_WIN32)
#warning Bound checking does not support malloc (etc.) in this environment.
#undef CONFIG_TCC_MALLOC_HOOKS
#undef HAVE_MEMALIGN
#endif
#define BOUND_T1_BITS 13
#define BOUND_T2_BITS 11
#define BOUND_T3_BITS (32 - BOUND_T1_BITS - BOUND_T2_BITS)

#define BOUND_T1_SIZE (1 << BOUND_T1_BITS)
#define BOUND_T2_SIZE (1 << BOUND_T2_BITS)
#define BOUND_T3_SIZE (1 << BOUND_T3_BITS)
#define BOUND_E_BITS  4

#define BOUND_T23_BITS (BOUND_T2_BITS + BOUND_T3_BITS)
#define BOUND_T23_SIZE (1 << BOUND_T23_BITS)
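
/* A pointer lookup works in two steps: the top BOUND_T1_BITS of a 32-bit
   address index the __bound_t1 page table, and the next BOUND_T2_BITS
   select a BoundEntry inside the page; each entry therefore covers a
   BOUND_T3_SIZE-byte chunk of the address space.  BOUND_E_BITS is
   log2(sizeof(BoundEntry)), so shifting the address right by
   (BOUND_T3_BITS - BOUND_E_BITS) in the lookup code below directly yields
   the byte offset of the entry inside its page. */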
/* this pointer is generated when bound check is incorrect */
#define INVALID_POINTER ((void *)(-2))
/* size of an empty region */
#define EMPTY_SIZE   0xffffffff
/* size of an invalid region */
#define INVALID_SIZE 0
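
/* An "empty" entry (start = 0, size = EMPTY_SIZE) matches every address,
   so memory the checker knows nothing about is never reported; an
   "invalid" entry (size = INVALID_SIZE) matches essentially nothing, so
   any access through it triggers an error. */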
typedef struct BoundEntry {
    unsigned long start;
    unsigned long size;
    struct BoundEntry *next;
    unsigned long is_invalid; /* true if pointers outside region are invalid */
} BoundEntry;
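
/* Every lookup computes '(unsigned long)p - e->start' and compares the
   result with e->size; because the subtraction is unsigned, pointers
   below 'start' wrap around and fail the comparison as well. */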
/* external interface */
void __bound_init(void);
void __bound_new_region(void *p, unsigned long size);
int __bound_delete_region(void *p);

#define FASTCALL __attribute__((regparm(3)))

void *__bound_malloc(size_t size, const void *caller);
void *__bound_memalign(size_t size, size_t align, const void *caller);
void __bound_free(void *ptr, const void *caller);
void *__bound_realloc(void *ptr, size_t size, const void *caller);
static void *libc_malloc(size_t size);
static void libc_free(void *ptr);
static void install_malloc_hooks(void);
static void restore_malloc_hooks(void);
#ifdef CONFIG_TCC_MALLOC_HOOKS
static void *saved_malloc_hook;
static void *saved_free_hook;
static void *saved_realloc_hook;
static void *saved_memalign_hook;
#endif
/* linker definitions */
extern char _end;

/* TCC definitions */
extern char __bounds_start; /* start of static bounds table */
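/* The static bounds table is a sequence of (address, size) word pairs
   terminated by a null address; it is expected to be filled in by TCC
   for global arrays and is read by __bound_init() below. */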
/* error message, just for TCC */
const char *__bound_error_msg;

/* runtime error output */
extern void rt_error(unsigned long pc, const char *fmt, ...);

#ifdef BOUND_STATIC
static BoundEntry *__bound_t1[BOUND_T1_SIZE]; /* page table */
#else
static BoundEntry **__bound_t1; /* page table */
#endif
static BoundEntry *__bound_empty_t2;   /* empty page, for unused pages */
static BoundEntry *__bound_invalid_t2; /* invalid page, for invalid pointers */
static BoundEntry *__bound_find_region(BoundEntry *e1, void *p)
{
    unsigned long addr, tmp;
    BoundEntry *e;

    e = e1;
    while (e != NULL) {
        addr = (unsigned long)p;
        addr -= e->start;
        if (addr <= e->size) {
            /* put region at the head */
            tmp = e1->start;
            e1->start = e->start;
            e->start = tmp;
            tmp = e1->size;
            e1->size = e->size;
            e->size = tmp;
            return e1;
        }
        e = e->next;
    }
    /* no entry found: return empty entry or invalid entry */
    if (e1->is_invalid)
        return __bound_invalid_t2;
    else
        return __bound_empty_t2;
}
/* print a bound error message */
static void bound_error(const char *fmt, ...)
{
    __bound_error_msg = fmt;
    *(int *)0 = 0; /* force a runtime error */
}

static void bound_alloc_error(void)
{
    bound_error("not enough memory for bound checking code");
}
/* return '(p + offset)' for pointer arithmetic (a pointer can reach
   the end of a region in this case) */
void * FASTCALL __bound_ptr_add(void *p, int offset)
{
    unsigned long addr = (unsigned long)p;
    BoundEntry *e;
#if defined(BOUND_DEBUG)
    printf("add: 0x%x %d\n", (int)p, offset);
#endif

    e = __bound_t1[addr >> (BOUND_T2_BITS + BOUND_T3_BITS)];
    e = (BoundEntry *)((char *)e +
                       ((addr >> (BOUND_T3_BITS - BOUND_E_BITS)) &
                        ((BOUND_T2_SIZE - 1) << BOUND_E_BITS)));
    addr -= e->start;
    if (addr > e->size) {
        e = __bound_find_region(e, p);
        addr = (unsigned long)p - e->start;
    }
    addr += offset;
    if (addr > e->size)
        return INVALID_POINTER; /* return an invalid pointer */
    return p + offset;
}
/* return '(p + offset)' for pointer indirection (the resulting pointer
   must be strictly inside the region) */
#define BOUND_PTR_INDIR(dsize)                                          \
void * FASTCALL __bound_ptr_indir ## dsize (void *p, int offset)        \
{                                                                       \
    unsigned long addr = (unsigned long)p;                              \
    BoundEntry *e;                                                      \
                                                                        \
    e = __bound_t1[addr >> (BOUND_T2_BITS + BOUND_T3_BITS)];            \
    e = (BoundEntry *)((char *)e +                                      \
                       ((addr >> (BOUND_T3_BITS - BOUND_E_BITS)) &      \
                        ((BOUND_T2_SIZE - 1) << BOUND_E_BITS)));        \
    addr -= e->start;                                                   \
    if (addr > e->size) {                                               \
        e = __bound_find_region(e, p);                                  \
        addr = (unsigned long)p - e->start;                             \
    }                                                                   \
    addr += offset + dsize;                                             \
    if (addr > e->size)                                                 \
        return INVALID_POINTER; /* return an invalid pointer */         \
    return p + offset;                                                  \
}

BOUND_PTR_INDIR(1)
BOUND_PTR_INDIR(2)
BOUND_PTR_INDIR(4)
BOUND_PTR_INDIR(8)
BOUND_PTR_INDIR(12)
BOUND_PTR_INDIR(16)
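
/* Code instrumented by tcc is expected to route pointer operations
   through the entry points above; roughly (a sketch, not literal
   compiler output):

       p = __bound_ptr_add(p, i);               pointer arithmetic
       *(int *)__bound_ptr_indir4(p, 0) = v;    before a 4-byte access
*/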
#ifdef __i386__
/* return the frame pointer of the caller */
#define GET_CALLER_FP(fp)\
{\
    unsigned long *fp1;\
    __asm__ __volatile__ ("movl %%ebp,%0" :"=g" (fp1));\
    fp = fp1[0];\
}
#else
#error put code to extract the calling frame pointer
#endif
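
/* The bounds table passed to the two functions below is a sequence of
   (frame offset, size) word pairs terminated by a zero offset; each
   offset is added to the caller's frame pointer to obtain the address
   of a local array. */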
/* called when entering a function to add all the local regions */
void FASTCALL __bound_local_new(void *p1)
{
    unsigned long addr, size, fp, *p = p1;
    GET_CALLER_FP(fp);
    for(;;) {
        addr = p[0];
        if (addr == 0)
            break;
        addr += fp;
        size = p[1];
        p += 2;
        __bound_new_region((void *)addr, size);
    }
}

/* called when leaving a function to delete all the local regions */
void FASTCALL __bound_local_delete(void *p1)
{
    unsigned long addr, fp, *p = p1;
    GET_CALLER_FP(fp);
    for(;;) {
        addr = p[0];
        if (addr == 0)
            break;
        addr += fp;
        p += 2;
        __bound_delete_region((void *)addr);
    }
}
static BoundEntry *__bound_new_page(void)
{
    BoundEntry *page;
    int i;

    page = libc_malloc(sizeof(BoundEntry) * BOUND_T2_SIZE);
    if (!page)
        bound_alloc_error();
    for(i=0;i<BOUND_T2_SIZE;i++) {
        /* put empty entries */
        page[i].start = 0;
        page[i].size = EMPTY_SIZE;
        page[i].next = NULL;
        page[i].is_invalid = 0;
    }
    return page;
}

/* currently we use malloc(). Should use bound_new_page() */
static BoundEntry *bound_new_entry(void)
{
    BoundEntry *e;
    e = libc_malloc(sizeof(BoundEntry));
    return e;
}

static void bound_free_entry(BoundEntry *e)
{
    libc_free(e);
}
static inline BoundEntry *get_page(int index)
{
    BoundEntry *page;
    page = __bound_t1[index];
    if (page == __bound_empty_t2 || page == __bound_invalid_t2) {
        /* create a new page if necessary */
        page = __bound_new_page();
        __bound_t1[index] = page;
    }
    return page;
}
/* mark a region as being invalid (can only be used during init) */
static void mark_invalid(unsigned long addr, unsigned long size)
{
    unsigned long start, end;
    BoundEntry *page;
    int t1_start, t1_end, i, j, t2_start, t2_end;

    start = addr;
    end = addr + size;

    t2_start = (start + BOUND_T3_SIZE - 1) >> BOUND_T3_BITS;
    if (end != 0)
        t2_end = end >> BOUND_T3_BITS;
    else
        t2_end = 1 << (BOUND_T1_BITS + BOUND_T2_BITS);

#if 0
    printf("mark_invalid: start = %x %x\n", t2_start, t2_end);
#endif

    /* first we handle full pages */
    t1_start = (t2_start + BOUND_T2_SIZE - 1) >> BOUND_T2_BITS;
    t1_end = t2_end >> BOUND_T2_BITS;

    i = t2_start & (BOUND_T2_SIZE - 1);
    j = t2_end & (BOUND_T2_SIZE - 1);

    if (t1_start == t1_end) {
        page = get_page(t2_start >> BOUND_T2_BITS);
        for(; i < j; i++) {
            page[i].size = INVALID_SIZE;
            page[i].is_invalid = 1;
        }
    } else {
        if (i > 0) {
            page = get_page(t2_start >> BOUND_T2_BITS);
            for(; i < BOUND_T2_SIZE; i++) {
                page[i].size = INVALID_SIZE;
                page[i].is_invalid = 1;
            }
        }
        for(i = t1_start; i < t1_end; i++) {
            __bound_t1[i] = __bound_invalid_t2;
        }
        if (j != 0) {
            page = get_page(t1_end);
            for(i = 0; i < j; i++) {
                page[i].size = INVALID_SIZE;
                page[i].is_invalid = 1;
            }
        }
    }
}
void __bound_init(void)
{
    int i;
    BoundEntry *page;
    unsigned long start, size;
    int *p;

    /* save malloc hooks and install bound check hooks */
    install_malloc_hooks();

#ifndef BOUND_STATIC
    __bound_t1 = libc_malloc(BOUND_T1_SIZE * sizeof(BoundEntry *));
    if (!__bound_t1)
        bound_alloc_error();
#endif
    __bound_empty_t2 = __bound_new_page();
    for(i=0;i<BOUND_T1_SIZE;i++) {
        __bound_t1[i] = __bound_empty_t2;
    }

    page = __bound_new_page();
    for(i=0;i<BOUND_T2_SIZE;i++) {
        /* put invalid entries */
        page[i].start = 0;
        page[i].size = INVALID_SIZE;
        page[i].next = NULL;
        page[i].is_invalid = 1;
    }
    __bound_invalid_t2 = page;

    /* invalid pointer zone */
    start = (unsigned long)INVALID_POINTER & ~(BOUND_T23_SIZE - 1);
    size = BOUND_T23_SIZE;
    mark_invalid(start, size);

#if !defined(__TINYC__) && defined(CONFIG_TCC_MALLOC_HOOKS)
    /* the malloc zone is also marked invalid. This can only be done with
       hooks, because all libraries must then use the same malloc. The
       real solution would be to build a dedicated malloc for tcc. */
    start = (unsigned long)&_end;
    size = 128 * 0x100000;
    mark_invalid(start, size);
#endif

    /* add all static bound check values */
    p = (int *)&__bounds_start;
    while (p[0] != 0) {
        __bound_new_region((void *)p[0], p[1]);
        p += 2;
    }
}
void __bound_exit(void)
{
    restore_malloc_hooks();
}
static inline void add_region(BoundEntry *e,
                              unsigned long start, unsigned long size)
{
    BoundEntry *e1;
    if (e->start == 0) {
        /* no region: add it */
        e->start = start;
        e->size = size;
    } else {
        /* already regions in the list: add it at the head */
        e1 = bound_new_entry();
        e1->start = e->start;
        e1->size = e->size;
        e1->next = e->next;
        e->start = start;
        e->size = size;
        e->next = e1;
    }
}
/* create a new region. It should not already exist in the region list */
void __bound_new_region(void *p, unsigned long size)
{
    unsigned long start, end;
    BoundEntry *page, *e, *e2;
    int t1_start, t1_end, i, t2_start, t2_end;

    start = (unsigned long)p;
    end = start + size;
    t1_start = start >> (BOUND_T2_BITS + BOUND_T3_BITS);
    t1_end = end >> (BOUND_T2_BITS + BOUND_T3_BITS);

    /* start */
    page = get_page(t1_start);
    t2_start = (start >> (BOUND_T3_BITS - BOUND_E_BITS)) &
        ((BOUND_T2_SIZE - 1) << BOUND_E_BITS);
    t2_end = (end >> (BOUND_T3_BITS - BOUND_E_BITS)) &
        ((BOUND_T2_SIZE - 1) << BOUND_E_BITS);
#ifdef BOUND_DEBUG
    printf("new %lx %lx %x %x %x %x\n",
           start, end, t1_start, t1_end, t2_start, t2_end);
#endif

    e = (BoundEntry *)((char *)page + t2_start);
    add_region(e, start, size);

    if (t1_end == t1_start) {
        /* same ending page */
        e2 = (BoundEntry *)((char *)page + t2_end);
        if (e2 > e) {
            e++;
            for(;e<e2;e++) {
                e->start = start;
                e->size = size;
            }
            add_region(e, start, size);
        }
    } else {
        /* mark until end of page */
        e2 = page + BOUND_T2_SIZE;
        e++;
        for(;e<e2;e++) {
            e->start = start;
            e->size = size;
        }
        /* mark intermediate pages, if any */
        for(i=t1_start+1;i<t1_end;i++) {
            page = get_page(i);
            e2 = page + BOUND_T2_SIZE;
            for(e=page;e<e2;e++) {
                e->start = start;
                e->size = size;
            }
        }
        /* last page */
        page = get_page(t1_end);
        e2 = (BoundEntry *)((char *)page + t2_end);
        for(e=page;e<e2;e++) {
            e->start = start;
            e->size = size;
        }
        add_region(e, start, size);
    }
}
/* delete a region */
static inline void delete_region(BoundEntry *e,
                                 void *p, unsigned long empty_size)
{
    unsigned long addr;
    BoundEntry *e1;

    addr = (unsigned long)p;
    addr -= e->start;
    if (addr <= e->size) {
        /* region found is first one */
        e1 = e->next;
        if (e1 == NULL) {
            /* no more region: mark it empty */
            e->start = 0;
            e->size = empty_size;
        } else {
            /* copy next region in head */
            e->start = e1->start;
            e->size = e1->size;
            e->next = e1->next;
            bound_free_entry(e1);
        }
    } else {
        /* find the matching region */
        for(;;) {
            e1 = e;
            e = e->next;
            /* region not found: do nothing */
            if (e == NULL)
                break;
            addr = (unsigned long)p - e->start;
            if (addr <= e->size) {
                /* found: remove entry */
                e1->next = e->next;
                bound_free_entry(e);
                break;
            }
        }
    }
}
/* WARNING: 'p' must be the starting point of the region. */
/* return non zero if error */
int __bound_delete_region(void *p)
{
    unsigned long start, end, addr, size, empty_size;
    BoundEntry *page, *e, *e2;
    int t1_start, t1_end, t2_start, t2_end, i;

    start = (unsigned long)p;
    t1_start = start >> (BOUND_T2_BITS + BOUND_T3_BITS);
    t2_start = (start >> (BOUND_T3_BITS - BOUND_E_BITS)) &
        ((BOUND_T2_SIZE - 1) << BOUND_E_BITS);

    /* find region size */
    page = __bound_t1[t1_start];
    e = (BoundEntry *)((char *)page + t2_start);
    addr = start - e->start;
    if (addr > e->size)
        e = __bound_find_region(e, p);
    /* test if invalid region */
    if (e->size == EMPTY_SIZE || (unsigned long)p != e->start)
        return -1;
    /* compute the size we put in invalid regions */
    if (e->is_invalid)
        empty_size = INVALID_SIZE;
    else
        empty_size = EMPTY_SIZE;
    size = e->size;
    end = start + size;

    /* now we can free each entry */
    t1_end = end >> (BOUND_T2_BITS + BOUND_T3_BITS);
    t2_end = (end >> (BOUND_T3_BITS - BOUND_E_BITS)) &
        ((BOUND_T2_SIZE - 1) << BOUND_E_BITS);

    delete_region(e, p, empty_size);
    if (t1_end == t1_start) {
        /* same ending page */
        e2 = (BoundEntry *)((char *)page + t2_end);
        if (e2 > e) {
            e++;
            for(;e<e2;e++) {
                e->start = 0;
                e->size = empty_size;
            }
            delete_region(e, p, empty_size);
        }
    } else {
        /* mark until end of page */
        e2 = page + BOUND_T2_SIZE;
        e++;
        for(;e<e2;e++) {
            e->start = 0;
            e->size = empty_size;
        }
        /* mark intermediate pages, if any */
        /* XXX: should free them */
        for(i=t1_start+1;i<t1_end;i++) {
            page = get_page(i);
            e2 = page + BOUND_T2_SIZE;
            for(e=page;e<e2;e++) {
                e->start = 0;
                e->size = empty_size;
            }
        }
        /* last page (note: indexed by t1_end, the page index) */
        page = get_page(t1_end);
        e2 = (BoundEntry *)((char *)page + t2_end);
        for(e=page;e<e2;e++) {
            e->start = 0;
            e->size = empty_size;
        }
        delete_region(e, p, empty_size);
    }
    return 0;
}
/* return the size of the region starting at p, or EMPTY_SIZE if no
   such region exists. */
static unsigned long get_region_size(void *p)
{
    unsigned long addr = (unsigned long)p;
    BoundEntry *e;

    e = __bound_t1[addr >> (BOUND_T2_BITS + BOUND_T3_BITS)];
    e = (BoundEntry *)((char *)e +
                       ((addr >> (BOUND_T3_BITS - BOUND_E_BITS)) &
                        ((BOUND_T2_SIZE - 1) << BOUND_E_BITS)));
    addr -= e->start;
    if (addr > e->size)
        e = __bound_find_region(e, p);
    if (e->start != (unsigned long)p)
        return EMPTY_SIZE;
    return e->size;
}
/* patched memory functions */

static void install_malloc_hooks(void)
{
#ifdef CONFIG_TCC_MALLOC_HOOKS
    saved_malloc_hook = __malloc_hook;
    saved_free_hook = __free_hook;
    saved_realloc_hook = __realloc_hook;
    saved_memalign_hook = __memalign_hook;
    __malloc_hook = __bound_malloc;
    __free_hook = __bound_free;
    __realloc_hook = __bound_realloc;
    __memalign_hook = __bound_memalign;
#endif
}

static void restore_malloc_hooks(void)
{
#ifdef CONFIG_TCC_MALLOC_HOOKS
    __malloc_hook = saved_malloc_hook;
    __free_hook = saved_free_hook;
    __realloc_hook = saved_realloc_hook;
    __memalign_hook = saved_memalign_hook;
#endif
}
static void *libc_malloc(size_t size)
{
    void *ptr;
    restore_malloc_hooks();
    ptr = malloc(size);
    install_malloc_hooks();
    return ptr;
}

static void libc_free(void *ptr)
{
    restore_malloc_hooks();
    free(ptr);
    install_malloc_hooks();
}
/* XXX: we should use a malloc which ensures that it is unlikely that
   two malloc'ed blocks get the same address if 'free's are made in
   between. */
void *__bound_malloc(size_t size, const void *caller)
{
    void *ptr;

    /* we allocate one more byte to ensure the regions will be
       separated by at least one byte. With the glibc malloc, this may
       in fact not be necessary */
    ptr = libc_malloc(size + 1);

    if (!ptr)
        return NULL;
    __bound_new_region(ptr, size);
    return ptr;
}
void *__bound_memalign(size_t size, size_t align, const void *caller)
{
    void *ptr;

    restore_malloc_hooks();

#ifndef HAVE_MEMALIGN
    if (align > 4) {
        /* XXX: handle it ? */
        ptr = NULL;
    } else {
        /* we suppose that malloc aligns to at least four bytes */
        ptr = malloc(size + 1);
    }
#else
    /* we allocate one more byte to ensure the regions will be
       separated by at least one byte. With the glibc malloc, this may
       in fact not be necessary */
    ptr = memalign(align, size + 1);
#endif

    install_malloc_hooks();

    if (!ptr)
        return NULL;
    __bound_new_region(ptr, size);
    return ptr;
}
void __bound_free(void *ptr, const void *caller)
{
    if (ptr == NULL)
        return;
    if (__bound_delete_region(ptr) != 0)
        bound_error("freeing invalid region");

    libc_free(ptr);
}
void *__bound_realloc(void *ptr, size_t size, const void *caller)
{
    void *ptr1;
    int old_size;

    if (size == 0) {
        __bound_free(ptr, caller);
        return NULL;
    } else {
        ptr1 = __bound_malloc(size, caller);
        if (ptr == NULL || ptr1 == NULL)
            return ptr1;
        old_size = get_region_size(ptr);
        if (old_size == EMPTY_SIZE)
            bound_error("realloc'ing invalid pointer");
        /* copy no more than the new region can hold */
        memcpy(ptr1, ptr, (size_t)old_size < size ? (size_t)old_size : size);
        __bound_free(ptr, caller);
        return ptr1;
    }
}
#ifndef CONFIG_TCC_MALLOC_HOOKS
void *__bound_calloc(size_t nmemb, size_t size)
{
    void *ptr;
    size = size * nmemb;
    ptr = __bound_malloc(size, NULL);
    if (!ptr)
        return NULL;
    memset(ptr, 0, size);
    return ptr;
}
#endif
#if 0
static void bound_dump(void)
{
    BoundEntry *page, *e;
    int i, j;

    printf("region dump:\n");
    for(i=0;i<BOUND_T1_SIZE;i++) {
        page = __bound_t1[i];
        for(j=0;j<BOUND_T2_SIZE;j++) {
            e = page + j;
            /* do not print invalid or empty entries */
            if (e->size != EMPTY_SIZE && e->start != 0) {
                printf("%08x:",
                       (i << (BOUND_T2_BITS + BOUND_T3_BITS)) +
                       (j << BOUND_T3_BITS));
                do {
                    printf(" %08lx:%08lx", e->start, e->start + e->size);
                    e = e->next;
                } while (e != NULL);
                printf("\n");
            }
        }
    }
}
#endif
/* some useful checked functions */

/* check that (p ... p + size - 1) lies inside 'p' region, if any */
static void __bound_check(const void *p, size_t size)
{
    if (size == 0)
        return;
    p = __bound_ptr_add((void *)p, size);
    if (p == INVALID_POINTER)
        bound_error("invalid pointer");
}
void *__bound_memcpy(void *dst, const void *src, size_t size)
{
    __bound_check(dst, size);
    __bound_check(src, size);
    /* check also region overlap */
    if (src >= dst && src < dst + size)
        bound_error("overlapping regions in memcpy()");
    return memcpy(dst, src, size);
}

void *__bound_memmove(void *dst, const void *src, size_t size)
{
    __bound_check(dst, size);
    __bound_check(src, size);
    return memmove(dst, src, size);
}

void *__bound_memset(void *dst, int c, size_t size)
{
    __bound_check(dst, size);
    return memset(dst, c, size);
}
/* XXX: could be optimized */
int __bound_strlen(const char *s)
{
    const char *p;
    int len;

    len = 0;
    for(;;) {
        p = __bound_ptr_indir1((char *)s, len);
        if (p == INVALID_POINTER)
            bound_error("bad pointer in strlen()");
        if (*p == '\0')
            break;
        len++;
    }
    return len;
}

char *__bound_strcpy(char *dst, const char *src)
{
    int len;
    len = __bound_strlen(src);
    return __bound_memcpy(dst, src, len + 1);
}