/* ccan/failtest/failtest.c — fault-injection test harness.
 * (Web-scrape page header removed; see LICENSE for licensing details.) */
1 /* Licensed under LGPL - see LICENSE file for details */
2 #include <ccan/failtest/failtest.h>
3 #include <stdarg.h>
4 #include <string.h>
5 #include <stdio.h>
6 #include <stdarg.h>
7 #include <ctype.h>
8 #include <unistd.h>
9 #include <poll.h>
10 #include <errno.h>
11 #include <sys/types.h>
12 #include <sys/wait.h>
13 #include <sys/stat.h>
14 #include <sys/time.h>
15 #include <sys/mman.h>
16 #include <sys/resource.h>
17 #include <signal.h>
18 #include <assert.h>
19 #include <ccan/err/err.h>
20 #include <ccan/time/time.h>
21 #include <ccan/read_write_all/read_write_all.h>
22 #include <ccan/failtest/failtest_proto.h>
23 #include <ccan/build_assert/build_assert.h>
24 #include <ccan/hash/hash.h>
25 #include <ccan/htable/htable_type.h>
26 #include <ccan/str/str.h>
27 #include <ccan/compiler/compiler.h>
29 enum failtest_result (*failtest_hook)(struct tlist_calls *);
31 static FILE *tracef = NULL, *warnf;
32 static int traceindent = 0;
34 unsigned int failtest_timeout_ms = 20000;
36 const char *failpath;
37 const char *debugpath;
39 enum info_type {
40 WRITE,
41 RELEASE_LOCKS,
42 FAILURE,
43 SUCCESS,
44 UNEXPECTED
47 struct lock_info {
48 int fd;
49 /* end is inclusive: you can't have a 0-byte lock. */
50 off_t start, end;
51 int type;
54 /* We hash the call location together with its backtrace. */
55 static size_t hash_call(const struct failtest_call *call)
57 return hash(call->file, strlen(call->file),
58 hash(&call->line, 1,
59 hash(call->backtrace, call->backtrace_num,
60 call->type)));
63 static bool call_eq(const struct failtest_call *call1,
64 const struct failtest_call *call2)
66 unsigned int i;
68 if (strcmp(call1->file, call2->file) != 0
69 || call1->line != call2->line
70 || call1->type != call2->type
71 || call1->backtrace_num != call2->backtrace_num)
72 return false;
74 for (i = 0; i < call1->backtrace_num; i++)
75 if (call1->backtrace[i] != call2->backtrace[i])
76 return false;
78 return true;
81 /* Defines struct failtable. */
82 HTABLE_DEFINE_TYPE(struct failtest_call, (struct failtest_call *), hash_call,
83 call_eq, failtable);
85 bool (*failtest_exit_check)(struct tlist_calls *history);
87 /* The entire history of all calls. */
88 static struct tlist_calls history = TLIST_INIT(history);
89 /* If we're a child, the fd two write control info to the parent. */
90 static int control_fd = -1;
91 /* If we're a child, this is the first call we did ourselves. */
92 static struct failtest_call *our_history_start = NULL;
93 /* For printing runtime with --trace. */
94 static struct timeval start;
95 /* Set when failtest_hook returns FAIL_PROBE */
96 static bool probing = false;
97 /* Table to track duplicates. */
98 static struct failtable failtable;
100 /* Array of writes which our child did. We report them on failure. */
101 static struct write_call *child_writes = NULL;
102 static unsigned int child_writes_num = 0;
104 /* fcntl locking info. */
105 static pid_t lock_owner;
106 static struct lock_info *locks = NULL;
107 static unsigned int lock_num = 0;
109 /* Our original pid, which we return to anyone who asks. */
110 static pid_t orig_pid;
112 /* Mapping from failtest_type to char. */
113 static const char info_to_arg[] = "mceoxprwfal";
115 /* Dummy call used for failtest_undo wrappers. */
116 static struct failtest_call unrecorded_call;
118 struct contents_saved {
119 size_t count;
120 off_t off;
121 off_t old_len;
122 char contents[1];
125 /* File contents, saved in this child only. */
126 struct saved_mmapped_file {
127 struct saved_mmapped_file *next;
128 struct failtest_call *opener;
129 struct contents_saved *s;
132 static struct saved_mmapped_file *saved_mmapped_files;
134 #if HAVE_BACKTRACE
135 #include <execinfo.h>
/* Capture the current backtrace, doubling the buffer until it fits.
 * Returns a malloc'd array of *num frames; caller frees. */
static void **get_backtrace(unsigned int *num)
{
	static unsigned int max_back = 100;
	void **ret;

again:
	ret = malloc(max_back * sizeof(void *));
	if (!ret) {
		/* Fixed: malloc was unchecked, so OOM handed backtrace() a
		 * NULL buffer.  Degrade gracefully to "no backtrace". */
		*num = 0;
		return NULL;
	}
	*num = backtrace(ret, max_back);
	if (*num == max_back) {
		/* Possibly truncated: retry with a bigger buffer. */
		free(ret);
		max_back *= 2;
		goto again;
	}
	return ret;
}
152 #else
153 /* This will test slightly less, since will consider all of the same
154 * calls as identical. But, it's slightly faster! */
155 static void **get_backtrace(unsigned int *num)
157 *num = 0;
158 return NULL;
160 #endif /* HAVE_BACKTRACE */
/* Append one intercepted call to the global history list.
 * @elem/@elem_size: the type-specific record, memcpy'd into call->u.
 * A NULL @file is the convention for "don't record / never fail": the
 * shared dummy &unrecorded_call is returned instead.
 * NOTE(review): malloc() result is unchecked, and call->error/->fail are
 * not initialized here — presumably set by should_fail()/callers; confirm. */
162 static struct failtest_call *add_history_(enum failtest_call_type type,
163 bool can_leak,
164 const char *file,
165 unsigned int line,
166 const void *elem,
167 size_t elem_size)
169 struct failtest_call *call;
171 /* NULL file is how we suppress failure. */
172 if (!file)
173 return &unrecorded_call;
175 call = malloc(sizeof *call);
176 call->type = type;
177 call->can_leak = can_leak;
178 call->file = file;
179 call->line = line;
180 call->cleanup = NULL;
181 call->backtrace = get_backtrace(&call->backtrace_num);
182 memcpy(&call->u, elem, elem_size);
183 tlist_add_tail(&history, call, list);
184 return call;
/* Convenience wrapper: infers elem_size from the record's static type. */
187 #define add_history(type, can_leak, file, line, elem) \
188 add_history_((type), (can_leak), (file), (line), (elem), sizeof(*(elem)))
190 /* We do a fake call inside a sizeof(), to check types. */
191 #define set_cleanup(call, clean, type) \
192 (call)->cleanup = (void *)((void)sizeof(clean((type *)NULL, false),1), (clean))
194 /* Dup the fd to a high value (out of the way I hope!), and close the old fd. */
/* Re-home fd near the top of the descriptor space so our bookkeeping fds
 * stay out of the way of the program under test.  Closes the old fd on
 * success; if no free high slot exists, returns the fd unchanged. */
static int move_fd_to_high(int fd)
{
	struct rlimit lim;
	int candidate, limit;

	if (getrlimit(RLIMIT_NOFILE, &lim) == 0)
		limit = lim.rlim_cur;
	else
		limit = FD_SETSIZE;

	for (candidate = limit - 1; candidate > fd; candidate--) {
		/* Only take slots that are currently unused. */
		if (fcntl(candidate, F_GETFL) != -1 || errno != EBADF)
			continue;
		if (dup2(fd, candidate) == -1) {
			warn("Failed to dup fd %i to %i", fd, candidate);
			continue;
		}
		close(fd);
		return candidate;
	}
	/* Nothing? Really? Er... ok? */
	return fd;
}
/* Parent side of the WRITE control message: record one write the child
 * performed so the parent can replay/compare it.  Returns false if the
 * pipe closed mid-message.
 * NOTE(review): realloc() and malloc() results are unchecked, and buf is
 * not freed on the second failure path — presumably acceptable because
 * the parent exits on failure; confirm. */
220 static bool read_write_info(int fd)
222 struct write_call *w;
223 char *buf;
225 /* We don't need all of this, but it's simple. */
226 child_writes = realloc(child_writes,
227 (child_writes_num+1) * sizeof(child_writes[0]));
228 w = &child_writes[child_writes_num];
229 if (!read_all(fd, w, sizeof(*w)))
230 return false;
232 w->buf = buf = malloc(w->count);
233 if (!read_all(fd, buf, w->count))
234 return false;
236 child_writes_num++;
237 return true;
/* Render the call history as a --failpath string: one letter per call
 * (from info_to_arg), uppercased when that call was failed.  Returns a
 * malloc'd string; caller frees.
 * NOTE(review): strdup/realloc results are unchecked. */
240 static char *failpath_string(void)
242 struct failtest_call *i;
243 char *ret = strdup("");
244 unsigned len = 0;
246 /* Inefficient, but who cares? */
247 tlist_for_each(&history, i, list) {
248 ret = realloc(ret, len + 2);
249 ret[len] = info_to_arg[i->type];
250 if (i->fail)
251 ret[len] = toupper(ret[len]);
252 ret[++len] = '\0';
254 return ret;
/* Shared body of fwarn/fwarnx: print the message to warnf, optionally
 * append strerror(e) (e == -1 means "no errno"), and always append the
 * current failpath so the failure can be reproduced. */
257 static void do_warn(int e, const char *fmt, va_list ap)
259 char *p = failpath_string();
261 vfprintf(warnf, fmt, ap);
262 if (e != -1)
263 fprintf(warnf, ": %s", strerror(e));
264 fprintf(warnf, " [%s]\n", p);
265 free(p);
/* warn() lookalike: message plus strerror(errno) plus failpath.
 * errno is captured before anything can clobber it. */
static void fwarn(const char *fmt, ...)
{
	int saved_errno = errno;
	va_list args;

	va_start(args, fmt);
	do_warn(saved_errno, fmt, args);
	va_end(args);
}
/* warnx() lookalike: message plus failpath, no errno string. */
static void fwarnx(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	do_warn(-1, fmt, args);
	va_end(args);
}
288 static void tell_parent(enum info_type type)
290 if (control_fd != -1)
291 write_all(control_fd, &type, sizeof(type));
/* Report a failed child run: print the formatted reason and the child's
 * captured output, tell our own parent we failed, and exit(1).
 * NOTE(review): the diagnostic goes to stderr but the "To reproduce"
 * line goes to stdout via printf — looks intentional (reproduce line is
 * the useful artifact) but confirm before changing. */
294 static void child_fail(const char *out, size_t outlen, const char *fmt, ...)
296 va_list ap;
297 char *path = failpath_string();
299 va_start(ap, fmt);
300 vfprintf(stderr, fmt, ap);
301 va_end(ap);
303 fprintf(stderr, "%.*s", (int)outlen, out);
304 printf("To reproduce: --failpath=%s\n", path);
305 free(path);
306 tell_parent(FAILURE);
307 exit(1);
/* Tracing printf: no-op unless --trace opened tracef.  Each line is
 * indented by traceindent (one level per nested child) and prefixed with
 * a monotonic index, our pid and the current failpath. */
310 static void PRINTF_FMT(1, 2) trace(const char *fmt, ...)
312 va_list ap;
313 unsigned int i;
314 char *p;
315 static int idx;
317 if (!tracef)
318 return;
320 for (i = 0; i < traceindent; i++)
321 fprintf(tracef, " ");
323 p = failpath_string();
324 fprintf(tracef, "%i: %u: %s ", idx++, getpid(), p);
325 va_start(ap, fmt);
326 vfprintf(tracef, fmt, ap);
327 va_end(ap);
328 free(p);
331 static pid_t child;
333 static void hand_down(int signum)
335 kill(child, signum);
/* Drop all fcntl locks tracked in locks[].  If we hold them, F_UNLCK
 * each fd; otherwise ask our parent (the real owner) via the control
 * pipe.  Always clears lock_owner. */
338 static void release_locks(void)
340 /* Locks were never acquired/reacquired? */
341 if (lock_owner == 0)
342 return;
344 /* We own them? Release them all. */
345 if (lock_owner == getpid()) {
346 unsigned int i;
347 struct flock fl;
348 fl.l_type = F_UNLCK;
349 fl.l_whence = SEEK_SET;
350 fl.l_start = 0;
351 fl.l_len = 0;
353 trace("Releasing %u locks\n", lock_num);
354 for (i = 0; i < lock_num; i++)
355 fcntl(locks[i].fd, F_SETLK, &fl);
356 } else {
357 /* Our parent must have them; pass request up. */
358 enum info_type type = RELEASE_LOCKS;
359 assert(control_fd != -1);
360 write_all(control_fd, &type, sizeof(type));
362 lock_owner = 0;
/* off_t is a signed type.  Getting its max is non-trivial. */
static off_t off_max(void)
{
	BUILD_ASSERT(sizeof(off_t) == 4 || sizeof(off_t) == 8);
	/* Fixed: the previous constants (0x7FFFFFF and 0x7FFFFFFFFFFFFFF)
	 * were each one hex digit short, understating the maximum by 16x.
	 * This value is also used as the "lock to EOF" sentinel, and both
	 * producer and consumer call off_max(), so the fix is consistent. */
	if (sizeof(off_t) == 4)
		return (off_t)0x7FFFFFFF;
	else
		return (off_t)0x7FFFFFFFFFFFFFFFULL;
}
/* (Re)acquire every tracked fcntl lock in this process.  If another
 * process (our parent) holds them, first ask it to release via the
 * control pipe, then take each lock with F_SETLKW; failure aborts.
 * locks[i].end == off_max() encodes "to end of file" (l_len = 0). */
375 static void get_locks(void)
377 unsigned int i;
378 struct flock fl;
380 if (lock_owner == getpid())
381 return;
383 if (lock_owner != 0) {
384 enum info_type type = RELEASE_LOCKS;
385 assert(control_fd != -1);
386 trace("Asking parent to release locks\n");
387 write_all(control_fd, &type, sizeof(type));
390 fl.l_whence = SEEK_SET;
392 for (i = 0; i < lock_num; i++) {
393 fl.l_type = locks[i].type;
394 fl.l_start = locks[i].start;
395 if (locks[i].end == off_max())
396 fl.l_len = 0;
397 else
398 fl.l_len = locks[i].end - locks[i].start + 1;
400 if (fcntl(locks[i].fd, F_SETLKW, &fl) != 0)
401 abort();
403 trace("Acquired %u locks\n", lock_num);
404 lock_owner = getpid();
/* Snapshot count bytes at offset off of fd (plus the file's total
 * length) so they can be restored after an injected failure.  Uses
 * pread, so the fd's offset is unchanged; the lseek dance only measures
 * file length.  Returns a malloc'd record; caller frees.
 * NOTE(review): malloc result unchecked. */
408 static struct contents_saved *save_contents(const char *filename,
409 int fd, size_t count, off_t off,
410 const char *why)
412 struct contents_saved *s = malloc(sizeof(*s) + count);
413 ssize_t ret;
415 s->off = off;
417 ret = pread(fd, s->contents, count, off);
418 if (ret < 0) {
419 fwarn("failtest_write: failed to save old contents!");
420 s->count = 0;
421 } else
422 s->count = ret;
424 /* Use lseek to get the size of file, but we have to restore
425 * file offset */
426 off = lseek(fd, 0, SEEK_CUR);
427 s->old_len = lseek(fd, 0, SEEK_END);
428 lseek(fd, off, SEEK_SET);
430 trace("Saving %p %s %zu@%llu after %s (filelength %llu) via fd %i\n",
431 s, filename, s->count, (long long)s->off, why,
432 (long long)s->old_len, fd);
433 return s;
/* Undo a write/truncate in a child: put back the bytes saved by
 * save_contents(), re-truncate to the old length, and optionally restore
 * the fd's offset.  Reopens the file (at a high fd) if the original fd
 * was already closed.  The top-level parent never restores. */
436 static void restore_contents(struct failtest_call *opener,
437 struct contents_saved *s,
438 bool restore_offset,
439 const char *caller)
441 int fd;
443 /* The top parent doesn't need to restore. */
444 if (control_fd == -1)
445 return;
447 /* Has the fd been closed? */
448 if (opener->u.open.closed) {
449 /* Reopen, replace fd, close silently as we clean up. */
450 fd = open(opener->u.open.pathname, O_RDWR);
451 if (fd < 0) {
452 fwarn("failtest: could not reopen %s to clean up %s!",
453 opener->u.open.pathname, caller);
454 return;
456 /* Make it clearly distinguisable from a "normal" fd. */
457 fd = move_fd_to_high(fd);
458 trace("Reopening %s to restore it (was fd %i, now %i)\n",
459 opener->u.open.pathname, opener->u.open.ret, fd);
460 opener->u.open.ret = fd;
461 opener->u.open.closed = false;
463 fd = opener->u.open.ret;
465 trace("Restoring %p %s %zu@%llu after %s (filelength %llu) via fd %i\n",
466 s, opener->u.open.pathname, s->count, (long long)s->off, caller,
467 (long long)s->old_len, fd);
468 if (pwrite(fd, s->contents, s->count, s->off) != s->count) {
469 fwarn("failtest: write failed cleaning up %s for %s!",
470 opener->u.open.pathname, caller);
473 if (ftruncate(fd, s->old_len) != 0) {
474 fwarn("failtest_write: truncate failed cleaning up %s for %s!",
475 opener->u.open.pathname, caller);
478 if (restore_offset) {
479 trace("Restoring offset of fd %i to %llu\n",
480 fd, (long long)s->off);
481 lseek(fd, s->off, SEEK_SET);
485 /* We save/restore most things on demand, but always do mmaped files. */
/* Called in a fresh child before it runs: snapshot the mapped region of
 * every live writable mmap so the child's stores can be undone. */
486 static void save_mmapped_files(void)
488 struct failtest_call *i;
489 trace("Saving mmapped files in child\n");
491 tlist_for_each_rev(&history, i, list) {
492 struct mmap_call *m = &i->u.mmap;
493 struct saved_mmapped_file *s;
495 if (i->type != FAILTEST_MMAP)
496 continue;
498 /* FIXME: We only handle mmapped files where fd is still open. */
499 if (m->opener->u.open.closed)
500 continue;
/* Prepend to the singly-linked saved_mmapped_files list. */
502 s = malloc(sizeof *s);
503 s->s = save_contents(m->opener->u.open.pathname,
504 m->fd, m->length, m->offset,
505 "mmapped file before fork");
506 s->opener = m->opener;
507 s->next = saved_mmapped_files;
508 saved_mmapped_files = s;
/* Drain the saved_mmapped_files list.  restore=true writes the saved
 * contents back (child exiting); restore=false just discards them
 * (fresh child forgetting its parent's snapshots). */
512 static void free_mmapped_files(bool restore)
514 trace("%s mmapped files in child\n",
515 restore ? "Restoring" : "Discarding");
516 while (saved_mmapped_files) {
517 struct saved_mmapped_file *next = saved_mmapped_files->next;
518 if (restore)
519 restore_contents(saved_mmapped_files->opener,
520 saved_mmapped_files->s, false,
521 "saved mmap");
522 free(saved_mmapped_files->s);
523 free(saved_mmapped_files);
524 saved_mmapped_files = next;
528 /* Returns a FAILTEST_OPEN, FAILTEST_PIPE or NULL. */
/* Walk history newest-first to find which recorded open/pipe produced
 * this fd; a newer matching close (or closed open) means it's dead. */
529 static struct failtest_call *opener_of(int fd)
531 struct failtest_call *i;
533 /* Don't get confused and match genuinely failed opens. */
534 if (fd < 0)
535 return NULL;
537 /* Figure out the set of live fds. */
538 tlist_for_each_rev(&history, i, list) {
539 if (i->fail)
540 continue;
541 switch (i->type) {
542 case FAILTEST_CLOSE:
543 if (i->u.close.fd == fd) {
544 return NULL;
546 break;
547 case FAILTEST_OPEN:
548 if (i->u.open.ret == fd) {
549 if (i->u.open.closed)
550 return NULL;
551 return i;
553 break;
554 case FAILTEST_PIPE:
555 if (i->u.pipe.fds[0] == fd || i->u.pipe.fds[1] == fd) {
556 return i;
558 break;
559 default:
560 break;
564 /* FIXME: socket, dup, etc are untracked! */
565 return NULL;
568 static void free_call(struct failtest_call *call)
570 /* We don't do this in cleanup: needed even for failed opens. */
571 if (call->type == FAILTEST_OPEN)
572 free((char *)call->u.open.pathname);
573 free(call->backtrace);
574 tlist_del_from(&history, call, list);
575 free(call);
578 /* Free up memory, so valgrind doesn't report leaks. */
579 static void free_everything(void)
581 struct failtest_call *i;
583 while ((i = tlist_top(&history, list)) != NULL)
584 free_call(i);
586 failtable_clear(&failtable);
589 static NORETURN void failtest_cleanup(bool forced_cleanup, int status)
591 struct failtest_call *i;
592 bool restore = true;
594 /* For children, we don't care if they "failed" the testing. */
595 if (control_fd != -1)
596 status = 0;
597 else
598 /* We don't restore contents for original parent. */
599 restore = false;
601 /* Cleanup everything, in reverse order. */
602 tlist_for_each_rev(&history, i, list) {
603 /* Don't restore things our parent did. */
604 if (i == our_history_start)
605 restore = false;
607 if (i->fail)
608 continue;
610 if (i->cleanup)
611 i->cleanup(&i->u, restore);
613 /* But their program shouldn't leak, even on failure. */
614 if (!forced_cleanup && i->can_leak) {
615 printf("Leak at %s:%u: --failpath=%s\n",
616 i->file, i->line, failpath_string());
617 status = 1;
621 /* Put back mmaped files the way our parent (if any) expects. */
622 free_mmapped_files(true);
624 free_everything();
625 if (status == 0)
626 tell_parent(SUCCESS);
627 else
628 tell_parent(FAILURE);
629 exit(status);
632 static bool following_path(void)
634 if (!failpath)
635 return false;
636 /* + means continue after end, like normal. */
637 if (*failpath == '+') {
638 failpath = NULL;
639 return false;
641 return true;
/* Consume one character of --failpath for this call: lowercase means
 * succeed, uppercase means inject failure; a mismatch with the call type
 * is a fatal usage error.  Returns (and records) whether to fail. */
644 static bool follow_path(struct failtest_call *call)
646 if (*failpath == '\0') {
647 /* Continue, but don't inject errors. */
648 return call->fail = false;
651 if (tolower((unsigned char)*failpath) != info_to_arg[call->type])
652 errx(1, "Failpath expected '%s' got '%c'\n",
653 failpath, info_to_arg[call->type]);
654 call->fail = cisupper(*(failpath++));
655 if (call->fail)
656 call->can_leak = false;
657 return call->fail;
/* Core engine: decide whether the just-recorded call should fail.
 * Unless suppressed (unrecorded, duplicate site, probing, hook veto, or
 * an explicit --failpath), fork: the child returns true (the call fails
 * there) while the parent captures the child's output and control
 * messages, waits for it, then returns false to continue with the call
 * succeeding. */
660 static bool should_fail(struct failtest_call *call)
662 int status;
663 int control[2], output[2];
664 enum info_type type = UNEXPECTED;
665 char *out = NULL;
666 size_t outlen = 0;
667 struct failtest_call *dup;
669 if (call == &unrecorded_call)
670 return false;
672 if (following_path())
673 return follow_path(call);
675 /* Attach debugger if they asked for it. */
676 if (debugpath) {
677 char *path;
679 /* Pretend this last call matches whatever path wanted:
680 * keeps valgrind happy. */
681 call->fail = cisupper(debugpath[strlen(debugpath)-1]);
682 path = failpath_string();
684 if (streq(path, debugpath)) {
685 char str[80];
687 /* Don't timeout. */
688 signal(SIGUSR1, SIG_IGN);
689 sprintf(str, "xterm -e gdb /proc/%d/exe %d &",
690 getpid(), getpid());
691 if (system(str) == 0)
692 sleep(5);
693 } else {
694 /* Ignore last character: could be upper or lower. */
695 path[strlen(path)-1] = '\0';
696 if (!strstarts(debugpath, path)) {
697 fprintf(stderr,
698 "--debugpath not followed: %s\n", path);
699 debugpath = NULL;
702 free(path);
705 /* Are we probing? If so, we never fail twice. */
706 if (probing) {
707 trace("Not failing %c due to FAIL_PROBE return\n",
708 info_to_arg[call->type]);
709 return call->fail = false;
712 /* Don't fail more than once in the same place. */
713 dup = failtable_get(&failtable, call);
714 if (dup) {
715 trace("Not failing %c due to duplicate\n",
716 info_to_arg[call->type]);
717 return call->fail = false;
/* Let the user's hook veto or convert this failure into a probe. */
720 if (failtest_hook) {
721 switch (failtest_hook(&history)) {
722 case FAIL_OK:
723 break;
724 case FAIL_PROBE:
725 probing = true;
726 break;
727 case FAIL_DONT_FAIL:
728 trace("Not failing %c due to failhook return\n",
729 info_to_arg[call->type]);
730 call->fail = false;
731 return false;
732 default:
733 abort();
737 /* Add it to our table of calls. */
738 failtable_add(&failtable, call);
740 /* We're going to fail in the child. */
741 call->fail = true;
742 if (pipe(control) != 0 || pipe(output) != 0)
743 err(1, "opening pipe");
745 /* Move out the way, to high fds. */
746 control[0] = move_fd_to_high(control[0]);
747 control[1] = move_fd_to_high(control[1]);
748 output[0] = move_fd_to_high(output[0]);
749 output[1] = move_fd_to_high(output[1]);
751 /* Prevent double-printing (in child and parent) */
752 fflush(stdout);
753 fflush(warnf);
754 if (tracef)
755 fflush(tracef);
756 child = fork();
757 if (child == -1)
758 err(1, "forking failed");
760 if (child == 0) {
761 traceindent++;
762 if (tracef) {
763 struct timeval diff;
764 const char *p;
765 char *failpath;
766 struct failtest_call *c;
768 c = tlist_tail(&history, list);
769 diff = time_sub(time_now(), start);
770 failpath = failpath_string();
771 p = strrchr(c->file, '/');
772 if (p)
773 p++;
774 else
775 p = c->file;
776 trace("%u->%u (%u.%02u): %s (%s:%u)\n",
777 getppid(), getpid(),
778 (int)diff.tv_sec, (int)diff.tv_usec / 10000,
779 failpath, p, c->line);
780 free(failpath);
782 /* From here on, we have to clean up! */
783 our_history_start = tlist_tail(&history, list);
784 close(control[0]);
785 close(output[0]);
786 /* Don't swallow stderr if we're tracing. */
787 if (!tracef) {
788 dup2(output[1], STDOUT_FILENO);
789 dup2(output[1], STDERR_FILENO);
790 if (output[1] != STDOUT_FILENO
791 && output[1] != STDERR_FILENO)
792 close(output[1]);
794 control_fd = move_fd_to_high(control[1]);
796 /* Forget any of our parent's saved files. */
797 free_mmapped_files(false);
799 /* Now, save any files we need to. */
800 save_mmapped_files();
802 /* Failed calls can't leak. */
803 call->can_leak = false;
/* Child: tell the caller to fail this call. */
805 return true;
/* Parent: forward SIGUSR1 (timeout) to the child, then pump its pipes. */
808 signal(SIGUSR1, hand_down);
810 close(control[1]);
811 close(output[1]);
813 /* We grab output so we can display it; we grab writes so we
814 * can compare. */
815 do {
816 struct pollfd pfd[2];
817 int ret;
819 pfd[0].fd = output[0];
820 pfd[0].events = POLLIN|POLLHUP;
821 pfd[1].fd = control[0];
822 pfd[1].events = POLLIN|POLLHUP;
824 if (type == SUCCESS)
825 ret = poll(pfd, 1, failtest_timeout_ms);
826 else
827 ret = poll(pfd, 2, failtest_timeout_ms);
829 if (ret == 0)
830 hand_down(SIGUSR1);
831 if (ret < 0) {
832 if (errno == EINTR)
833 continue;
834 err(1, "Poll returned %i", ret);
/* NOTE(review): realloc() below is unchecked, and a failed read()
 * would add -1 to outlen — presumably tolerated; confirm. */
837 if (pfd[0].revents & POLLIN) {
838 ssize_t len;
840 out = realloc(out, outlen + 8192);
841 len = read(output[0], out + outlen, 8192);
842 outlen += len;
843 } else if (type != SUCCESS && (pfd[1].revents & POLLIN)) {
844 if (read_all(control[0], &type, sizeof(type))) {
845 if (type == WRITE) {
846 if (!read_write_info(control[0]))
847 break;
848 } else if (type == RELEASE_LOCKS) {
849 release_locks();
850 /* FIXME: Tell them we're done... */
853 } else if (pfd[0].revents & POLLHUP) {
854 break;
856 } while (type != FAILURE);
858 close(output[0]);
859 close(control[0]);
860 waitpid(child, &status, 0);
861 if (!WIFEXITED(status)) {
862 if (WTERMSIG(status) == SIGUSR1)
863 child_fail(out, outlen, "Timed out");
864 else
865 child_fail(out, outlen, "Killed by signal %u: ",
866 WTERMSIG(status));
868 /* Child printed failure already, just pass up exit code. */
869 if (type == FAILURE) {
870 fprintf(stderr, "%.*s", (int)outlen, out);
871 tell_parent(type);
872 exit(WEXITSTATUS(status) ? WEXITSTATUS(status) : 1);
874 if (WEXITSTATUS(status) != 0)
875 child_fail(out, outlen, "Exited with status %i: ",
876 WEXITSTATUS(status));
878 free(out);
879 signal(SIGUSR1, SIG_DFL);
881 /* Only child does probe. */
882 probing = false;
884 /* We continue onwards without failing. */
885 call->fail = false;
886 return false;
/* Cleanup for failtest_calloc: release the allocation we handed out. */
889 static void cleanup_calloc(struct calloc_call *call, bool restore)
891 trace("undoing calloc %p\n", call->ret);
892 free(call->ret);
/* calloc() wrapper: records the call; in a failing child returns NULL
 * with ENOMEM, otherwise allocates for real and registers cleanup.
 * NOTE(review): errno = p->error on the success path relies on p->error
 * being initialized elsewhere (not visible here) — confirm. */
895 void *failtest_calloc(size_t nmemb, size_t size,
896 const char *file, unsigned line)
898 struct failtest_call *p;
899 struct calloc_call call;
900 call.nmemb = nmemb;
901 call.size = size;
902 p = add_history(FAILTEST_CALLOC, true, file, line, &call);
904 if (should_fail(p)) {
905 p->u.calloc.ret = NULL;
906 p->error = ENOMEM;
907 } else {
908 p->u.calloc.ret = calloc(nmemb, size);
909 set_cleanup(p, cleanup_calloc, struct calloc_call);
911 trace("calloc %zu x %zu %s:%u -> %p\n",
912 nmemb, size, file, line, p->u.calloc.ret);
913 errno = p->error;
914 return p->u.calloc.ret;
/* Cleanup for failtest_malloc: release the allocation we handed out. */
917 static void cleanup_malloc(struct malloc_call *call, bool restore)
919 trace("undoing malloc %p\n", call->ret);
920 free(call->ret);
/* malloc() wrapper: same pattern as failtest_calloc — NULL/ENOMEM in a
 * failing child, real allocation plus cleanup otherwise. */
923 void *failtest_malloc(size_t size, const char *file, unsigned line)
925 struct failtest_call *p;
926 struct malloc_call call;
927 call.size = size;
929 p = add_history(FAILTEST_MALLOC, true, file, line, &call);
930 if (should_fail(p)) {
931 p->u.malloc.ret = NULL;
932 p->error = ENOMEM;
933 } else {
934 p->u.malloc.ret = malloc(size);
935 set_cleanup(p, cleanup_malloc, struct malloc_call);
937 trace("malloc %zu %s:%u -> %p\n",
938 size, file, line, p->u.malloc.ret);
939 errno = p->error;
940 return p->u.malloc.ret;
/* Cleanup for failtest_realloc: release the reallocated block. */
943 static void cleanup_realloc(struct realloc_call *call, bool restore)
945 trace("undoing realloc %p\n", call->ret);
946 free(call->ret);
949 /* Walk back and find out if we got this ptr from a previous routine. */
/* If ptr came from a recorded alloc, detach its cleanup and leak flag:
 * ownership has moved on (it was realloc'd or freed by the caller). */
950 static void fixup_ptr_history(void *ptr, const char *why)
952 struct failtest_call *i;
954 /* Start at end of history, work back. */
955 tlist_for_each_rev(&history, i, list) {
956 switch (i->type) {
957 case FAILTEST_REALLOC:
958 if (i->u.realloc.ret == ptr) {
959 trace("found realloc %p %s:%u matching %s\n",
960 ptr, i->file, i->line, why);
961 i->cleanup = NULL;
962 i->can_leak = false;
963 return;
965 break;
966 case FAILTEST_MALLOC:
967 if (i->u.malloc.ret == ptr) {
968 trace("found malloc %p %s:%u matching %s\n",
969 ptr, i->file, i->line, why);
970 i->cleanup = NULL;
971 i->can_leak = false;
972 return;
974 break;
975 case FAILTEST_CALLOC:
976 if (i->u.calloc.ret == ptr) {
977 trace("found calloc %p %s:%u matching %s\n",
978 ptr, i->file, i->line, why);
979 i->cleanup = NULL;
980 i->can_leak = false;
981 return;
983 break;
984 default:
985 break;
988 trace("Did not find %p matching %s\n", ptr, why);
/* realloc() wrapper: may fail with NULL/ENOMEM; on success first
 * detaches tracking from the old ptr (it may be freed/moved), then
 * reallocates and registers cleanup for the new block. */
991 void *failtest_realloc(void *ptr, size_t size, const char *file, unsigned line)
993 struct failtest_call *p;
994 struct realloc_call call;
995 call.size = size;
996 p = add_history(FAILTEST_REALLOC, true, file, line, &call);
998 /* FIXME: Try one child moving allocation, one not. */
999 if (should_fail(p)) {
1000 p->u.realloc.ret = NULL;
1001 p->error = ENOMEM;
1002 } else {
1003 /* Don't catch this one in the history fixup... */
1004 p->u.realloc.ret = NULL;
1005 fixup_ptr_history(ptr, "realloc");
1006 p->u.realloc.ret = realloc(ptr, size);
1007 set_cleanup(p, cleanup_realloc, struct realloc_call);
1009 trace("realloc %p %s:%u -> %p\n",
1010 ptr, file, line, p->u.realloc.ret);
1011 errno = p->error;
1012 return p->u.realloc.ret;
1015 /* FIXME: Record free, so we can terminate fixup_ptr_history correctly.
1016 * If there's an alloc we don't see, it could get confusing if it matches
1017 * a previous allocation we did see. */
/* free() wrapper: drop any cleanup/leak tracking for ptr, then free. */
1018 void failtest_free(void *ptr)
1020 fixup_ptr_history(ptr, "free");
1021 trace("free %p\n", ptr);
1022 free(ptr);
/* Snapshot a whole file (by pathname) before an O_TRUNC open destroys
 * it.  Returns NULL if the file can't be opened (e.g. doesn't exist). */
1026 static struct contents_saved *save_file(const char *pathname)
1028 int fd;
1029 struct contents_saved *s;
1031 fd = open(pathname, O_RDONLY);
1032 if (fd < 0)
1033 return NULL;
1035 s = save_contents(pathname, fd, lseek(fd, 0, SEEK_END), 0,
1036 "open with O_TRUNC");
1037 close(fd);
1038 return s;
/* Optimization: don't create a child for an open which *we know*
 * would fail anyway (the requested access() is already denied). */
static bool open_would_fail(const char *pathname, int flags)
{
	int acc = flags & O_ACCMODE;

	if (acc == O_RDONLY)
		return access(pathname, R_OK) != 0;

	if (flags & O_CREAT) {
		/* FIXME: We could check if it exists, for O_CREAT|O_EXCL */
		return false;
	}

	if (acc == O_WRONLY)
		return access(pathname, W_OK) != 0;
	if (acc == O_RDWR)
		return access(pathname, W_OK) != 0
			|| access(pathname, R_OK) != 0;

	return false;
}
/* Cleanup for failtest_open: put back O_TRUNC'd contents if asked,
 * close the fd if still open, and free the snapshot. */
1058 static void cleanup_open(struct open_call *call, bool restore)
1060 if (restore && call->saved)
1061 restore_contents(container_of(call, struct failtest_call,
1062 u.open),
1063 call->saved, false, "open with O_TRUNC");
1064 if (!call->closed) {
1065 trace("Cleaning up open %s by closing fd %i\n",
1066 call->pathname, call->ret);
1067 close(call->ret);
1068 call->closed = true;
1070 free(call->saved);
1073 int failtest_open(const char *pathname,
1074 const char *file, unsigned line, ...)
1076 struct failtest_call *p;
1077 struct open_call call;
1078 va_list ap;
1080 call.pathname = strdup(pathname);
1081 va_start(ap, line);
1082 call.flags = va_arg(ap, int);
1083 call.always_save = false;
1084 call.closed = false;
1085 if (call.flags & O_CREAT) {
1086 call.mode = va_arg(ap, int);
1087 va_end(ap);
1089 p = add_history(FAILTEST_OPEN, true, file, line, &call);
1090 /* Avoid memory leak! */
1091 if (p == &unrecorded_call)
1092 free((char *)call.pathname);
1094 if (should_fail(p)) {
1095 /* Don't bother inserting failures that would happen anyway. */
1096 if (open_would_fail(pathname, call.flags)) {
1097 trace("Open would have failed anyway: stopping\n");
1098 failtest_cleanup(true, 0);
1100 p->u.open.ret = -1;
1101 /* FIXME: Play with error codes? */
1102 p->error = EACCES;
1103 } else {
1104 /* Save the old version if they're truncating it. */
1105 if (call.flags & O_TRUNC)
1106 p->u.open.saved = save_file(pathname);
1107 else
1108 p->u.open.saved = NULL;
1109 p->u.open.ret = open(pathname, call.flags, call.mode);
1110 if (p->u.open.ret == -1) {
1111 p->u.open.closed = true;
1112 p->can_leak = false;
1113 } else {
1114 set_cleanup(p, cleanup_open, struct open_call);
1117 trace("open %s %s:%u -> %i (opener %p)\n",
1118 pathname, file, line, p->u.open.ret, &p->u.open);
1119 errno = p->error;
1120 return p->u.open.ret;
/* Cleanup for failtest_mmap: restore the saved region and free it.
 * NOTE(review): does not munmap — presumably process exit handles the
 * mapping itself; confirm. */
1123 static void cleanup_mmap(struct mmap_call *mmap, bool restore)
1125 trace("cleaning up mmap @%p (opener %p)\n",
1126 mmap->ret, mmap->opener);
1127 if (restore)
1128 restore_contents(mmap->opener, mmap->saved, false, "mmap");
1129 free(mmap->saved);
/* mmap() wrapper: may inject MAP_FAILED/ENOMEM.  If the fd's opener is
 * untracked we can't unwind, so we just pass the call through.  On a
 * writable file mapping we snapshot the region for later restore.
 * NOTE(review): the final trace prints 'addr' (the hint), not the actual
 * return value — looks like a benign trace-only slip; confirm. */
1132 void *failtest_mmap(void *addr, size_t length, int prot, int flags,
1133 int fd, off_t offset, const char *file, unsigned line)
1135 struct failtest_call *p;
1136 struct mmap_call call;
1138 call.addr = addr;
1139 call.length = length;
1140 call.prot = prot;
1141 call.flags = flags;
1142 call.offset = offset;
1143 call.fd = fd;
1144 call.opener = opener_of(fd);
1146 /* If we don't know what file it was, don't fail. */
1147 if (!call.opener) {
1148 if (fd != -1) {
1149 fwarnx("failtest_mmap: couldn't figure out source for"
1150 " fd %i at %s:%u", fd, file, line);
1152 addr = mmap(addr, length, prot, flags, fd, offset);
1153 trace("mmap of fd %i -> %p (opener = NULL)\n", fd, addr);
1154 return addr;
1157 p = add_history(FAILTEST_MMAP, false, file, line, &call);
1158 if (should_fail(p)) {
1159 p->u.mmap.ret = MAP_FAILED;
1160 p->error = ENOMEM;
1161 } else {
1162 p->u.mmap.ret = mmap(addr, length, prot, flags, fd, offset);
1163 /* Save contents if we're writing to a normal file */
1164 if (p->u.mmap.ret != MAP_FAILED
1165 && (prot & PROT_WRITE)
1166 && call.opener->type == FAILTEST_OPEN) {
1167 const char *fname = call.opener->u.open.pathname;
1168 p->u.mmap.saved = save_contents(fname, fd, length,
1169 offset, "being mmapped");
1170 set_cleanup(p, cleanup_mmap, struct mmap_call);
1173 trace("mmap of fd %i %s:%u -> %p (opener = %p)\n",
1174 fd, file, line, addr, call.opener);
1175 errno = p->error;
1176 return p->u.mmap.ret;
1179 /* Since OpenBSD can't handle adding args, we use this file and line.
1180 * This will make all mmaps look the same, reducing coverage. */
1181 void *failtest_mmap_noloc(void *addr, size_t length, int prot, int flags,
1182 int fd, off_t offset)
/* Delegate with a fixed call site, so every such mmap hashes alike. */
1184 return failtest_mmap(addr, length, prot, flags, fd, offset,
1185 __FILE__, __LINE__);
/* Cleanup for failtest_pipe: close whichever ends are still open. */
1188 static void cleanup_pipe(struct pipe_call *call, bool restore)
1190 trace("cleaning up pipe fd=%i%s,%i%s\n",
1191 call->fds[0], call->closed[0] ? "(already closed)" : "",
1192 call->fds[1], call->closed[1] ? "(already closed)" : "");
1193 if (!call->closed[0])
1194 close(call->fds[0]);
1195 if (!call->closed[1])
1196 close(call->fds[1]);
1199 int failtest_pipe(int pipefd[2], const char *file, unsigned line)
1201 struct failtest_call *p;
1202 struct pipe_call call;
1204 p = add_history(FAILTEST_PIPE, true, file, line, &call);
1205 if (should_fail(p)) {
1206 p->u.open.ret = -1;
1207 /* FIXME: Play with error codes? */
1208 p->error = EMFILE;
1209 } else {
1210 p->u.pipe.ret = pipe(p->u.pipe.fds);
1211 p->u.pipe.closed[0] = p->u.pipe.closed[1] = false;
1212 set_cleanup(p, cleanup_pipe, struct pipe_call);
1215 trace("pipe %s:%u -> %i,%i\n", file, line,
1216 p->u.pipe.ret ? -1 : p->u.pipe.fds[0],
1217 p->u.pipe.ret ? -1 : p->u.pipe.fds[1]);
1219 /* This causes valgrind to notice if they use pipefd[] after failure */
1220 memcpy(pipefd, p->u.pipe.fds, sizeof(p->u.pipe.fds));
1221 errno = p->error;
1222 return p->u.pipe.ret;
/* Cleanup for a plain read(): rewind the file offset it advanced. */
1225 static void cleanup_read(struct read_call *call, bool restore)
1227 if (restore) {
1228 trace("cleaning up read on fd %i: seeking to %llu\n",
1229 call->fd, (long long)call->off);
1231 /* Read (not readv!) moves file offset! */
1232 if (lseek(call->fd, call->off, SEEK_SET) != call->off) {
1233 fwarn("Restoring lseek pointer failed (read)");
/* Common body of the read/pread wrappers: records the call, may inject
 * -1/EIO; plain read registers cleanup to restore the file offset
 * (pread doesn't move it, so no cleanup needed). */
1238 static ssize_t failtest_add_read(int fd, void *buf, size_t count, off_t off,
1239 bool is_pread, const char *file, unsigned line)
1241 struct failtest_call *p;
1242 struct read_call call;
1243 call.fd = fd;
1244 call.buf = buf;
1245 call.count = count;
1246 call.off = off;
1247 p = add_history(FAILTEST_READ, false, file, line, &call);
1249 /* FIXME: Try partial read returns. */
1250 if (should_fail(p)) {
1251 p->u.read.ret = -1;
1252 p->error = EIO;
1253 } else {
1254 if (is_pread)
1255 p->u.read.ret = pread(fd, buf, count, off);
1256 else {
1257 p->u.read.ret = read(fd, buf, count);
1258 if (p->u.read.ret != -1)
1259 set_cleanup(p, cleanup_read, struct read_call);
1262 trace("%sread %s:%u fd %i %zu@%llu -> %zd\n",
1263 is_pread ? "p" : "", file, line, fd, count, (long long)off,
1264 p->u.read.ret);
1265 errno = p->error;
1266 return p->u.read.ret;
1269 static void cleanup_write(struct write_call *write, bool restore)
1271 trace("cleaning up write on %s\n", write->opener->u.open.pathname);
1272 if (restore)
1273 restore_contents(write->opener, write->saved, !write->is_pwrite,
1274 "write");
1275 free(write->saved);
1278 static ssize_t failtest_add_write(int fd, const void *buf,
1279 size_t count, off_t off,
1280 bool is_pwrite,
1281 const char *file, unsigned line)
1283 struct failtest_call *p;
1284 struct write_call call;
1286 call.fd = fd;
1287 call.buf = buf;
1288 call.count = count;
1289 call.off = off;
1290 call.is_pwrite = is_pwrite;
1291 call.opener = opener_of(fd);
1292 p = add_history(FAILTEST_WRITE, false, file, line, &call);
1294 /* If we're a child, we need to make sure we write the same thing
1295 * to non-files as the parent does, so tell it. */
1296 if (control_fd != -1 && off == (off_t)-1) {
1297 enum info_type type = WRITE;
1299 write_all(control_fd, &type, sizeof(type));
1300 write_all(control_fd, &p->u.write, sizeof(p->u.write));
1301 write_all(control_fd, buf, count);
1304 /* FIXME: Try partial write returns. */
1305 if (should_fail(p)) {
1306 p->u.write.ret = -1;
1307 p->error = EIO;
1308 } else {
1309 bool is_file;
1310 assert(call.opener == p->u.write.opener);
1312 if (p->u.write.opener) {
1313 is_file = (p->u.write.opener->type == FAILTEST_OPEN);
1314 } else {
1315 /* We can't unwind it, so at least check same
1316 * in parent and child. */
1317 is_file = false;
1320 /* FIXME: We assume same write order in parent and child */
1321 if (!is_file && child_writes_num != 0) {
1322 if (child_writes[0].fd != fd)
1323 errx(1, "Child wrote to fd %u, not %u?",
1324 child_writes[0].fd, fd);
1325 if (child_writes[0].off != p->u.write.off)
1326 errx(1, "Child wrote to offset %zu, not %zu?",
1327 (size_t)child_writes[0].off,
1328 (size_t)p->u.write.off);
1329 if (child_writes[0].count != count)
1330 errx(1, "Child wrote length %zu, not %zu?",
1331 child_writes[0].count, count);
1332 if (memcmp(child_writes[0].buf, buf, count)) {
1333 child_fail(NULL, 0,
1334 "Child wrote differently to"
1335 " fd %u than we did!\n", fd);
1337 free((char *)child_writes[0].buf);
1338 child_writes_num--;
1339 memmove(&child_writes[0], &child_writes[1],
1340 sizeof(child_writes[0]) * child_writes_num);
1342 /* Child wrote it already. */
1343 trace("write %s:%i on fd %i already done by child\n",
1344 file, line, fd);
1345 p->u.write.ret = count;
1346 errno = p->error;
1347 return p->u.write.ret;
1350 if (is_file) {
1351 p->u.write.saved = save_contents(call.opener->u.open.pathname,
1352 fd, count, off,
1353 "being overwritten");
1354 set_cleanup(p, cleanup_write, struct write_call);
1357 /* Though off is current seek ptr for write case, we need to
1358 * move it. write() does that for us. */
1359 if (p->u.write.is_pwrite)
1360 p->u.write.ret = pwrite(fd, buf, count, off);
1361 else
1362 p->u.write.ret = write(fd, buf, count);
1364 trace("%swrite %s:%i %zu@%llu on fd %i -> %zd\n",
1365 p->u.write.is_pwrite ? "p" : "",
1366 file, line, count, (long long)off, fd, p->u.write.ret);
1367 errno = p->error;
1368 return p->u.write.ret;
1371 ssize_t failtest_pwrite(int fd, const void *buf, size_t count, off_t offset,
1372 const char *file, unsigned line)
1374 return failtest_add_write(fd, buf, count, offset, true, file, line);
1377 ssize_t failtest_write(int fd, const void *buf, size_t count,
1378 const char *file, unsigned line)
1380 return failtest_add_write(fd, buf, count, lseek(fd, 0, SEEK_CUR), false,
1381 file, line);
1384 ssize_t failtest_pread(int fd, void *buf, size_t count, off_t off,
1385 const char *file, unsigned line)
1387 return failtest_add_read(fd, buf, count, off, true, file, line);
1390 ssize_t failtest_read(int fd, void *buf, size_t count,
1391 const char *file, unsigned line)
1393 return failtest_add_read(fd, buf, count, lseek(fd, 0, SEEK_CUR), false,
1394 file, line);
1397 static struct lock_info *WARN_UNUSED_RESULT
1398 add_lock(struct lock_info *locks, int fd, off_t start, off_t end, int type)
1400 unsigned int i;
1401 struct lock_info *l;
1403 for (i = 0; i < lock_num; i++) {
1404 l = &locks[i];
1406 if (l->fd != fd)
1407 continue;
1408 /* Four cases we care about:
1409 * Start overlap:
1410 * l = | |
1411 * new = | |
1412 * Mid overlap:
1413 * l = | |
1414 * new = | |
1415 * End overlap:
1416 * l = | |
1417 * new = | |
1418 * Total overlap:
1419 * l = | |
1420 * new = | |
1422 if (start > l->start && end < l->end) {
1423 /* Mid overlap: trim entry, add new one. */
1424 off_t new_start, new_end;
1425 new_start = end + 1;
1426 new_end = l->end;
1427 trace("splitting lock on fd %i from %llu-%llu"
1428 " to %llu-%llu\n",
1429 fd, (long long)l->start, (long long)l->end,
1430 (long long)l->start, (long long)start - 1);
1431 l->end = start - 1;
1432 locks = add_lock(locks,
1433 fd, new_start, new_end, l->type);
1434 l = &locks[i];
1435 } else if (start <= l->start && end >= l->end) {
1436 /* Total overlap: eliminate entry. */
1437 trace("erasing lock on fd %i %llu-%llu\n",
1438 fd, (long long)l->start, (long long)l->end);
1439 l->end = 0;
1440 l->start = 1;
1441 } else if (end >= l->start && end < l->end) {
1442 trace("trimming lock on fd %i from %llu-%llu"
1443 " to %llu-%llu\n",
1444 fd, (long long)l->start, (long long)l->end,
1445 (long long)end + 1, (long long)l->end);
1446 /* Start overlap: trim entry. */
1447 l->start = end + 1;
1448 } else if (start > l->start && start <= l->end) {
1449 trace("trimming lock on fd %i from %llu-%llu"
1450 " to %llu-%llu\n",
1451 fd, (long long)l->start, (long long)l->end,
1452 (long long)l->start, (long long)start - 1);
1453 /* End overlap: trim entry. */
1454 l->end = start-1;
1456 /* Nothing left? Remove it. */
1457 if (l->end < l->start) {
1458 trace("forgetting lock on fd %i\n", fd);
1459 memmove(l, l + 1, (--lock_num - i) * sizeof(l[0]));
1460 i--;
1464 if (type != F_UNLCK) {
1465 locks = realloc(locks, (lock_num + 1) * sizeof(*locks));
1466 l = &locks[lock_num++];
1467 l->fd = fd;
1468 l->start = start;
1469 l->end = end;
1470 l->type = type;
1471 trace("new lock on fd %i %llu-%llu\n",
1472 fd, (long long)l->start, (long long)l->end);
1474 return locks;
1477 /* We trap this so we can record it: we don't fail it. */
1478 int failtest_close(int fd, const char *file, unsigned line)
1480 struct close_call call;
1481 struct failtest_call *p, *opener;
1483 /* Do this before we add ourselves to history! */
1484 opener = opener_of(fd);
1486 call.fd = fd;
1487 p = add_history(FAILTEST_CLOSE, false, file, line, &call);
1488 p->fail = false;
1490 /* Consume close from failpath (shouldn't tell us to fail). */
1491 if (following_path()) {
1492 if (follow_path(p))
1493 abort();
1496 trace("close on fd %i\n", fd);
1497 if (fd < 0)
1498 return close(fd);
1500 /* Mark opener as not leaking, remove its cleanup function. */
1501 if (opener) {
1502 trace("close on fd %i found opener %p\n", fd, opener);
1503 if (opener->type == FAILTEST_PIPE) {
1504 /* From a pipe? */
1505 if (opener->u.pipe.fds[0] == fd) {
1506 assert(!opener->u.pipe.closed[0]);
1507 opener->u.pipe.closed[0] = true;
1508 } else if (opener->u.pipe.fds[1] == fd) {
1509 assert(!opener->u.pipe.closed[1]);
1510 opener->u.pipe.closed[1] = true;
1511 } else
1512 abort();
1513 opener->can_leak = (!opener->u.pipe.closed[0]
1514 || !opener->u.pipe.closed[1]);
1515 } else if (opener->type == FAILTEST_OPEN) {
1516 opener->u.open.closed = true;
1517 opener->can_leak = false;
1518 } else
1519 abort();
1522 /* Restore offset now, in case parent shared (can't do after close!). */
1523 if (control_fd != -1) {
1524 struct failtest_call *i;
1526 tlist_for_each_rev(&history, i, list) {
1527 if (i == our_history_start)
1528 break;
1529 if (i == opener)
1530 break;
1531 if (i->type == FAILTEST_LSEEK && i->u.lseek.fd == fd) {
1532 trace("close on fd %i undoes lseek\n", fd);
1533 /* This seeks back. */
1534 i->cleanup(&i->u, true);
1535 i->cleanup = NULL;
1536 } else if (i->type == FAILTEST_WRITE
1537 && i->u.write.fd == fd
1538 && !i->u.write.is_pwrite) {
1539 trace("close on fd %i undoes write"
1540 " offset change\n", fd);
1541 /* Write (not pwrite!) moves file offset! */
1542 if (lseek(fd, i->u.write.off, SEEK_SET)
1543 != i->u.write.off) {
1544 fwarn("Restoring lseek pointer failed (write)");
1546 } else if (i->type == FAILTEST_READ
1547 && i->u.read.fd == fd) {
1548 /* preads don't *have* cleanups */
1549 if (i->cleanup) {
1550 trace("close on fd %i undoes read"
1551 " offset change\n", fd);
1552 /* This seeks back. */
1553 i->cleanup(&i->u, true);
1554 i->cleanup = NULL;
1560 /* Close unlocks everything. */
1561 locks = add_lock(locks, fd, 0, off_max(), F_UNLCK);
1562 return close(fd);
/* Zero length means "to end of file" */
static off_t end_of(off_t start, off_t len)
{
	/* Locks are stored with inclusive end offsets, hence the -1. */
	return len == 0 ? off_max() : start + len - 1;
}
1573 /* FIXME: This only handles locks, really. */
1574 int failtest_fcntl(int fd, const char *file, unsigned line, int cmd, ...)
1576 struct failtest_call *p;
1577 struct fcntl_call call;
1578 va_list ap;
1580 call.fd = fd;
1581 call.cmd = cmd;
1583 /* Argument extraction. */
1584 switch (cmd) {
1585 case F_SETFL:
1586 case F_SETFD:
1587 va_start(ap, cmd);
1588 call.arg.l = va_arg(ap, long);
1589 va_end(ap);
1590 trace("fcntl on fd %i F_SETFL/F_SETFD\n", fd);
1591 return fcntl(fd, cmd, call.arg.l);
1592 case F_GETFD:
1593 case F_GETFL:
1594 trace("fcntl on fd %i F_GETFL/F_GETFD\n", fd);
1595 return fcntl(fd, cmd);
1596 case F_GETLK:
1597 trace("fcntl on fd %i F_GETLK\n", fd);
1598 get_locks();
1599 va_start(ap, cmd);
1600 call.arg.fl = *va_arg(ap, struct flock *);
1601 va_end(ap);
1602 return fcntl(fd, cmd, &call.arg.fl);
1603 case F_SETLK:
1604 case F_SETLKW:
1605 trace("fcntl on fd %i F_SETLK%s\n",
1606 fd, cmd == F_SETLKW ? "W" : "");
1607 va_start(ap, cmd);
1608 call.arg.fl = *va_arg(ap, struct flock *);
1609 va_end(ap);
1610 break;
1611 default:
1612 /* This means you need to implement it here. */
1613 err(1, "failtest: unknown fcntl %u", cmd);
1616 p = add_history(FAILTEST_FCNTL, false, file, line, &call);
1618 if (should_fail(p)) {
1619 p->u.fcntl.ret = -1;
1620 if (p->u.fcntl.cmd == F_SETLK)
1621 p->error = EAGAIN;
1622 else
1623 p->error = EDEADLK;
1624 } else {
1625 get_locks();
1626 p->u.fcntl.ret = fcntl(p->u.fcntl.fd, p->u.fcntl.cmd,
1627 &p->u.fcntl.arg.fl);
1628 if (p->u.fcntl.ret == -1)
1629 p->error = errno;
1630 else {
1631 /* We don't handle anything else yet. */
1632 assert(p->u.fcntl.arg.fl.l_whence == SEEK_SET);
1633 locks = add_lock(locks,
1634 p->u.fcntl.fd,
1635 p->u.fcntl.arg.fl.l_start,
1636 end_of(p->u.fcntl.arg.fl.l_start,
1637 p->u.fcntl.arg.fl.l_len),
1638 p->u.fcntl.arg.fl.l_type);
1641 trace("fcntl on fd %i -> %i\n", fd, p->u.fcntl.ret);
1642 errno = p->error;
1643 return p->u.fcntl.ret;
1646 static void cleanup_lseek(struct lseek_call *call, bool restore)
1648 if (restore) {
1649 trace("cleaning up lseek on fd %i -> %llu\n",
1650 call->fd, (long long)call->old_off);
1651 if (lseek(call->fd, call->old_off, SEEK_SET) != call->old_off)
1652 fwarn("Restoring lseek pointer failed");
1656 /* We trap this so we can undo it: we don't fail it. */
1657 off_t failtest_lseek(int fd, off_t offset, int whence, const char *file,
1658 unsigned int line)
1660 struct failtest_call *p;
1661 struct lseek_call call;
1662 call.fd = fd;
1663 call.offset = offset;
1664 call.whence = whence;
1665 call.old_off = lseek(fd, 0, SEEK_CUR);
1667 p = add_history(FAILTEST_LSEEK, false, file, line, &call);
1668 p->fail = false;
1670 /* Consume lseek from failpath. */
1671 if (failpath)
1672 if (should_fail(p))
1673 abort();
1675 p->u.lseek.ret = lseek(fd, offset, whence);
1677 if (p->u.lseek.ret != (off_t)-1)
1678 set_cleanup(p, cleanup_lseek, struct lseek_call);
1680 trace("lseek %s:%u on fd %i from %llu to %llu%s\n",
1681 file, line, fd, (long long)call.old_off, (long long)offset,
1682 whence == SEEK_CUR ? " (from current off)" :
1683 whence == SEEK_END ? " (from end)" :
1684 whence == SEEK_SET ? "" : " (invalid whence)");
1685 return p->u.lseek.ret;
1689 pid_t failtest_getpid(const char *file, unsigned line)
1691 /* You must call failtest_init first! */
1692 assert(orig_pid);
1693 return orig_pid;
1696 void failtest_init(int argc, char *argv[])
1698 unsigned int i;
1700 orig_pid = getpid();
1702 warnf = fdopen(move_fd_to_high(dup(STDERR_FILENO)), "w");
1703 for (i = 1; i < argc; i++) {
1704 if (!strncmp(argv[i], "--failpath=", strlen("--failpath="))) {
1705 failpath = argv[i] + strlen("--failpath=");
1706 } else if (strcmp(argv[i], "--trace") == 0) {
1707 tracef = warnf;
1708 failtest_timeout_ms = -1;
1709 } else if (!strncmp(argv[i], "--debugpath=",
1710 strlen("--debugpath="))) {
1711 debugpath = argv[i] + strlen("--debugpath=");
1714 failtable_init(&failtable);
1715 start = time_now();
1718 bool failtest_has_failed(void)
1720 return control_fd != -1;
1723 void failtest_exit(int status)
1725 trace("failtest_exit with status %i\n", status);
1726 if (failtest_exit_check) {
1727 if (!failtest_exit_check(&history))
1728 child_fail(NULL, 0, "failtest_exit_check failed\n");
1731 failtest_cleanup(false, status);