s3:smbd: change user_struct->vuid to uint64_t
[Samba/gebeck_regimport.git] / lib / ccan / failtest / failtest.c
bloba4a374f4fb449416e762dc1765a726a8d01e5095
1 /* Licensed under LGPL - see LICENSE file for details */
2 #include <ccan/failtest/failtest.h>
3 #include <stdarg.h>
4 #include <string.h>
5 #include <stdio.h>
6 #include <stdarg.h>
7 #include <ctype.h>
8 #include <err.h>
9 #include <unistd.h>
10 #include <poll.h>
11 #include <errno.h>
12 #include <sys/types.h>
13 #include <sys/wait.h>
14 #include <sys/stat.h>
15 #include <sys/time.h>
16 #include <sys/mman.h>
17 #include <sys/resource.h>
18 #include <signal.h>
19 #include <assert.h>
20 #include <ccan/time/time.h>
21 #include <ccan/read_write_all/read_write_all.h>
22 #include <ccan/failtest/failtest_proto.h>
23 #include <ccan/build_assert/build_assert.h>
24 #include <ccan/hash/hash.h>
25 #include <ccan/htable/htable_type.h>
26 #include <ccan/str/str.h>
27 #include <ccan/compiler/compiler.h>
29 enum failtest_result (*failtest_hook)(struct tlist_calls *);
31 static FILE *tracef = NULL, *warnf;
32 static int traceindent = 0;
34 unsigned int failtest_timeout_ms = 20000;
36 const char *failpath;
37 const char *debugpath;
39 enum info_type {
40 WRITE,
41 RELEASE_LOCKS,
42 FAILURE,
43 SUCCESS,
44 UNEXPECTED
47 struct lock_info {
48 int fd;
49 /* end is inclusive: you can't have a 0-byte lock. */
50 off_t start, end;
51 int type;
54 /* We hash the call location together with its backtrace. */
55 static size_t hash_call(const struct failtest_call *call)
57 return hash(call->file, strlen(call->file),
58 hash(&call->line, 1,
59 hash(call->backtrace, call->backtrace_num,
60 call->type)));
63 static bool call_eq(const struct failtest_call *call1,
64 const struct failtest_call *call2)
66 unsigned int i;
68 if (strcmp(call1->file, call2->file) != 0
69 || call1->line != call2->line
70 || call1->type != call2->type
71 || call1->backtrace_num != call2->backtrace_num)
72 return false;
74 for (i = 0; i < call1->backtrace_num; i++)
75 if (call1->backtrace[i] != call2->backtrace[i])
76 return false;
78 return true;
81 /* Defines struct failtable. */
82 HTABLE_DEFINE_TYPE(struct failtest_call, (struct failtest_call *), hash_call,
83 call_eq, failtable);
85 bool (*failtest_exit_check)(struct tlist_calls *history);
87 /* The entire history of all calls. */
88 static struct tlist_calls history = TLIST_INIT(history);
89 /* If we're a child, the fd two write control info to the parent. */
90 static int control_fd = -1;
91 /* If we're a child, this is the first call we did ourselves. */
92 static struct failtest_call *our_history_start = NULL;
93 /* For printing runtime with --trace. */
94 static struct timeval start;
95 /* Set when failtest_hook returns FAIL_PROBE */
96 static bool probing = false;
97 /* Table to track duplicates. */
98 static struct failtable failtable;
100 /* Array of writes which our child did. We report them on failure. */
101 static struct write_call *child_writes = NULL;
102 static unsigned int child_writes_num = 0;
104 /* fcntl locking info. */
105 static pid_t lock_owner;
106 static struct lock_info *locks = NULL;
107 static unsigned int lock_num = 0;
109 /* Our original pid, which we return to anyone who asks. */
110 static pid_t orig_pid;
112 /* Mapping from failtest_type to char. */
113 static const char info_to_arg[] = "mceoxprwfal";
115 /* Dummy call used for failtest_undo wrappers. */
116 static struct failtest_call unrecorded_call;
118 struct contents_saved {
119 size_t count;
120 off_t off;
121 off_t old_len;
122 char contents[1];
125 /* File contents, saved in this child only. */
126 struct saved_mmapped_file {
127 struct saved_mmapped_file *next;
128 struct failtest_call *opener;
129 struct contents_saved *s;
132 static struct saved_mmapped_file *saved_mmapped_files;
#if HAVE_BACKTRACE
#include <execinfo.h>

/* Capture the current backtrace into a freshly-malloc'd array; the number
 * of entries is stored in *num.  Returns NULL (and *num == 0) on OOM. */
static void **get_backtrace(unsigned int *num)
{
	static unsigned int max_back = 100;
	void **ret;

again:
	ret = malloc(max_back * sizeof(void *));
	if (!ret) {
		/* Fix: don't hand a NULL buffer to backtrace(); an entry
		 * with no backtrace is better than crashing. */
		*num = 0;
		return NULL;
	}
	*num = backtrace(ret, max_back);
	if (*num == max_back) {
		/* Possibly truncated: double the buffer and retry. */
		free(ret);
		max_back *= 2;
		goto again;
	}
	return ret;
}
#else
/* This will test slightly less, since will consider all of the same
 * calls as identical.  But, it's slightly faster! */
static void **get_backtrace(unsigned int *num)
{
	*num = 0;
	return NULL;
}
#endif /* HAVE_BACKTRACE */
162 static struct failtest_call *add_history_(enum failtest_call_type type,
163 bool can_leak,
164 const char *file,
165 unsigned int line,
166 const void *elem,
167 size_t elem_size)
169 struct failtest_call *call;
171 /* NULL file is how we suppress failure. */
172 if (!file)
173 return &unrecorded_call;
175 call = malloc(sizeof *call);
176 call->type = type;
177 call->can_leak = can_leak;
178 call->file = file;
179 call->line = line;
180 call->cleanup = NULL;
181 call->backtrace = get_backtrace(&call->backtrace_num);
182 memcpy(&call->u, elem, elem_size);
183 tlist_add_tail(&history, call, list);
184 return call;
187 #define add_history(type, can_leak, file, line, elem) \
188 add_history_((type), (can_leak), (file), (line), (elem), sizeof(*(elem)))
190 /* We do a fake call inside a sizeof(), to check types. */
191 #define set_cleanup(call, clean, type) \
192 (call)->cleanup = (void *)((void)sizeof(clean((type *)NULL, false),1), (clean))
194 /* Dup the fd to a high value (out of the way I hope!), and close the old fd. */
195 static int move_fd_to_high(int fd)
197 int i;
198 struct rlimit lim;
199 int max;
201 if (getrlimit(RLIMIT_NOFILE, &lim) == 0) {
202 max = lim.rlim_cur;
203 printf("Max is %i\n", max);
204 } else
205 max = FD_SETSIZE;
207 for (i = max - 1; i > fd; i--) {
208 if (fcntl(i, F_GETFL) == -1 && errno == EBADF) {
209 if (dup2(fd, i) == -1) {
210 warn("Failed to dup fd %i to %i", fd, i);
211 continue;
213 close(fd);
214 return i;
217 /* Nothing? Really? Er... ok? */
218 return fd;
221 static bool read_write_info(int fd)
223 struct write_call *w;
224 char *buf;
226 /* We don't need all of this, but it's simple. */
227 child_writes = realloc(child_writes,
228 (child_writes_num+1) * sizeof(child_writes[0]));
229 w = &child_writes[child_writes_num];
230 if (!read_all(fd, w, sizeof(*w)))
231 return false;
233 w->buf = buf = malloc(w->count);
234 if (!read_all(fd, buf, w->count))
235 return false;
237 child_writes_num++;
238 return true;
241 static char *failpath_string(void)
243 struct failtest_call *i;
244 char *ret = strdup("");
245 unsigned len = 0;
247 /* Inefficient, but who cares? */
248 tlist_for_each(&history, i, list) {
249 ret = realloc(ret, len + 2);
250 ret[len] = info_to_arg[i->type];
251 if (i->fail)
252 ret[len] = toupper(ret[len]);
253 ret[++len] = '\0';
255 return ret;
258 static void do_warn(int e, const char *fmt, va_list ap)
260 char *p = failpath_string();
262 vfprintf(warnf, fmt, ap);
263 if (e != -1)
264 fprintf(warnf, ": %s", strerror(e));
265 fprintf(warnf, " [%s]\n", p);
266 free(p);
269 static void fwarn(const char *fmt, ...)
271 va_list ap;
272 int e = errno;
274 va_start(ap, fmt);
275 do_warn(e, fmt, ap);
276 va_end(ap);
280 static void fwarnx(const char *fmt, ...)
282 va_list ap;
284 va_start(ap, fmt);
285 do_warn(-1, fmt, ap);
286 va_end(ap);
289 static void tell_parent(enum info_type type)
291 if (control_fd != -1)
292 write_all(control_fd, &type, sizeof(type));
295 static void child_fail(const char *out, size_t outlen, const char *fmt, ...)
297 va_list ap;
298 char *path = failpath_string();
300 va_start(ap, fmt);
301 vfprintf(stderr, fmt, ap);
302 va_end(ap);
304 fprintf(stderr, "%.*s", (int)outlen, out);
305 printf("To reproduce: --failpath=%s\n", path);
306 free(path);
307 tell_parent(FAILURE);
308 exit(1);
311 static void PRINTF_FMT(1, 2) trace(const char *fmt, ...)
313 va_list ap;
314 unsigned int i;
315 char *p;
316 static int idx;
318 if (!tracef)
319 return;
321 for (i = 0; i < traceindent; i++)
322 fprintf(tracef, " ");
324 p = failpath_string();
325 fprintf(tracef, "%i: %u: %s ", idx++, getpid(), p);
326 va_start(ap, fmt);
327 vfprintf(tracef, fmt, ap);
328 va_end(ap);
329 free(p);
/* The child currently being run (if any); signals are forwarded to it. */
static pid_t child;

static void hand_down(int signum)
{
	kill(child, signum);
}
339 static void release_locks(void)
341 /* Locks were never acquired/reacquired? */
342 if (lock_owner == 0)
343 return;
345 /* We own them? Release them all. */
346 if (lock_owner == getpid()) {
347 unsigned int i;
348 struct flock fl;
349 fl.l_type = F_UNLCK;
350 fl.l_whence = SEEK_SET;
351 fl.l_start = 0;
352 fl.l_len = 0;
354 trace("Releasing %u locks\n", lock_num);
355 for (i = 0; i < lock_num; i++)
356 fcntl(locks[i].fd, F_SETLK, &fl);
357 } else {
358 /* Our parent must have them; pass request up. */
359 enum info_type type = RELEASE_LOCKS;
360 assert(control_fd != -1);
361 write_all(control_fd, &type, sizeof(type));
363 lock_owner = 0;
/* off_t is a signed type.  Getting its max is non-trivial. */
static off_t off_max(void)
{
	BUILD_ASSERT(sizeof(off_t) == 4 || sizeof(off_t) == 8);
	/* Fix: the old constants (0x7FFFFFF / 0x7FFFFFFFFFFFFFF) were each
	 * missing one hex digit, understating the maximum 16-fold.  The
	 * value is only used internally as a whole-file-lock sentinel, and
	 * all users call this function, so the fix is self-consistent. */
	if (sizeof(off_t) == 4)
		return (off_t)0x7FFFFFFF;
	else
		return (off_t)0x7FFFFFFFFFFFFFFFULL;
}
376 static void get_locks(void)
378 unsigned int i;
379 struct flock fl;
381 if (lock_owner == getpid())
382 return;
384 if (lock_owner != 0) {
385 enum info_type type = RELEASE_LOCKS;
386 assert(control_fd != -1);
387 trace("Asking parent to release locks\n");
388 write_all(control_fd, &type, sizeof(type));
391 fl.l_whence = SEEK_SET;
393 for (i = 0; i < lock_num; i++) {
394 fl.l_type = locks[i].type;
395 fl.l_start = locks[i].start;
396 if (locks[i].end == off_max())
397 fl.l_len = 0;
398 else
399 fl.l_len = locks[i].end - locks[i].start + 1;
401 if (fcntl(locks[i].fd, F_SETLKW, &fl) != 0)
402 abort();
404 trace("Acquired %u locks\n", lock_num);
405 lock_owner = getpid();
409 static struct contents_saved *save_contents(const char *filename,
410 int fd, size_t count, off_t off,
411 const char *why)
413 struct contents_saved *s = malloc(sizeof(*s) + count);
414 ssize_t ret;
416 s->off = off;
418 ret = pread(fd, s->contents, count, off);
419 if (ret < 0) {
420 fwarn("failtest_write: failed to save old contents!");
421 s->count = 0;
422 } else
423 s->count = ret;
425 /* Use lseek to get the size of file, but we have to restore
426 * file offset */
427 off = lseek(fd, 0, SEEK_CUR);
428 s->old_len = lseek(fd, 0, SEEK_END);
429 lseek(fd, off, SEEK_SET);
431 trace("Saving %p %s %zu@%llu after %s (filelength %llu) via fd %i\n",
432 s, filename, s->count, (long long)s->off, why,
433 (long long)s->old_len, fd);
434 return s;
437 static void restore_contents(struct failtest_call *opener,
438 struct contents_saved *s,
439 bool restore_offset,
440 const char *caller)
442 int fd;
444 /* The top parent doesn't need to restore. */
445 if (control_fd == -1)
446 return;
448 /* Has the fd been closed? */
449 if (opener->u.open.closed) {
450 /* Reopen, replace fd, close silently as we clean up. */
451 fd = open(opener->u.open.pathname, O_RDWR);
452 if (fd < 0) {
453 fwarn("failtest: could not reopen %s to clean up %s!",
454 opener->u.open.pathname, caller);
455 return;
457 /* Make it clearly distinguisable from a "normal" fd. */
458 fd = move_fd_to_high(fd);
459 trace("Reopening %s to restore it (was fd %i, now %i)\n",
460 opener->u.open.pathname, opener->u.open.ret, fd);
461 opener->u.open.ret = fd;
462 opener->u.open.closed = false;
464 fd = opener->u.open.ret;
466 trace("Restoring %p %s %zu@%llu after %s (filelength %llu) via fd %i\n",
467 s, opener->u.open.pathname, s->count, (long long)s->off, caller,
468 (long long)s->old_len, fd);
469 if (pwrite(fd, s->contents, s->count, s->off) != s->count) {
470 fwarn("failtest: write failed cleaning up %s for %s!",
471 opener->u.open.pathname, caller);
474 if (ftruncate(fd, s->old_len) != 0) {
475 fwarn("failtest_write: truncate failed cleaning up %s for %s!",
476 opener->u.open.pathname, caller);
479 if (restore_offset) {
480 trace("Restoring offset of fd %i to %llu\n",
481 fd, (long long)s->off);
482 lseek(fd, s->off, SEEK_SET);
486 /* We save/restore most things on demand, but always do mmaped files. */
487 static void save_mmapped_files(void)
489 struct failtest_call *i;
490 trace("Saving mmapped files in child\n");
492 tlist_for_each_rev(&history, i, list) {
493 struct mmap_call *m = &i->u.mmap;
494 struct saved_mmapped_file *s;
496 if (i->type != FAILTEST_MMAP)
497 continue;
499 /* FIXME: We only handle mmapped files where fd is still open. */
500 if (m->opener->u.open.closed)
501 continue;
503 s = malloc(sizeof *s);
504 s->s = save_contents(m->opener->u.open.pathname,
505 m->fd, m->length, m->offset,
506 "mmapped file before fork");
507 s->opener = m->opener;
508 s->next = saved_mmapped_files;
509 saved_mmapped_files = s;
513 static void free_mmapped_files(bool restore)
515 trace("%s mmapped files in child\n",
516 restore ? "Restoring" : "Discarding");
517 while (saved_mmapped_files) {
518 struct saved_mmapped_file *next = saved_mmapped_files->next;
519 if (restore)
520 restore_contents(saved_mmapped_files->opener,
521 saved_mmapped_files->s, false,
522 "saved mmap");
523 free(saved_mmapped_files->s);
524 free(saved_mmapped_files);
525 saved_mmapped_files = next;
529 /* Returns a FAILTEST_OPEN, FAILTEST_PIPE or NULL. */
530 static struct failtest_call *opener_of(int fd)
532 struct failtest_call *i;
534 /* Don't get confused and match genuinely failed opens. */
535 if (fd < 0)
536 return NULL;
538 /* Figure out the set of live fds. */
539 tlist_for_each_rev(&history, i, list) {
540 if (i->fail)
541 continue;
542 switch (i->type) {
543 case FAILTEST_CLOSE:
544 if (i->u.close.fd == fd) {
545 return NULL;
547 break;
548 case FAILTEST_OPEN:
549 if (i->u.open.ret == fd) {
550 if (i->u.open.closed)
551 return NULL;
552 return i;
554 break;
555 case FAILTEST_PIPE:
556 if (i->u.pipe.fds[0] == fd || i->u.pipe.fds[1] == fd) {
557 return i;
559 break;
560 default:
561 break;
565 /* FIXME: socket, dup, etc are untracked! */
566 return NULL;
569 static void free_call(struct failtest_call *call)
571 /* We don't do this in cleanup: needed even for failed opens. */
572 if (call->type == FAILTEST_OPEN)
573 free((char *)call->u.open.pathname);
574 free(call->backtrace);
575 tlist_del_from(&history, call, list);
576 free(call);
579 /* Free up memory, so valgrind doesn't report leaks. */
580 static void free_everything(void)
582 struct failtest_call *i;
584 while ((i = tlist_top(&history, list)) != NULL)
585 free_call(i);
587 failtable_clear(&failtable);
590 static NORETURN void failtest_cleanup(bool forced_cleanup, int status)
592 struct failtest_call *i;
593 bool restore = true;
595 /* For children, we don't care if they "failed" the testing. */
596 if (control_fd != -1)
597 status = 0;
598 else
599 /* We don't restore contents for original parent. */
600 restore = false;
602 /* Cleanup everything, in reverse order. */
603 tlist_for_each_rev(&history, i, list) {
604 /* Don't restore things our parent did. */
605 if (i == our_history_start)
606 restore = false;
608 if (i->fail)
609 continue;
611 if (i->cleanup)
612 i->cleanup(&i->u, restore);
614 /* But their program shouldn't leak, even on failure. */
615 if (!forced_cleanup && i->can_leak) {
616 printf("Leak at %s:%u: --failpath=%s\n",
617 i->file, i->line, failpath_string());
618 status = 1;
622 /* Put back mmaped files the way our parent (if any) expects. */
623 free_mmapped_files(true);
625 free_everything();
626 if (status == 0)
627 tell_parent(SUCCESS);
628 else
629 tell_parent(FAILURE);
630 exit(status);
633 static bool following_path(void)
635 if (!failpath)
636 return false;
637 /* + means continue after end, like normal. */
638 if (*failpath == '+') {
639 failpath = NULL;
640 return false;
642 return true;
645 static bool follow_path(struct failtest_call *call)
647 if (*failpath == '\0') {
648 /* Continue, but don't inject errors. */
649 return call->fail = false;
652 if (tolower((unsigned char)*failpath) != info_to_arg[call->type])
653 errx(1, "Failpath expected '%s' got '%c'\n",
654 failpath, info_to_arg[call->type]);
655 call->fail = cisupper(*(failpath++));
656 if (call->fail)
657 call->can_leak = false;
658 return call->fail;
661 static bool should_fail(struct failtest_call *call)
663 int status;
664 int control[2], output[2];
665 enum info_type type = UNEXPECTED;
666 char *out = NULL;
667 size_t outlen = 0;
668 struct failtest_call *dup;
670 if (call == &unrecorded_call)
671 return false;
673 if (following_path())
674 return follow_path(call);
676 /* Attach debugger if they asked for it. */
677 if (debugpath) {
678 char *path;
680 /* Pretend this last call matches whatever path wanted:
681 * keeps valgrind happy. */
682 call->fail = cisupper(debugpath[strlen(debugpath)-1]);
683 path = failpath_string();
685 if (streq(path, debugpath)) {
686 char str[80];
688 /* Don't timeout. */
689 signal(SIGUSR1, SIG_IGN);
690 sprintf(str, "xterm -e gdb /proc/%d/exe %d &",
691 getpid(), getpid());
692 if (system(str) == 0)
693 sleep(5);
694 } else {
695 /* Ignore last character: could be upper or lower. */
696 path[strlen(path)-1] = '\0';
697 if (!strstarts(debugpath, path)) {
698 fprintf(stderr,
699 "--debugpath not followed: %s\n", path);
700 debugpath = NULL;
703 free(path);
706 /* Are we probing? If so, we never fail twice. */
707 if (probing) {
708 trace("Not failing %c due to FAIL_PROBE return\n",
709 info_to_arg[call->type]);
710 return call->fail = false;
713 /* Don't fail more than once in the same place. */
714 dup = failtable_get(&failtable, call);
715 if (dup) {
716 trace("Not failing %c due to duplicate\n",
717 info_to_arg[call->type]);
718 return call->fail = false;
721 if (failtest_hook) {
722 switch (failtest_hook(&history)) {
723 case FAIL_OK:
724 break;
725 case FAIL_PROBE:
726 probing = true;
727 break;
728 case FAIL_DONT_FAIL:
729 trace("Not failing %c due to failhook return\n",
730 info_to_arg[call->type]);
731 call->fail = false;
732 return false;
733 default:
734 abort();
738 /* Add it to our table of calls. */
739 failtable_add(&failtable, call);
741 /* We're going to fail in the child. */
742 call->fail = true;
743 if (pipe(control) != 0 || pipe(output) != 0)
744 err(1, "opening pipe");
746 /* Move out the way, to high fds. */
747 control[0] = move_fd_to_high(control[0]);
748 control[1] = move_fd_to_high(control[1]);
749 output[0] = move_fd_to_high(output[0]);
750 output[1] = move_fd_to_high(output[1]);
752 /* Prevent double-printing (in child and parent) */
753 fflush(stdout);
754 fflush(warnf);
755 if (tracef)
756 fflush(tracef);
757 child = fork();
758 if (child == -1)
759 err(1, "forking failed");
761 if (child == 0) {
762 traceindent++;
763 if (tracef) {
764 struct timeval diff;
765 const char *p;
766 char *failpath;
767 struct failtest_call *c;
769 c = tlist_tail(&history, list);
770 diff = time_sub(time_now(), start);
771 failpath = failpath_string();
772 p = strrchr(c->file, '/');
773 if (p)
774 p++;
775 else
776 p = c->file;
777 trace("%u->%u (%u.%02u): %s (%s:%u)\n",
778 getppid(), getpid(),
779 (int)diff.tv_sec, (int)diff.tv_usec / 10000,
780 failpath, p, c->line);
781 free(failpath);
783 /* From here on, we have to clean up! */
784 our_history_start = tlist_tail(&history, list);
785 close(control[0]);
786 close(output[0]);
787 /* Don't swallow stderr if we're tracing. */
788 if (!tracef) {
789 dup2(output[1], STDOUT_FILENO);
790 dup2(output[1], STDERR_FILENO);
791 if (output[1] != STDOUT_FILENO
792 && output[1] != STDERR_FILENO)
793 close(output[1]);
795 control_fd = move_fd_to_high(control[1]);
797 /* Forget any of our parent's saved files. */
798 free_mmapped_files(false);
800 /* Now, save any files we need to. */
801 save_mmapped_files();
803 /* Failed calls can't leak. */
804 call->can_leak = false;
806 return true;
809 signal(SIGUSR1, hand_down);
811 close(control[1]);
812 close(output[1]);
814 /* We grab output so we can display it; we grab writes so we
815 * can compare. */
816 do {
817 struct pollfd pfd[2];
818 int ret;
820 pfd[0].fd = output[0];
821 pfd[0].events = POLLIN|POLLHUP;
822 pfd[1].fd = control[0];
823 pfd[1].events = POLLIN|POLLHUP;
825 if (type == SUCCESS)
826 ret = poll(pfd, 1, failtest_timeout_ms);
827 else
828 ret = poll(pfd, 2, failtest_timeout_ms);
830 if (ret == 0)
831 hand_down(SIGUSR1);
832 if (ret < 0) {
833 if (errno == EINTR)
834 continue;
835 err(1, "Poll returned %i", ret);
838 if (pfd[0].revents & POLLIN) {
839 ssize_t len;
841 out = realloc(out, outlen + 8192);
842 len = read(output[0], out + outlen, 8192);
843 outlen += len;
844 } else if (type != SUCCESS && (pfd[1].revents & POLLIN)) {
845 if (read_all(control[0], &type, sizeof(type))) {
846 if (type == WRITE) {
847 if (!read_write_info(control[0]))
848 break;
849 } else if (type == RELEASE_LOCKS) {
850 release_locks();
851 /* FIXME: Tell them we're done... */
854 } else if (pfd[0].revents & POLLHUP) {
855 break;
857 } while (type != FAILURE);
859 close(output[0]);
860 close(control[0]);
861 waitpid(child, &status, 0);
862 if (!WIFEXITED(status)) {
863 if (WTERMSIG(status) == SIGUSR1)
864 child_fail(out, outlen, "Timed out");
865 else
866 child_fail(out, outlen, "Killed by signal %u: ",
867 WTERMSIG(status));
869 /* Child printed failure already, just pass up exit code. */
870 if (type == FAILURE) {
871 fprintf(stderr, "%.*s", (int)outlen, out);
872 tell_parent(type);
873 exit(WEXITSTATUS(status) ? WEXITSTATUS(status) : 1);
875 if (WEXITSTATUS(status) != 0)
876 child_fail(out, outlen, "Exited with status %i: ",
877 WEXITSTATUS(status));
879 free(out);
880 signal(SIGUSR1, SIG_DFL);
882 /* Only child does probe. */
883 probing = false;
885 /* We continue onwards without failing. */
886 call->fail = false;
887 return false;
890 static void cleanup_calloc(struct calloc_call *call, bool restore)
892 trace("undoing calloc %p\n", call->ret);
893 free(call->ret);
896 void *failtest_calloc(size_t nmemb, size_t size,
897 const char *file, unsigned line)
899 struct failtest_call *p;
900 struct calloc_call call;
901 call.nmemb = nmemb;
902 call.size = size;
903 p = add_history(FAILTEST_CALLOC, true, file, line, &call);
905 if (should_fail(p)) {
906 p->u.calloc.ret = NULL;
907 p->error = ENOMEM;
908 } else {
909 p->u.calloc.ret = calloc(nmemb, size);
910 set_cleanup(p, cleanup_calloc, struct calloc_call);
912 trace("calloc %zu x %zu %s:%u -> %p\n",
913 nmemb, size, file, line, p->u.calloc.ret);
914 errno = p->error;
915 return p->u.calloc.ret;
918 static void cleanup_malloc(struct malloc_call *call, bool restore)
920 trace("undoing malloc %p\n", call->ret);
921 free(call->ret);
924 void *failtest_malloc(size_t size, const char *file, unsigned line)
926 struct failtest_call *p;
927 struct malloc_call call;
928 call.size = size;
930 p = add_history(FAILTEST_MALLOC, true, file, line, &call);
931 if (should_fail(p)) {
932 p->u.malloc.ret = NULL;
933 p->error = ENOMEM;
934 } else {
935 p->u.malloc.ret = malloc(size);
936 set_cleanup(p, cleanup_malloc, struct malloc_call);
938 trace("malloc %zu %s:%u -> %p\n",
939 size, file, line, p->u.malloc.ret);
940 errno = p->error;
941 return p->u.malloc.ret;
944 static void cleanup_realloc(struct realloc_call *call, bool restore)
946 trace("undoing realloc %p\n", call->ret);
947 free(call->ret);
950 /* Walk back and find out if we got this ptr from a previous routine. */
951 static void fixup_ptr_history(void *ptr, const char *why)
953 struct failtest_call *i;
955 /* Start at end of history, work back. */
956 tlist_for_each_rev(&history, i, list) {
957 switch (i->type) {
958 case FAILTEST_REALLOC:
959 if (i->u.realloc.ret == ptr) {
960 trace("found realloc %p %s:%u matching %s\n",
961 ptr, i->file, i->line, why);
962 i->cleanup = NULL;
963 i->can_leak = false;
964 return;
966 break;
967 case FAILTEST_MALLOC:
968 if (i->u.malloc.ret == ptr) {
969 trace("found malloc %p %s:%u matching %s\n",
970 ptr, i->file, i->line, why);
971 i->cleanup = NULL;
972 i->can_leak = false;
973 return;
975 break;
976 case FAILTEST_CALLOC:
977 if (i->u.calloc.ret == ptr) {
978 trace("found calloc %p %s:%u matching %s\n",
979 ptr, i->file, i->line, why);
980 i->cleanup = NULL;
981 i->can_leak = false;
982 return;
984 break;
985 default:
986 break;
989 trace("Did not find %p matching %s\n", ptr, why);
992 void *failtest_realloc(void *ptr, size_t size, const char *file, unsigned line)
994 struct failtest_call *p;
995 struct realloc_call call;
996 call.size = size;
997 p = add_history(FAILTEST_REALLOC, true, file, line, &call);
999 /* FIXME: Try one child moving allocation, one not. */
1000 if (should_fail(p)) {
1001 p->u.realloc.ret = NULL;
1002 p->error = ENOMEM;
1003 } else {
1004 /* Don't catch this one in the history fixup... */
1005 p->u.realloc.ret = NULL;
1006 fixup_ptr_history(ptr, "realloc");
1007 p->u.realloc.ret = realloc(ptr, size);
1008 set_cleanup(p, cleanup_realloc, struct realloc_call);
1010 trace("realloc %p %s:%u -> %p\n",
1011 ptr, file, line, p->u.realloc.ret);
1012 errno = p->error;
1013 return p->u.realloc.ret;
/* FIXME: Record free, so we can terminate fixup_ptr_history correctly.
 * If there's an alloc we don't see, it could get confusing if it matches
 * a previous allocation we did see. */
void failtest_free(void *ptr)
{
	/* Detach ptr from any recorded allocation before releasing it. */
	fixup_ptr_history(ptr, "free");
	trace("free %p\n", ptr);
	free(ptr);
}
/* Snapshot an entire file by pathname; NULL if it can't be opened. */
static struct contents_saved *save_file(const char *pathname)
{
	struct contents_saved *s;
	int fd;

	fd = open(pathname, O_RDONLY);
	if (fd < 0)
		return NULL;

	s = save_contents(pathname, fd, lseek(fd, 0, SEEK_END), 0,
			  "open with O_TRUNC");
	close(fd);
	return s;
}
/* Optimization: don't create a child for an open which *we know*
 * would fail anyway. */
static bool open_would_fail(const char *pathname, int flags)
{
	switch (flags & O_ACCMODE) {
	case O_RDONLY:
		return access(pathname, R_OK) != 0;
	case O_WRONLY:
		if (flags & O_CREAT)
			break;
		return access(pathname, W_OK) != 0;
	case O_RDWR:
		if (flags & O_CREAT)
			break;
		return access(pathname, W_OK) != 0
			|| access(pathname, R_OK) != 0;
	}
	/* FIXME: We could check if it exists, for O_CREAT|O_EXCL */
	return false;
}
1059 static void cleanup_open(struct open_call *call, bool restore)
1061 if (restore && call->saved)
1062 restore_contents(container_of(call, struct failtest_call,
1063 u.open),
1064 call->saved, false, "open with O_TRUNC");
1065 if (!call->closed) {
1066 trace("Cleaning up open %s by closing fd %i\n",
1067 call->pathname, call->ret);
1068 close(call->ret);
1069 call->closed = true;
1071 free(call->saved);
1074 int failtest_open(const char *pathname,
1075 const char *file, unsigned line, ...)
1077 struct failtest_call *p;
1078 struct open_call call;
1079 va_list ap;
1081 call.pathname = strdup(pathname);
1082 va_start(ap, line);
1083 call.flags = va_arg(ap, int);
1084 call.always_save = false;
1085 call.closed = false;
1086 if (call.flags & O_CREAT) {
1087 call.mode = va_arg(ap, int);
1088 va_end(ap);
1090 p = add_history(FAILTEST_OPEN, true, file, line, &call);
1091 /* Avoid memory leak! */
1092 if (p == &unrecorded_call)
1093 free((char *)call.pathname);
1095 if (should_fail(p)) {
1096 /* Don't bother inserting failures that would happen anyway. */
1097 if (open_would_fail(pathname, call.flags)) {
1098 trace("Open would have failed anyway: stopping\n");
1099 failtest_cleanup(true, 0);
1101 p->u.open.ret = -1;
1102 /* FIXME: Play with error codes? */
1103 p->error = EACCES;
1104 } else {
1105 /* Save the old version if they're truncating it. */
1106 if (call.flags & O_TRUNC)
1107 p->u.open.saved = save_file(pathname);
1108 else
1109 p->u.open.saved = NULL;
1110 p->u.open.ret = open(pathname, call.flags, call.mode);
1111 if (p->u.open.ret == -1) {
1112 p->u.open.closed = true;
1113 p->can_leak = false;
1114 } else {
1115 set_cleanup(p, cleanup_open, struct open_call);
1118 trace("open %s %s:%u -> %i (opener %p)\n",
1119 pathname, file, line, p->u.open.ret, &p->u.open);
1120 errno = p->error;
1121 return p->u.open.ret;
1124 static void cleanup_mmap(struct mmap_call *mmap, bool restore)
1126 trace("cleaning up mmap @%p (opener %p)\n",
1127 mmap->ret, mmap->opener);
1128 if (restore)
1129 restore_contents(mmap->opener, mmap->saved, false, "mmap");
1130 free(mmap->saved);
1133 void *failtest_mmap(void *addr, size_t length, int prot, int flags,
1134 int fd, off_t offset, const char *file, unsigned line)
1136 struct failtest_call *p;
1137 struct mmap_call call;
1139 call.addr = addr;
1140 call.length = length;
1141 call.prot = prot;
1142 call.flags = flags;
1143 call.offset = offset;
1144 call.fd = fd;
1145 call.opener = opener_of(fd);
1147 /* If we don't know what file it was, don't fail. */
1148 if (!call.opener) {
1149 if (fd != -1) {
1150 fwarnx("failtest_mmap: couldn't figure out source for"
1151 " fd %i at %s:%u", fd, file, line);
1153 addr = mmap(addr, length, prot, flags, fd, offset);
1154 trace("mmap of fd %i -> %p (opener = NULL)\n", fd, addr);
1155 return addr;
1158 p = add_history(FAILTEST_MMAP, false, file, line, &call);
1159 if (should_fail(p)) {
1160 p->u.mmap.ret = MAP_FAILED;
1161 p->error = ENOMEM;
1162 } else {
1163 p->u.mmap.ret = mmap(addr, length, prot, flags, fd, offset);
1164 /* Save contents if we're writing to a normal file */
1165 if (p->u.mmap.ret != MAP_FAILED
1166 && (prot & PROT_WRITE)
1167 && call.opener->type == FAILTEST_OPEN) {
1168 const char *fname = call.opener->u.open.pathname;
1169 p->u.mmap.saved = save_contents(fname, fd, length,
1170 offset, "being mmapped");
1171 set_cleanup(p, cleanup_mmap, struct mmap_call);
1174 trace("mmap of fd %i %s:%u -> %p (opener = %p)\n",
1175 fd, file, line, addr, call.opener);
1176 errno = p->error;
1177 return p->u.mmap.ret;
/* Since OpenBSD can't handle adding args, we use this file and line.
 * This will make all mmaps look the same, reducing coverage. */
void *failtest_mmap_noloc(void *addr, size_t length, int prot, int flags,
			  int fd, off_t offset)
{
	/* Delegate, attributing every call to this single location. */
	return failtest_mmap(addr, length, prot, flags, fd, offset,
			     __FILE__, __LINE__);
}
1189 static void cleanup_pipe(struct pipe_call *call, bool restore)
1191 trace("cleaning up pipe fd=%i%s,%i%s\n",
1192 call->fds[0], call->closed[0] ? "(already closed)" : "",
1193 call->fds[1], call->closed[1] ? "(already closed)" : "");
1194 if (!call->closed[0])
1195 close(call->fds[0]);
1196 if (!call->closed[1])
1197 close(call->fds[1]);
1200 int failtest_pipe(int pipefd[2], const char *file, unsigned line)
1202 struct failtest_call *p;
1203 struct pipe_call call;
1205 p = add_history(FAILTEST_PIPE, true, file, line, &call);
1206 if (should_fail(p)) {
1207 p->u.open.ret = -1;
1208 /* FIXME: Play with error codes? */
1209 p->error = EMFILE;
1210 } else {
1211 p->u.pipe.ret = pipe(p->u.pipe.fds);
1212 p->u.pipe.closed[0] = p->u.pipe.closed[1] = false;
1213 set_cleanup(p, cleanup_pipe, struct pipe_call);
1216 trace("pipe %s:%u -> %i,%i\n", file, line,
1217 p->u.pipe.ret ? -1 : p->u.pipe.fds[0],
1218 p->u.pipe.ret ? -1 : p->u.pipe.fds[1]);
1220 /* This causes valgrind to notice if they use pipefd[] after failure */
1221 memcpy(pipefd, p->u.pipe.fds, sizeof(p->u.pipe.fds));
1222 errno = p->error;
1223 return p->u.pipe.ret;
1226 static void cleanup_read(struct read_call *call, bool restore)
1228 if (restore) {
1229 trace("cleaning up read on fd %i: seeking to %llu\n",
1230 call->fd, (long long)call->off);
1232 /* Read (not readv!) moves file offset! */
1233 if (lseek(call->fd, call->off, SEEK_SET) != call->off) {
1234 fwarn("Restoring lseek pointer failed (read)");
1239 static ssize_t failtest_add_read(int fd, void *buf, size_t count, off_t off,
1240 bool is_pread, const char *file, unsigned line)
1242 struct failtest_call *p;
1243 struct read_call call;
1244 call.fd = fd;
1245 call.buf = buf;
1246 call.count = count;
1247 call.off = off;
1248 p = add_history(FAILTEST_READ, false, file, line, &call);
1250 /* FIXME: Try partial read returns. */
1251 if (should_fail(p)) {
1252 p->u.read.ret = -1;
1253 p->error = EIO;
1254 } else {
1255 if (is_pread)
1256 p->u.read.ret = pread(fd, buf, count, off);
1257 else {
1258 p->u.read.ret = read(fd, buf, count);
1259 if (p->u.read.ret != -1)
1260 set_cleanup(p, cleanup_read, struct read_call);
1263 trace("%sread %s:%u fd %i %zu@%llu -> %i\n",
1264 is_pread ? "p" : "", file, line, fd, count, (long long)off,
1265 p->u.read.ret);
1266 errno = p->error;
1267 return p->u.read.ret;
1270 static void cleanup_write(struct write_call *write, bool restore)
1272 trace("cleaning up write on %s\n", write->opener->u.open.pathname);
1273 if (restore)
1274 restore_contents(write->opener, write->saved, !write->is_pwrite,
1275 "write");
1276 free(write->saved);
1279 static ssize_t failtest_add_write(int fd, const void *buf,
1280 size_t count, off_t off,
1281 bool is_pwrite,
1282 const char *file, unsigned line)
1284 struct failtest_call *p;
1285 struct write_call call;
1287 call.fd = fd;
1288 call.buf = buf;
1289 call.count = count;
1290 call.off = off;
1291 call.is_pwrite = is_pwrite;
1292 call.opener = opener_of(fd);
1293 p = add_history(FAILTEST_WRITE, false, file, line, &call);
1295 /* If we're a child, we need to make sure we write the same thing
1296 * to non-files as the parent does, so tell it. */
1297 if (control_fd != -1 && off == (off_t)-1) {
1298 enum info_type type = WRITE;
1300 write_all(control_fd, &type, sizeof(type));
1301 write_all(control_fd, &p->u.write, sizeof(p->u.write));
1302 write_all(control_fd, buf, count);
1305 /* FIXME: Try partial write returns. */
1306 if (should_fail(p)) {
1307 p->u.write.ret = -1;
1308 p->error = EIO;
1309 } else {
1310 bool is_file;
1311 assert(call.opener == p->u.write.opener);
1313 if (p->u.write.opener) {
1314 is_file = (p->u.write.opener->type == FAILTEST_OPEN);
1315 } else {
1316 /* We can't unwind it, so at least check same
1317 * in parent and child. */
1318 is_file = false;
1321 /* FIXME: We assume same write order in parent and child */
1322 if (!is_file && child_writes_num != 0) {
1323 if (child_writes[0].fd != fd)
1324 errx(1, "Child wrote to fd %u, not %u?",
1325 child_writes[0].fd, fd);
1326 if (child_writes[0].off != p->u.write.off)
1327 errx(1, "Child wrote to offset %zu, not %zu?",
1328 (size_t)child_writes[0].off,
1329 (size_t)p->u.write.off);
1330 if (child_writes[0].count != count)
1331 errx(1, "Child wrote length %zu, not %zu?",
1332 child_writes[0].count, count);
1333 if (memcmp(child_writes[0].buf, buf, count)) {
1334 child_fail(NULL, 0,
1335 "Child wrote differently to"
1336 " fd %u than we did!\n", fd);
1338 free((char *)child_writes[0].buf);
1339 child_writes_num--;
1340 memmove(&child_writes[0], &child_writes[1],
1341 sizeof(child_writes[0]) * child_writes_num);
1343 /* Child wrote it already. */
1344 trace("write %s:%i on fd %i already done by child\n",
1345 file, line, fd);
1346 p->u.write.ret = count;
1347 errno = p->error;
1348 return p->u.write.ret;
1351 if (is_file) {
1352 p->u.write.saved = save_contents(call.opener->u.open.pathname,
1353 fd, count, off,
1354 "being overwritten");
1355 set_cleanup(p, cleanup_write, struct write_call);
1358 /* Though off is current seek ptr for write case, we need to
1359 * move it. write() does that for us. */
1360 if (p->u.write.is_pwrite)
1361 p->u.write.ret = pwrite(fd, buf, count, off);
1362 else
1363 p->u.write.ret = write(fd, buf, count);
1365 trace("%swrite %s:%i %zu@%llu on fd %i -> %i\n",
1366 p->u.write.is_pwrite ? "p" : "",
1367 file, line, count, (long long)off, fd, p->u.write.ret);
1368 errno = p->error;
1369 return p->u.write.ret;
1372 ssize_t failtest_pwrite(int fd, const void *buf, size_t count, off_t offset,
1373 const char *file, unsigned line)
1375 return failtest_add_write(fd, buf, count, offset, true, file, line);
1378 ssize_t failtest_write(int fd, const void *buf, size_t count,
1379 const char *file, unsigned line)
1381 return failtest_add_write(fd, buf, count, lseek(fd, 0, SEEK_CUR), false,
1382 file, line);
1385 ssize_t failtest_pread(int fd, void *buf, size_t count, off_t off,
1386 const char *file, unsigned line)
1388 return failtest_add_read(fd, buf, count, off, true, file, line);
1391 ssize_t failtest_read(int fd, void *buf, size_t count,
1392 const char *file, unsigned line)
1394 return failtest_add_read(fd, buf, count, lseek(fd, 0, SEEK_CUR), false,
1395 file, line);
1398 static struct lock_info *WARN_UNUSED_RESULT
1399 add_lock(struct lock_info *locks, int fd, off_t start, off_t end, int type)
1401 unsigned int i;
1402 struct lock_info *l;
1404 for (i = 0; i < lock_num; i++) {
1405 l = &locks[i];
1407 if (l->fd != fd)
1408 continue;
1409 /* Four cases we care about:
1410 * Start overlap:
1411 * l = | |
1412 * new = | |
1413 * Mid overlap:
1414 * l = | |
1415 * new = | |
1416 * End overlap:
1417 * l = | |
1418 * new = | |
1419 * Total overlap:
1420 * l = | |
1421 * new = | |
1423 if (start > l->start && end < l->end) {
1424 /* Mid overlap: trim entry, add new one. */
1425 off_t new_start, new_end;
1426 new_start = end + 1;
1427 new_end = l->end;
1428 trace("splitting lock on fd %i from %llu-%llu"
1429 " to %llu-%llu\n",
1430 fd, (long long)l->start, (long long)l->end,
1431 (long long)l->start, (long long)start - 1);
1432 l->end = start - 1;
1433 locks = add_lock(locks,
1434 fd, new_start, new_end, l->type);
1435 l = &locks[i];
1436 } else if (start <= l->start && end >= l->end) {
1437 /* Total overlap: eliminate entry. */
1438 trace("erasing lock on fd %i %llu-%llu\n",
1439 fd, (long long)l->start, (long long)l->end);
1440 l->end = 0;
1441 l->start = 1;
1442 } else if (end >= l->start && end < l->end) {
1443 trace("trimming lock on fd %i from %llu-%llu"
1444 " to %llu-%llu\n",
1445 fd, (long long)l->start, (long long)l->end,
1446 (long long)end + 1, (long long)l->end);
1447 /* Start overlap: trim entry. */
1448 l->start = end + 1;
1449 } else if (start > l->start && start <= l->end) {
1450 trace("trimming lock on fd %i from %llu-%llu"
1451 " to %llu-%llu\n",
1452 fd, (long long)l->start, (long long)l->end,
1453 (long long)l->start, (long long)start - 1);
1454 /* End overlap: trim entry. */
1455 l->end = start-1;
1457 /* Nothing left? Remove it. */
1458 if (l->end < l->start) {
1459 trace("forgetting lock on fd %i\n", fd);
1460 memmove(l, l + 1, (--lock_num - i) * sizeof(l[0]));
1461 i--;
1465 if (type != F_UNLCK) {
1466 locks = realloc(locks, (lock_num + 1) * sizeof(*locks));
1467 l = &locks[lock_num++];
1468 l->fd = fd;
1469 l->start = start;
1470 l->end = end;
1471 l->type = type;
1472 trace("new lock on fd %i %llu-%llu\n",
1473 fd, (long long)l->start, (long long)l->end);
1475 return locks;
1478 /* We trap this so we can record it: we don't fail it. */
1479 int failtest_close(int fd, const char *file, unsigned line)
1481 struct close_call call;
1482 struct failtest_call *p, *opener;
1484 /* Do this before we add ourselves to history! */
1485 opener = opener_of(fd);
1487 call.fd = fd;
1488 p = add_history(FAILTEST_CLOSE, false, file, line, &call);
1489 p->fail = false;
1491 /* Consume close from failpath (shouldn't tell us to fail). */
1492 if (following_path()) {
1493 if (follow_path(p))
1494 abort();
1497 trace("close on fd %i\n", fd);
1498 if (fd < 0)
1499 return close(fd);
1501 /* Mark opener as not leaking, remove its cleanup function. */
1502 if (opener) {
1503 trace("close on fd %i found opener %p\n", fd, opener);
1504 if (opener->type == FAILTEST_PIPE) {
1505 /* From a pipe? */
1506 if (opener->u.pipe.fds[0] == fd) {
1507 assert(!opener->u.pipe.closed[0]);
1508 opener->u.pipe.closed[0] = true;
1509 } else if (opener->u.pipe.fds[1] == fd) {
1510 assert(!opener->u.pipe.closed[1]);
1511 opener->u.pipe.closed[1] = true;
1512 } else
1513 abort();
1514 opener->can_leak = (!opener->u.pipe.closed[0]
1515 || !opener->u.pipe.closed[1]);
1516 } else if (opener->type == FAILTEST_OPEN) {
1517 opener->u.open.closed = true;
1518 opener->can_leak = false;
1519 } else
1520 abort();
1523 /* Restore offset now, in case parent shared (can't do after close!). */
1524 if (control_fd != -1) {
1525 struct failtest_call *i;
1527 tlist_for_each_rev(&history, i, list) {
1528 if (i == our_history_start)
1529 break;
1530 if (i == opener)
1531 break;
1532 if (i->type == FAILTEST_LSEEK && i->u.lseek.fd == fd) {
1533 trace("close on fd %i undoes lseek\n", fd);
1534 /* This seeks back. */
1535 i->cleanup(&i->u, true);
1536 i->cleanup = NULL;
1537 } else if (i->type == FAILTEST_WRITE
1538 && i->u.write.fd == fd
1539 && !i->u.write.is_pwrite) {
1540 trace("close on fd %i undoes write"
1541 " offset change\n", fd);
1542 /* Write (not pwrite!) moves file offset! */
1543 if (lseek(fd, i->u.write.off, SEEK_SET)
1544 != i->u.write.off) {
1545 fwarn("Restoring lseek pointer failed (write)");
1547 } else if (i->type == FAILTEST_READ
1548 && i->u.read.fd == fd) {
1549 /* preads don't *have* cleanups */
1550 if (i->cleanup) {
1551 trace("close on fd %i undoes read"
1552 " offset change\n", fd);
1553 /* This seeks back. */
1554 i->cleanup(&i->u, true);
1555 i->cleanup = NULL;
1561 /* Close unlocks everything. */
1562 locks = add_lock(locks, fd, 0, off_max(), F_UNLCK);
1563 return close(fd);
/* Zero length means "to end of file" */
static off_t end_of(off_t start, off_t len)
{
	/* end is inclusive (see struct lock_info), hence the -1. */
	return len ? start + len - 1 : off_max();
}
1574 /* FIXME: This only handles locks, really. */
1575 int failtest_fcntl(int fd, const char *file, unsigned line, int cmd, ...)
1577 struct failtest_call *p;
1578 struct fcntl_call call;
1579 va_list ap;
1581 call.fd = fd;
1582 call.cmd = cmd;
1584 /* Argument extraction. */
1585 switch (cmd) {
1586 case F_SETFL:
1587 case F_SETFD:
1588 va_start(ap, cmd);
1589 call.arg.l = va_arg(ap, long);
1590 va_end(ap);
1591 trace("fcntl on fd %i F_SETFL/F_SETFD\n", fd);
1592 return fcntl(fd, cmd, call.arg.l);
1593 case F_GETFD:
1594 case F_GETFL:
1595 trace("fcntl on fd %i F_GETFL/F_GETFD\n", fd);
1596 return fcntl(fd, cmd);
1597 case F_GETLK:
1598 trace("fcntl on fd %i F_GETLK\n", fd);
1599 get_locks();
1600 va_start(ap, cmd);
1601 call.arg.fl = *va_arg(ap, struct flock *);
1602 va_end(ap);
1603 return fcntl(fd, cmd, &call.arg.fl);
1604 case F_SETLK:
1605 case F_SETLKW:
1606 trace("fcntl on fd %i F_SETLK%s\n",
1607 fd, cmd == F_SETLKW ? "W" : "");
1608 va_start(ap, cmd);
1609 call.arg.fl = *va_arg(ap, struct flock *);
1610 va_end(ap);
1611 break;
1612 default:
1613 /* This means you need to implement it here. */
1614 err(1, "failtest: unknown fcntl %u", cmd);
1617 p = add_history(FAILTEST_FCNTL, false, file, line, &call);
1619 if (should_fail(p)) {
1620 p->u.fcntl.ret = -1;
1621 if (p->u.fcntl.cmd == F_SETLK)
1622 p->error = EAGAIN;
1623 else
1624 p->error = EDEADLK;
1625 } else {
1626 get_locks();
1627 p->u.fcntl.ret = fcntl(p->u.fcntl.fd, p->u.fcntl.cmd,
1628 &p->u.fcntl.arg.fl);
1629 if (p->u.fcntl.ret == -1)
1630 p->error = errno;
1631 else {
1632 /* We don't handle anything else yet. */
1633 assert(p->u.fcntl.arg.fl.l_whence == SEEK_SET);
1634 locks = add_lock(locks,
1635 p->u.fcntl.fd,
1636 p->u.fcntl.arg.fl.l_start,
1637 end_of(p->u.fcntl.arg.fl.l_start,
1638 p->u.fcntl.arg.fl.l_len),
1639 p->u.fcntl.arg.fl.l_type);
1642 trace("fcntl on fd %i -> %i\n", fd, p->u.fcntl.ret);
1643 errno = p->error;
1644 return p->u.fcntl.ret;
1647 static void cleanup_lseek(struct lseek_call *call, bool restore)
1649 if (restore) {
1650 trace("cleaning up lseek on fd %i -> %llu\n",
1651 call->fd, (long long)call->old_off);
1652 if (lseek(call->fd, call->old_off, SEEK_SET) != call->old_off)
1653 fwarn("Restoring lseek pointer failed");
1657 /* We trap this so we can undo it: we don't fail it. */
1658 off_t failtest_lseek(int fd, off_t offset, int whence, const char *file,
1659 unsigned int line)
1661 struct failtest_call *p;
1662 struct lseek_call call;
1663 call.fd = fd;
1664 call.offset = offset;
1665 call.whence = whence;
1666 call.old_off = lseek(fd, 0, SEEK_CUR);
1668 p = add_history(FAILTEST_LSEEK, false, file, line, &call);
1669 p->fail = false;
1671 /* Consume lseek from failpath. */
1672 if (failpath)
1673 if (should_fail(p))
1674 abort();
1676 p->u.lseek.ret = lseek(fd, offset, whence);
1678 if (p->u.lseek.ret != (off_t)-1)
1679 set_cleanup(p, cleanup_lseek, struct lseek_call);
1681 trace("lseek %s:%u on fd %i from %llu to %llu%s\n",
1682 file, line, fd, (long long)call.old_off, (long long)offset,
1683 whence == SEEK_CUR ? " (from current off)" :
1684 whence == SEEK_END ? " (from end)" :
1685 whence == SEEK_SET ? "" : " (invalid whence)");
1686 return p->u.lseek.ret;
1690 pid_t failtest_getpid(const char *file, unsigned line)
1692 /* You must call failtest_init first! */
1693 assert(orig_pid);
1694 return orig_pid;
1697 void failtest_init(int argc, char *argv[])
1699 unsigned int i;
1701 orig_pid = getpid();
1703 warnf = fdopen(move_fd_to_high(dup(STDERR_FILENO)), "w");
1704 for (i = 1; i < argc; i++) {
1705 if (!strncmp(argv[i], "--failpath=", strlen("--failpath="))) {
1706 failpath = argv[i] + strlen("--failpath=");
1707 } else if (strcmp(argv[i], "--trace") == 0) {
1708 tracef = warnf;
1709 failtest_timeout_ms = -1;
1710 } else if (!strncmp(argv[i], "--debugpath=",
1711 strlen("--debugpath="))) {
1712 debugpath = argv[i] + strlen("--debugpath=");
1715 failtable_init(&failtable);
1716 start = time_now();
1719 bool failtest_has_failed(void)
1721 return control_fd != -1;
1724 void failtest_exit(int status)
1726 trace("failtest_exit with status %i\n", status);
1727 if (failtest_exit_check) {
1728 if (!failtest_exit_check(&history))
1729 child_fail(NULL, 0, "failtest_exit_check failed\n");
1732 failtest_cleanup(false, status);