/* lib/tdb/test/lock-tracking.c */
/* We save the locks so we can reacquire them. */
#include "../common/tdb_private.h"
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>	/* errno/EAGAIN/EACCES used below; normally also pulled in via tdb_private.h */
#include <stdarg.h>
#include <stdlib.h>
#include "tap-interface.h"
#include "lock-tracking.h"
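
/*
 * Overview: fcntl_with_lockcheck() stands in for fcntl() and shadows
 * every byte-range lock in the testlocks list below.  It flags unknown
 * unlocks and overlapping locks via locking_errors, can turn F_SETLKW
 * into a non-blocking F_SETLK (nonblocking_locks, counting near-misses
 * in locking_would_block), and invokes unlock_callback after each
 * successful unlock.  forget_locking() empties the shadow list.
 */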

struct testlock {
	struct testlock *next;
	unsigned int off;
	unsigned int len;
	int type;
};
static struct testlock *testlocks;
int locking_errors = 0;
bool suppress_lockcheck = false;
bool nonblocking_locks;
int locking_would_block = 0;
void (*unlock_callback)(int fd);
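
/*
 * Drop-in fcntl() replacement.  Tests typically install it with
 * "#define fcntl fcntl_with_lockcheck" before pulling in the tdb code
 * (an assumption about the harness; the wrapper itself just forwards
 * to the real fcntl() and mirrors the result in testlocks).
 */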
int fcntl_with_lockcheck(int fd, int cmd, ... /* arg */ )
{
	va_list ap;
	int ret, arg3;
	struct flock *fl;
	bool may_block = false;

	if (cmd != F_SETLK && cmd != F_SETLKW) {
		/* This may be totally bogus, but we don't know in general. */
		va_start(ap, cmd);
		arg3 = va_arg(ap, int);
		va_end(ap);

		return fcntl(fd, cmd, arg3);
	}

	va_start(ap, cmd);
	fl = va_arg(ap, struct flock *);
	va_end(ap);

	if (cmd == F_SETLKW && nonblocking_locks) {
		cmd = F_SETLK;
		may_block = true;
	}
	ret = fcntl(fd, cmd, fl);

	/* Detect when we failed, but might have been OK if we waited. */
	if (may_block && ret == -1 && (errno == EAGAIN || errno == EACCES)) {
		locking_would_block++;
	}
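
	/*
	 * Mirror the request in the shadow list: an unlock removes or
	 * trims an existing entry, a lock is checked for overlaps and
	 * then recorded.
	 */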
	if (fl->l_type == F_UNLCK) {
		struct testlock **l;
		struct testlock *old = NULL;

		for (l = &testlocks; *l; l = &(*l)->next) {
			if ((*l)->off == fl->l_start
			    && (*l)->len == fl->l_len) {
				if (ret == 0) {
					old = *l;
					*l = (*l)->next;
					free(old);
				}
				break;
			}
			if (((*l)->off == fl->l_start)
			    && ((*l)->len == 0)
			    && (ret == 0)) {
				/*
				 * Remove a piece from the start of the
				 * allrecord_lock
				 */
				old = *l;
				(*l)->off += fl->l_len;
				break;
			}
		}
		if (!old && !suppress_lockcheck) {
			diag("Unknown unlock %u@%u - %i",
			     (int)fl->l_len, (int)fl->l_start, ret);
			locking_errors++;
		}
	} else {
		struct testlock *new, *i;
		unsigned int fl_end = fl->l_start + fl->l_len;
		if (fl->l_len == 0)
			fl_end = (unsigned int)-1;

		/* Check for overlaps: we shouldn't do this. */
		for (i = testlocks; i; i = i->next) {
			unsigned int i_end = i->off + i->len;
			if (i->len == 0)
				i_end = (unsigned int)-1;

			if (fl->l_start >= i->off && fl->l_start < i_end)
				break;
			if (fl_end >= i->off && fl_end < i_end)
				break;

			/* tdb_allrecord_lock does this, handle adjacent: */
			if (fl->l_start == i_end && fl->l_type == i->type) {
				if (ret == 0) {
					i->len = fl->l_len
						? i->len + fl->l_len
						: 0;
				}
				goto done;
			}
		}
		if (i) {
			/* Special case: upgrade of allrecord lock. */
			if (i->type == F_RDLCK && fl->l_type == F_WRLCK
			    && i->off == FREELIST_TOP
			    && fl->l_start == FREELIST_TOP
			    && i->len == 0
			    && fl->l_len == 0) {
				if (ret == 0)
					i->type = F_WRLCK;
				goto done;
			}
			if (!suppress_lockcheck) {
				diag("%s testlock %u@%u overlaps %u@%u",
				     fl->l_type == F_WRLCK ? "write" : "read",
				     (int)fl->l_len, (int)fl->l_start,
				     i->len, (int)i->off);
				locking_errors++;
			}
		}
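
		/*
		 * Record the lock (even after an overlap diagnostic) so
		 * the matching unlock is recognised later.
		 */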
		if (ret == 0) {
			new = malloc(sizeof *new);
			new->off = fl->l_start;
			new->len = fl->l_len;
			new->type = fl->l_type;
			new->next = testlocks;
			testlocks = new;
		}
	}
done:
	if (ret == 0 && fl->l_type == F_UNLCK && unlock_callback)
		unlock_callback(fd);
	return ret;
}
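
/*
 * Drop every tracked lock without unlocking (for example after a test
 * has simulated a crash or reopened the file); returns the number of
 * entries discarded.
 */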
unsigned int forget_locking(void)
{
	unsigned int num = 0;
	while (testlocks) {
		struct testlock *next = testlocks->next;
		free(testlocks);
		testlocks = next;
		num++;
	}
	return num;
}
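
/*
 * A minimal usage sketch (an assumption about the harness, not part of
 * this file): a TAP-style test overrides fcntl so the tdb code routes
 * its locking through the wrapper, exercises the database, then checks
 * that no locking errors were recorded.
 *
 *	#define fcntl fcntl_with_lockcheck
 *	#include "../common/tdb_private.h"
 *	#include "tap-interface.h"
 *	#include "lock-tracking.h"
 *	... include the tdb .c sources here so they see the override ...
 *
 *	int main(void)
 *	{
 *		struct tdb_context *tdb = tdb_open("run.tdb", 1024,
 *						   TDB_CLEAR_IF_FIRST,
 *						   O_CREAT|O_RDWR, 0600);
 *		TDB_DATA key = { (unsigned char *)"k", 1 };
 *		ok1(tdb_store(tdb, key, key, TDB_REPLACE) == 0);
 *		ok1(locking_errors == 0);
 *		tdb_close(tdb);
 *		return exit_status();
 *	}
 */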