/*
 * UMTX file[:offset] command
 *
 * $DragonFly: src/test/debug/umtx.c,v 1.1 2005/01/14 04:15:12 dillon Exp $
 */
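
/*
 * Acquire a userland mutex backed by the 4-byte word at file[:offset]
 * (the offset defaults to 0 and the file is created/extended as needed),
 * run the given command while holding it, then release it.  Contested
 * lockers block in umtx_sleep() and are woken with umtx_wakeup().
 *
 * Example invocation (the lock file name is illustrative only; -t gives
 * an approximate timeout in seconds):
 *
 *	umtx -v -t 10 /tmp/mylock:0 sleep 5
 */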

#include <sys/types.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/wait.h>		/* waitpid() */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <fcntl.h>
#include <assert.h>
#include <signal.h>

struct umtx {
	volatile int lock;
};

#define MTX_LOCKED	0x80000000
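
/*
 * Lock word layout, as used by the code below: bit 31 (MTX_LOCKED) is the
 * lock bit and the low 31 bits count contested waiters sleeping in
 * umtx_sleep().
 */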

static int userland_get_mutex(struct umtx *mtx, int timo);
static int userland_get_mutex_contested(struct umtx *mtx, int timo);
static void userland_rel_mutex(struct umtx *mtx);
static void userland_rel_mutex_contested(struct umtx *mtx);
static void docleanup(int signo);

int cmp_and_exg(volatile int *lockp, int old, int new);	/* i386 asm, bottom of file */

static struct umtx *cleanup_mtx_contested;
static struct umtx *cleanup_mtx_held;

int verbose_opt;

int
main(int ac, char **av)
{
	char *path;
	char *str;
	char **newav;
	off_t off = 0;
	pid_t pid;
	int i;
	int j;
	int ch;
	int fd;
	int pgsize;
	int pgmask;
	int timo = 0;
	struct stat st;
	struct umtx *mtx;

	signal(SIGINT, docleanup);

	while ((ch = getopt(ac, av, "t:v")) != -1) {
		switch(ch) {
		case 't':
			timo = strtol(optarg, NULL, 0);
			break;
		case 'v':
			verbose_opt = 1;
			break;
		default:
			fprintf(stderr, "unknown option: -%c\n", optopt);
			exit(1);
		}
	}
	ac -= optind;
	av += optind;

	if (ac < 2) {
		fprintf(stderr, "umtx file[:offset] command\n");
		exit(1);
	}

	path = av[0];
	if ((str = strchr(path, ':')) != NULL) {
		*str++ = 0;
		off = strtoull(str, NULL, 0);
	}
	if ((fd = open(path, O_RDWR|O_CREAT, 0666)) < 0) {
		perror("open");
		exit(1);
	}
	if (fstat(fd, &st) < 0) {
		perror("fstat");
		exit(1);
	}
	if (off + 4 > st.st_size) {
		/* make sure the 4-byte lock word exists in the file */
		int v = 0;
		lseek(fd, off, SEEK_SET);
		write(fd, &v, sizeof(v));
	}
	pgsize = getpagesize();
	pgmask = pgsize - 1;
	str = mmap(NULL, pgsize, PROT_READ|PROT_WRITE, MAP_SHARED,
		   fd, off & ~(off_t)pgmask);
	if (str == MAP_FAILED) {
		perror("mmap");
		exit(1);
	}
	mtx = (struct umtx *)(str + ((int)off & pgmask));
	if (userland_get_mutex(mtx, timo) < 0) {
		fprintf(stderr, "Mutex at %s:%lld timed out\n",
			path, (long long)off);
		exit(1);
	}
	if (verbose_opt)
		fprintf(stderr, "Obtained mutex at %s:%lld\n",
			path, (long long)off);
	if ((pid = fork()) == 0) {
		execvp(av[1], av + 1);
		_exit(0);
	} else if (pid > 0) {
		while (waitpid(pid, NULL, 0) != pid)
			;
	} else {
		fprintf(stderr, "Unable to fork to run %s\n", av[1]);
	}
	userland_rel_mutex(mtx);
	close(fd);
	return(0);
}

static int
userland_get_mutex(struct umtx *mtx, int timo)
{
	int v;

	for (;;) {
		v = mtx->lock;
		if ((v & MTX_LOCKED) == 0) {
			/*
			 * Not locked, attempt to lock.
			 */
			if (cmp_and_exg(&mtx->lock, v, v | MTX_LOCKED) == 0) {
				cleanup_mtx_held = mtx;
				return(0);
			}
		} else {
			/*
			 * Locked, bump the contested count and obtain the
			 * contested mutex.
			 */
			if (cmp_and_exg(&mtx->lock, v, v + 1) == 0) {
				cleanup_mtx_contested = mtx;
				return(userland_get_mutex_contested(mtx, timo));
			}
		}
	}
}

static int
userland_get_mutex_contested(struct umtx *mtx, int timo)
{
	int v;

	for (;;) {
		v = mtx->lock;
		assert(v & ~MTX_LOCKED);	/* our contesting count still there */
		if ((v & MTX_LOCKED) == 0) {
			/*
			 * Not locked, attempt to remove our contested count
			 * and lock at the same time.
			 */
			if (cmp_and_exg(&mtx->lock, v, (v - 1) | MTX_LOCKED) == 0) {
				cleanup_mtx_contested = NULL;
				cleanup_mtx_held = mtx;
				return(0);
			}
		} else {
			/*
			 * Still locked, sleep and try again.
			 */
			if (verbose_opt)
				fprintf(stderr, "waiting on mutex timeout=%d\n", timo);
			if (timo == 0) {
				umtx_sleep(&mtx->lock, v, 0);
			} else {
				/*
				 * Sleep at most 1 second per pass; each
				 * EAGAIN (timeout) burns one -t second.
				 */
				if (umtx_sleep(&mtx->lock, v, 1000000) < 0) {
					if (errno == EAGAIN && --timo == 0) {
						cleanup_mtx_contested = NULL;
						userland_rel_mutex_contested(mtx);
						return(-1);
					}
				}
			}
		}
	}
}

static void
userland_rel_mutex(struct umtx *mtx)
{
	int v;

	for (;;) {
		v = mtx->lock;
		assert(v & MTX_LOCKED);		/* we still have it locked */
		if (v == MTX_LOCKED) {
			/*
			 * We hold an uncontested lock, try to set to an
			 * unlocked state.
			 */
			if (cmp_and_exg(&mtx->lock, MTX_LOCKED, 0) == 0) {
				if (verbose_opt)
					fprintf(stderr, "releasing uncontested mutex\n");
				return;
			}
		} else {
			/*
			 * We hold a contested lock, unlock and wakeup exactly
			 * one sleeper.  It is possible for this to race a new
			 * thread obtaining a lock, in which case any contested
			 * sleeper we wake up will simply go back to sleep.
			 */
			if (cmp_and_exg(&mtx->lock, v, v & ~MTX_LOCKED) == 0) {
				umtx_wakeup(&mtx->lock, 1);
				if (verbose_opt)
					fprintf(stderr, "releasing contested mutex\n");
				return;
			}
		}
	}
}

static void
userland_rel_mutex_contested(struct umtx *mtx)
{
	int v;

	v = mtx->lock;		/* must be initialized before the first CAS */
	for (;;) {
		if (cmp_and_exg(&mtx->lock, v, v - 1) == 0)
			return;
		v = mtx->lock;
		assert(v & ~MTX_LOCKED);
	}
}

static void
docleanup(int signo)
{
	printf("cleanup\n");
	if (cleanup_mtx_contested)
		userland_rel_mutex_contested(cleanup_mtx_contested);
	if (cleanup_mtx_held)
		userland_rel_mutex(cleanup_mtx_held);
	exit(1);
}

/*
 * int cmp_and_exg(volatile int *lockp, int old, int new)
 *
 *	Atomically replace *lockp with new if it equals old; return 0 on
 *	success, -1 on failure.  i386 only.  %ebx is callee-saved, so save it.
 */
__asm(
"	.text\n"
"cmp_and_exg:\n"
"	pushl	%ebx\n"
"	movl	8(%esp),%ebx\n"
"	movl	12(%esp),%eax\n"
"	movl	16(%esp),%edx\n"
"	lock cmpxchgl %edx,(%ebx)\n"
"	jz	1f\n"
"	movl	$-1,%eax\n"
"	popl	%ebx\n"
"	ret\n"
"1:\n"
"	subl	%eax,%eax\n"
"	popl	%ebx\n"
"	ret\n"
);