2 * UMTX file[:offset] command
4 * $DragonFly: src/test/debug/umtx.c,v 1.1 2005/01/14 04:15:12 dillon Exp $
/*
 * High bit of the umtx lock word marks the mutex as held; the low
 * 31 bits count contested waiters (inferred from the CAS patterns
 * below: v | MTX_LOCKED to lock, v + 1 / v - 1 for contest count).
 */
23 #define MTX_LOCKED 0x80000000
/* Acquire the mutex, giving up after 'timo' expirations (see the
 * contested path; units depend on umtx_sleep -- TODO confirm). */
25 static int userland_get_mutex(struct umtx
*mtx
, int timo
);
/* Slow path: caller already bumped the contested count; sleep on the
 * lock word until the mutex can be taken or the timeout runs out. */
26 static int userland_get_mutex_contested(struct umtx
*mtx
, int timo
);
/* Release a held mutex, waking one contested sleeper if any. */
27 static void userland_rel_mutex(struct umtx
*mtx
);
/* Back out our contested-count bump without acquiring the mutex. */
28 static void userland_rel_mutex_contested(struct umtx
*mtx
);
/* SIGINT handler: undo whatever lock state this process left in the
 * shared lock word (see fragment near end of file). */
29 static void docleanup(int signo
);
/* Cleanup state consulted by docleanup(): the mutex we are currently
 * contesting, and the mutex we currently hold, respectively. */
31 static struct umtx
*cleanup_mtx_contested
;
32 static struct umtx
*cleanup_mtx_held
;
/*
 * main() fragment (several original source lines are missing from
 * this view): map the 4-byte lock word living at file[:offset],
 * acquire it as a umtx, run the given command while holding it,
 * then release it.
 */
37 main(int ac
, char **av
)
/* Arrange for ^C to undo any lock/contest state before exiting. */
54 signal(SIGINT
, docleanup
);
/* Option parsing: -t sets the acquire timeout, -v presumably a
 * verbose flag -- its use is not visible in this fragment. */
56 while ((ch
= getopt(ac
, av
, "t:v")) != -1) {
59 timo
= strtol(optarg
, NULL
, 0);
65 fprintf(stderr
, "unknown option: -%c\n", optopt
);
/* Usage message (the argument-count check is not visible here). */
73 fprintf(stderr
, "umtx file[:offset] command\n");
/* Split an optional ":offset" suffix off the file path. */
77 if ((str
= strchr(path
, ':')) != NULL
) {
79 off
= strtoull(str
, NULL
, 0);
/* Open (creating if needed) the file backing the shared lock word. */
81 if ((fd
= open(path
, O_RDWR
|O_CREAT
, 0666)) < 0) {
85 if (fstat(fd
, &st
) < 0) {
/* Extend the file so the 4-byte lock word at 'off' exists. */
89 if (off
+ 4 > st
.st_size
) {
92 write(fd
, &v
, sizeof(v
));
/* Map the page containing 'off' and point mtx at the lock word
 * within it (page-aligned mmap offset, in-page remainder added). */
94 pgsize
= getpagesize();
96 str
= mmap(NULL
, pgsize
, PROT_READ
|PROT_WRITE
, MAP_SHARED
,
97 fd
, off
& ~(off_t
)pgmask
);
98 mtx
= (struct umtx
*)(str
+ ((int)off
& pgmask
));
/* Acquire the shared mutex, bounded by the -t timeout. */
99 if (userland_get_mutex(mtx
, timo
) < 0) {
100 fprintf(stderr
, "Mutex at %s:%lld timed out\n", path
, off
);
104 fprintf(stderr
, "Obtained mutex at %s:%lld\n", path
, off
);
/* Run the command while holding the mutex: child execs it, parent
 * waits for that specific pid.  (NOTE(review): av[1] here suggests
 * av was advanced past the options earlier -- that line is not
 * visible in this fragment.) */
105 if ((pid
= fork()) == 0) {
106 execvp(av
[1], av
+ 1);
108 } else if (pid
> 0) {
109 while (waitpid(pid
, NULL
, 0) != pid
)
112 fprintf(stderr
, "Unable to exec %s\n", av
[1]);
/* Done: release the mutex for other processes. */
114 userland_rel_mutex(mtx
);
/*
 * userland_get_mutex fragment: fast-path acquire.  Inspect the lock
 * word; if free, CAS the MTX_LOCKED bit in, otherwise CAS the
 * contested count up by one and fall into the contested slow path.
 * (The enclosing retry loop and the reads of 'v' are not visible in
 * this fragment.)
 */
120 userland_get_mutex(struct umtx
*mtx
, int timo
)
126 if ((v
& MTX_LOCKED
) == 0) {
128 * not locked, attempt to lock.
/* CAS v -> v | MTX_LOCKED; on success record the held mutex so the
 * SIGINT handler can release it. */
130 if (cmp_and_exg(&mtx
->lock
, v
, v
| MTX_LOCKED
) == 0) {
131 cleanup_mtx_held
= mtx
;
136 * Locked, bump the contested count and obtain the contested
/* CAS v -> v + 1: increment the waiter count in the low bits, and
 * record the contested mutex for cleanup before sleeping. */
139 if (cmp_and_exg(&mtx
->lock
, v
, v
+ 1) == 0) {
140 cleanup_mtx_contested
= mtx
;
141 return(userland_get_mutex_contested(mtx
, timo
));
/*
 * Contested-acquire fragment: our +1 is already in the lock word.
 * When the lock bit clears, try to take the lock and drop our waiter
 * count in a single CAS of (v - 1) | MTX_LOCKED; otherwise sleep on
 * the lock word and retry.  (The surrounding retry loop and some
 * lines are missing from this view.)
 */
148 userland_get_mutex_contested(struct umtx
*mtx
, int timo
)
154 assert(v
& ~MTX_LOCKED
); /* our contesting count still there */
155 if ((v
& MTX_LOCKED
) == 0) {
157 * not locked, attempt to remove our contested count and
158 * lock at the same time.
160 if (cmp_and_exg(&mtx
->lock
, v
, (v
- 1) | MTX_LOCKED
) == 0) {
161 cleanup_mtx_contested
= NULL
;
162 cleanup_mtx_held
= mtx
;
167 * Still locked, sleep and try again.
170 fprintf(stderr
, "waiting on mutex timeout=%d\n", timo
);
/* No timeout configured: block with no time limit (third arg 0 --
 * NOTE(review): confirm semantics against umtx_sleep(2)). */
172 umtx_sleep(&mtx
->lock
, v
, 0);
/* Timeout configured: sleep in 1000000-unit (presumably 1s) slices.
 * EAGAIN means the slice expired without a wakeup, so count it
 * against timo; when timo hits zero, back out our contested count
 * and fail the acquire. */
174 if (umtx_sleep(&mtx
->lock
, v
, 1000000) < 0) {
175 if (errno
== EAGAIN
&& --timo
== 0) {
176 cleanup_mtx_contested
= NULL
;
177 userland_rel_mutex_contested(mtx
);
/*
 * Release fragment.  v == MTX_LOCKED means no contested waiters:
 * CAS the whole word to 0.  Otherwise clear only the lock bit,
 * preserving the waiter count, and wake exactly one sleeper.
 * (The enclosing retry loop is not visible in this fragment.)
 */
187 userland_rel_mutex(struct umtx
*mtx
)
193 assert(v
& MTX_LOCKED
); /* we still have it locked */
194 if (v
== MTX_LOCKED
) {
196 * We hold an uncontested lock, try to set to an unlocked
199 if (cmp_and_exg(&mtx
->lock
, MTX_LOCKED
, 0) == 0) {
201 fprintf(stderr
, "releasing uncontested mutex\n");
206 * We hold a contested lock, unlock and wakeup exactly
207 * one sleeper. It is possible for this to race a new
208 * thread obtaining a lock, in which case any contested
209 * sleeper we wake up will simply go back to sleep.
/* Clear only the lock bit; the waiter count stays intact. */
211 if (cmp_and_exg(&mtx
->lock
, v
, v
& ~MTX_LOCKED
) == 0) {
/* Wake a single waiter; a racer may re-take the lock first, in
 * which case the woken sleeper simply sleeps again. */
212 umtx_wakeup(&mtx
->lock
, 1);
214 fprintf(stderr
, "releasing contested mutex\n");
/*
 * Back out our contested-count bump: CAS v -> v - 1.  The assert
 * checks a contest count is still present when the CAS has to be
 * retried.  (The retry loop and the re-read of 'v' are missing from
 * this fragment.)
 */
222 userland_rel_mutex_contested(struct umtx
*mtx
)
227 if (cmp_and_exg(&mtx
->lock
, v
, v
- 1) == 0)
230 assert(v
& ~MTX_LOCKED
);
/*
 * docleanup() body fragment (the function header is outside this
 * view; prototyped above as the SIGINT handler): undo whichever
 * lock state this process left in the shared lock word -- drop a
 * pending contested count, then release a held lock.
 */
238 if (cleanup_mtx_contested
)
239 userland_rel_mutex_contested(cleanup_mtx_contested
);
240 if (cleanup_mtx_held
)
241 userland_rel_mutex(cleanup_mtx_held
);
/*
 * i386 inline-asm fragment of cmp_and_exg (the enclosing function /
 * asm statement is not visible here).  Loads the three stack args --
 * pointer, expected old value, new value -- then LOCK CMPXCHG stores
 * %edx into (%ebx) iff (%ebx) == %eax.  The result/flag handling is
 * outside this fragment.  NOTE(review): %ebx is clobbered; confirm
 * the surrounding asm declares or saves it.
 */
248 " movl 4(%esp),%ebx\n"
249 " movl 8(%esp),%eax\n"
250 " movl 12(%esp),%edx\n"
251 " lock cmpxchgl %edx,(%ebx)\n"