/*
 * Copyright (C) 2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
9 #include <linux/miscdevice.h>
10 #include <linux/lock_dlm_plock.h>
11 #include <linux/poll.h>
16 static spinlock_t ops_lock
;
17 static struct list_head send_list
;
18 static struct list_head recv_list
;
19 static wait_queue_head_t send_wq
;
20 static wait_queue_head_t recv_wq
;
23 struct list_head list
;
25 struct gdlm_plock_info info
;
37 static inline void set_version(struct gdlm_plock_info
*info
)
39 info
->version
[0] = GDLM_PLOCK_VERSION_MAJOR
;
40 info
->version
[1] = GDLM_PLOCK_VERSION_MINOR
;
41 info
->version
[2] = GDLM_PLOCK_VERSION_PATCH
;
44 static int check_version(struct gdlm_plock_info
*info
)
46 if ((GDLM_PLOCK_VERSION_MAJOR
!= info
->version
[0]) ||
47 (GDLM_PLOCK_VERSION_MINOR
< info
->version
[1])) {
48 log_error("plock device version mismatch: "
49 "kernel (%u.%u.%u), user (%u.%u.%u)",
50 GDLM_PLOCK_VERSION_MAJOR
,
51 GDLM_PLOCK_VERSION_MINOR
,
52 GDLM_PLOCK_VERSION_PATCH
,
61 static void send_op(struct plock_op
*op
)
63 set_version(&op
->info
);
64 INIT_LIST_HEAD(&op
->list
);
66 list_add_tail(&op
->list
, &send_list
);
67 spin_unlock(&ops_lock
);
71 int gdlm_plock(void *lockspace
, struct lm_lockname
*name
,
72 struct file
*file
, int cmd
, struct file_lock
*fl
)
74 struct gdlm_ls
*ls
= lockspace
;
76 struct plock_xop
*xop
;
79 xop
= kzalloc(sizeof(*xop
), GFP_KERNEL
);
84 op
->info
.optype
= GDLM_PLOCK_OP_LOCK
;
85 op
->info
.pid
= fl
->fl_pid
;
86 op
->info
.ex
= (fl
->fl_type
== F_WRLCK
);
87 op
->info
.wait
= IS_SETLKW(cmd
);
88 op
->info
.fsid
= ls
->id
;
89 op
->info
.number
= name
->ln_number
;
90 op
->info
.start
= fl
->fl_start
;
91 op
->info
.end
= fl
->fl_end
;
92 op
->info
.owner
= (__u64
)(long) fl
->fl_owner
;
93 if (fl
->fl_lmops
&& fl
->fl_lmops
->fl_grant
) {
94 xop
->callback
= fl
->fl_lmops
->fl_grant
;
95 locks_init_lock(&xop
->flc
);
96 locks_copy_lock(&xop
->flc
, fl
);
100 xop
->callback
= NULL
;
104 if (xop
->callback
== NULL
)
105 wait_event(recv_wq
, (op
->done
!= 0));
109 spin_lock(&ops_lock
);
110 if (!list_empty(&op
->list
)) {
111 printk(KERN_INFO
"plock op on list\n");
114 spin_unlock(&ops_lock
);
119 if (posix_lock_file_wait(file
, fl
) < 0)
120 log_error("gdlm_plock: vfs lock error %x,%llx",
122 (unsigned long long)name
->ln_number
);
129 /* Returns failure iff a succesful lock operation should be canceled */
130 static int gdlm_plock_callback(struct plock_op
*op
)
133 struct file_lock
*fl
;
134 struct file_lock
*flc
;
135 int (*notify
)(void *, void *, int) = NULL
;
136 struct plock_xop
*xop
= (struct plock_xop
*)op
;
139 spin_lock(&ops_lock
);
140 if (!list_empty(&op
->list
)) {
141 printk(KERN_INFO
"plock op on list\n");
144 spin_unlock(&ops_lock
);
146 /* check if the following 2 are still valid or make a copy */
150 notify
= xop
->callback
;
153 notify(flc
, NULL
, op
->info
.rv
);
157 /* got fs lock; bookkeep locally as well: */
158 flc
->fl_flags
&= ~FL_SLEEP
;
159 if (posix_lock_file(file
, flc
, NULL
)) {
161 * This can only happen in the case of kmalloc() failure.
162 * The filesystem's own lock is the authoritative lock,
163 * so a failure to get the lock locally is not a disaster.
164 * As long as GFS cannot reliably cancel locks (especially
165 * in a low-memory situation), we're better off ignoring
166 * this failure than trying to recover.
168 log_error("gdlm_plock: vfs lock error file %p fl %p",
172 rv
= notify(flc
, NULL
, 0);
174 /* XXX: We need to cancel the fs lock here: */
175 printk("gfs2 lock granted after lock request failed;"
176 " dangling lock!\n");
185 int gdlm_punlock(void *lockspace
, struct lm_lockname
*name
,
186 struct file
*file
, struct file_lock
*fl
)
188 struct gdlm_ls
*ls
= lockspace
;
192 op
= kzalloc(sizeof(*op
), GFP_KERNEL
);
196 if (posix_lock_file_wait(file
, fl
) < 0)
197 log_error("gdlm_punlock: vfs unlock error %x,%llx",
198 name
->ln_type
, (unsigned long long)name
->ln_number
);
200 op
->info
.optype
= GDLM_PLOCK_OP_UNLOCK
;
201 op
->info
.pid
= fl
->fl_pid
;
202 op
->info
.fsid
= ls
->id
;
203 op
->info
.number
= name
->ln_number
;
204 op
->info
.start
= fl
->fl_start
;
205 op
->info
.end
= fl
->fl_end
;
206 op
->info
.owner
= (__u64
)(long) fl
->fl_owner
;
209 wait_event(recv_wq
, (op
->done
!= 0));
211 spin_lock(&ops_lock
);
212 if (!list_empty(&op
->list
)) {
213 printk(KERN_INFO
"punlock op on list\n");
216 spin_unlock(&ops_lock
);
227 int gdlm_plock_get(void *lockspace
, struct lm_lockname
*name
,
228 struct file
*file
, struct file_lock
*fl
)
230 struct gdlm_ls
*ls
= lockspace
;
234 op
= kzalloc(sizeof(*op
), GFP_KERNEL
);
238 op
->info
.optype
= GDLM_PLOCK_OP_GET
;
239 op
->info
.pid
= fl
->fl_pid
;
240 op
->info
.ex
= (fl
->fl_type
== F_WRLCK
);
241 op
->info
.fsid
= ls
->id
;
242 op
->info
.number
= name
->ln_number
;
243 op
->info
.start
= fl
->fl_start
;
244 op
->info
.end
= fl
->fl_end
;
248 wait_event(recv_wq
, (op
->done
!= 0));
250 spin_lock(&ops_lock
);
251 if (!list_empty(&op
->list
)) {
252 printk(KERN_INFO
"plock_get op on list\n");
255 spin_unlock(&ops_lock
);
259 fl
->fl_type
= F_UNLCK
;
262 else if (rv
== 0 && op
->info
.pid
!= fl
->fl_pid
) {
263 fl
->fl_type
= (op
->info
.ex
) ? F_WRLCK
: F_RDLCK
;
264 fl
->fl_pid
= op
->info
.pid
;
265 fl
->fl_start
= op
->info
.start
;
266 fl
->fl_end
= op
->info
.end
;
273 /* a read copies out one plock request from the send list */
274 static ssize_t
dev_read(struct file
*file
, char __user
*u
, size_t count
,
277 struct gdlm_plock_info info
;
278 struct plock_op
*op
= NULL
;
280 if (count
< sizeof(info
))
283 spin_lock(&ops_lock
);
284 if (!list_empty(&send_list
)) {
285 op
= list_entry(send_list
.next
, struct plock_op
, list
);
286 list_move(&op
->list
, &recv_list
);
287 memcpy(&info
, &op
->info
, sizeof(info
));
289 spin_unlock(&ops_lock
);
294 if (copy_to_user(u
, &info
, sizeof(info
)))
299 /* a write copies in one plock result that should match a plock_op
301 static ssize_t
dev_write(struct file
*file
, const char __user
*u
, size_t count
,
304 struct gdlm_plock_info info
;
308 if (count
!= sizeof(info
))
311 if (copy_from_user(&info
, u
, sizeof(info
)))
314 if (check_version(&info
))
317 spin_lock(&ops_lock
);
318 list_for_each_entry(op
, &recv_list
, list
) {
319 if (op
->info
.fsid
== info
.fsid
&& op
->info
.number
== info
.number
&&
320 op
->info
.owner
== info
.owner
) {
321 list_del_init(&op
->list
);
324 memcpy(&op
->info
, &info
, sizeof(info
));
328 spin_unlock(&ops_lock
);
331 struct plock_xop
*xop
;
332 xop
= (struct plock_xop
*)op
;
334 count
= gdlm_plock_callback(op
);
338 printk(KERN_INFO
"gdlm dev_write no op %x %llx\n", info
.fsid
,
339 (unsigned long long)info
.number
);
343 static unsigned int dev_poll(struct file
*file
, poll_table
*wait
)
345 poll_wait(file
, &send_wq
, wait
);
347 spin_lock(&ops_lock
);
348 if (!list_empty(&send_list
)) {
349 spin_unlock(&ops_lock
);
350 return POLLIN
| POLLRDNORM
;
352 spin_unlock(&ops_lock
);
356 static const struct file_operations dev_fops
= {
363 static struct miscdevice plock_dev_misc
= {
364 .minor
= MISC_DYNAMIC_MINOR
,
365 .name
= GDLM_PLOCK_MISC_NAME
,
369 int gdlm_plock_init(void)
373 spin_lock_init(&ops_lock
);
374 INIT_LIST_HEAD(&send_list
);
375 INIT_LIST_HEAD(&recv_list
);
376 init_waitqueue_head(&send_wq
);
377 init_waitqueue_head(&recv_wq
);
379 rv
= misc_register(&plock_dev_misc
);
381 printk(KERN_INFO
"gdlm_plock_init: misc_register failed %d",
386 void gdlm_plock_exit(void)
388 if (misc_deregister(&plock_dev_misc
) < 0)
389 printk(KERN_INFO
"gdlm_plock_exit: misc_deregister failed");