fs/gfs2/locking/dlm/plock.c
/*
 * Copyright (C) 2005 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
#include <linux/miscdevice.h>
#include <linux/lock_dlm_plock.h>
#include <linux/poll.h>

#include "lock_dlm.h"
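/*
 * Posix lock (plock) requests are not resolved in the kernel; each request
 * is queued on send_list, handed to the cluster's userspace plock daemon
 * through the misc device below (dev_read), and completed when the daemon
 * writes the result back (dev_write) for the matching op on recv_list.
 */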
static spinlock_t ops_lock;
static struct list_head send_list;
static struct list_head recv_list;
static wait_queue_head_t send_wq;
static wait_queue_head_t recv_wq;
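/*
 * A plock_op carries one request/result exchanged with userspace.  For
 * requests arriving from lockd (NFS), the larger plock_xop also saves the
 * grant callback and lock state needed to finish the request
 * asynchronously in gdlm_plock_callback().
 */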
struct plock_op {
	struct list_head list;
	int done;
	struct gdlm_plock_info info;
};

struct plock_xop {
	struct plock_op xop;
	void *callback;
	void *fl;
	void *file;
	struct file_lock flc;
};
static inline void set_version(struct gdlm_plock_info *info)
{
	info->version[0] = GDLM_PLOCK_VERSION_MAJOR;
	info->version[1] = GDLM_PLOCK_VERSION_MINOR;
	info->version[2] = GDLM_PLOCK_VERSION_PATCH;
}
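/* userspace must report the same major version as the kernel and a minor
   version no newer than the kernel's */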
static int check_version(struct gdlm_plock_info *info)
{
	if ((GDLM_PLOCK_VERSION_MAJOR != info->version[0]) ||
	    (GDLM_PLOCK_VERSION_MINOR < info->version[1])) {
		log_error("plock device version mismatch: "
			  "kernel (%u.%u.%u), user (%u.%u.%u)",
			  GDLM_PLOCK_VERSION_MAJOR,
			  GDLM_PLOCK_VERSION_MINOR,
			  GDLM_PLOCK_VERSION_PATCH,
			  info->version[0],
			  info->version[1],
			  info->version[2]);
		return -EINVAL;
	}
	return 0;
}
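/* queue an op for userspace and wake any reader polling the misc device */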
static void send_op(struct plock_op *op)
{
	set_version(&op->info);
	INIT_LIST_HEAD(&op->list);
	spin_lock(&ops_lock);
	list_add_tail(&op->list, &send_list);
	spin_unlock(&ops_lock);
	wake_up(&send_wq);
}
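/*
 * Request a posix lock from the userspace daemon.  Requests that carry an
 * fl_grant callback (lockd acting for an NFS client) return -EINPROGRESS
 * and are completed later by gdlm_plock_callback(); all others block here
 * until userspace writes back a result.
 */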
int gdlm_plock(void *lockspace, struct lm_lockname *name,
	       struct file *file, int cmd, struct file_lock *fl)
{
	struct gdlm_ls *ls = lockspace;
	struct plock_op *op;
	struct plock_xop *xop;
	int rv;

	xop = kzalloc(sizeof(*xop), GFP_KERNEL);
	if (!xop)
		return -ENOMEM;

	op = &xop->xop;
	op->info.optype = GDLM_PLOCK_OP_LOCK;
	op->info.pid = fl->fl_pid;
	op->info.ex = (fl->fl_type == F_WRLCK);
	op->info.wait = IS_SETLKW(cmd);
	op->info.fsid = ls->id;
	op->info.number = name->ln_number;
	op->info.start = fl->fl_start;
	op->info.end = fl->fl_end;
	if (fl->fl_lmops && fl->fl_lmops->fl_grant) {
		/* fl_owner is lockd which doesn't distinguish
		   processes on the nfs client */
		op->info.owner = (__u64) fl->fl_pid;
		xop->callback = fl->fl_lmops->fl_grant;
		locks_init_lock(&xop->flc);
		locks_copy_lock(&xop->flc, fl);
		xop->fl = fl;
		xop->file = file;
	} else {
		op->info.owner = (__u64)(long) fl->fl_owner;
		xop->callback = NULL;
	}

	send_op(op);

	if (xop->callback == NULL)
		wait_event(recv_wq, (op->done != 0));
	else
		return -EINPROGRESS;

	spin_lock(&ops_lock);
	if (!list_empty(&op->list)) {
		printk(KERN_INFO "plock op on list\n");
		list_del(&op->list);
	}
	spin_unlock(&ops_lock);

	rv = op->info.rv;

	if (!rv) {
		if (posix_lock_file_wait(file, fl) < 0)
			log_error("gdlm_plock: vfs lock error %x,%llx",
				  name->ln_type,
				  (unsigned long long)name->ln_number);
	}

	kfree(xop);
	return rv;
}
/* Returns failure iff a successful lock operation should be canceled */
static int gdlm_plock_callback(struct plock_op *op)
{
	struct file *file;
	struct file_lock *fl;
	struct file_lock *flc;
	int (*notify)(void *, void *, int) = NULL;
	struct plock_xop *xop = (struct plock_xop *)op;
	int rv = 0;

	spin_lock(&ops_lock);
	if (!list_empty(&op->list)) {
		printk(KERN_INFO "plock op on list\n");
		list_del(&op->list);
	}
	spin_unlock(&ops_lock);

	/* check if the following 2 are still valid or make a copy */
	file = xop->file;
	flc = &xop->flc;
	fl = xop->fl;
	notify = xop->callback;

	if (op->info.rv) {
		notify(flc, NULL, op->info.rv);
		goto out;
	}

	/* got fs lock; bookkeep locally as well: */
	flc->fl_flags &= ~FL_SLEEP;
	if (posix_lock_file(file, flc, NULL)) {
		/*
		 * This can only happen in the case of kmalloc() failure.
		 * The filesystem's own lock is the authoritative lock,
		 * so a failure to get the lock locally is not a disaster.
		 * As long as GFS cannot reliably cancel locks (especially
		 * in a low-memory situation), we're better off ignoring
		 * this failure than trying to recover.
		 */
		log_error("gdlm_plock: vfs lock error file %p fl %p",
			  file, fl);
	}

	rv = notify(flc, NULL, 0);
	if (rv) {
		/* XXX: We need to cancel the fs lock here: */
		printk("gfs2 lock granted after lock request failed;"
		       " dangling lock!\n");
		goto out;
	}

out:
	kfree(xop);
	return rv;
}
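/*
 * Release a posix lock: drop the local VFS lock first, then send the
 * unlock to the userspace daemon and wait for its reply.
 */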
int gdlm_punlock(void *lockspace, struct lm_lockname *name,
		 struct file *file, struct file_lock *fl)
{
	struct gdlm_ls *ls = lockspace;
	struct plock_op *op;
	int rv;

	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op)
		return -ENOMEM;

	if (posix_lock_file_wait(file, fl) < 0)
		log_error("gdlm_punlock: vfs unlock error %x,%llx",
			  name->ln_type, (unsigned long long)name->ln_number);

	op->info.optype = GDLM_PLOCK_OP_UNLOCK;
	op->info.pid = fl->fl_pid;
	op->info.fsid = ls->id;
	op->info.number = name->ln_number;
	op->info.start = fl->fl_start;
	op->info.end = fl->fl_end;
	if (fl->fl_lmops && fl->fl_lmops->fl_grant)
		op->info.owner = (__u64) fl->fl_pid;
	else
		op->info.owner = (__u64)(long) fl->fl_owner;

	send_op(op);
	wait_event(recv_wq, (op->done != 0));

	spin_lock(&ops_lock);
	if (!list_empty(&op->list)) {
		printk(KERN_INFO "punlock op on list\n");
		list_del(&op->list);
	}
	spin_unlock(&ops_lock);

	rv = op->info.rv;

	if (rv == -ENOENT)
		rv = 0;

	kfree(op);
	return rv;
}
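/*
 * Test for a conflicting lock (F_GETLK): ask userspace whether anything
 * conflicts and, if so, copy the conflicting lock's details into *fl.
 */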
int gdlm_plock_get(void *lockspace, struct lm_lockname *name,
		   struct file *file, struct file_lock *fl)
{
	struct gdlm_ls *ls = lockspace;
	struct plock_op *op;
	int rv;

	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op)
		return -ENOMEM;

	op->info.optype = GDLM_PLOCK_OP_GET;
	op->info.pid = fl->fl_pid;
	op->info.ex = (fl->fl_type == F_WRLCK);
	op->info.fsid = ls->id;
	op->info.number = name->ln_number;
	op->info.start = fl->fl_start;
	op->info.end = fl->fl_end;
	if (fl->fl_lmops && fl->fl_lmops->fl_grant)
		op->info.owner = (__u64) fl->fl_pid;
	else
		op->info.owner = (__u64)(long) fl->fl_owner;

	send_op(op);
	wait_event(recv_wq, (op->done != 0));

	spin_lock(&ops_lock);
	if (!list_empty(&op->list)) {
		printk(KERN_INFO "plock_get op on list\n");
		list_del(&op->list);
	}
	spin_unlock(&ops_lock);

	/* info.rv from userspace is 1 for conflict, 0 for no-conflict,
	   -ENOENT if there are no locks on the file */

	rv = op->info.rv;

	fl->fl_type = F_UNLCK;
	if (rv == -ENOENT)
		rv = 0;
	else if (rv > 0) {
		fl->fl_type = (op->info.ex) ? F_WRLCK : F_RDLCK;
		fl->fl_pid = op->info.pid;
		fl->fl_start = op->info.start;
		fl->fl_end = op->info.end;
		rv = 0;
	}

	kfree(op);
	return rv;
}
/* a read copies out one plock request from the send list */
static ssize_t dev_read(struct file *file, char __user *u, size_t count,
			loff_t *ppos)
{
	struct gdlm_plock_info info;
	struct plock_op *op = NULL;

	if (count < sizeof(info))
		return -EINVAL;

	spin_lock(&ops_lock);
	if (!list_empty(&send_list)) {
		op = list_entry(send_list.next, struct plock_op, list);
		list_move(&op->list, &recv_list);
		memcpy(&info, &op->info, sizeof(info));
	}
	spin_unlock(&ops_lock);

	if (!op)
		return -EAGAIN;

	if (copy_to_user(u, &info, sizeof(info)))
		return -EFAULT;
	return sizeof(info);
}
/* a write copies in one plock result that should match a plock_op
   on the recv list */
static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
			 loff_t *ppos)
{
	struct gdlm_plock_info info;
	struct plock_op *op;
	int found = 0;

	if (count != sizeof(info))
		return -EINVAL;

	if (copy_from_user(&info, u, sizeof(info)))
		return -EFAULT;

	if (check_version(&info))
		return -EINVAL;

	spin_lock(&ops_lock);
	list_for_each_entry(op, &recv_list, list) {
		if (op->info.fsid == info.fsid &&
		    op->info.number == info.number &&
		    op->info.owner == info.owner) {
			list_del_init(&op->list);
			found = 1;
			op->done = 1;
			memcpy(&op->info, &info, sizeof(info));
			break;
		}
	}
	spin_unlock(&ops_lock);

	if (found) {
		struct plock_xop *xop;
		xop = (struct plock_xop *)op;
		if (xop->callback)
			count = gdlm_plock_callback(op);
		else
			wake_up(&recv_wq);
	} else
		printk(KERN_INFO "gdlm dev_write no op %x %llx\n", info.fsid,
		       (unsigned long long)info.number);
	return count;
}
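/* poll reports readable as soon as at least one request is waiting on
   send_list */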
static unsigned int dev_poll(struct file *file, poll_table *wait)
{
	unsigned int mask = 0;

	poll_wait(file, &send_wq, wait);

	spin_lock(&ops_lock);
	if (!list_empty(&send_list))
		mask = POLLIN | POLLRDNORM;
	spin_unlock(&ops_lock);

	return mask;
}
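/* misc device (GDLM_PLOCK_MISC_NAME) through which the cluster's
   userspace plock daemon reads requests and writes back results */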
static const struct file_operations dev_fops = {
	.read  = dev_read,
	.write = dev_write,
	.poll  = dev_poll,
	.owner = THIS_MODULE
};

static struct miscdevice plock_dev_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = GDLM_PLOCK_MISC_NAME,
	.fops = &dev_fops
};
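/* set up the request/result queues and register the misc device */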
int gdlm_plock_init(void)
{
	int rv;

	spin_lock_init(&ops_lock);
	INIT_LIST_HEAD(&send_list);
	INIT_LIST_HEAD(&recv_list);
	init_waitqueue_head(&send_wq);
	init_waitqueue_head(&recv_wq);

	rv = misc_register(&plock_dev_misc);
	if (rv)
		printk(KERN_INFO "gdlm_plock_init: misc_register failed %d",
		       rv);
	return rv;
}
void gdlm_plock_exit(void)
{
	if (misc_deregister(&plock_dev_misc) < 0)
		printk(KERN_INFO "gdlm_plock_exit: misc_deregister failed");
}